diff --git a/CMake/BuildConfigurations/InteractiveSegmentationConfiguration.cmake b/CMake/BuildConfigurations/InteractiveSegmentationConfiguration.cmake new file mode 100644 index 0000000..82bc210 --- /dev/null +++ b/CMake/BuildConfigurations/InteractiveSegmentationConfiguration.cmake @@ -0,0 +1,24 @@ +### Inspired by https://github.com/MIC-DKFZ/MITK-Diffusion/blob/master/CMake/BuildConfigurations/DiffusionAll.cmake +### Basically when selected, the following will turn on automatically + +message(STATUS "Configuring Interactive Segmentation default optional dependencies") + +# Enable non-optional external dependencies +set(MITK_USE_OpenCV ON CACHE BOOL "" FORCE) + +# Enable/disable non-superbuild apps +set(MITK_BUILD_APP_Workbench ON CACHE BOOL "Build the MITK Workbench executable" FORCE) + +# Enable/disable non-superbuild plugins +set(MITK_BUILD_org.mitk.gui.qt.segmentation ON CACHE BOOL "Build the org.mitk.gui.qt.segmentation Plugin." FORCE) +set(MITK_BUILD_org.mitk.gui.qt.multilabelsegmentation ON CACHE BOOL "Build the org.mitk.gui.qt.multilabelsegmentation Plugin." FORCE) + +# Activate in-application help generation +set(MITK_DOXYGEN_GENERATE_QCH_FILES ON CACHE BOOL "Use doxygen to generate Qt compressed help files for MITK docs" FORCE) +set(BLUEBERRY_USE_QT_HELP ON CACHE BOOL "Enable support for integrating bundle documentation into Qt Help" FORCE) + +# Disable console window +set(MITK_SHOW_CONSOLE_WINDOW OFF CACHE BOOL "Use this to enable or disable the console window when starting MITK GUI Applications" FORCE) + +# Enable exporting of compile commands (useful for intellisense in vscode etc) +set(CMAKE_EXPORT_COMPILE_COMMANDS ON CACHE BOOL "Enable/Disable output of compile commands during generation." FORCE) diff --git a/CMake/Findyaml-cpp.cmake b/CMake/Findyaml-cpp.cmake new file mode 100644 index 0000000..9b890ec --- /dev/null +++ b/CMake/Findyaml-cpp.cmake @@ -0,0 +1 @@ +# Nothing here \ No newline at end of file diff --git a/CMake/PackageDepends/MITK_yaml-cpp_Config.cmake b/CMake/PackageDepends/MITK_yaml-cpp_Config.cmake new file mode 100644 index 0000000..53d26f0 --- /dev/null +++ b/CMake/PackageDepends/MITK_yaml-cpp_Config.cmake @@ -0,0 +1,4 @@ +list(APPEND ALL_LIBRARIES ${YAML_CPP_LIBRARIES}) +if(YAML_CPP_INCLUDE_DIR) + list(APPEND ALL_INCLUDE_DIRECTORIES ${YAML_CPP_INCLUDE_DIR}) +endif() \ No newline at end of file diff --git a/CMakeExternals/ExternalProjectList.cmake b/CMakeExternals/ExternalProjectList.cmake new file mode 100644 index 0000000..bb3c100 --- /dev/null +++ b/CMakeExternals/ExternalProjectList.cmake @@ -0,0 +1 @@ +mitkFunctionAddExternalProject(NAME yaml-cpp ON DOC "Use YAML Cpp Library") \ No newline at end of file diff --git a/CMakeExternals/yaml-cpp.cmake b/CMakeExternals/yaml-cpp.cmake new file mode 100644 index 0000000..a8781fe --- /dev/null +++ b/CMakeExternals/yaml-cpp.cmake @@ -0,0 +1,29 @@ +set(proj yaml-cpp) +set(proj_DEPENDENCIES "") + +if(MITK_USE_${proj}) + set(${proj}_DEPENDS ${proj}) + + if(DEFINED ${proj}_DIR AND NOT EXISTS ${${proj}_DIR}) + message(FATAL_ERROR "${proj}_DIR variable is defined but corresponds to non-existing directory!") + endif() + + if(NOT DEFINED ${proj}_DIR) + ExternalProject_Add(${proj} + GIT_REPOSITORY https://github.com/jbeder/yaml-cpp.git + GIT_TAG yaml-cpp-0.6.3 + CMAKE_ARGS ${ep_common_args} + CMAKE_CACHE_ARGS ${ep_common_cache_args} + -DGSL_CXX_STANDARD:STRING=${MITK_CXX_STANDARD} + -DGSL_TEST:BOOL=OFF + -DYAML_BUILD_SHARED_LIBS:BOOL=ON + -DYAML_CPP_BUILD_TESTS:BOOL=OFF + CMAKE_CACHE_DEFAULT_ARGS 
${ep_common_cache_default_args} + DEPENDS ${proj_DEPENDENCIES} + ) + + set(${proj}_DIR ${ep_prefix}) + else() + mitkMacroEmptyExternalProject(${proj} "${proj_DEPENDENCIES}") + endif() +endif() diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..cec5091 --- /dev/null +++ b/LICENSE @@ -0,0 +1,12 @@ +By installing the Cancer and Phenomics Toolkit (CaPTk), the user agrees to the following licenses, which pertain to the code and its different constituents: + +1. SBIA Software License - https://www.med.upenn.edu/sbia/software-agreement.html +2. SBIA Non-Commercial Software License (applies to PHI Estimator) - https://www.med.upenn.edu/sbia/software-agreement-non-commercial.html +3. Insight Toolkit License - https://cmake.org/Wiki/ITK/License_Information +4. Visualization Toolkit License - http://www.vtk.org/licensing/ +5. Qt Open Source License - https://doc.qt.io/qt-5.10/qtcore-index.html#licenses-and-attributions; our copy can be found at https://github.com/CBICA/qt +6. CC Attribution-ShareAlike for SRI24 data - https://creativecommons.org/licenses/by-sa/3.0/us/legalcode +7. MRIcroGL License - BSD +8. YAML-CPP - MIT License and can be found at https://opensource.org/licenses/MIT +9. Eigen - MPL2 License and can be found at https://www.mozilla.org/en-US/MPL/2.0/ +10. MITK - BSD-style license http://www.mitk.org/wiki/License \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/CMakeLists.txt b/Modules/CaPTkInteractiveSegmentation/CMakeLists.txt new file mode 100644 index 0000000..963f318 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/CMakeLists.txt @@ -0,0 +1,7 @@ +mitk_create_module( + INCLUDE_DIRS PUBLIC include third_party/jsoncpp/include + PACKAGE_DEPENDS ITK Qt5|Core+Widgets PRIVATE yaml-cpp + DEPENDS PUBLIC MitkCore MitkMultilabel #MitkCaPTkCommon +) + +add_subdirectory(cmdapps) \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/README.md b/Modules/CaPTkInteractiveSegmentation/README.md new file mode 100644 index 0000000..1dafa03 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/README.md @@ -0,0 +1,7 @@ +## CaPTkInteractiveSegmentation + +Contains the algorithm part of the interactive segmentation. + +It uses brief, user-drawn seeds on co-registered input images to produce a segmentation. + +For UI look at the respective plugin. 
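
The command-line app added later in this patch shows the full pipeline (load with MITK, cast to ITK, run, save). As a compact orientation, a minimal sketch of driving the underlying GeodesicTrainingSegmentation::Coordinator directly from file paths could look like the block below. The `<float, 3>` template arguments and the input file names are assumptions for illustration; the path-taking setter overloads and the Result fields are the ones declared in GeodesicTrainingSegmentation.h.

```cpp
#include "GeodesicTrainingSegmentation.h"

#include <iostream>
#include <memory>
#include <string>
#include <vector>

int main()
{
    // Hypothetical inputs: co-registered modalities plus a seeds image
    // containing the brief user-drawn labels (one label per class).
    std::vector<std::string> imagePaths = { "flair.nii.gz", "t1ce.nii.gz" };
    std::string seedsPath = "seeds.nii.gz";
    std::string outputDir = "output";

    // Pixel type and dimension are assumed here (float, 3D), matching the
    // typedefs InputImageType/LabelsImageType in the Coordinator class.
    std::unique_ptr<GeodesicTrainingSegmentation::Coordinator<float, 3>> gts(
        new GeodesicTrainingSegmentation::Coordinator<float, 3>());

    gts->SetInputImages(imagePaths); // overload that reads the images from disk
    gts->SetLabels(seedsPath);       // seeds/labels image
    gts->SetOutputPath(outputDir);   // created if it does not exist
    gts->SetVerbose(true);

    auto result = gts->Execute();    // returns a shared_ptr<Result>
    if (!result->ok)
    {
        std::cerr << result->errorMessage << "\n";
        return 1;
    }
    // result->labelsImage holds the predicted segmentation (itk::Image<int, 3>).
    return 0;
}
```
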
\ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/cmdapps/CMakeLists.txt b/Modules/CaPTkInteractiveSegmentation/cmdapps/CMakeLists.txt new file mode 100644 index 0000000..43db009 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/cmdapps/CMakeLists.txt @@ -0,0 +1,10 @@ +option(BUILD_CaPTkInteractiveSegmentationCmdApp "Build command-line app for CaPTkInteractiveSegmentation" OFF) + +if(BUILD_CaPTkInteractiveSegmentationCmdApp) + mitkFunctionCreateCommandLineApp( + NAME CaPTkInteractiveSegmentation + CPP_FILES "${SRC_FILES}" CaPTkInteractiveSegmentation.cpp + PACKAGE_DEPENDS ITK OpenCV Qt5|Core+WebEngineWidgets + DEPENDS MitkCaPTkInteractiveSegmentation + ) +endif() diff --git a/Modules/CaPTkInteractiveSegmentation/cmdapps/CaPTkInteractiveSegmentation.cpp b/Modules/CaPTkInteractiveSegmentation/cmdapps/CaPTkInteractiveSegmentation.cpp new file mode 100644 index 0000000..ac1c098 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/cmdapps/CaPTkInteractiveSegmentation.cpp @@ -0,0 +1,245 @@ +#include +#include +#include +#include +#include + +#include +#include + +// #include + +#include +#include +#include +#include +#include + +/**Splits a string into a list using a delimiter*/ +std::vector +split(const std::string &s, char delim) { + std::stringstream ss(s); + std::string item; + std::vector elems; + while (std::getline(ss, item, delim)) { + elems.push_back(std::move(item)); + } + return elems; +} + +/** \brief command-line app for batch processing of images + * + * This command-line app takes a task and and a cohort and runs the algorithm on everything. + */ +int main(int argc, char* argv[]) +{ + mitkCommandLineParser parser; + + /**** Set general information about the command-line app ****/ + + parser.setCategory("CaPTk Cmd App Category"); + parser.setTitle("CaPTk Interactive Segmentation Cmd App"); + parser.setContributor("CBICA"); + parser.setDescription( + "This command-line app takes a task and cohort and runs the interactive segmentation algorithm on everything."); + + // How should arguments be prefixed + parser.setArgumentPrefix("--", "-"); + + /**** Add arguments. Unless specified otherwise, each argument is optional. + See mitkCommandLineParser::addArgument() for more information. ****/ + + parser.addArgument( + "images", + "i", + mitkCommandLineParser::String, + "Images", + "Paths to the input images, separated by comma", + us::Any(), + false); + + parser.addArgument( + "labels", + "l", + mitkCommandLineParser::String, + "Labels Image", + "Path to the input seeds image.", + us::Any(), + false); + + parser.addArgument( + "priorimages", + "p", + mitkCommandLineParser::String, + "Prior Images", + "These are also input images, but are generated by a deep learning algorithm", + us::Any(), + true); + + parser.addArgument( + "outputdir", + "o", + mitkCommandLineParser::String, + "Output Directory", + "Where to generate the output", + us::Any(), + false); + + // // Add arguments. Unless specified otherwise, each argument is optional. + // // See mitkCommandLineParser::addArgument() for more information. + // parser.addArgument( + // "task", + // "t", + // mitkCommandLineParser::String, + // "Task", + // "JSON file that contains information on how to run this execution.", + // us::Any(), + // false); + // parser.addArgument( + // "cohort", + // "c", + // mitkCommandLineParser::String, + // "Cohort", + // "JSON file that contains information on how to run this execution.", + // us::Any(), + // false); + + /**** Parse arguments. 
This method returns a mapping of long argument names to + their values. ****/ + + auto parsedArgs = parser.parseArguments(argc, argv); + + if (parsedArgs.empty()) + return EXIT_FAILURE; // Just exit, usage information was already printed. + + if (parsedArgs["task"].Empty() || parsedArgs["cohort"].Empty()) + { + MITK_INFO << parser.helpText(); + return EXIT_FAILURE; + } + + // // Parse, cast and set required arguments + // auto task = us::any_cast(parsedArgs["task"]); + // auto cohort = us::any_cast(parsedArgs["cohort"]); + + auto imagesPaths = us::any_cast(parsedArgs["images"]); + auto labelsPath = us::any_cast(parsedArgs["labels"]); + auto outputDir = us::any_cast(parsedArgs["outputdir"]); + + /**** Default values for optional arguments ****/ + + std::string priorImagesPaths = ""; + // // Parse, cast and set optional arguments + if (parsedArgs.end() != parsedArgs.find("priorimages")) + { + priorImagesPaths = us::any_cast(parsedArgs["priorimages"]); + } + + std::vector imagesPathsVector = split(imagesPaths, ','); + std::vector priorImagesPathsVector = split(priorImagesPaths, ','); + + /**** Run ****/ + + try + { + std::vector images; + std::vector priorImages; + mitk::LabelSetImage::Pointer seeds; + + /**** Read input ****/ + + for (std::string& imagePath : imagesPathsVector) + { + auto image = mitk::IOUtil::Load(imagePath); + images.push_back(image); + } + + for (auto& priorImagePath : priorImagesPathsVector) + { + auto priorImage = mitk::IOUtil::Load(priorImagePath); + priorImages.push_back(priorImage); + } + + seeds = mitk::IOUtil::Load(labelsPath); + + // auto algorithm = new CaPTkInteractiveSegmentation(nullptr); + // algorithm->Run(task, cohort); + + if (images[0]->GetDimension() == 3) + { + // [ 3D ] + + /**** Convert to itk ****/ + + std::vector::Pointer> imagesItk; + std::vector::Pointer> priorImagesItk; + typename itk::Image::Pointer seedsItk; + + for (auto& image : images) + { + typename itk::Image::Pointer imageItk; + mitk::CastToItkImage(image, imageItk); + imagesItk.push_back(imageItk); + } + + for (auto& image : priorImages) + { + typename itk::Image::Pointer imageItk; + mitk::CastToItkImage(image, imageItk); + priorImagesItk.push_back(imageItk); + } + + mitk::CastToItkImage(seeds, seedsItk); + + std::unique_ptr> geodesicTraining( + new GeodesicTrainingSegmentation::Coordinator() + ); + geodesicTraining->SetInputImages(imagesItk); + geodesicTraining->SetExtraInputImagesNotAGDable(priorImagesItk); + geodesicTraining->SetLabels(seedsItk); + geodesicTraining->SetOutputPath(outputDir); + // geodesicTraining->SetNumberOfThreads(16); + // geodesicTraining->SaveOnlyNormalSegmentation(true, "segmentation"); + geodesicTraining->SetVerbose(true); + + /**** Run algorithm ****/ + + auto result = geodesicTraining->Execute(); + + if (result->ok) + { + mitk::Image::Pointer segmNormal; + mitk::CastToMitkImage(result->labelsImage, segmNormal); + + mitk::LabelSetImage::Pointer segm = mitk::LabelSetImage::New(); + + segm->InitializeByLabeledImage(segmNormal); + + mitk::IOUtil::Save(segm, outputDir + std::string("/segmentation.nii.gz")); + } + else { + std::cout << "Algorithm finished with internal error\n"; + return EXIT_FAILURE; + } + } + else + { + // [ 2D ] + + std::cout << "2D not supported yet\n"; + return EXIT_FAILURE; + } + + return EXIT_SUCCESS; + } + catch (const std::exception &e) + { + MITK_ERROR << e.what(); + return EXIT_FAILURE; + } + catch (...) 
+ { + MITK_ERROR << "Unexpected error!"; + return EXIT_FAILURE; + } +} diff --git a/Modules/CaPTkInteractiveSegmentation/files.cmake b/Modules/CaPTkInteractiveSegmentation/files.cmake new file mode 100644 index 0000000..47ec0cc --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/files.cmake @@ -0,0 +1,67 @@ +### Fetch the jsoncpp source files (and make them be relative to src/) +file(GLOB_RECURSE YAMLCPP_FILES RELATIVE + "${CMAKE_CURRENT_SOURCE_DIR}/src" "${CMAKE_CURRENT_SOURCE_DIR}/third_party/yaml-cpp/*" +) + +set(CPP_FILES + CaPTkInteractiveSegmentation.cpp + CaPTkInteractiveSegmentationAdapter.cpp + CaPTkInteractiveSegmentationQtPart.cpp + + AdaptiveGeodesicDistance.cpp + ConfigParserRF.cpp + ConvertionsOpenCV.cpp + ConvertionsYAML.cpp + GeodesicTrainingSegmentation.cpp + OperationsSvmGTS.cpp + RandomForestSuite.cpp + RFPrepareTrainData.cpp + RFSuiteManager.cpp + SvmSuite.cpp + SvmSuiteDescription.cpp + SvmSuiteManager.cpp + SvmSuiteOperations.cpp + SvmSuiteUtil.cpp + UtilCvMatToImageGTS.cpp + UtilGTS.cpp + UtilImageToCvMatGTS.cpp + UtilItkGTS.cpp + + ${YAMLCPP_FILES} +) + +set(UI_FILES + +) + +set(MOC_H_FILES + include/CaPTkInteractiveSegmentation.h + include/CaPTkInteractiveSegmentationAdapter.h + include/CaPTkInteractiveSegmentationQtPart.h + + include/AdaptiveGeodesicDistance.h + include/ConfigParserRF.h + include/ConvertionsOpenCV.h + include/ConvertionsYAML.h + include/GeodesicTrainingSegmentation.h + include/OperationsSvmGTS.h + include/Processing.h + include/RandomForestSuite.h + include/RFPrepareTrainData.h + include/RFSuiteManager.h + include/SusanDenoising.h + include/SvmSuite.h + include/SvmSuiteDescription.h + include/SvmSuiteManager.h + include/SvmSuiteOperations.h + include/SvmSuiteUtil.h + include/UtilCvMatToImageGTS.h + include/UtilGTS.h + include/UtilImageToCvMatGTS.h + include/UtilItkGTS.h +) + +set(RESOURCE_FILES + mll_icon2.svg + cbica-logo.jpg +) diff --git a/Modules/CaPTkInteractiveSegmentation/include/AdaptiveGeodesicDistance.h b/Modules/CaPTkInteractiveSegmentation/include/AdaptiveGeodesicDistance.h new file mode 100644 index 0000000..c71809c --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/AdaptiveGeodesicDistance.h @@ -0,0 +1,329 @@ +/** +\file AdaptiveGeodesicDistance.h +\brief The header file containing the Geodesic segmentation class, used to apply an adaptive geodesic transform +Library Dependecies: ITK 4.7+
+http://www.med.upenn.edu/sbia/software/
+software@cbica.upenn.edu +Copyright (c) 2018 University of Pennsylvania. All rights reserved.
+See COPYING file or http://www.med.upenn.edu/sbia/software/license.html +*/ +#ifndef H_CBICA_ADAPTIVE_GEODESIC_DISTANCE +#define H_CBICA_ADAPTIVE_GEODESIC_DISTANCE + +#include +#include + +#include "itkImage.h" +#include "itkConnectedThresholdImageFilter.h" +#include "itkImageRegionIterator.h" +#include "itkBSplineControlPointImageFilter.h" +#include "itkExpImageFilter.h" +#include "itkImageRegionIterator.h" +#include "itkOtsuThresholdImageFilter.h" +#include "itkShrinkImageFilter.h" +#include "itkMedianImageFunction.h" +#include "itkNeighborhoodIterator.h" +#include "itkMinimumMaximumImageCalculator.h" +#include "itkConnectedComponentImageFilter.h" +#include "itkBinaryThresholdImageFilter.h" +#include "itkThresholdImageFilter.h" + +/** +\namespace AdaptiveGeodesicDistance +\brief Applies an adaptive Geodesic filter to image +Reference: +@inproceedings{gaonkar2014adaptive, +title={Adaptive geodesic transform for segmentation of vertebrae on CT images}, +author={Gaonkar, Bilwaj and Shu, Liao and Hermosillo, Gerardo and Zhan, Yiqiang}, +booktitle={SPIE Medical Imaging}, +pages={903516--903516}, +year={2014}, +organization={International Society for Optics and Photonics} +} +*/ +namespace AdaptiveGeodesicDistance +{ + typedef int LabelsPixelType; + + typedef itk::Image< int, 3 > ImageTypeDefault; // Default image type + + template + using ImagePointer = typename TImageType::Pointer; + + /*For internal use*/ + template + double square(typename TImageType::PixelType x) { + return static_cast(x) * x; + } + + /** + Runs the Adaptive Geodesic Distance algorithm on a single image + @param input the input image. + @param labels an image the same size as input. A sample of labels for the input image. Only the pixels with value=labelOfInterest will be used. + @param labelOfInterest For which label (of the possibly many) from the labels image to perform AGD + @param limitAt255 if set to true the return image will be [0-255] + @return the AGD result + */ + template + ImagePointer< itk::Image > + Run(const ImagePointer< itk::Image > input, + const ImagePointer< itk::Image > labels, + const int labelOfInterest = 1, const bool verbose = false, const bool limitAt255 = false) + { + typedef itk::Image ImageTypeGeodesic; + + ImagePointer skipZerosGuideImage = ImageTypeGeodesic::New(); + skipZerosGuideImage->CopyInformation(input); + skipZerosGuideImage->SetRequestedRegion(input->GetLargestPossibleRegion()); + skipZerosGuideImage->SetBufferedRegion(input->GetBufferedRegion()); + skipZerosGuideImage->Allocate(); + skipZerosGuideImage->FillBuffer(1); + + return Run(input, skipZerosGuideImage, labels, labelOfInterest, verbose, limitAt255); + } + + /** + Runs the Adaptive Geodesic Distance algorithm on a single image + @param input the input image. + @param skipZerosGuideImage an image the same size as input. For the pixels that are zero no calculation will be done. (can be the input image itself) + @param labels an image the same size as input. A sample of labels for the input image. Only the pixels with value=labelOfInterest will be used. 
+ @param labelOfInterest For which label (of the possibly many) from the labels image to perform AGD + @param limitAt255 if set to true the return image will be [0-255] + @return the AGD result + */ + template + ImagePointer< itk::Image > + Run(const ImagePointer< itk::Image > input, + const ImagePointer< itk::Image > skipZerosGuideImage, + const ImagePointer< itk::Image > labels, + const int labelOfInterest = 1, const bool verbose = false, const bool limitAt255 = false) + { + static_assert((Dimensions == 2 || Dimensions == 3), "2D or 3D Images supported"); + + typedef itk::Image ImageTypeGeodesic; + typedef itk::Image LabelsImageType; + // typedef ImagePointer ImageGeodesicPointer; + // typedef ImagePointer LabelsImagePointer; + typedef itk::ImageRegionIteratorWithIndex NormalIteratorIndexedGeo; + typedef itk::ImageRegionIteratorWithIndex NormalIteratorIndexedLabels; + typedef itk::NeighborhoodIterator NeighborhoodIteratorGeo; + + const double pixelTypeMaxVal = itk::NumericTraits< typename ImageTypeGeodesic::PixelType >::max(); + + //-------------- Allocate output image ------------------------ + + typename ImageTypeGeodesic::Pointer output = ImageTypeGeodesic::New(); + output->SetRegions(input->GetLargestPossibleRegion()); + output->SetRequestedRegion(input->GetLargestPossibleRegion()); + //output->SetBufferedRegion(input->GetBufferedRegion()); + output->Allocate(); + output->FillBuffer((limitAt255) ? 255 : static_cast(pixelTypeMaxVal - 1)); + output->SetDirection(input->GetDirection()); + output->SetOrigin(input->GetOrigin()); + output->SetSpacing(input->GetSpacing()); + + //-------------- Initialize values for output image (0 at pixels of label of interest, max elsewhere [set above]) ------------------------ + + NormalIteratorIndexedLabels labIter(labels, labels->GetLargestPossibleRegion()); + NormalIteratorIndexedGeo outIter(output, output->GetLargestPossibleRegion()); + labIter.GoToBegin(); + outIter.GoToBegin(); + + while (!labIter.IsAtEnd()) { + if (labIter.Get() == labelOfInterest) { + outIter.Set(0); + } + ++labIter; + ++outIter; + } + + //-------------- If there is need to use gamma ------------------------ + + //typename ImageTypeGeodesic::Pointer Gamma = ImageTypeGeodesic::New(); + //Gamma->CopyInformation(input); + //Gamma->SetRequestedRegion(input->GetLargestPossibleRegion()); + //Gamma->SetBufferedRegion(input->GetBufferedRegion()); + //Gamma->Allocate(); + //Gamma->FillBuffer(1); + + //--------------------------- For actual AGD -------------------------- + + // Iterators that are used in the loops + NormalIteratorIndexedGeo iterSkipZeros(skipZerosGuideImage, skipZerosGuideImage->GetLargestPossibleRegion()); + //NormalIteratorIndexedGeo iterGamma(Gamma, Gamma->GetLargestPossibleRegion()); + + typename ImageTypeGeodesic::SizeType radius; + + for (unsigned int i = 0; i < ImageTypeGeodesic::ImageDimension; i++) { + radius[i] = 1; + } + + NeighborhoodIteratorGeo outNIter(radius, output, output->GetLargestPossibleRegion()); + NeighborhoodIteratorGeo inputNIter(radius, input, input->GetLargestPossibleRegion()); + + // Variables that are used in the loops + typename ImageTypeGeodesic::PixelType inpCenterPixel; + //ImageTypeGeodesic::PixelType gamPixel; + double arr[14]; + double minVal; + + //----- Backward pass ----- + + if (verbose) { + std::cout << "AdaptiveGeodesicDistance: \tForward pass\n"; + } + + outNIter.GoToBegin(); + inputNIter.GoToBegin(); + iterSkipZeros.GoToBegin(); + //iterGamma.GoToBegin(); + + while (!iterSkipZeros.IsAtEnd()) + { + if (iterSkipZeros.Get() != 0) + 
{ + // Forward pass + + if (ImageTypeGeodesic::ImageDimension == 2) + { + // 2D + + inpCenterPixel = inputNIter.GetPixel(4); + //gamPixel = iterGamma.Get(); + + arr[4] = outNIter.GetCenterPixel(); + arr[0] = outNIter.GetPixel(0) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(0))); + arr[1] = outNIter.GetPixel(1) + sqrt(1.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(1))); + arr[2] = outNIter.GetPixel(2) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(2))); + arr[3] = outNIter.GetPixel(3) + sqrt(1.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(3))); + + minVal = arr[4]; + for (int i = 0; i < 4; i++) + { + if (arr[i] < minVal) { + minVal = arr[i]; + } + } + } + else { + // 3D + + inpCenterPixel = inputNIter.GetPixel(13); + //gamPixel = iterGamma.Get(); + + arr[13] = outNIter.GetCenterPixel(); + arr[0] = outNIter.GetPixel(4) + sqrt(1.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(4))); + arr[1] = outNIter.GetPixel(10) + sqrt(1.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(10))); + arr[2] = outNIter.GetPixel(12) + sqrt(1.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(12))); + arr[3] = outNIter.GetPixel(1) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(1))); + arr[4] = outNIter.GetPixel(3) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(3))); + arr[5] = outNIter.GetPixel(9) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(9))); + arr[6] = outNIter.GetPixel(0) + sqrt(3.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(0))); + arr[7] = outNIter.GetPixel(7) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(7))); + arr[8] = outNIter.GetPixel(6) + sqrt(3.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(6))); + arr[9] = outNIter.GetPixel(15) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(15))); + arr[10] = outNIter.GetPixel(24) + sqrt(3.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(24))); + arr[11] = outNIter.GetPixel(21) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(21))); + arr[12] = outNIter.GetPixel(18) + sqrt(3.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(18))); + + minVal = arr[13]; + for (int i = 0; i < 13; i++) + { + if (arr[i] < minVal) { + minVal = arr[i]; + } + } + } + + outNIter.SetCenterPixel(minVal); + } + ++outNIter; + ++inputNIter; + ++iterSkipZeros; + //++iterGamma; + } + + //----- Backward pass ----- + + if (verbose) { + std::cout << "AdaptiveGeodesicDistance: \tBackward pass\n"; + } + + outNIter.GoToEnd(); + inputNIter.GoToEnd(); + --outNIter; + --inputNIter; + iterSkipZeros.GoToReverseBegin(); + //iterGamma.GoToReverseBegin(); + + while (!iterSkipZeros.IsAtReverseEnd()) + { + if (iterSkipZeros.Get() != 0) + { + // Backward pass + + if (ImageTypeGeodesic::ImageDimension == 2) + { + // 2D + + inpCenterPixel = inputNIter.GetPixel(4); + //gamPixelB = iterGamma.Get(); + + arr[4] = outNIter.GetCenterPixel(); + arr[0] = outNIter.GetPixel(5) + sqrt(1.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(5))); + arr[1] = outNIter.GetPixel(6) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(6))); + arr[2] = outNIter.GetPixel(7) + sqrt(1.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(7))); + arr[3] = outNIter.GetPixel(8) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - 
inputNIter.GetPixel(8))); + + minVal = arr[4]; + for (int i = 0; i < 4; i++) + { + if (arr[i] < minVal) { + minVal = arr[i]; + } + } + } + else + { + // 3D + + inpCenterPixel = inputNIter.GetPixel(13); + //gamPixelB = iterGamma.Get(); + + arr[13] = outNIter.GetCenterPixel(); + arr[0] = outNIter.GetPixel(22) + sqrt(1.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(22))); + arr[1] = outNIter.GetPixel(16) + sqrt(1.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(16))); + arr[2] = outNIter.GetPixel(14) + sqrt(1.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(14))); + arr[3] = outNIter.GetPixel(25) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(25))); + arr[4] = outNIter.GetPixel(23) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(23))); + arr[5] = outNIter.GetPixel(17) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(17))); + arr[6] = outNIter.GetPixel(26) + sqrt(3.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(26))); + arr[7] = outNIter.GetPixel(19) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(19))); + arr[8] = outNIter.GetPixel(20) + sqrt(3.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(20))); + arr[9] = outNIter.GetPixel(11) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(11))); + arr[10] = outNIter.GetPixel(2) + sqrt(3.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(2))); + arr[11] = outNIter.GetPixel(5) + sqrt(2.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(5))); + arr[12] = outNIter.GetPixel(8) + sqrt(3.0 + /*gamPixel * */ square(inpCenterPixel - inputNIter.GetPixel(8))); + + minVal = arr[13]; + for (int i = 0; i < 13; i++) + { + if (arr[i] < minVal) { + minVal = arr[i]; + } + } + } + + outNIter.SetCenterPixel(minVal); + } + --outNIter; + --inputNIter; + --iterSkipZeros; + //--iterGamma; + } + + return output; + } +} + +#endif \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/include/CaPTkInteractiveSegmentation.h b/Modules/CaPTkInteractiveSegmentation/include/CaPTkInteractiveSegmentation.h new file mode 100644 index 0000000..9ecbb5a --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/CaPTkInteractiveSegmentation.h @@ -0,0 +1,108 @@ +#ifndef CaPTkInteractiveSegmentation_h +#define CaPTkInteractiveSegmentation_h + +// The following header file is generated by CMake and thus it's located in +// the build directory. It provides an export macro for classes and functions +// that you want to be part of the public interface of your module. +#include + +#include "CaPTkInteractiveSegmentationAdapter.h" +#include "GeodesicTrainingSegmentation.h" + +#include "mitkImage.h" +#include "mitkLabelSetImage.h" +#include "mitkDataStorage.h" + +#include +#include +#include +#include + +/** \class CaPTkInteractiveSegmentation + * \brief Singleton class that runs the interactive segmentation + * algorithm and adds the result to the data storage + */ +class MITKCAPTKINTERACTIVESEGMENTATION_EXPORT CaPTkInteractiveSegmentation /*final*/ : + public QObject +{ + Q_OBJECT + +public: + CaPTkInteractiveSegmentation( + mitk::DataStorage::Pointer dataStorage, + QObject *parent = 0); + + ~CaPTkInteractiveSegmentation() {} + + /** \brief Runs the algorithm + * + * Execute the algorithm in a background thread. When the + * algorithm finishes, OnAlgorithmFinished() is called. 
+ * + * @param images a list of the co-registered input images + * @param labels label image that contains the user drawn seeds + */ + void Run(std::vector& images, + mitk::LabelSetImage::Pointer& seeds); + + // void Run(Json::Value& task_json, Json::Value& cohort_json); + + // void Run(std::string task_json_path, std::string cohort_json_path); + + void SetProgressBar(QProgressBar* progressBar); + + /** \struct Result + * \brief result of the execution of the algorithm + * + * if ok == true, then segmentation is populated, + * else errorMessage is populated. + */ + typedef struct Result + { + mitk::LabelSetImage::Pointer seeds; + mitk::LabelSetImage::Pointer segmentation; + bool ok = true; + std::string errorMessage = ""; + } Result; + +public slots: + /** \brief This function runs in the main thread when + * the algorithm is finished to add the result to the data storage + */ + void OnAlgorithmFinished(); + +protected: + + /** \brief Runs the algorithm after the operations in Run + * + * This can serve as a background thread. When the + * algorithm finishes, OnAlgorithmFinished() is called. + * + * @param images a list of the co-registered input images + * @param seeds label image that contains the user drawn seeds + * @return the result struct (that contains the output or an errorMessage) + */ + Result RunThread(std::vector& images, + mitk::LabelSetImage::Pointer& seeds); + + /** \brief Used to give the appropriate name to the output segmentation. + * + * The first one is called "Segmentation". Subsequent ones "Segmentation-2" etc + */ + std::string FindNextAvailableSegmentationName(); + + /** \brief Helper function to identify if a string is a number + * + */ + bool IsNumber(const std::string& s); + + bool m_IsRunning = false; + QFutureWatcher m_Watcher; + QFuture m_FutureResult; + + mitk::DataStorage::Pointer m_DataStorage; + + QProgressBar* m_ProgressBar; +}; + +#endif // ! 
CaPTkInteractiveSegmentation_h diff --git a/Modules/CaPTkInteractiveSegmentation/include/CaPTkInteractiveSegmentationAdapter.h b/Modules/CaPTkInteractiveSegmentation/include/CaPTkInteractiveSegmentationAdapter.h new file mode 100644 index 0000000..b9f3413 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/CaPTkInteractiveSegmentationAdapter.h @@ -0,0 +1,62 @@ +#ifndef CaPTkInteractiveSegmentationAdapter_h +#define CaPTkInteractiveSegmentationAdapter_h + +#include "GeodesicTrainingSegmentation.h" +#include "CaPTkInteractiveSegmentationQtPart.h" + +/** \class CaPTkInteractiveSegmentationAdapter + * \brief Basically useful to override the progressUpdate function of the original algorithm + */ +template +class CaPTkInteractiveSegmentationAdapter : + public CaPTkInteractiveSegmentationQtPart, + public GeodesicTrainingSegmentation::Coordinator +{ +public: + explicit CaPTkInteractiveSegmentationAdapter(QObject* parent = nullptr) : + CaPTkInteractiveSegmentationQtPart(parent) {} + + virtual ~CaPTkInteractiveSegmentationAdapter() {} + +protected: + /** Progress update method overriden from GeodesicTrainingSegmentation */ + void progressUpdate(std::string message, int progress) override + { + std::cout << "[CaPTkInteractiveSegmentationAdapter::progressUpdate]\n"; + + if (message == "GTS: Executing" && progress == 0) + { + emit ProgressUpdate(/*QString::fromStdString(message), */0); + } + else if (message == "GTS: AGD Operations" && progress == 0) + { + emit ProgressUpdate(/*QString::fromStdString(message), */5); + } + else if (message == "GTS: AGD Operations" && progress == 100) + { + emit ProgressUpdate(/*QString::fromStdString(message), */20); + } + else if (message == "GTS: Converting" && progress == 0) + { + emit ProgressUpdate(/*QString::fromStdString(message), */25); + } + else if (message == "GTS: Converting" && progress == 100) + { + emit ProgressUpdate(/*QString::fromStdString(message), */30); + } + else if (message == "GTS: SVM Operations" && progress == 0) + { + emit ProgressUpdate(/*QString::fromStdString(message), */35); + } + else if (message == "GTS: SVM Operations" && progress == 100) + { + emit ProgressUpdate(/*QString::fromStdString(message), */99); + } + else if (message == "GTS: Finished" && progress == 0) + { + emit ProgressUpdate(/*QString::fromStdString(message), */100); + } + } +}; + +#endif // ! CaPTkInteractiveSegmentationAdapter_h \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/include/CaPTkInteractiveSegmentationQtPart.h b/Modules/CaPTkInteractiveSegmentation/include/CaPTkInteractiveSegmentationQtPart.h new file mode 100644 index 0000000..f7b75d3 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/CaPTkInteractiveSegmentationQtPart.h @@ -0,0 +1,23 @@ +#ifndef CaPTkInteractiveSegmentationQtPart_h +#define CaPTkInteractiveSegmentationQtPart_h + +#include + +/** \class CaPTkInteractiveSegmentationQtPart + * \brief Signals and slots functionality used by the CaPTkInteractiveSegmentationAdapter + * + * This class exists because you can't mix templates and QObject + */ +class CaPTkInteractiveSegmentationQtPart : public QObject +{ + Q_OBJECT + +public: + explicit CaPTkInteractiveSegmentationQtPart(QObject* parent = nullptr) : QObject(parent) {} + virtual ~CaPTkInteractiveSegmentationQtPart() {} + +signals: + void ProgressUpdate(/*QString message, */int progress); +}; + +#endif // ! 
CaPTkInteractiveSegmentationQtPart_h \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/include/ConfigParserRF.h b/Modules/CaPTkInteractiveSegmentation/include/ConfigParserRF.h new file mode 100644 index 0000000..6f9e029 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/ConfigParserRF.h @@ -0,0 +1,17 @@ +#include +#include +#include + +#include + +namespace ConfigParserRF +{ + void Parse(std::string filePath, double &trainingSamplePercentage, int &maxDepth, double &minSampleCountPercentage, + int &maxCategories, int &activeVarCount, int &numberOfTrees, cv::Mat &priors); + + void PrintParseResult(double trainingSamplePercentage, int maxDepth, double minSampleCountPercentage, + int maxCategories, int activeVarCount, int numberOfTrees, cv::Mat &priors); + + void PrintParseResultToFile(std::string filePath, double trainingSamplePercentage, int maxDepth, double minSampleCountPercentage, + int maxCategories, int activeVarCount, int numberOfTrees, cv::Mat &priors); +} \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/include/ConvertionsOpenCV.h b/Modules/CaPTkInteractiveSegmentation/include/ConvertionsOpenCV.h new file mode 100644 index 0000000..121e51a --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/ConvertionsOpenCV.h @@ -0,0 +1,32 @@ +#ifndef H_CBICA_SVM_SUITE_CONVERTIONS_OPEN_CV +#define H_CBICA_SVM_SUITE_CONVERTIONS_OPEN_CV + +#include +#include +#include + +#include + +namespace SvmSuiteConvertions { + + // OpenCV to String (and vice versa) Conversions + + cv::ml::SVM::Types TypeFromString(std::string typeString); + + std::string StringFromType(cv::ml::SVM::Types type); + + cv::ml::SVM::KernelTypes KernelTypeFromString(std::string kernelTypeString); + + std::string StringFromKernelType(cv::ml::SVM::KernelTypes kernelType); + + cv::ml::SVM::ParamTypes ParamTypeFromString(std::string paramTypeString); + + std::string StringFromParamType(cv::ml::SVM::ParamTypes paramType); + + int TermCriteriaTypeFromString(std::string termCriteriaTypeString); + + std::string StringFromTermCriteriaType(int termCriteriaType); + +} + +#endif \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/include/ConvertionsYAML.h b/Modules/CaPTkInteractiveSegmentation/include/ConvertionsYAML.h new file mode 100644 index 0000000..398f0be --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/ConvertionsYAML.h @@ -0,0 +1,42 @@ +#ifndef H_CBICA_SVM_SUITE_CONVERTIONS_YAML +#define H_CBICA_SVM_SUITE_CONVERTIONS_YAML + +#include +#include +#include + +#include + +#include "yaml-cpp/yaml.h" + +#include "SvmSuiteDescription.h" + +const std::string YAML_KERNEL_TYPE = "kernel_type"; +const std::string YAML_TYPE = "type"; +const std::string YAML_KFOLD = "kfold"; +const std::string YAML_NEIGHBORHOOD_RADIUS = "neighborhood_radius"; +const std::string YAML_CONSIDER_WEIGHTS = "consider_weights"; +const std::string YAML_IMPORTANCE = "importance"; +const std::string YAML_MODEL_PATH = "pretrained"; +const std::string YAML_C = "c"; +const std::string YAML_GAMMA = "gamma"; +const std::string YAML_P = "p"; +const std::string YAML_NU = "nu"; +const std::string YAML_COEF = "coef"; +const std::string YAML_DEGREE = "degree"; +const std::string YAML_AUTO = "auto"; +const std::string YAML_TERM_CRITERIA = "term_criteria"; +const std::string YAML_TERM_CRITERIA_TYPE = "criteria_type"; +const std::string YAML_TERM_CRITERIA_MAX = "max_count"; +const std::string YAML_TERM_CRITERIA_EPS = "epsilon"; +const std::string YAML_MIN_VAL = 
"min_value"; +const std::string YAML_MAX_VAL = "max_value"; +const std::string YAML_LOG_STEP = "log_step"; +const std::string YAML_ROOT_NODE = "svms"; + +namespace SvmSuiteConvertions +{ + YAML::Node yamlConvertSvmDescriptionToNode(SvmSuite::SvmDescription& svm_description); +} + +#endif \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/include/GeodesicTrainingSegmentation.h b/Modules/CaPTkInteractiveSegmentation/include/GeodesicTrainingSegmentation.h new file mode 100644 index 0000000..708010c --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/GeodesicTrainingSegmentation.h @@ -0,0 +1,2263 @@ +#ifndef H_CBICA_GEODESIC_TRAINING_SEGMENTATION +#define H_CBICA_GEODESIC_TRAINING_SEGMENTATION + +#include "itkImage.h" +#include "itkImageFileWriter.h" +#include "itkImageFileReader.h" +#include "itkExceptionObject.h" +#include "itkCastImageFilter.h" +#include "itkRescaleIntensityImageFilter.h" +#include "itkInvertIntensityImageFilter.h" +#include "itkLinearInterpolateImageFunction.h" +#include "itkResampleImageFilter.h" +#include "itkScaleTransform.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "UtilGTS.h" +#include "UtilItkGTS.h" +#include "UtilImageToCvMatGTS.h" +#include "UtilCvMatToImageGTS.h" +#include "OperationsSvmGTS.h" + +#include "AdaptiveGeodesicDistance.h" +#include "SvmSuite.h" +#include "RandomForestSuite.h" +#include "Processing.h" + +namespace GeodesicTrainingSegmentation +{ + enum MODE + { + SVM_PSEUDO, + SVM_LABELS, + AGD, + LABELS_THRESHOLD, + SEGMENT, + REVERSE_GEOTRAIN, + REVERSE_GEOTRAIN_SPORADIC, + GEOTRAIN, + GEOTRAIN_FULL, + RF, + AGD_RF, + RF_AUTO, + AGD_RF_AUTO, + CHANGE_LABELS, + GENERATE_CONFIG, + CHECK_ACCURACY + }; + + enum MODALITY_MRI + { + FLAIR, + T1, + T1CE, + T2 + }; + + typedef int AgdPixelType; + typedef int LabelsPixelType; + typedef float PseudoProbPixelType; + + const LabelsPixelType DEFAULT_LABEL_TC = 1; // Tumor core + const LabelsPixelType DEFAULT_LABEL_ET = 4; // Enhanced tumor + const LabelsPixelType DEFAULT_LABEL_ED = 2; // Edema + const LabelsPixelType DEFAULT_LABEL_HT = 3; // Healthy tissue + + const LabelsPixelType DEFAULT_LABEL_OF_INTEREST = 1; + + const std::string DEFAULT_FILE_EXTENSION = ".nii.gz"; + const float DEFAULT_THRESHOLD = 25; + const int DEFAULT_NUMBER_OF_THREADS = 16; + const int MAX_THREADS_SVM_SUITE = 32; + const long DEFAULT_MAX_IMAGE_SIZE_BEFORE_RESAMPLING = 10000000; + + const int DEFAULT_MAX_SAMPLES_SVM_SUBSAMPLE = 3000; + const bool DEFAULT_BALANCED_SUBSAMPLE = true; + const float DEFAULT_INPUT_IMAGES_TO_AGD_MAPS_RATIO = 6; + + /** Class for all the Geodesic Training Segmentation operations */ + template + class Coordinator + { + public: + + typedef itk::Image< AgdPixelType, Dimensions > AgdImageType; + typedef itk::Image< PixelType, Dimensions > InputImageType; + typedef itk::Image< LabelsPixelType, Dimensions > LabelsImageType; + typedef itk::Image< PseudoProbPixelType, Dimensions > PseudoProbImageType; + + typedef typename AgdImageType::Pointer AgdImagePointer; + typedef typename InputImageType::Pointer InputImagePointer; + typedef typename LabelsImageType::Pointer LabelsImagePointer; + typedef typename PseudoProbImageType::Pointer PseudoProbImagePointer; + + /** Everything that the class does gets returned with this. 
+ * Not every field gets used every time + */ + typedef struct Result + { + PseudoProbImagePointer posImage; + PseudoProbImagePointer negImage; + LabelsPixelType posLabel = 0; + LabelsPixelType negLabel = 0; + LabelsImagePointer labelsImage; + InputImagePointer segmentedFloatImage; + AgdImagePointer agdMapImage; + + std::map< LabelsPixelType, double > diceScore; + std::map< LabelsPixelType, double > sensitivity; + std::map< LabelsPixelType, int > falsePositivesCount; + + double diceScoreAll; + double sensitivityAll; + int falsePositivesCountAll; + + bool ok = true; + std::string errorMessage = ""; + } Result; + + explicit Coordinator() {} + + virtual ~Coordinator() {} + + static_assert((Dimensions == 2 || Dimensions == 3), "2D or 3D Images supported"); + + /** Main execution function */ + std::shared_ptr< Result > Execute() + { + std::shared_ptr< Result > gtsResult(new Result()); + + // Check if everything that is needed is supplied + if (!validState(gtsResult)) { return gtsResult; } + + message( + std::string("-----------------------------------------\n") + + std::string("Geodesic Training - Semantic Segmentation\n") + + std::string("-----------------------------------------\n\n"), + "Executing" + ); + + startTimer(); + + //shrinkImagesIfNecessary(); + + if (!m_changed_labels_map_manually_set && m_input_images_MRI.size() > 0) { + // By default, remove healthy tissue after calculations + m_change_labels_map[m_label_HT] = 0; + } + + // // Normalize input (if applicable). Note: AGD maps are capped at 255 + // ItkUtilGTS::statisticalImageVectorNormalization(m_input_images, std::lround(255 * m_image_to_agd_maps_ratio)); + // //ItkUtilGTS::statisticalImageMapNormalization(m_input_images_MRI, std::lround(255 * m_image_to_agd_maps_ratio)); + m_processing.SetVerbose(m_verbose); + m_processing.SetSaveAll(m_save_all); + m_processing.SetOutputFolder(m_output_folder); + m_processing.SetTimerEnabled(m_timer_enabled); + m_processing.SetNumberOfThreads((m_max_threads)?32:m_number_of_threads); + + switch (m_mode) { + case SVM_PSEUDO: + { + message("Number of modalities: " + std::to_string(m_input_images.size() + m_input_images_MRI.size()) + "\n"); + m_processing.PreProcess(m_input_images, m_labels_image); + auto labelsCountMap = ParserGTS::CountsOfEachLabel(m_labels_image); + + if (!validLabels(gtsResult, labelsCountMap, 2)) { return gtsResult; } + + addToInputImagesMRI(); // Nothing special happens for the different modalities here + + addCoordinatorMapsIfNecessary(); + + // Construct pseudoprobability maps (~distance to hyperplane) + svm(m_input_images, m_labels_image, gtsResult, labelsCountMap, true); + + m_processing.template PostProcessNormalImage(gtsResult->posImage); + m_processing.template PostProcessNormalImage(gtsResult->negImage); + + } break; + case SVM_LABELS: + { + message("Number of modalities: " + std::to_string(m_input_images.size() + m_input_images_MRI.size()) + "\n"); + m_processing.PreProcess(m_input_images, m_labels_image); + auto labelsCountMap = ParserGTS::CountsOfEachLabel(m_labels_image); + + if (!validLabels(gtsResult, labelsCountMap)) { return gtsResult; } + + addToInputImagesMRI(); // Nothing special happens for the different modalities here + + addCoordinatorMapsIfNecessary(); + + // Construct a labeled image using SVM(s) + svm(m_input_images, m_labels_image, gtsResult, labelsCountMap, false); + if (!gtsResult->ok) { break; } + + m_processing.PostProcessLabelsImage(gtsResult->labelsImage); + + // Change labels if necessary + changeLabels(gtsResult->labelsImage, 
m_change_labels_map); + + if (m_ground_truth_set) { + checkAccuracyInRelationToGroundTruth(gtsResult->labelsImage, gtsResult); + } + + } break; + case AGD: + { + message("Number of modalities: " + std::to_string(m_input_images.size() + m_input_images_MRI.size()) + "\n"); + m_processing.PreProcess(m_input_images, m_labels_image); + auto labelsCountMap = ParserGTS::CountsOfEachLabel(m_labels_image); + + if (!validLabels(gtsResult, labelsCountMap, 1)) { return gtsResult; } + + addToInputImagesMRI(); // Nothing special happens for the different modalities here + + // Construct AGD maps + gtsResult->agdMapImage = agd(m_input_images[0], m_label_of_interest, true); + + m_processing.template PostProcessNormalImage(gtsResult->agdMapImage); + + gtsResult->labelsImage = thresholdSingleImageToLabel(gtsResult->agdMapImage, m_threshold, m_label_of_interest); + + // Change labels if necessary + changeLabels(gtsResult->labelsImage, m_change_labels_map); + + if (m_ground_truth_set) { + checkAccuracyInRelationToGroundTruth(gtsResult->labelsImage, gtsResult); + } + + } break; + case LABELS_THRESHOLD: + { + // Construct labeled image. (For each pixel: if intensity <= threshold then label of interest, else label 0) + gtsResult->labelsImage = thresholdSingleImageToLabel(m_input_images[0], m_threshold, m_label_of_interest); + + } break; + case SEGMENT: + { + auto labelsCountMap = ParserGTS::CountsOfEachLabel(m_labels_image); + + if (!validLabels(gtsResult, labelsCountMap, 1)) { return gtsResult; } + + addToInputImagesMRI(); // Nothing special happens for the different modalities here + + // Crop an image only at the labels of a particular class, according to labels image + InputImagePointer segmentedImage = segmentSingleImage(m_input_images[0], m_labels_image, m_label_of_interest); + + if (std::is_same::value) { + // In case more pixel types are supported in the future + gtsResult->segmentedFloatImage = segmentedImage; + } + + } break; + case REVERSE_GEOTRAIN_SPORADIC: + { + message("Number of modalities: " + std::to_string(m_input_images.size() + m_input_images_MRI.size()) + "\n"); + m_processing.PreProcess(m_input_images, m_labels_image); + auto labelsCountMap = ParserGTS::CountsOfEachLabel(m_labels_image); + + if (!validLabels(gtsResult, labelsCountMap)) { return gtsResult; } + + if(m_label_HT == 0 || labelsCountMap.count(m_label_HT) == 0) + { + gtsResult->ok = false; + gtsResult->errorMessage = "Either the healthy tissue label was not set or there weren't any healthy tissue labels drawn"; + errorOccured(gtsResult->errorMessage); + return gtsResult; + } + + // Construct agd maps for each input image and input input images and agd maps to a SVM that produces labels + std::vector< AgdImagePointer > agdImages; + + addToInputImagesMRI(); // No special operations for MRI images + + std::vector htKeyInList; + htKeyInList.push_back(m_label_HT); + + if (m_input_images.size() != 0) { + + std::vector< AgdImagePointer > agdImagesAllKeys = agd(m_input_images, htKeyInList, true); + agdImages.insert(agdImages.end(), agdImagesAllKeys.begin(), agdImagesAllKeys.end()); // Append + } + + message("Converting AGD maps...", "Converting"); + for (auto agdImage : agdImages) { + // Convert output of AGD to InputImageType + auto agdImageCast = ItkUtilGTS::castAndRescaleImage< AgdImageType, InputImageType>(agdImage); + + // Apply negative filter so it is 0 outside of the brain (basically where there is no information - that is used to make it faster later) + typedef itk::InvertIntensityImageFilter InvertIntensityImageFilterType; + 
typename InvertIntensityImageFilterType::Pointer invertIntensityFilter = InvertIntensityImageFilterType::New(); + invertIntensityFilter->SetInput(agdImageCast); + invertIntensityFilter->SetMaximum(255); + invertIntensityFilter->Update(); + + //m_input_images.push_back(ItkUtilGTS::statisticalImageNormalization(invertIntensityFilter->GetOutput(), VALUE)); + m_input_images.push_back(invertIntensityFilter->GetOutput()); + m_agd_maps_count++; + } + message("Converting AGD maps...", "Converting", 100); + + addCoordinatorMapsIfNecessary(); + + // SVM_LABELS + svm(m_input_images, m_labels_image, gtsResult, labelsCountMap, false); + if (!gtsResult->ok) { break; } + + m_processing.PostProcessLabelsImage(gtsResult->labelsImage); + + // Change labels if necessary + changeLabels(gtsResult->labelsImage, m_change_labels_map); + + if (m_ground_truth_set) { + checkAccuracyInRelationToGroundTruth(gtsResult->labelsImage, gtsResult); + } + + } break; + case REVERSE_GEOTRAIN: + { + message("Number of modalities: " + std::to_string(m_input_images.size() + m_input_images_MRI.size()) + "\n"); + m_processing.PreProcess(m_input_images, m_labels_image); + auto labelsCountMap = ParserGTS::CountsOfEachLabel(m_labels_image); + + if (!validLabels(gtsResult, labelsCountMap)) { return gtsResult; } + + // Construct agd maps for each input image and input input images and agd maps to a SVM that produces labels + std::vector< AgdImagePointer > agdImages; + + if (m_input_images.size() != 0) { + auto keys = getMapKeyset(labelsCountMap); + + std::vector< AgdImagePointer > agdImagesAllKeys = agd(m_input_images, keys, true); + agdImages.insert(agdImages.end(), agdImagesAllKeys.begin(), agdImagesAllKeys.end()); // Append + } + + if (m_input_images_MRI.size() != 0) { + // Special operations for MRI images + std::vector< AgdImagePointer > agdImagesMRI = agdMRI(m_input_images_MRI, getMapKeyset(labelsCountMap), false, true); + agdImages.insert(agdImages.end(), agdImagesMRI.begin(), agdImagesMRI.end()); // Append + addToInputImagesMRI(); + } + + message("Converting AGD maps...", "Converting"); + for (auto agdImage : agdImages) { + // Convert output of AGD to InputImageType + auto agdImageCast = ItkUtilGTS::castAndRescaleImage< AgdImageType, InputImageType>(agdImage); + + // Apply negative filter so it is 0 outside of the brain (basically where there is no information - that is used to make it faster later) + typedef itk::InvertIntensityImageFilter InvertIntensityImageFilterType; + typename InvertIntensityImageFilterType::Pointer invertIntensityFilter = InvertIntensityImageFilterType::New(); + invertIntensityFilter->SetInput(agdImageCast); + invertIntensityFilter->SetMaximum(255); + invertIntensityFilter->Update(); + + //m_input_images.push_back(ItkUtilGTS::statisticalImageNormalization(invertIntensityFilter->GetOutput(), VALUE)); + m_input_images.push_back(invertIntensityFilter->GetOutput()); + m_agd_maps_count++; + } + message("Converting AGD maps...", "Converting", 100); + + addCoordinatorMapsIfNecessary(); + + // Add non-agd images to main vector + addToInputImagesNonAgd(); + + // SVM_LABELS + svm(m_input_images, m_labels_image, gtsResult, labelsCountMap, false); + if (!gtsResult->ok) { break; } + + m_processing.PostProcessLabelsImage(gtsResult->labelsImage); + + // Change labels if necessary + changeLabels(gtsResult->labelsImage, m_change_labels_map); + + if (m_ground_truth_set) { + checkAccuracyInRelationToGroundTruth(gtsResult->labelsImage, gtsResult); + } + + } break; + case GEOTRAIN: + { + message("Number of modalities: " + 
std::to_string(m_input_images.size() + m_input_images_MRI.size()) + "\n"); + m_processing.PreProcess(m_input_images, m_labels_image); + auto labelsCountMap = ParserGTS::CountsOfEachLabel(m_labels_image); + + if (!validLabels(gtsResult, labelsCountMap, 2)) { return gtsResult; } + + addToInputImagesMRI(); // Nothing special happens for the different modalities here + + addCoordinatorMapsIfNecessary(); + + // SVM_PSEUDO + svm(m_input_images, m_labels_image, gtsResult, labelsCountMap, true); + if (!gtsResult->ok) { break; } + + m_processing.template PostProcessNormalImage(gtsResult->posImage); + m_processing.template PostProcessNormalImage(gtsResult->negImage); + + // AGD + AgdImagePointer agdMapPos = agd(gtsResult->posImage, gtsResult->posLabel, true); + AgdImagePointer agdMapNeg = agd(gtsResult->negImage, gtsResult->negLabel, true); + + // THRESHOLD + gtsResult->labelsImage = thresholdSingleImageToLabel(agdMapPos, m_threshold, gtsResult->posLabel); + thresholdSingleImageToLabel(agdMapNeg, m_threshold, gtsResult->negLabel); + + if (m_ground_truth_set) { + checkAccuracyInRelationToGroundTruth(gtsResult->labelsImage, gtsResult); + } + + } break; + case GEOTRAIN_FULL: + { + message("Number of modalities: " + std::to_string(m_input_images.size() + m_input_images_MRI.size()) + "\n"); + m_processing.PreProcess(m_input_images, m_labels_image); + auto labelsCountMap = ParserGTS::CountsOfEachLabel(m_labels_image); + + if (!validLabels(gtsResult, labelsCountMap, 2)) { return gtsResult; } + + // Construct agd maps for each input image and input input images and agd maps to a SVM that produces labels + std::vector< AgdImagePointer > agdImages; + + if (m_input_images.size() != 0) { + auto keys = getMapKeyset(labelsCountMap); + + std::vector< AgdImagePointer > agdImagesAllKeys = agd(m_input_images, keys, true); + agdImages.insert(agdImages.end(), agdImagesAllKeys.begin(), agdImagesAllKeys.end()); // Append + } + + if (m_input_images_MRI.size() != 0) { + // Special operations for MRI images + std::vector< AgdImagePointer > agdImagesMRI = agdMRI(m_input_images_MRI, getMapKeyset(labelsCountMap), false, true); + agdImages.insert(agdImages.end(), agdImagesMRI.begin(), agdImagesMRI.end()); // Append + addToInputImagesMRI(); + } + + for (auto agdImage : agdImages) { + // Convert output of AGD to InputImageType + auto agdImageCast = ItkUtilGTS::castAndRescaleImage< AgdImageType, InputImageType>(agdImage); + + // Apply negative filter so it is 0 outside of the brain (basically where there is no information - that is used to make it faster later) + typedef itk::InvertIntensityImageFilter InvertIntensityImageFilterType; + typename InvertIntensityImageFilterType::Pointer invertIntensityFilter = InvertIntensityImageFilterType::New(); + invertIntensityFilter->SetInput(agdImageCast); + invertIntensityFilter->SetMaximum(255); + invertIntensityFilter->Update(); + + m_input_images.push_back(invertIntensityFilter->GetOutput()); + m_agd_maps_count++; + } + + addCoordinatorMapsIfNecessary(); + + // SVM_PSEUDO + svm(m_input_images, m_labels_image, gtsResult, labelsCountMap, true); + if (!gtsResult->ok) { break; } + + // AGD + AgdImagePointer agdMapPos = agd(gtsResult->posImage, gtsResult->posLabel, true); + AgdImagePointer agdMapNeg = agd(gtsResult->negImage, gtsResult->negLabel, true); + + gtsResult->labelsImage = thresholdSingleImageToLabel(agdMapPos, m_threshold, gtsResult->posLabel); + + m_processing.PostProcessLabelsImage(gtsResult->labelsImage); + + // THRESHOLD + thresholdSingleImageToLabel(agdMapNeg, m_threshold, 
gtsResult->negLabel); + + if (m_ground_truth_set) { + checkAccuracyInRelationToGroundTruth(gtsResult->labelsImage, gtsResult); + } + + } break; + case RF: + { + message("Number of modalities: " + std::to_string(m_input_images.size() + m_input_images_MRI.size()) + "\n"); + m_processing.PreProcess(m_input_images, m_labels_image); + auto labelsCountMap = ParserGTS::CountsOfEachLabel(m_labels_image); + + if (!validLabels(gtsResult, labelsCountMap)) { return gtsResult; } + + addToInputImagesMRI(); // Nothing special happens for the different modalities here + + addCoordinatorMapsIfNecessary(); + + gtsResult->labelsImage = rf(false); + + m_processing.PostProcessLabelsImage(gtsResult->labelsImage); + + // Change labels if necessary + changeLabels(gtsResult->labelsImage, m_change_labels_map); + + if (m_ground_truth_set) { + checkAccuracyInRelationToGroundTruth(gtsResult->labelsImage, gtsResult); + } + } break; + case AGD_RF: + { + message("Number of modalities: " + std::to_string(m_input_images.size() + m_input_images_MRI.size()) + "\n"); + m_processing.PreProcess(m_input_images, m_labels_image); + auto labelsCountMap = ParserGTS::CountsOfEachLabel(m_labels_image); + + if (!validLabels(gtsResult, labelsCountMap)) { return gtsResult; } + + // Construct agd maps for each input image and input input images and agd maps to a SVM that produces labels + std::vector< AgdImagePointer > agdImages; //= agd(m_input_images, m_labels_list, false); + + if (m_input_images.size() != 0) { + auto keys = getMapKeyset(labelsCountMap); + + std::vector< AgdImagePointer > agdImagesAllKeys = agd(m_input_images, keys, true); + agdImages.insert(agdImages.end(), agdImagesAllKeys.begin(), agdImagesAllKeys.end()); // Append + } + + if (m_input_images_MRI.size() != 0) { + // Special operations for MRI images + std::vector< AgdImagePointer > agdImagesMRI = agdMRI(m_input_images_MRI, getMapKeyset(labelsCountMap), false, true); + agdImages.insert(agdImages.end(), agdImagesMRI.begin(), agdImagesMRI.end()); // Append + addToInputImagesMRI(); + } + + for (auto agdImage : agdImages) { + // Convert output of AGD to InputImageType + auto agdImageCast = ItkUtilGTS::castAndRescaleImage< AgdImageType, InputImageType>(agdImage); + + // Apply negative filter so it is 0 outside of the brain (basically where there is no information - that is used to make it faster later) + typedef itk::InvertIntensityImageFilter InvertIntensityImageFilterType; + typename InvertIntensityImageFilterType::Pointer invertIntensityFilter = InvertIntensityImageFilterType::New(); + invertIntensityFilter->SetInput(agdImageCast); + invertIntensityFilter->SetMaximum(255); + invertIntensityFilter->Update(); + + m_input_images.push_back(invertIntensityFilter->GetOutput()); + m_agd_maps_count++; + } + + addCoordinatorMapsIfNecessary(); + + // RF + gtsResult->labelsImage = rf(false); + + m_processing.PostProcessLabelsImage(gtsResult->labelsImage); + + // Change labels if necessary + changeLabels(gtsResult->labelsImage, m_change_labels_map); + + if (m_ground_truth_set) { + checkAccuracyInRelationToGroundTruth(gtsResult->labelsImage, gtsResult); + } + } break; + case RF_AUTO: + { + message("Number of modalities: " + std::to_string(m_input_images.size() + m_input_images_MRI.size()) + "\n"); + m_processing.PreProcess(m_input_images, m_labels_image); + auto labelsCountMap = ParserGTS::CountsOfEachLabel(m_labels_image); + + if (!validLabels(gtsResult, labelsCountMap)) { return gtsResult; } + + addToInputImagesMRI(); // Nothing special happens for the different modalities here + + 
addCoordinatorMapsIfNecessary(); + + gtsResult->labelsImage = rf(true); + + m_processing.PostProcessLabelsImage(gtsResult->labelsImage); + + // Change labels if necessary + changeLabels(gtsResult->labelsImage, m_change_labels_map); + + if (m_ground_truth_set) { + checkAccuracyInRelationToGroundTruth(gtsResult->labelsImage, gtsResult); + } + } break; + case AGD_RF_AUTO: + { + message("Number of modalities: " + std::to_string(m_input_images.size() + m_input_images_MRI.size()) + "\n"); + m_processing.PreProcess(m_input_images, m_labels_image); + auto labelsCountMap = ParserGTS::CountsOfEachLabel(m_labels_image); + + if (!validLabels(gtsResult, labelsCountMap)) { return gtsResult; } + + // Construct agd maps for each input image and input input images and agd maps to a SVM that produces labels + std::vector< AgdImagePointer > agdImages; + + if (m_input_images.size() != 0) { + auto keys = getMapKeyset(labelsCountMap); + + std::vector< AgdImagePointer > agdImagesAllKeys = agd(m_input_images, keys, true); + agdImages.insert(agdImages.end(), agdImagesAllKeys.begin(), agdImagesAllKeys.end()); // Append + } + + if (m_input_images_MRI.size() != 0) { + // Special operations for MRI images + std::vector< AgdImagePointer > agdImagesMRI = agdMRI(m_input_images_MRI, getMapKeyset(labelsCountMap), false, true); + agdImages.insert(agdImages.end(), agdImagesMRI.begin(), agdImagesMRI.end()); // Append + addToInputImagesMRI(); + } + + for (auto agdImage : agdImages) { + // Convert output of AGD to InputImageType + auto agdImageCast = ItkUtilGTS::castAndRescaleImage< AgdImageType, InputImageType>(agdImage); + + // Apply negative filter so it is 0 outside of the brain (basically where there is no information - that is used to make it faster later) + typedef itk::InvertIntensityImageFilter InvertIntensityImageFilterType; + typename InvertIntensityImageFilterType::Pointer invertIntensityFilter = InvertIntensityImageFilterType::New(); + invertIntensityFilter->SetInput(agdImageCast); + invertIntensityFilter->SetMaximum(255); + invertIntensityFilter->Update(); + + m_input_images.push_back(invertIntensityFilter->GetOutput()); + m_agd_maps_count++; + } + + addCoordinatorMapsIfNecessary(); + + gtsResult->labelsImage = rf(true); + + m_processing.PostProcessLabelsImage(gtsResult->labelsImage); + + // Change labels if necessary + changeLabels(gtsResult->labelsImage, m_change_labels_map); + + if (m_ground_truth_set) { + checkAccuracyInRelationToGroundTruth(gtsResult->labelsImage, gtsResult); + } + } break; + case CHANGE_LABELS: + { + gtsResult->labelsImage = changeLabels(m_labels_image, m_change_labels_map); + } break; + case GENERATE_CONFIG: + { + // Generate .yaml config from pretrained models (.xml) using their respective neighborhood radius and importance value + std::vector neighborhoodRadii(m_importance_values.size(), 1); // Neighborhood radius should be removed from SvmSuite in the future + SvmSuite::generateConfig(m_pretrained_models_paths, neighborhoodRadii, m_importance_values, m_output_folder + "/config.yaml"); + } break; + case CHECK_ACCURACY: + { + // changeLabels won't do anything if there is no need + changeLabels(m_labels_image, m_change_labels_map); + + checkAccuracyInRelationToGroundTruth(m_labels_image, gtsResult); + } break; + } + + stopTimerAndReport("Total"); + message("", "Finished"); + + return gtsResult; + } + + //Setters + + void SetMode(MODE mode) { + m_mode = mode; + } + void SetInputImages(std::vector< InputImagePointer > inputImages) { + m_input_images = inputImages; + } + void 
SetInputImages(std::vector< std::string > inputImagesPaths) { + if (inputImagesPaths.size() != 0) { + std::vector< InputImagePointer > inputImages; + + for (auto inputImagePath : inputImagesPaths) { + std::string extension = getFileExtension(inputImagePath); + + if (!m_file_extension_set_manually) { + m_file_extension = extension; + } + + inputImages.push_back(readImage(inputImagePath)); + } + + m_input_images = inputImages; // Replace possible older values + } + } + void SetInputImage(typename itk::Image< float, 3 >::Pointer inputImage) { + m_input_images = std::vector< InputImagePointer >(); + m_input_images.push_back(inputImage); + } + void SetInputImage(std::string inputImagePath) { + m_input_images = std::vector< InputImagePointer >(); + m_input_images.push_back(readImage(inputImagePath)); + } + void SetExtraInputImagesNotAGDable(std::vector< InputImagePointer > inputImagesNonAgd) { + m_input_images_non_agd = inputImagesNonAgd; // Replace possible older values + } + void SetExtraInputImagesNotAGDable(std::vector< std::string > inputImagesNonAgdPaths) { + if (inputImagesNonAgdPaths.size() != 0) { + std::vector< InputImagePointer > inputImagesNonAgd; + + for (auto inputImageNonAgdPath : inputImagesNonAgdPaths) { + std::string extension = getFileExtension(inputImageNonAgdPath); + inputImagesNonAgd.push_back(readImage(inputImageNonAgdPath)); + } + + m_input_images_non_agd = inputImagesNonAgd; // Replace possible older values + } + } + void SetLabels(LabelsImagePointer labels) { + m_labels_image = labels; + } + void SetLabels(std::string labelsPath) { + if (labelsPath != "") { + std::string extension = getFileExtension(labelsPath); + + if (!m_file_extension_set_manually) { + m_file_extension = extension; + } + + m_labels_image = readImage(labelsPath); + } + } + void SetOutputPath(std::string path) { + m_output_folder = path; + if (!UtilGTS::directoryExists(m_output_folder)) { + UtilGTS::createDir(m_output_folder); + } + } + void SetOutputPath(std::string path, std::string datasetName, std::string tag = "", bool includeDateTimeInTag = true) { + if (datasetName != "") { + if (!UtilGTS::directoryExists(path)) { + UtilGTS::createDir(path); + } + m_output_folder = path + "\\" + datasetName; + if (!UtilGTS::directoryExists(m_output_folder)) { + UtilGTS::createDir(m_output_folder); + } + std::string subfolderName = + ((includeDateTimeInTag || tag == "") ? + UtilGTS::currentDateTime() + : "") + + ((tag != "") ? + ((includeDateTimeInTag) ? 
+ " " + : "") + tag + : ""); + + //std::string subfolderName = current_date() + " " + current_time(); + //std::string subfolderName = "output"; + + m_output_folder += "\\" + subfolderName; + if (!UtilGTS::directoryExists(m_output_folder)) { + UtilGTS::createDir(m_output_folder); + } + } + else { + this->SetOutputPath(path); + } + } + void SetDoCoordinateMaps(bool doCoordinateMaps = true) + { + m_do_coordinate_maps = doCoordinateMaps; + } + void SetProcessing(bool onOrOff, float imageToAgdMapsRatio = 6, + bool doStatFilter = true, bool doCurvatureFilter = false, + bool limitPixels = true, int pixelLimit = 5000000) + { + m_process = onOrOff; + + if (m_process) + { + m_processing.SetLimitPixels(limitPixels, pixelLimit); + m_processing.SetDoStatisticalNormalization(doStatFilter, imageToAgdMapsRatio); + m_processing.SetDoCurvatureAnisotropic(doCurvatureFilter); + } + } + void SetSaveAll(bool saveAll) { + m_save_all = saveAll; + } + void SaveOnlyNormalSegmentation(bool saveOnlySeg = true, std::string segName = "labels_res") + { + m_save_only_seg = saveOnlySeg; + m_save_only_seg_name = segName; + } + void SetTimerEnabled(bool timerEnabled) { + m_timer_enabled = timerEnabled; + } + void SetConfigFile(std::string configFilePath) { + if (configFilePath != "") { + m_config_file_path = configFilePath; + } + } + void SetRfConfigFile(std::string rfConfigFilePath) { + if (rfConfigFilePath != "") { + m_rf_config_file_path = rfConfigFilePath; + } + } + void SetThreshold(float threshold) { + m_threshold = threshold; + } + void SetChangeLabelsMap(std::unordered_map< LabelsPixelType, LabelsPixelType > changeLabelsMap) { + m_change_labels_map = changeLabelsMap; + m_changed_labels_map_manually_set = true; + } + void SetImportanceValues(std::vector< double > importanceValues) + { + m_importance_values = importanceValues; + } + void SetPretrainedModelsPaths(std::vector< std::string > modelsPaths) { + m_pretrained_models_paths = modelsPaths; + } + void SetNumberOfThreads(int numberOfThreads) { + if (numberOfThreads > 0) { + m_number_of_threads = numberOfThreads; + } + } + void SetNumberOfThreadsMax(bool maxThreads) { + m_max_threads = maxThreads; + } + void SetGroundTruth(std::string groundTruthPath, std::vector groundTruthSkip = std::vector()) + { + if (groundTruthPath != "") { + std::string extension = getFileExtension(groundTruthPath); + + if (!m_file_extension_set_manually) { + m_file_extension = extension; + } + + m_ground_truth_set = true; + m_ground_truth = readImage(groundTruthPath); + m_ground_truth_skip = groundTruthSkip; + } + } + void SetGroundTruth(LabelsImagePointer groundTruth, std::vector groundTruthSkip = std::vector()) + { + m_ground_truth_set = true; + m_ground_truth = groundTruth; + m_ground_truth_skip = groundTruthSkip; + } + void SetOutputImageFileExtension(std::string fileExtension) { + m_file_extension = fileExtension; + m_file_extension_set_manually = true; + } + void SetInputImageMRI(MODALITY_MRI modality, InputImagePointer imageMRI) { + m_input_images_MRI[modality] = imageMRI; + } + void SetInputImageMRI(MODALITY_MRI modality, std::string imagePathMRI) { + if (imagePathMRI != "") { + std::string extension = getFileExtension(imagePathMRI); + + if (!m_file_extension_set_manually) { + m_file_extension = extension; + } + + m_input_images_MRI[modality] = readImage(imagePathMRI); + } + } + void SetTumorCoreLabelMRI(LabelsPixelType labelTC) { + m_label_TC = labelTC; + } + void SetEnhancedTumorLabelMRI(LabelsPixelType labelET) { + m_label_ET = labelET; + } + void SetEdemaLabelMRI(LabelsPixelType 
labelED) { + m_label_ED = labelED; + } + void SetHealthyTissueLabelMRI(LabelsPixelType labelHT) { + m_label_HT = labelHT; + } + void SetLabelOfInterest(LabelsPixelType labelOfInterest) { + m_label_of_interest = labelOfInterest; + } + void SetVerbose(bool verbose) { + m_verbose = verbose; + } + void SetSubsampling(bool subsample, int maxSamples = 3000) { + m_subsample = subsample; + m_max_samples_svm_subsample = maxSamples; + } + void SetBalancedSubsampling(bool balancedSubsample) { + m_balanced_subsample = balancedSubsample; + } + // void SetInputImageToAgdMapsRatio(float ratio) { + // m_image_to_agd_maps_ratio = ratio; + // } + + private: + LabelsImagePointer m_labels_image; + std::vector< InputImagePointer > m_input_images, m_input_images_non_agd; + InputImagePointer m_reference_image; + GeodesicTrainingSegmentation::MODE m_mode = GeodesicTrainingSegmentation::MODE::REVERSE_GEOTRAIN; + std::vector< double > m_importance_values; // For mode: generateconfig + std::unordered_map< LabelsPixelType, LabelsPixelType > m_change_labels_map; + std::vector< std::string > m_pretrained_models_paths; // For mode: generateconfig + std::mutex m_agd_mutex; + LabelsImagePointer m_ground_truth; + std::vector m_ground_truth_skip = std::vector(); + std::map< MODALITY_MRI, InputImagePointer > m_input_images_MRI; // For MRI + SvmSuiteUtil::Timer m_timer; // For timer + int m_number_of_threads = 16, m_agd_maps_count = 0, + m_max_samples_svm_subsample = DEFAULT_MAX_SAMPLES_SVM_SUBSAMPLE; + float m_threshold = DEFAULT_THRESHOLD, + m_image_to_agd_maps_ratio = DEFAULT_INPUT_IMAGES_TO_AGD_MAPS_RATIO; + bool m_save_all = false, m_timer_enabled = false, m_subsample = true, m_balanced_subsample = DEFAULT_BALANCED_SUBSAMPLE, + m_file_extension_set_manually = false, m_verbose = false, m_were_images_shrunk = false, m_save_only_seg = false, + m_ground_truth_set = false, m_max_threads = false, m_changed_labels_map_manually_set = false, + m_do_coordinate_maps = true, m_process = true; + std::string m_config_file_path = "", m_rf_config_file_path = "", m_save_only_seg_name = "", + m_file_extension = DEFAULT_FILE_EXTENSION, m_output_folder = ""; + LabelsPixelType m_label_TC = DEFAULT_LABEL_TC, m_label_ET = DEFAULT_LABEL_ET, + m_label_ED = DEFAULT_LABEL_ED, m_label_HT = DEFAULT_LABEL_HT, + m_label_of_interest = DEFAULT_LABEL_OF_INTEREST; + GeodesicTrainingSegmentation::Processing m_processing; + + // For SVM + + template + void svm(std::vector < typename itk::Image::Pointer > images, LabelsImagePointer labels, + const std::shared_ptr& gtsResult, std::unordered_map labelsCountMap, bool predictFlags) + { + SvmManagerGTS svmManagerGTS; + + if (m_config_file_path != "") { + svmManagerGTS.AddSvmsFromConfig(m_config_file_path); + } + else { + svmManagerGTS.AddSvmDescriptions(SvmSuite::SvmDescription::GetDefaultSvmDescriptions()); + } + + svmManagerGTS.SetOutputPath(m_output_folder); + svmManagerGTS.SetTimerEnabled(m_timer_enabled); + svmManagerGTS.SetSavingModelsEnabled(m_save_all); + svmManagerGTS.SetVerbose(true); + svmManagerGTS.SetSavingModelsEnabled(m_save_all); + svmManagerGTS.SetNumberOfThreads((m_max_threads) ? 
MAX_THREADS_SVM_SUITE : m_number_of_threads); + //svmManagerGTS.SetInputNormalization(true); // Old way + + std::cout << "Converting input images to matrices..."; + //auto data = ParserGTS::NormalizedParse(images, labels, false); // New way + //ParserGTS::ScaleSomeOfTheColumns(data->trainingMat, + // data->trainingMat.cols - m_agd_maps_count, + // data->trainingMat.cols - 1, + // 1 / m_image_to_agd_maps_ratio + //); + auto data = ParserGTS::Parse(images, labels, false); + std::cout << "finished\n"; + + //cv::Mat sampleIdx; + + if (!m_balanced_subsample) { + svmManagerGTS.SetSubsampling(m_subsample, m_max_samples_svm_subsample); + } + else { + if (m_subsample && data->trainingMat.rows > m_max_samples_svm_subsample) { + // /*sampleIdx = */CreateBalancedSubsample + std::string errorMessageIfApplicable; + if (!CreateBalancedSubsample(data, errorMessageIfApplicable, labelsCountMap, + m_max_samples_svm_subsample)) + { + gtsResult->ok = false; + gtsResult->errorMessage = errorMessageIfApplicable; + return; + } + } + } + + message("", "SVM Operations"); + auto result = svmManagerGTS.TrainAndTestGTS(data, m_input_images, m_labels_image, predictFlags/*, sampleIdx*/); + message("", "SVM Operations", 100); + + svmManagerGTS.GenerateConfigFromBestValues(); + + if (predictFlags) { + // Swap so that label of interest is always the pos + if (result->negLabel == m_label_of_interest) { + PseudoProbImagePointer tempImage = result->negImage; + LabelsPixelType tempLabel = result->negLabel; + result->negImage = result->posImage; + result->negLabel = result->posLabel; + result->posImage = tempImage; + result->posLabel = tempLabel; + } + + writeImage< PseudoProbImageType >(result->posImage, "pseudo_class" + std::to_string(result->posLabel)); + writeImage< PseudoProbImageType >(result->negImage, "pseudo_class" + std::to_string(result->negLabel)); + } + else { + writeImage< LabelsImageType >(result->labelsImage, "labels_res"); + } + + gtsResult->posImage = result->posImage; + gtsResult->negImage = result->negImage; + gtsResult->posLabel = result->posLabel; + gtsResult->negLabel = result->negLabel; + gtsResult->labelsImage = result->labelsImage; + } + + // For AGD + + template + AgdImagePointer agd(typename itk::Image< TPixelType, Dimensions >::Pointer inputImage, + LabelsPixelType agdLabel, bool saveResults = false) + { + std::vector < typename itk::Image< TPixelType, Dimensions >::Pointer > tempInputVector; + tempInputVector.push_back(inputImage); + + return agd(tempInputVector, agdLabel, saveResults)[0]; + } + + template + std::vector< AgdImagePointer > agd(std::vector< typename itk::Image< TPixelType, Dimensions >::Pointer > inputImages, + std::vector< LabelsPixelType> agdLabels, bool saveResults = false) + { + message("AGD...", "AGD Operations"); + + std::vector< AgdImagePointer > agdResults; + + //int counterForFileName = 1; + int counterForThreadsVec = 0; + std::vector threads(inputImages.size() * agdLabels.size()); + int numberOfOpenThreads = 0; + int oldestOpenThread = 0; + + for (size_t i = 0; i < inputImages.size(); i++) + { + for (size_t j = 0; j < agdLabels.size(); j++) + { + if (!m_max_threads && numberOfOpenThreads == m_number_of_threads) { + threads[oldestOpenThread].join(); + oldestOpenThread++; + numberOfOpenThreads--; + } + //std::cout << "Starting new thread for agd.\n"; + numberOfOpenThreads++; + threads[counterForThreadsVec++] = std::thread(&Coordinator::agdThreadJob, this, + std::ref(agdResults), std::to_string(i + 1), inputImages[i], agdLabels[j], saveResults + ); + } + } + + for (size_t i = 
oldestOpenThread; i < inputImages.size() * agdLabels.size(); i++) { + threads[i].join(); + } + + message("AGD...", "AGD Operations", 100); + return agdResults; + } + + template + std::vector< AgdImagePointer > agd(std::vector< typename itk::Image< TPixelType, Dimensions >::Pointer > inputImages, + LabelsPixelType agdLabel, bool saveResults = false) + { + message("AGD...", "AGD Operations"); + + std::vector< AgdImagePointer > agdResults; + + //int counterForFileName = 1; + int counterForThreadsVec = 0; + std::vector threads(inputImages.size()); + int numberOfOpenThreads = 0; + int oldestOpenThread = 0; + + for (size_t i = 0; i < inputImages.size(); i++) + { + if (!m_max_threads && numberOfOpenThreads == m_number_of_threads) { + threads[oldestOpenThread].join(); + oldestOpenThread++; + numberOfOpenThreads--; + } + //std::cout << "Starting new thread for agd.\n"; + numberOfOpenThreads++; + threads[counterForThreadsVec++] = std::thread(&Coordinator::agdThreadJob, this, + std::ref(agdResults), std::to_string(i + 1), inputImages[i], agdLabel, saveResults + ); + } + + for (size_t i = oldestOpenThread; i < inputImages.size(); i++) { + threads[i].join(); + } + + message("AGD...", "AGD Operations", 100); + return agdResults; + } + + template + std::vector< AgdImagePointer > agdMRI(std::map< MODALITY_MRI, typename itk::Image< TPixelType, Dimensions >::Pointer > &inputImagesMRI, + std::vector allLabels, bool saveResults = true, bool asSvmInput = false) + { + message("AGD (MRI)...", "AGD Operations"); + typedef itk::Image AgdImageType; // The algorithm implementation needs this type + typedef typename AgdImageType::Pointer AgdImagePointer; + + std::vector< AgdImagePointer > agdResults; + + std::map< MODALITY_MRI, std::vector > epicenterForEachModality; + + bool atLeastOneEpicenterSet = false; + + if (asSvmInput) { + epicenterForEachModality[FLAIR] = { }; + epicenterForEachModality[T1] = { }; + epicenterForEachModality[T1CE] = { }; + epicenterForEachModality[T2] = { }; + + if ( std::find(allLabels.begin(), allLabels.end(), m_label_TC) != allLabels.end() ) { + epicenterForEachModality[FLAIR].push_back(m_label_TC); + epicenterForEachModality[T1].push_back(m_label_TC); + epicenterForEachModality[T1CE].push_back(m_label_TC); + epicenterForEachModality[T2].push_back(m_label_TC); + atLeastOneEpicenterSet = true; + } + if ( std::find(allLabels.begin(), allLabels.end(), m_label_ET) != allLabels.end() ) { + epicenterForEachModality[T1CE].push_back(m_label_ET); + atLeastOneEpicenterSet = true; + } + if ( std::find(allLabels.begin(), allLabels.end(), m_label_ED) != allLabels.end() ) { + epicenterForEachModality[FLAIR].push_back(m_label_ED); + atLeastOneEpicenterSet = true; + } + if ( std::find(allLabels.begin(), allLabels.end(), m_label_HT) != allLabels.end() ) { + epicenterForEachModality[FLAIR].push_back(m_label_HT); + atLeastOneEpicenterSet = true; + } + + for (LabelsPixelType label : allLabels) { + if (label != m_label_TC && label != m_label_ET && label != m_label_ED && label != m_label_HT) { + epicenterForEachModality[FLAIR].push_back(label); + epicenterForEachModality[T1].push_back(label); + epicenterForEachModality[T1CE].push_back(label); + epicenterForEachModality[T2].push_back(label); + atLeastOneEpicenterSet = true; + } + } + + /*epicenterForEachModality[FLAIR] = { m_label_TC, m_label_ED, m_label_HT }; + epicenterForEachModality[T1] = { m_label_TC }; + epicenterForEachModality[T1CE] = { m_label_TC, m_label_ET }; + epicenterForEachModality[T2] = { m_label_TC };*/ + + //epicenterForEachModality[FLAIR] = { 
1, 2, 3/*, 4*/ }; // class 2 is the edema + //epicenterForEachModality[T1] = { 1/*, 2, 3, 4*/ }; // class 3 is the healthy tissue + //epicenterForEachModality[T1CE] = { 1, /*2, 3,*/ 4 }; // class 4 is the tumor border + //epicenterForEachModality[T2] = { 1/*, 2, 3, 4*/ }; // class 2 is the edema + } + else { + // RF input + if (m_label_TC != 0) { + epicenterForEachModality[FLAIR] = { m_label_TC }; + epicenterForEachModality[T1] = { }; + epicenterForEachModality[T1CE] = { m_label_TC }; + epicenterForEachModality[T2] = { }; + atLeastOneEpicenterSet = true; + } + + for (LabelsPixelType label : allLabels) { + if (label != m_label_TC && label != m_label_ET && label != m_label_ED && label != m_label_HT) { + epicenterForEachModality[FLAIR].push_back(label); + epicenterForEachModality[T1].push_back(label); + epicenterForEachModality[T1CE].push_back(label); + epicenterForEachModality[T2].push_back(label); + atLeastOneEpicenterSet = true; + } + } + + //epicenterForEachModality[FLAIR] = { 1/*, 2, 3, 4*/ }; // class 2 is the edema + //epicenterForEachModality[T1] = { /*1, 2, 3, 4*/ }; // class 3 is the healthy tissue + //epicenterForEachModality[T1CE] = { 1, /*2, 3, 4*/ }; // class 4 is the tumor border + //epicenterForEachModality[T2] = { /*1, 2, 3, 4*/ }; // class 2 is the edema + } + + if (!atLeastOneEpicenterSet) { + // Not really applicable anymore, but is kept in case anything changes in the future + if (asSvmInput) { + message("Can't perform AGD because there are no labels for TC, ET, ED or HT. Will continue without.\n", ""); + } + else { + message("Can't perform AGD because there are no labels for TC. Will continue without.\n", ""); + } + return agdResults; + } + + auto keys = getMapKeyset(inputImagesMRI); + int threadsNumber = 0; + + for (auto key : keys) { + threadsNumber += epicenterForEachModality[key].size(); + } + + //int counterForFileName = 1; + int counterForThreadsVec = 0; + std::vector threads(threadsNumber); + int numberOfOpenThreads = 0; + int oldestOpenThread = 0; + + for (auto key : keys) + { + for (LabelsPixelType epicenter : epicenterForEachModality[key]) + { + if (!m_max_threads && numberOfOpenThreads == m_number_of_threads) { + threads[oldestOpenThread].join(); + oldestOpenThread++; + numberOfOpenThreads--; + } + //std::cout << "Starting new thread for agd MRI.\n"; + numberOfOpenThreads++; + threads[counterForThreadsVec++] = std::thread(&Coordinator::agdThreadJob, this, + std::ref(agdResults), getModalityName(key), inputImagesMRI[key], epicenter, saveResults + ); + } + } + + for (int i = oldestOpenThread; i < threadsNumber; i++) { + threads[i].join(); + } + + message("AGD (MRI)...", "AGD Operations", 100); + return agdResults; + } + + template + void agdThreadJob(std::vector< AgdImagePointer > &outputVec, std::string imageName, + typename itk::Image< TPixelType, Dimensions >::Pointer inputImage, LabelsPixelType agdLabel, bool saveResults = true) + { + if (agdLabel == 0) { + return; + } + + typedef itk::Image AgdOriginalImageType; + typedef itk::Image AgdImageType; // The algorithm implementation needs this type + typedef typename AgdImageType::Pointer AgdImagePointer; + + // Normalize input to [0,255] (pseudoprob maps will be in [0,1], converting them to int would have been bad + auto agdNormInputImage = ItkUtilGTS::normalizeImage(inputImage); + + /*std::cout << "Saving agd norm input image..."; + writeImage(agdNormInputImage, "agd" + std::to_string(counterForFileName) + + "_l" + std::to_string(agdLabels[i]) + "_normoriginput.nii.gz" + ); + std::cout << "finished\n";*/ + + 
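+ // Summary of the steps below in this thread job: the (already normalized)
+ // input is cast to the integer AgdImageType that AdaptiveGeodesicDistance
+ // expects, the geodesic distance map is computed with the voxels drawn as
+ // 'agdLabel' in the labels image acting as the seed region, the raw
+ // distances are rescaled back to [0,255], optionally written to disk, and
+ // finally appended to the shared output vector under m_agd_mutex.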
// Convert input to AgdImageType + //AgdImage3DPointer agdInput; + //if (Dimensions < 3) { + // auto temp = castImage< AgdOriginalImageType, AgdImageType >(agdNormInputImage); + // + // typedef itk::JoinImageFilter JoinImageFilterType; + // + // JoinImageFilterType::Pointer joinFilter = JoinImageFilterType::New(); + // joinFilter->SetInput(agdNormInputImage, agdInput); + // agdInput = joinFilter->GetOutput(); + //} + //else { + AgdImagePointer agdInput = ItkUtilGTS::castAndRescaleImage< AgdOriginalImageType, AgdImageType >(agdNormInputImage); + //} + + /*std::cout << "Saving agd input image..."; + writeImage(agdInput, "agd" + std::to_string(counterForFileName) + + "_l" + std::to_string(agdLabels[i]) + "_input.nii.gz" + ); + std::cout << "finished\n";*/ + + // Get raw result of AGD algorithm (not in [0,255]) + AgdImagePointer agdOutRaw = AdaptiveGeodesicDistance::Run( + agdInput, agdInput, m_labels_image, agdLabel, false, true + ); + + // Normalize results to [0,255] + //AgdImage3DPointer agdOutNorm = normalizeImage(agdOutRaw); + + // Convert output to correct dimensions + //AgdImagePointer agdOut = castImage< AgdImage3DType, AgdImageType >(agdOutNorm); + //AgdImagePointer agdOut = agdOutNorm; + AgdImagePointer agdOut = ItkUtilGTS::normalizeImage(agdOutRaw); + + if (saveResults) { + // Write results to file + //std::cout << "Saving agd image..."; + writeImage(agdOut, "agd_" + imageName + "_class" + std::to_string(agdLabel)); + //std::cout << "finished\n"; + } + + std::lock_guard lg(m_agd_mutex); + outputVec.push_back(agdOut); + } + + // For LABELSTHRES + + template + LabelsImagePointer thresholdSingleImageToLabel(typename UImageType::Pointer image, double threshold, LabelsPixelType labelOfInterest) + { + message("Thresholding...", "Thresholding"); + LabelsImagePointer res = ItkUtilGTS::initializeOutputImageBasedOn(image); + + itk::ImageRegionIterator iter_i(image, image->GetRequestedRegion()); + itk::ImageRegionIterator iter_r(res, res->GetRequestedRegion()); + + for (iter_i.GoToBegin(), iter_r.GoToBegin(); !iter_i.IsAtEnd(); ++iter_i, ++iter_r) + { + if (iter_i.Get() <= threshold) { + iter_r.Set(labelOfInterest); + } + else { + iter_r.Set(0); + } + } + + message("Thresholding...", "Thresholding", 100); + writeImage(res, "labelsthres_class" + std::to_string(labelOfInterest) + "_t" + std::to_string(m_threshold)); + + return res; + } + + // For SEGMENT + + template + typename UImageType::Pointer + segmentSingleImage(typename UImageType::Pointer image, LabelsImagePointer labels, LabelsPixelType labelOfInterest) + { + typename UImageType::Pointer res = ItkUtilGTS::initializeOutputImageBasedOn(image); + + itk::ImageRegionIterator iter_i(image, image->GetRequestedRegion()); + itk::ImageRegionIterator iter_l(labels, labels->GetRequestedRegion()); + itk::ImageRegionIterator iter_r(res, res->GetRequestedRegion()); + + for (iter_i.GoToBegin(), iter_l.GoToBegin(), iter_r.GoToBegin(); !iter_i.IsAtEnd(); ++iter_i, ++iter_l, ++iter_r) + { + if (iter_l.Get() == labelOfInterest) + { + iter_r.Set(iter_i.Get()); + } + else { + iter_r.Set(0); + } + } + writeImage(res, "segm"); + + return res; + } + + // For CHANGE_LABELS + + LabelsImagePointer changeLabels(LabelsImagePointer labels, std::unordered_map< int, int > &changeLabelsMap) + { + // isZeroIncludedInMap is for making it faster (usually 0 is not included) and map::find is time consuming + bool isZeroIncludedInMap = false; + + if (changeLabelsMap.find(0) != changeLabelsMap.end()) { + isZeroIncludedInMap = true; + } + + if (changeLabelsMap.size() != 0) { + 
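+ // The loop below walks every voxel of the labels image and, whenever the
+ // current label appears as a key in changeLabelsMap, replaces it with the
+ // mapped value. Voxels with label 0 are skipped unless 0 itself is a key,
+ // since a map lookup for the (very common) background value is wasted work.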
message("Renaming labels...", "Changing labels"); + + int howManyChanged = 0; + + itk::ImageRegionIteratorWithIndex< LabelsImageType > iter_l(labels, labels->GetRequestedRegion()); + + for (iter_l.GoToBegin(); !iter_l.IsAtEnd(); ++iter_l) + { + LabelsPixelType prev_label = iter_l.Get(); + + if ((prev_label == 0) && (!isZeroIncludedInMap)) { + continue; + } + + if (changeLabelsMap.find(prev_label) != changeLabelsMap.end()) { + iter_l.Set(changeLabelsMap[prev_label]); + howManyChanged++; + } + } + + writeImage(labels, "labels_res_renamed"); + message("Renaming labels...", "Changing labels", 100); + message("\t(Changed " + std::to_string(howManyChanged) + " labels)\n", ""); + } + return labels; + } + + // For RF + + LabelsImagePointer rf(bool trainAutoFlag) + { + message("Converting input images to matrices...", "Converting"); + auto data = ParserGTS::Parse(m_input_images, m_labels_image, false); + message("Converting input images to matrices...", "Converting", 100); + + RFSuite::Manager rfManager; + rfManager.SetTrainDataFromMats(data->trainingMat, data->labelsMat); + rfManager.SetOutputPath(m_output_folder); + rfManager.SetSaveAll(m_save_all); + rfManager.SetVerbose(true); + + if (m_rf_config_file_path != "") { + rfManager.SetParametersFromConfig(m_rf_config_file_path); + } + + message("", "Training"); + if (!trainAutoFlag) { + rfManager.Train(); + } + else { + rfManager.TrainAuto(); + } + message("", "Training", 100); + + auto resMatPtr = rfManager.Test(data->testingMat, data->testingMat); + + message("Converting output matrix to image...", "Converting"); + LabelsImagePointer resImage = ItkUtilGTS::initializeOutputImageBasedOn(m_labels_image); + CvMatToImageGTS::FillImage(resImage, *resMatPtr); + writeImage(resImage, "res_rf"); + message("Converting output matrix to image...", "Converting", 100); + + return resImage; + } + + // FOR CHECK_ACCURACY + + void checkAccuracyInRelationToGroundTruth(LabelsImagePointer labels, const std::shared_ptr& gtsResult) + { + // Convert vector to set for faster lookup + + std::set groundTruthSkip; + groundTruthSkip.insert(0); + + for (LabelsPixelType skip : m_ground_truth_skip) { + if (skip != 0) { + message("Will skip label " + std::to_string(skip) + " at accuracy check.\n", ""); + } + groundTruthSkip.insert(skip); + } + + auto diffImage = ItkUtilGTS::initializeOutputImageBasedOn(labels); + + message("Checking accuracy...", "Checking accuracy"); + + itk::ImageRegionIteratorWithIndex< LabelsImageType > iter_l(labels, labels->GetRequestedRegion()); + itk::ImageRegionIteratorWithIndex< LabelsImageType > iter_d(diffImage, diffImage->GetRequestedRegion()); + itk::ImageRegionIteratorWithIndex< LabelsImageType > iter_g(m_ground_truth, m_ground_truth->GetRequestedRegion()); + + LabelsPixelType lb; + LabelsPixelType gt; + + std::map< LabelsPixelType, int > agreed, p1, t1, falsePositivesCount; + + bool foundValueLabels, foundValueGroundTruth; + + for (iter_l.GoToBegin(), iter_d.GoToBegin(), iter_g.GoToBegin(); !iter_l.IsAtEnd(); ++iter_l, ++iter_d, ++iter_g) + { + lb = iter_l.Get(); + gt = iter_g.Get(); + + foundValueLabels = (groundTruthSkip.find(lb) == groundTruthSkip.end()); + foundValueGroundTruth = (groundTruthSkip.find(gt) == groundTruthSkip.end()); + + // Increment counters if necessary + if (foundValueLabels) { + incrementMapForCheckAccuracy(p1, lb); + } + if (foundValueGroundTruth) { + incrementMapForCheckAccuracy(t1, gt); + } + if (foundValueLabels && foundValueGroundTruth && (lb == gt)) { + incrementMapForCheckAccuracy(agreed, gt); + } + if 
(foundValueLabels && !foundValueGroundTruth) { + incrementMapForCheckAccuracy(falsePositivesCount, lb); + } + /*if (foundValueLabels || foundValueGroundTruth) { + correctSum += (lb == gt); + allSum++; + }*/ + + // For saving the differences + if (m_save_all) { + /*if (foundValueLabels && !foundValueGroundTruth) { + // False positive + iter_l.Set(7); // Nothing particularly special about 7, it's just a high value + } + else*/ if (!foundValueLabels && !foundValueGroundTruth) { + // Irrelevant voxel + iter_d.Set(*groundTruthSkip.begin()); + } + else if (foundValueLabels && foundValueGroundTruth && (lb == gt)) { + // Voxel that the two segmentations agreed upon + iter_d.Set(*groundTruthSkip.begin()); + } + else { + // Voxels where there was an actual difference + iter_d.Set(5+gt); // Sets what it should have been + 5 (to differentiate false positives) + } + } + } + + message("Checking accuracy...", "Checking accuracy", 100); + + // Save the differences + writeImage(diffImage, "labels_diff_to_gt"); + + // Make sure that each label exists everywhere + + for (auto p1k : getMapKeyset(p1)) + { + if (t1.find(p1k) == t1.end()) { + t1[p1k] = 0; + } + if (agreed.find(p1k) == agreed.end()) { + agreed[p1k] = 0; + } + if (falsePositivesCount.find(p1k) == falsePositivesCount.end()) { + falsePositivesCount[p1k] = 0; + } + } + + for (auto t1k : getMapKeyset(t1)) + { + if (p1.find(t1k) == p1.end()) { + p1[t1k] = 0; + } + if (agreed.find(t1k) == agreed.end()) { + agreed[t1k] = 0; + } + if (falsePositivesCount.find(t1k) == falsePositivesCount.end()) { + falsePositivesCount[t1k] = 0; + } + } + + // Get the different labels + auto keys = getMapKeyset(p1); + + // Evaluation metrics for each label + std::map< LabelsPixelType, double > diceScore, sensitivity; + + for (auto key : keys) + { + int valP1 = p1[key]; + int valT1 = t1[key]; + int valAgreed = agreed[key]; + + if (valP1 + valT1 != 0) { + diceScore[key] = (static_cast(valAgreed) * 2 * 100) / (valP1 + valT1); + } + else { + std::cerr << "Error: checkAccuracyInRelationToGroundTruth: DSC DIV0\n"; + } + + if (valT1 != 0) { + sensitivity[key] = static_cast(valAgreed) * 100 / (valT1); + } + else { + std::cerr << "Error: checkAccuracyInRelationToGroundTruth: Sens DIV0\n"; + } + + + } + + // Raw results + + message("\n", ""); + + for (auto key : keys) { + if (m_verbose) { + std::cout << "|P1| [LABEL " << key << "] (Output labels size): " << p1[key] << "\n"; + std::cout << "|T1| [LABEL " << key << "] (Ground truth size): " << t1[key] << "\n"; + std::cout << "|P1| /\\ |T1| [LABEL " << key << "] (Agreed upon): " << agreed[key] << "\n\n"; + //std::cout << "Number of false positives [LABEL " << key << "]: " << falsePositivesCount[key] << "\n\n"; + } + + if (m_save_all) { + std::ofstream accuracyFile; + accuracyFile.open(m_output_folder + "/accuracy_report.txt", std::ios_base::app); //append file + + accuracyFile << "|P1| [LABEL " << key << "] (Output labels size): " << p1[key] << "\n"; + accuracyFile << "|T1| [LABEL " << key << "] (Ground truth size): " << t1[key] << "\n"; + accuracyFile << "|P1| /\\ |T1| [LABEL " << key << "] (Agreed upon): " << agreed[key] << "\n"; + accuracyFile << "Number of false positives [LABEL " << key << "]: " << falsePositivesCount[key] << "\n\n"; + + std::ofstream accuracyFileCompact; + accuracyFileCompact.open(m_output_folder + "/accuracy_report_compact.txt", std::ios_base::app); //append file + + accuracyFileCompact << key << " " << p1[key] << "\n"; + accuracyFileCompact << key << " " << t1[key] << "\n"; + accuracyFileCompact << key << " " << 
agreed[key] << "\n"; + accuracyFileCompact << key << " " << falsePositivesCount[key] << "\n"; + } + } + + // Evaluation metrics for all the labels combined + + double diceScoreAll = 0, sensitivityAll = 0; + int p1All = 0, t1All = 0, agreedAll = 0, falsePositivesAll = 0; + + for (auto key : keys) { + p1All += p1[key]; + t1All += t1[key]; + agreedAll += agreed[key]; + falsePositivesAll += falsePositivesCount[key]; + } + + diceScoreAll = (static_cast(agreedAll) * 2 * 100) / (p1All + t1All); + sensitivityAll = static_cast(agreedAll) * 100 / (t1All); + + // Raw results for all + + if (m_verbose) { + std::cout << "|P1| [ALL] (Output labels size): " << p1All << "\n"; + std::cout << "|T1| [ALL] (Ground truth size): " << t1All << "\n"; + std::cout << "|P1| /\\ |T1| [ALL] (Agreed upon): " << agreedAll << "\n"; + std::cout << "Number of false positives [ALL]: " << falsePositivesAll << "\n\n"; + } + + if (m_save_all) { + std::ofstream accuracyFile; + accuracyFile.open(m_output_folder + "/accuracy_report.txt", std::ios_base::app); //append file + + accuracyFile << "|P1| [ALL] (Output labels size): " << p1All << "\n"; + accuracyFile << "|T1| [ALL] (Ground truth size): " << t1All << "\n"; + accuracyFile << "|P1| /\\ |T1| [ALL] (Agreed upon): " << agreedAll << "\n"; + accuracyFile << "Number of false positives [ALL]: " << falsePositivesAll << "\n\n"; + + std::ofstream accuracyFileCompact; + accuracyFileCompact.open(m_output_folder + "/accuracy_report_compact.txt", std::ios_base::app); //append file + + accuracyFileCompact << 0 << " " << p1All << "\n"; + accuracyFileCompact << 0 << " " << t1All << "\n"; + accuracyFileCompact << 0 << " " << agreedAll << "\n"; + accuracyFileCompact << 0 << " " << falsePositivesAll << "\n"; + } + + // DSC For each label + + for (auto key : keys) { + if (m_verbose) { + std::cout << "DSC [" << key << "] (%): " << diceScore[key] << "\n"; + } + + if (m_save_all) { + std::ofstream accuracyFile; + accuracyFile.open(m_output_folder + "/accuracy_report.txt", std::ios_base::app); //append file + + accuracyFile << "DSC [" << key << "] (%): " << diceScore[key] << "\n"; + + std::ofstream accuracyFileCompact; + accuracyFileCompact.open(m_output_folder + "/accuracy_report_compact.txt", std::ios_base::app); //append file + + accuracyFileCompact << key << " " << diceScore[key] << "\n"; + } + } + + // DSC For all + + if (m_verbose) { + std::cout << "DSC [ALL] (%): " << diceScoreAll << "\n"; + } + + if (m_save_all) { + std::ofstream accuracyFile; + accuracyFile.open(m_output_folder + "/accuracy_report.txt", std::ios_base::app); //append file + + accuracyFile << "DSC [ALL] (%): " << diceScoreAll << "\n"; + + std::ofstream accuracyFileCompact; + accuracyFileCompact.open(m_output_folder + "/accuracy_report_compact.txt", std::ios_base::app); //append file + + accuracyFileCompact << 0 << " " << diceScoreAll << "\n"; + } + + + // Sensitivity For each label + + if (m_verbose) { + std::cout << "\n"; + } + + for (auto key : keys) { + if (m_verbose) { + std::cout << "Sens [" << key << "] (%): " << sensitivity[key] << "\n"; + } + + if (m_save_all) { + std::ofstream accuracyFile; + accuracyFile.open(m_output_folder + "/accuracy_report.txt", std::ios_base::app); //append file + + accuracyFile << "Sens [" << key << "] (%): " << sensitivity[key] << "\n"; + + std::ofstream accuracyFileCompact; + accuracyFileCompact.open(m_output_folder + "/accuracy_report_compact.txt", std::ios_base::app); //append file + + accuracyFileCompact << key << " " << sensitivity[key] << "\n"; + } + } + + // Sensitivity For all + + 
if (m_verbose) { + std::cout << "Sens [ALL] (%): " << sensitivityAll << "\n"; + } + + if (m_save_all) { + std::ofstream accuracyFile; + accuracyFile.open(m_output_folder + "/accuracy_report.txt", std::ios_base::app); //append file + + accuracyFile << "Sens [ALL] (%): " << sensitivityAll << "\n"; + + std::ofstream accuracyFileCompact; + accuracyFileCompact.open(m_output_folder + "/accuracy_report_compact.txt", std::ios_base::app); //append file + + accuracyFileCompact << 0 << " " << sensitivityAll << "\n"; + } + + // Update gtsResult + + gtsResult->diceScore = diceScore; + gtsResult->sensitivity = sensitivity; + gtsResult->falsePositivesCount = falsePositivesCount; + gtsResult->diceScoreAll = diceScoreAll; + gtsResult->sensitivityAll = sensitivityAll; + gtsResult->falsePositivesCountAll = falsePositivesAll; + + message("", "Checking accuracy", 100); + } + + void incrementMapForCheckAccuracy(std::map< LabelsPixelType, int > &map, LabelsPixelType key) + { + if (map.find(key) == map.end()) { + map[key] = 1; + } + else { + map[key] += 1; + } + } + + // Util + + void addCoordinatorMapsIfNecessary() + { + if (m_do_coordinate_maps) + { + for (const InputImagePointer& image : GenerateCoordinateMaps(m_input_images[0])) + { + m_input_images.push_back(image); + } + } + } + + /** Coordinate maps means that for each dimension, a 3D map is generated + * where the pixel values are distance to the leftmost pixels of that dimension. */ + template + static std::vector + GenerateCoordinateMaps(typename ImageType::Pointer reference) + { + typedef itk::ImageRegionIteratorWithIndex Iterator; + + std::vector outputVector; + std::vector outputIterators; + + for (size_t i=0; i(reference)); + outputIterators.push_back( + Iterator(outputVector[i], outputVector[i]->GetLargestPossibleRegion()) + ); + // outputIterators[i].GoToBegin(); + } + + Iterator iter_i(reference, reference->GetLargestPossibleRegion()); + + // These needs to be initialized beforehand for optimization + itk::Index index; + unsigned int i; + + for (iter_i.GoToBegin(); !iter_i.IsAtEnd(); + ++iter_i) + { + if (iter_i.Get() == 0) { continue; } // Save some time + index = iter_i.GetIndex(); + + for (i=0; iGetLargestPossibleRegion(); + typename InputImageType::SizeType size = region.GetSize(); + typename InputImageType::SpacingType spacing = m_reference_image->GetSpacing(); + + long totalSize = 1; + + itk::Index< Dimensions > centralPixel; + std::string imageSizeString = "Image dimensions: "; + for (size_t i = 0; i < Dimensions; i++) { + totalSize *= size[i]; + imageSizeString += std::to_string(size[i]) + " "; + centralPixel[i] = size[i] / 2; + } + + imageSizeString += "(Total: " + std::to_string(totalSize) + ")\n"; + message(imageSizeString); + + if (totalSize <= DEFAULT_MAX_IMAGE_SIZE_BEFORE_RESAMPLING) { return; } + m_were_images_shrunk = true; + + itk::Point< double, Dimensions > centralPoint; + for (size_t i = 0; i < Dimensions; i++) { + centralPoint[i] = centralPixel[i]; + } + + using ScaleTransformType = itk::ScaleTransform< double, Dimensions >; + typename ScaleTransformType::Pointer scaleTransform = ScaleTransformType::New(); + + typename ScaleTransformType::ParametersType parameters = scaleTransform->GetParameters(); + float scale = 0.5; + for (size_t i = 0; i < Dimensions; i++) { + parameters[i] = scale; //??? 
+ } + + scaleTransform->SetParameters(parameters); + scaleTransform->SetCenter(centralPoint); + + // For images + + using LinearInterpolatorType = itk::LinearInterpolateImageFunction< InputImageType, double >; + typename LinearInterpolatorType::Pointer interpolator = LinearInterpolatorType::New(); + + using ResampleFilterType = itk::ResampleImageFilter< InputImageType, InputImageType >; + typename ResampleFilterType::Pointer resampleFilter = ResampleFilterType::New(); + + resampleFilter->SetTransform(scaleTransform); + resampleFilter->SetInterpolator(interpolator); + resampleFilter->SetSize(size); + resampleFilter->SetOutputSpacing(spacing); + + for (size_t i = 0; i < m_input_images.size(); i++) + { + resampleFilter->SetInput(m_input_images[i]); + m_input_images[i] = resampleFilter->GetOutput(); + } + + for (auto key : mriImagesKeys) + { + resampleFilter->SetInput(m_input_images_MRI[key]); + m_input_images_MRI[key] = resampleFilter->GetOutput(); + } + + // For labels + + using LinearInterpolatorLabelsType = itk::LinearInterpolateImageFunction< LabelsImageType, double >; + typename LinearInterpolatorLabelsType::Pointer interpolatorLabels = LinearInterpolatorLabelsType::New(); + + using ResampleFilterLabelsType = itk::ResampleImageFilter< LabelsImageType, LabelsImageType >; + typename ResampleFilterLabelsType::Pointer resampleFilterLabels = ResampleFilterLabelsType::New(); + + resampleFilterLabels->SetTransform(scaleTransform); + resampleFilterLabels->SetInterpolator(interpolatorLabels); + resampleFilterLabels->SetSize(size); + resampleFilterLabels->SetOutputSpacing(spacing); + + resampleFilterLabels->SetInput(m_labels_image); + resampleFilterLabels->Update(); + m_labels_image = resampleFilterLabels->GetOutput(); + + // Remove these + if (m_input_images.size() != 0) { writeImage(m_input_images[0], "downsampled_example"); } + else { writeImage(m_input_images_MRI[mriImagesKeys[0]], "downsampled_example"); } + } + + template + typename TImageType::Pointer unshrinkImageIfNecessary(typename TImageType::Pointer image, typename TImageType::Pointer referenceImage) + { + + } + + void addToInputImagesMRI() + { + auto keys = getMapKeyset(m_input_images_MRI); + + for (auto key : keys) { + m_input_images.push_back(m_input_images_MRI[key]); + } + } + + void addToInputImagesNonAgd() + { + for (auto const& image : m_input_images_non_agd) { + m_input_images.push_back(image); + } + } + + bool validState(std::shared_ptr gtsResult) { + switch (m_mode) { + case SVM_PSEUDO: // Same as GEOTRAIN + case SVM_LABELS: // Same as GEOTRAIN + case REVERSE_GEOTRAIN: // Same as GEOTRAIN + case REVERSE_GEOTRAIN_SPORADIC: // Same as GEOTRAIN + case GEOTRAIN_FULL: + case GEOTRAIN: + { + if (m_input_images.size() + m_input_images_MRI.size() == 0 || m_labels_image == nullptr) { + std::string errorMessage = "Need to supply input images and labels image."; + errorOccured(errorMessage); + gtsResult->errorMessage = errorMessage; + gtsResult->ok = false; + return false; + } + } break; + case RF: // Same as AGD_RF + case AGD_RF: + { + // TODO: fill this + } break; + case RF_AUTO: + case AGD_RF_AUTO: + { + // TODO: fill this + } break; + case AGD: + { + if (m_input_images.size() + m_input_images_MRI.size() == 0 || m_labels_image == nullptr) { + std::string errorMessage = "Need to supply input images and labels image and a list containing the label of interest of each image for mode agd"; + errorOccured(errorMessage); + gtsResult->errorMessage = errorMessage; + gtsResult->ok = false; + return false; + } + /*if (m_input_images.size() != 
m_labels_list.size()) { + errorOccured("Number of input images must be equal to the number of agdlabels for mode agd"); + return false; + }*/ + } break; + case LABELS_THRESHOLD: + { + if (m_input_images.size() == 0) { + std::string errorMessage = "Need to supply input images and list of labels for values<=threshold for mode labelsthres"; + errorOccured(errorMessage); + gtsResult->errorMessage = errorMessage; + gtsResult->ok = false; + return false; + } + } break; + case SEGMENT: + { + if ((m_input_images.size() != 1) || (m_labels_image == nullptr)) + { + std::string errorMessage = "Need to supply one input image, one labels image and one label of interest for mode segment"; + errorOccured(errorMessage); + gtsResult->errorMessage = errorMessage; + gtsResult->ok = false; + return false; + } + } break; + case CHANGE_LABELS: + { + if (m_labels_image == nullptr) { + std::string errorMessage = "Need to supply labels image for mode changelabels"; + errorOccured(errorMessage); + gtsResult->errorMessage = errorMessage; + gtsResult->ok = false; + return false; + } + } break; + case GENERATE_CONFIG: + { + if ((m_pretrained_models_paths.size() == 0) || (m_importance_values.size() == 0)) { + std::string errorMessage = "Need to supply lists of input models and improtance values for each model for mode generateconfig."; + errorOccured(errorMessage); + gtsResult->errorMessage = errorMessage; + gtsResult->ok = false; + return false; + } + if (m_pretrained_models_paths.size() != m_importance_values.size()) { + std::string errorMessage = "# of input models and # of importance values must be equal for mode generateconfig."; + errorOccured(errorMessage); + gtsResult->errorMessage = errorMessage; + gtsResult->ok = false; + return false; + } + } break; + case CHECK_ACCURACY: + { + if ((m_labels_image == nullptr) || (m_ground_truth == nullptr)) { + std::string errorMessage = "Need to supply labels image and ground truth image for mode accuracy"; + errorOccured(errorMessage); + gtsResult->errorMessage = errorMessage; + gtsResult->ok = false; + return false; + } + } break; + } + + return true; + } + + /** Checks if the labels input image is valid. Also changes m_label_of_interest/m_label_TC/etc.. 
if necessary */ + bool validLabels(std::shared_ptr gtsResult, std::unordered_map labelsCountMap, int limitedNumberOfClasses = 0) + { + message("Labels detected: "); + auto keys = getMapKeyset(labelsCountMap); + std::sort(keys.begin(), keys.end()); + + for (LabelsPixelType key : keys) { + message(std::to_string(key) + " "); + } + message("\n"); + + if (limitedNumberOfClasses == 1) { + if (labelsCountMap.size() == 0) { + std::string errorMessage = "No labels were found in the image"; + errorOccured(errorMessage); + gtsResult->errorMessage = errorMessage; + gtsResult->ok = false; + return false; + } + + if (labelsCountMap.find(m_label_of_interest) == labelsCountMap.end()) { + message("Label of interest (" + std::to_string(m_label_of_interest) + ") was not found.\n", ""); + auto keys = getMapKeyset(labelsCountMap); + m_label_of_interest = labelsCountMap[keys[0]]; + message("\tUsing " + std::to_string(m_label_of_interest) + " instead.\n" + + "\tPlease provide a different one if you don't want this one to be used\n", ""); + } + } + else if (limitedNumberOfClasses == 2) { + if (labelsCountMap.size() != 2) { + std::string errorMessage = "There should only be two (non-zero) labels for 2-class modes"; + errorOccured(errorMessage); + gtsResult->errorMessage = errorMessage; + gtsResult->ok = false; + return false; + } + + if (labelsCountMap.find(m_label_of_interest) == labelsCountMap.end()) { + message("Label of interest (" + std::to_string(m_label_of_interest) + ") was not found.\n", ""); + auto keys = getMapKeyset(labelsCountMap); + m_label_of_interest = labelsCountMap[keys[0]]; + message("\tUsing " + std::to_string(m_label_of_interest) + " instead.\n" + + "\tPlease provide a different one if you don't want this one to be used\n", ""); + } + + if (m_input_images_MRI.size() != 0) { + // For MRI. For each region set the label to zero if they were not included in the sample + if (labelsCountMap.find(m_label_HT) == labelsCountMap.end()) { + m_label_HT = 0; + } + if (labelsCountMap.find(m_label_TC) == labelsCountMap.end()) { + m_label_TC = 0; + } + if (labelsCountMap.find(m_label_ET) == labelsCountMap.end()) { + m_label_ET = 0; + } + if (labelsCountMap.find(m_label_ED) == labelsCountMap.end()) { + m_label_ED = 0; + } + + if (m_label_TC == 0 && m_label_ET == 0 && m_label_ED == 0 && m_label_HT == 0) { + message("Note: No labels were found for TC/ET/ED/HT even though images were supplied through the special modalities input.\n"); + message(" Will proceed with performing AGD on all labels (if applicable)\n"); + addToInputImagesMRI(); + m_input_images_MRI.clear(); + } + } + } + else { + // n-classes + if (labelsCountMap.size() < 2) { + std::string errorMessage = + std::string("There should be at least two (non-zero) labels for multiclass modes. ") + + std::string("Please remember to draw a label for the background tissue too."); + errorOccured(errorMessage); + gtsResult->errorMessage = errorMessage; + gtsResult->ok = false; + return false; + } + + if (m_input_images_MRI.size() != 0) { + // For MRI. 
For each region set the label to zero if they were not included in the sample + if (labelsCountMap.find(m_label_HT) == labelsCountMap.end()) { + m_label_HT = 0; + } + if (labelsCountMap.find(m_label_TC) == labelsCountMap.end()) { + m_label_TC = 0; + } + if (labelsCountMap.find(m_label_ET) == labelsCountMap.end()) { + m_label_ET = 0; + } + if (labelsCountMap.find(m_label_ED) == labelsCountMap.end()) { + m_label_ED = 0; + } + + if (m_label_TC == 0 && m_label_ET == 0 && m_label_ED == 0 && m_label_HT == 0) { + message("Note: No labels were found for TC/ET/ED/HT even though images were supplied through the special modalities input.\n"); + message(" Will proceed with performing AGD on all labels (if applicable)\n"); + addToInputImagesMRI(); + m_input_images_MRI.clear(); + } + } + } + + return true; + } + + void errorOccured(std::string msg) { + std::cerr << "GeodesicTrainingSegmentation error: " << msg << std::endl; + } + + std::string getFileExtension(std::string fName) { + std::string extension = UtilGTS::getFileExtension(fName); + std::transform(extension.begin(), extension.end(), extension.begin(), ::tolower); + + return extension; + } + + template< typename U, typename V > + std::vector< U > getMapKeyset(std::map< U, V > map) + { + // Find a list of the different keys + std::vector< U > res; + + res.reserve(map.size()); + for (auto const& imap : map) { + res.push_back(imap.first); + } + + return res; + } + + template< typename U, typename V > + std::vector< U > getMapKeyset(std::unordered_map< U, V > map) + { + // Find a list of the different keys + std::vector< U > res; + + res.reserve(map.size()); + for (auto const& imap : map) { + res.push_back(imap.first); + } + + return res; + } + + std::string getModalityName(MODALITY_MRI modality) { + switch (modality) { + case FLAIR: + return "flair"; + case T1: + return "t1"; + case T1CE: + return "t1ce"; + case T2: + return "t2"; + } + return ""; + } + + // I/O + + template + typename TImageType::Pointer readImage(std::string filename) + { + using ReaderType = itk::ImageFileReader; + typename ReaderType::Pointer reader = ReaderType::New(); + reader->SetFileName(filename); + reader->Update(); + return reader->GetOutput(); + } + + template + void writeImage(typename TImageType::Pointer image, std::string filename) + { + bool isOutputSegmentation = (filename == "labels_res" || filename == "labels_res_renamed"); + + if (m_save_only_seg) + { + filename = m_save_only_seg_name; + } + + if (m_save_all || (m_save_only_seg && isOutputSegmentation)) + { + std::string fileFullPath = m_output_folder + "/" + filename + m_file_extension; + + typedef typename itk::ImageFileWriter WriterType; + typename WriterType::Pointer writer = WriterType::New(); + writer->SetInput(image); + writer->SetFileName(fileFullPath); + writer->Update(); + } + } + + /** This function is meant to be overriden if someone who uses the library wishes to */ + virtual void progressUpdate(std::string message, int progress) { + // Bypass unused variables (for compiling with warnings counting as errors) + message = message; + progress = progress; + } + + void message(std::string message, std::string shortMessage = "", int progress = -1) + { + if (m_verbose) { + if (message != "") { + std::string printMessage = (progress != -1) ? 
"\r" : ""; + printMessage += message; + + if (progress == 100) { + printMessage += "finished\n"; + } + else if (progress != -1) { + printMessage += " [" + std::to_string(progress) + "%]"; + } + + std::cout << printMessage; + } + } + + // Update progress + if (shortMessage != "") { + if (progress == -1) { + progressUpdate("GTS: " + shortMessage, 0); + } + else { + progressUpdate("GTS: " + shortMessage, progress); + } + } + } + + // For timer + + void startTimer() { + if (m_timer_enabled) { + m_timer.Reset(); + } + } + + void stopTimerAndReport(std::string desc) { + if (m_timer_enabled) { + float diff = m_timer.Diff(); + + std::ofstream timerFile; + timerFile.open(m_output_folder + "/time_report.txt", std::ios_base::app); //append file + timerFile << desc << ": " << diff << "s\n"; + } + } + + }; + +} + +#endif diff --git a/Modules/CaPTkInteractiveSegmentation/include/OperationsSvmGTS.h b/Modules/CaPTkInteractiveSegmentation/include/OperationsSvmGTS.h new file mode 100644 index 0000000..8c9adb1 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/OperationsSvmGTS.h @@ -0,0 +1,95 @@ +#ifndef H_CBICA_SVM_GTS +#define H_CBICA_SVM_GTS + +#include "itkImage.h" +#include "itkImageFileWriter.h" +#include "itkImageFileReader.h" +#include "itkExceptionObject.h" +#include "itkCastImageFilter.h" +#include "itkRescaleIntensityImageFilter.h" +#include "itkInvertIntensityImageFilter.h" + +#include +#include +#include + +#include "UtilItkGTS.h" +#include "UtilImageToCvMatGTS.h" +#include "UtilCvMatToImageGTS.h" +#include "SvmSuite.h" + +#include +#include +#include + +namespace GeodesicTrainingSegmentation +{ + template + class SvmManagerGTS : public SvmSuite::Manager + { + public: + typedef int LabelsPixelType; + typedef float PseudoProbPixelType; + + typedef itk::Image< TPixelType, TDimensions > InputImageType; + typedef itk::Image< LabelsPixelType, TDimensions > LabelsImageType; + typedef itk::Image< PseudoProbPixelType, TDimensions > PseudoProbImageType; + + typedef typename InputImageType::Pointer InputImagePointer; + typedef typename LabelsImageType::Pointer LabelsImagePointer; + typedef typename PseudoProbImageType::Pointer PseudoProbImagePointer; + + typedef struct ResultSvmGTS { + PseudoProbImagePointer posImage; + PseudoProbImagePointer negImage; + LabelsPixelType posLabel = 0; + LabelsPixelType negLabel = 0; + LabelsImagePointer labelsImage; + } ResultSvmGTS; + + std::shared_ptr TrainAndTestGTS(std::shared_ptr data, std::vector images, + LabelsImagePointer labels, bool predictFlags/*, cv::Mat sampleIdx = cv::Mat()*/) + { + images = images; // To bypass unused parameter warning for now. 
+ + this->SetTrainData(data->trainingMat, data->labelsMat, data->weightsMat/*, sampleIdx*/); + + this->Train(); + auto result = this->Test(data->testingMat, data->skipZerosMat, predictFlags, true); + + std::shared_ptr resultGTS(new ResultSvmGTS()); + + if (predictFlags) { + // Pseudoprob + resultGTS->posImage = ItkUtilGTS::initializeOutputImageBasedOn(labels); + resultGTS->negImage = ItkUtilGTS::initializeOutputImageBasedOn(labels); + + std::cout << "Converting output matrices to images..."; + CvMatToImageGTS::FillImage(resultGTS->posImage, result->posMat); + CvMatToImageGTS::FillImage(resultGTS->negImage, result->negMat); + std::cout << "finished\n"; + + resultGTS->posLabel = result->posLabel; + resultGTS->negLabel = result->negLabel; + } + else { + // Labels + resultGTS->labelsImage = ItkUtilGTS::initializeOutputImageBasedOn(labels); + + std::cout << "Converting output matrix to image..."; + CvMatToImageGTS::FillImage(resultGTS->labelsImage, result->labelsMat); + std::cout << "finished\n"; + } + + return resultGTS; + } + }; + + /** Balanced Subsampling */ + bool /*cv::Mat*/ CreateBalancedSubsample(std::shared_ptr& data, + std::string& errorMessageIfApplicable, + std::unordered_map labelsCountMap, int maxSamples); + +} + +#endif // !H_CBICA_SVM_GTS diff --git a/Modules/CaPTkInteractiveSegmentation/include/Processing.h b/Modules/CaPTkInteractiveSegmentation/include/Processing.h new file mode 100644 index 0000000..b8882e4 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/Processing.h @@ -0,0 +1,557 @@ +#ifndef H_CBICA_PROCESSING +#define H_CBICA_PROCESSING + +#include +#include + +#include +#include +#include + +#include "SusanDenoising.h" +#include "UtilItkGTS.h" +#include "SvmSuite.h" + +namespace GeodesicTrainingSegmentation +{ + template + void writeImage(typename TImageType::Pointer image, std::string filename) + { + typedef typename itk::ImageFileWriter WriterType; + typename WriterType::Pointer writer = WriterType::New(); + writer->SetInput(image); + writer->SetFileName(filename); + writer->Update(); + } + + /** This class handles preprocessing and postprocessing operations */ + template + class Processing + { + public: + typedef int AgdPixelType; + typedef int LabelsPixelType; + typedef float PseudoProbPixelType; + + typedef itk::Image< AgdPixelType, Dimensions > AgdImageType; + typedef itk::Image< PixelType, Dimensions > InputImageType; + typedef itk::Image< LabelsPixelType, Dimensions > LabelsImageType; + typedef itk::Image< PseudoProbPixelType, Dimensions > PseudoProbImageType; + + typedef typename AgdImageType::Pointer AgdImagePointer; + typedef typename InputImageType::Pointer InputImagePointer; + typedef typename LabelsImageType::Pointer LabelsImagePointer; + typedef typename PseudoProbImageType::Pointer PseudoProbImagePointer; + + void SetLimitPixels(bool limitPixels, int pixelLimit = 10000000) + { + m_limit_pixels = limitPixels; + m_pixel_limit = pixelLimit; + } + + void SetDoStatisticalNormalization(bool doStatisticalNormalization, float imageToAgdMapsRatio = 6) + { + m_do_statistical_normalization = doStatisticalNormalization; + m_image_to_agd_maps_ratio = imageToAgdMapsRatio; + } + + void SetDoCurvatureAnisotropic(bool doCurvatureAnisotropic) + { + m_do_curvature_anisotropic = doCurvatureAnisotropic; + } + + void SetDoSusanDenoising(bool doSusanDenoising) + { + m_do_susan_denoising = doSusanDenoising; + } + + void SetVerbose(bool verbose) + { + m_verbose = verbose; + } + + void SetSaveAll(bool saveAll) + { + m_save_all = saveAll; + } + + void 
SetOutputFolder(std::string outputFolder) + { + m_output_folder = outputFolder; + } + + void SetTimerEnabled(bool timerEnabled) + { + m_timer_enabled = timerEnabled; + } + + void SetNumberOfThreads(int numberOfThreads) + { + m_number_of_threads = numberOfThreads; + } + + void PreProcess(std::vector& inputImages, LabelsImagePointer& labels) + { + if (m_verbose) { std::cout << "Preprocessing:\n"; } + + m_original_input_image_size = inputImages[0]->GetLargestPossibleRegion().GetSize(); + m_original_labels_image_size = labels->GetLargestPossibleRegion().GetSize(); + m_original_input_image_spacing = inputImages[0]->GetSpacing(); + m_original_labels_image_spacing = labels->GetSpacing(); + m_original_input_image_origin = inputImages[0]->GetOrigin(); + m_original_labels_image_origin = labels->GetOrigin(); + + /* ------ Find the pixel count of the original image ------ */ + + unsigned long originalPixelCount = 1; + for (unsigned int i=0; i < InputImageType::ImageDimension; i++) { + originalPixelCount *= m_original_input_image_size[i]; + } + + /* ------ Print input information ------ */ + + if (m_verbose) + { + std::cout << "\tImage size: " << m_original_input_image_size << "\n"; + std::cout << "\tLabels size: " << m_original_labels_image_size << "\n"; + std::cout << "\tImage spacing: " << m_original_input_image_spacing << "\n"; + std::cout << "\tLabels spacing: " << m_original_labels_image_spacing << "\n"; + std::cout << "\tPixel count: " << originalPixelCount << "\n"; + std::cout << "\tPixel limit: " << ((m_limit_pixels)? + std::to_string(m_pixel_limit) : + "none") << "\n"; + std::cout << "\t---\n"; + } + + /* ------ Find out if spacing should be normalized ------ */ + + // Find minimum, maximum and average spacing + auto minSpacing = m_original_input_image_spacing[0]; + auto maxSpacing = m_original_input_image_spacing[0]; + double avgSpacing = m_original_input_image_spacing[0]; + for(unsigned int i=1; i maxSpacing) { + maxSpacing = m_original_input_image_spacing[i]; + } + if (m_original_input_image_spacing[i] < minSpacing) { + minSpacing = m_original_input_image_spacing[i]; + } + avgSpacing += m_original_input_image_spacing[i]; + } + avgSpacing /= InputImageType::ImageDimension; + + // This variable holds the normal image size if spacing will not be changed, + // or holds the value of the new size that will result from (theoretically) changing the spacing. + typename InputImageType::SizeType tSize = m_original_input_image_size; + + // Find if spacing should be normalized (i.e. 
there is a big difference across dimensions) + bool shouldNormalizeSpacing = false; + for(unsigned int i=0; i 0.1 || dimSpacingDiff < -0.1) + { + shouldNormalizeSpacing = true; + + // Find how the size would look if we (theoretically) just normalized the spacing + for (unsigned int i=0; i < InputImageType::ImageDimension; i++) { + tSize[i] = m_original_input_image_size[i] * ( + static_cast(m_original_input_image_spacing[i]) / minSpacing + ); + } + break; + } + } + // Check for weird case where a 2D image is actually 3D (with one dimension with size 1) + if (InputImageType::ImageDimension == 3) + { + bool sneaky2D = false; + unsigned int occursInDim; + for(int i=0; i<3; i++) + { + if (m_original_input_image_size[i] == 1) + { + sneaky2D = true; + occursInDim = i; + break; + } + } + if (sneaky2D) + { + float spacing1 = -1, spacing2 = -1; + for (unsigned int i=0; i<3; i++) + { + if (i == occursInDim) { continue; } + if (spacing1 == -1) { spacing1 = m_original_input_image_spacing[i]; } + else { spacing2 = m_original_input_image_spacing[i]; } + } + float dimSpacingDiff = spacing1 - spacing2; + if (dimSpacingDiff > 0.1 || dimSpacingDiff < -0.1) + { + shouldNormalizeSpacing = true; // Keep the previous tSize for convinience + } + else { + shouldNormalizeSpacing = false; + } + } + } + + // Find the (theoretical) size that will result from potentially normalizing spacing + unsigned long tPixelCount = 1; + for(unsigned int i=0; i (unsigned int)m_pixel_limit) + { + shouldNormalizeSize = true; + } + + /* ------ Find target size and spacing ------ */ + + typename InputImageType::SizeType targetSize; + typename InputImageType::SpacingType targetSpacing = tSpacing; + + // Calculate target size if there is (theoretically) no size normalization + for(unsigned int i=0; i( + image, targetSize, targetSpacing + ); + } + stopTimerAndReport("Resampling images"); + + startTimer(); + labels = ItkUtilGTS::resampleLabelsImage( + labels, targetSizeLabels, targetSpacingLabels, m_verbose + ); + stopTimerAndReport("Resampling labels"); + } + + /* ------ Print image information if anything changed ------ */ + + if ((shouldNormalizeSize || shouldNormalizeSpacing) && m_verbose) + { + std::cout << "\t After normalizing spacing and size:\n"; + std::cout << "\t\t Size: " << labels->GetLargestPossibleRegion().GetSize() << "\n"; + std::cout << "\t\t Spacing: " << labels->GetSpacing() << "\n"; + } + + /* Save the intermediate images if saveall is set */ + + if (m_save_all) + { + int y = 0; + for (auto& image : inputImages) + { + writeImage( image, m_output_folder + "/" + + std::string("preprocessed_image_just_size_and_spacing") + std::to_string(++y) + ".nii.gz" + ); + } + writeImage( labels, + m_output_folder + std::string("/") + "preprocessed_labels_just_size_and_spacing.nii.gz" + ); + } + + /* ------ Normal filters after size and spacing are settled ------ */ + + if (m_do_statistical_normalization) + { + startTimer(); + if (m_verbose) { std::cout << "\tNormalizing images\n"; } + + // Statistical Image Normalization is fast, no need for threads + ItkUtilGTS::statisticalImageVectorNormalization( + inputImages, + std::lround(255 * m_image_to_agd_maps_ratio) + ); + stopTimerAndReport("Preprocessing: Statistical image normalization"); + + if (m_save_all) + { + int w = 0; + for (auto& image : inputImages) + { + writeImage( image, m_output_folder + "/" + + std::string("statistical_norm_image") + std::to_string(++w) + std::string(".nii.gz") + ); + } + } + } + + // { + // startTimer(); + // if (m_verbose) { std::cout << "\tBilateral 
filter\n"; } + // for (auto& image : inputImages) + // { + // std::cout << "new\n"; + // image = ItkUtilGTS::BilateralImageFilter( + // image, true, 255 * m_image_to_agd_maps_ratio + // ); + + // writeImage( image, m_output_folder + "/" + + // std::string("bilateral_norm_image_ignore") + std::string(".nii.gz") + // ); + // } + // stopTimerAndReport("Bilateral image filter"); + + // if (m_save_all) + // { + // int w = 0; + // for (auto& image : inputImages) + // { + // writeImage( image, m_output_folder + "/" + + // std::string("bilateral_norm_image") + std::to_string(++w) + std::string(".nii.gz") + // ); + // } + // } + // } + + // { + // startTimer(); + // if (m_verbose) { std::cout << "\tGADF filter\n"; } + // for (auto& image : inputImages) + // { + // std::cout << "new\n"; + // image = ItkUtilGTS::gradientAnisotropicDiffusionImageFilter( + // image, true, 255 * m_image_to_agd_maps_ratio + // ); + + // writeImage( image, m_output_folder + "/" + + // std::string("gadf_norm_image_ignore") + std::string(".nii.gz") + // ); + // } + // stopTimerAndReport("GADF image filter"); + + // if (m_save_all) + // { + // int w = 0; + // for (auto& image : inputImages) + // { + // writeImage( image, m_output_folder + "/" + + // std::string("gadf_norm_image") + std::to_string(++w) + std::string(".nii.gz") + // ); + // } + // } + // } + + // Susan denoising + if (m_do_susan_denoising) { + startTimer(); + // TODO: Background threads + if (m_verbose) { std::cout << "\tDenoising images\n"; } + SusanDenoising susan; + for (size_t i = 0; i < inputImages.size(); i++) + { + inputImages[i] = susan.Run(inputImages[i]); + } + stopTimerAndReport("Preprocessing: Susan Denoising Filter"); + + // Optionally save + int z = 0; + if (m_save_all) + { + for (auto& image : inputImages) { + writeImage( image, m_output_folder + "/" + + std::string("susan_denoised_image") + std::to_string(++z) + std::string(".nii.gz") + ); + } + } + } + + if (m_do_curvature_anisotropic) + { + startTimer(); + if (m_verbose) { std::cout << "\tApplying CADF... 
[Warning: This might take a long time]\n"; } + + int counterForThreadsVec = 0; + std::vector threads(inputImages.size()); + int numberOfOpenThreads = 0; + int oldestOpenThread = 0; + + for (size_t i = 0; i < inputImages.size(); i++) + { + if (numberOfOpenThreads == m_number_of_threads) { + threads[oldestOpenThread].join(); + oldestOpenThread++; + numberOfOpenThreads--; + } + + numberOfOpenThreads++; + threads[counterForThreadsVec++] = std::thread( + [&](int ii, std::vector& inputImages) mutable + { + inputImages[ii] = ItkUtilGTS::curvatureAnisotropicDiffusionImageFilter( + inputImages[ii] + ); + }, + i, + std::ref(inputImages) + ); + } + + for (size_t i = oldestOpenThread; i < inputImages.size(); i++) { + threads[i].join(); + } + stopTimerAndReport("Preprocessing: Curvature Anisotropic Diffusion Image Filter"); + + // Optionally save + int z = 0; + if (m_save_all) + { + for (auto& image : inputImages) { + // image = ItkUtilGTS::curvatureAnisotropicDiffusionImageFilter(image); + writeImage( image, m_output_folder + "/" + + std::string("cadf_image") + std::to_string(++z) + std::string(".nii.gz") + ); + } + } + } + } + + void PostProcessLabelsImage(LabelsImagePointer& labels) + { + // Make the output the same as the original input + if (m_verbose) { std::cout << "Postprocessing labels image...\n"; } + + if (m_save_all) + { + writeImage( labels, m_output_folder + "/" + + std::string("labels_before_post_processing.nii.gz") + ); + } + + startTimer(); + labels = ItkUtilGTS::changeImageSpacing( + labels, m_original_labels_image_spacing, true, + true, m_original_labels_image_size, + true, m_original_labels_image_origin + ); + stopTimerAndReport("Postprocessing: Changing size and spacing back"); + + if (m_save_all) + { + writeImage( labels, m_output_folder + "/" + + std::string("labels_res.nii.gz") + ); + } + } + + template + void PostProcessNormalImages(std::vector& images) + { + for (auto& image : images) { + image = ItkUtilGTS::changeImageSpacing( + image, m_original_input_image_spacing, false, + true, m_original_input_image_size + ); + } + } + + template + void PostProcessNormalImage(typename TImageType::Pointer& image) + { + if (m_verbose) { std::cout << "Postprocessing image\n"; } + startTimer(); + std::vector imageInVector; + imageInVector.push_back(image); + PostProcessNormalImages(imageInVector); + stopTimerAndReport("Postprocessing: Changing a non-label image's size and spacing back"); + } + + private: + typename InputImageType::SpacingType m_original_input_image_spacing; + typename LabelsImageType::SpacingType m_original_labels_image_spacing; + + typename InputImageType::SizeType m_original_input_image_size; + typename LabelsImageType::SizeType m_original_labels_image_size; + + typename InputImageType::PointType m_original_input_image_origin; + typename LabelsImageType::PointType m_original_labels_image_origin; + + bool m_limit_pixels = true, m_do_curvature_anisotropic = false, + m_do_susan_denoising = false, m_do_statistical_normalization = true, + m_verbose = false, m_save_all = false, m_timer_enabled = false; + int m_pixel_limit = 10000000, m_number_of_threads = 16; + float m_image_to_agd_maps_ratio = 6; + std::string m_output_folder = ""; + SvmSuiteUtil::Timer m_timer; + + void startTimer() { + if (m_timer_enabled) { + m_timer.Reset(); + } + } + + void stopTimerAndReport(std::string desc) { + if (m_timer_enabled) { + float diff = m_timer.Diff(); + + std::ofstream timerFile; + timerFile.open(m_output_folder + "/time_report.txt", std::ios_base::app); //append file + timerFile << desc << 
": " << diff << "s\n"; + } + } + + }; +} + +#endif // ! H_CBICA_PROCESSING diff --git a/Modules/CaPTkInteractiveSegmentation/include/RFPrepareTrainData.h b/Modules/CaPTkInteractiveSegmentation/include/RFPrepareTrainData.h new file mode 100644 index 0000000..79058b5 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/RFPrepareTrainData.h @@ -0,0 +1,44 @@ +#ifndef H_CBICA_RF_PREPARE_TRAIN_DATA +#define H_CBICA_RF_PREPARE_TRAIN_DATA + +#include +#include +#include + +#include + +namespace RFSuiteTrainData +{ + /** + Creates TrainData where random x rows will be the training set (and random (data.rows-x) will be the testing set) + [NOT USED] + @param data the input matrix + @param responses the input input responses + @param ntrain_samples the x + @return pointer to the TrainData object + */ + cv::Ptr + PrepareTrainData(const cv::Mat &data, const cv::Mat &responses, int ntrain_samples); + + /** + Shuffles rows of two "parallel" cv::Mat matrices + [NOT USED] + @param matrix the first matrix + @param resRandMatrix the result randomized first matrix + @param responses the second matrix + @param resRandResponses the result randomized second matrix + */ + void + shuffleDataAndResponses(const cv::Mat &matrix, cv::Mat &resRandMatrix, const cv::Mat &responses, cv::Mat &resRandResponses); + + /** + Shuffles rows of a cv::Mat + [NOT USED] + @param matrix the input matrix + @return the shuffled matrix + */ + cv::Mat + shuffleRows(const cv::Mat &matrix); +} + +#endif // !H_CBICA_RF_PREPARE_TRAIN_DATA \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/include/RFSuiteManager.h b/Modules/CaPTkInteractiveSegmentation/include/RFSuiteManager.h new file mode 100644 index 0000000..da6bdc8 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/RFSuiteManager.h @@ -0,0 +1,144 @@ +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "ConfigParserRF.h" +#include "RFPrepareTrainData.h" + +namespace RFSuite +{ + class Manager + { + public: + const double DEFAULT_TRAINING_SAMPLE_PERCENTAGE = 75.0; + const int DEFAULT_MAX_DEPTH = 10; + const double DEFAULT_MIN_SAMPLE_COUNT_PERCENTAGE = 10.0; + const int DEFAULT_MAX_CATEGORIES = 24; + const int DEFAULT_ACTIVE_VAR_COUNT = 0; // 0 is the default and it is sqrt(# of features) + const int DEFAULT_NUMBER_OF_TREES = 150; + + explicit Manager() {} + + virtual ~Manager() {} + + /** + Trains while tuning parameters with prespecified ranges + @return lower training error + */ + float TrainAuto(); + + /** + Trains while tuning parameters with ranges specified in the parameters + Every ParamGrid has fields: minVal, maxVal, logStep(>=1). + if (logStep>1) then values: minVal, minVal*logStep^1, minVal*logStep^2,... are tried + while minVal*logStep^n < maxVal (see definition of cv::ml::ParamGrid for more) + if (logStep=1) then values: minVal, minVal+1, minVal+2,... 
are tried + while minVal+n < maxVal + Note: If there is no need for tuning for a parameter and value X is to be used, + give a ParamGrid with minVal=X, maxVal=X+1, logStep=2 (only X will be tried) + @param maxDepthGrid ParamGrid for max depth + @param minSampleCountPercentageGrid ParamGrid for min sample count percentage + @param maxCategoriesGrid ParamGrid for max categories + @param activeVarCountGrid ParamGrid for active variable count + @return lower training error + */ + float TrainAuto(cv::ml::ParamGrid maxDepthGrid, cv::ml::ParamGrid minSampleCountPercentageGrid, + cv::ml::ParamGrid maxCategoriesGrid, cv::ml::ParamGrid activeVarCountGrid); + + /** + Trains will values specified by setters + @return training error + */ + float Train(); + + /** + Trains with specified parameters + Note: Training sample percentage should still be changed from SetTrainingSamplePercentage() + @param maxDepth the max depth for the decision trees + @param minSampleCountPercentage percentage of training data to be randomly selected for a tree + @param maxCategories Max categories in training for each class (Relevant only in n>2 classification) + @param activeVarCount subset of features for each tree + @param numberOfTrees size of the forest + @return training error + */ + float Train(int maxDepth, double minSampleCountPercentage, int maxCategories, int activeVarCount, int numberOfTrees); + + /** + Make predictions + @param testingMat matrix with one sample per row, each column is a feature + @return pointer to the responses mat + */ + std::shared_ptr Test(cv::Mat &testingMat); + + /** + Make predictions + @param testingMat matrix with one sample per row, each column is a feature + @param skipZerosMat matrix with the same number of rows as trainingMat (if val at [i][0] is 0 -> no prediction will happen) + @param skipZeros whether skipZerosMat will be used + @return pointer to the responses mat + */ + std::shared_ptr Test(cv::Mat &testingMat, cv::Mat &skipZerosMat, bool skipZeros = true); + + /** + Save random forest model to file + @param filename full path to desired save location + */ + void SaveModel(const std::string filename); + + // Setters + + void SetTrainDataFromMats(cv::Mat &trainingMat, cv::Mat &labelsMat); + + void SetPriorsMat(cv::Mat &priorsMat); + + void SetOutputPath(std::string path); + + void SetSaveAll(bool saveAll); + + void SetVerbose(bool verbose); + + void SetParametersFromConfig(std::string filePath); + + void SetTrainingSamplePercentage(double trainingSamplePercentage); + + void SetMaxDepth(int maxDepth); + + void SetMinSampleCountPercentage(double minSampleCountPercentage); + + void SetMaxCategories(int maxCategories); + + void SetActiveVarCount(int activeVarCount); + + void SetNumberOfTrees(int numberOfTrees); + + private: + cv::Ptr m_rtrees = cv::ml::RTrees::create(); + cv::Mat m_priors_mat = cv::Mat(); + cv::Ptr m_traindata; + std::string m_output_path = "./", m_rf_config_file_path = ""; + bool m_save_all = false, m_verbose = false; + + // Parameters + // Note on m_training_sample_percentage: + // From the training data, only m_training_sample_percentage % will actually be used for training + // The rest will be used for checking the accuracy of the model + double m_training_sample_percentage = DEFAULT_TRAINING_SAMPLE_PERCENTAGE; + int m_max_depth = DEFAULT_MAX_DEPTH; + double m_min_sample_count_percentage = DEFAULT_MIN_SAMPLE_COUNT_PERCENTAGE; + int m_max_categories = DEFAULT_MAX_CATEGORIES; + int m_active_var_count = DEFAULT_ACTIVE_VAR_COUNT; + int m_number_of_trees = 
DEFAULT_NUMBER_OF_TREES; + + /** + Trains the classifier + @return training error + */ + float train_and_print_errs(cv::Ptr model, const cv::Ptr& data); + }; +} diff --git a/Modules/CaPTkInteractiveSegmentation/include/RandomForestSuite.h b/Modules/CaPTkInteractiveSegmentation/include/RandomForestSuite.h new file mode 100644 index 0000000..cd56259 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/RandomForestSuite.h @@ -0,0 +1,6 @@ +#ifndef H_CBICA_RANDOM_FOREST_SUITE +#define H_CBICA_RANDOM_FOREST_SUITE + +#include "RFSuiteManager.h" + +#endif // !H_CBICA_RANDOM_FOREST_SUITE diff --git a/Modules/CaPTkInteractiveSegmentation/include/SusanDenoising.h b/Modules/CaPTkInteractiveSegmentation/include/SusanDenoising.h new file mode 100644 index 0000000..93df490 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/SusanDenoising.h @@ -0,0 +1,151 @@ +/** +\file SusanDenoising.h +This file holds the declaration of the class SusanDenoising. +http://www.med.upenn.edu/sbia/software/
+software@cbica.upenn.edu +Copyright (c) 2018 University of Pennsylvania. All rights reserved.
+See COPYING file or https://www.med.upenn.edu/sbia/software-agreement.html +*/ + +#ifndef H_SUSAN_DENOISING +#define H_SUSAN_DENOISING + +#include "iostream" +#include "itkImage.h" +#include "itkConnectedThresholdImageFilter.h" +#include "itkImageRegionIterator.h" +#include "itkMedianImageFunction.h" +#include "itkNeighborhoodIterator.h" +#include "itkImageDuplicator.h" + +/** +\class SusanDenoising +\brief This class implements the SUSAN denoising algorithm. +Reference: +@inproceedings{SmithBrady1997, +title={SUSAN - a new approach to low level image processing}, +author={Smith, S.M., and Brady, J.M.}, +journal={International Journal of Computer Vision}, +pages={45�78}, +year={1997}, +organization={Springer} +} +*/ +class SusanDenoising +{ + +public: + SusanDenoising() {}; + ~SusanDenoising() {}; + + template + typename ImageType::Pointer Run(typename ImageType::Pointer image); + + void SetSigma(float input) + { + m_sigma = input; + } + + void SetIntensityVariationThreshold(float input) + { + m_intensityVariationThreshold = input; + } + + void SetRadius(size_t input) + { + m_radius = input; + } + + +private: + float m_sigma = 0.5; + float m_intensityVariationThreshold = 80; + size_t m_radius = 1; +}; + +template +typename ImageType::Pointer SusanDenoising::Run(const typename ImageType::Pointer image) +{ + + //messageUpdate("SUSAN Denoising"); + //progressUpdate(0); + //qApp->processEvents(); + + typedef itk::ImageDuplicator DuplicatorType; + typename DuplicatorType::Pointer outputImageFilter = DuplicatorType::New(); + outputImageFilter->SetInputImage(image); + outputImageFilter->Update(); + + double sigma = m_sigma; + double intensityVariationThreshold = m_intensityVariationThreshold; + + // typename ImageType::SizeType imageSize = image->GetLargestPossibleRegion().GetSize(); + + typedef typename itk::MedianImageFunction< ImageType > MedianImageFunctionType; + typename MedianImageFunctionType::Pointer medianImageFunction = MedianImageFunctionType::New(); + medianImageFunction->SetInputImage(outputImageFilter->GetOutput()); + + + typedef typename itk::NeighborhoodIterator NeighborhoodIteratorType; + typename NeighborhoodIteratorType::RadiusType radius; + radius.Fill(m_radius); + NeighborhoodIteratorType ImageIterator(radius, outputImageFilter->GetOutput(), outputImageFilter->GetOutput()->GetLargestPossibleRegion()); + + //progressUpdate(10); + //qApp->processEvents(); + + for (ImageIterator.GoToBegin(); !ImageIterator.IsAtEnd(); ++ImageIterator) + { + double NumeratorSum = 0; + double DenominatorSum = 0; + + typename itk::NeighborhoodIterator::IndexType currentindex = ImageIterator.GetIndex(); + float CenterIntensityValue = ImageIterator.GetCenterPixel(); + + // make parallel here, perhaps? 
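+      // (A possible approach, not implemented here: process disjoint sub-regions of the image in
+      //  separate std::thread workers, similar to how Processing.h runs the CADF filter on each
+      //  input image in its own thread. The voxel loop below is kept serial for now.)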
+ for (unsigned int LocalNeighborhoodIterator = 0; LocalNeighborhoodIterator < ImageIterator.Size(); ++LocalNeighborhoodIterator) + { + typename ImageType::OffsetType offsetType1 = ImageIterator.ComputeInternalIndex(LocalNeighborhoodIterator); + + float NeighborIntensityValue = ImageIterator.GetPixel(LocalNeighborhoodIterator); + + typename itk::NeighborhoodIterator::IndexType neighborindex; + for (size_t index = 0; index < ImageType::ImageDimension; index++) + neighborindex[index] = (currentindex[index] - radius[index]) + offsetType1[index]; + + typename ImageType::IndexType LocalizedVoxelIndex; + + for (unsigned int index = 0; index < LocalizedVoxelIndex.GetIndexDimension(); index++) + LocalizedVoxelIndex[index] = neighborindex[index] - currentindex[index]; + + if (LocalizedVoxelIndex == currentindex) + continue; + + double radius = 0; + for (unsigned int index = 0; index < LocalizedVoxelIndex.GetIndexDimension(); index++) + radius = radius + pow(LocalizedVoxelIndex[index], 2); + radius = sqrt(radius); + + double weightfactor = exp(-pow(radius, 2) / (2 * pow(sigma, 2)) - pow((NeighborIntensityValue - CenterIntensityValue), 2) / pow(intensityVariationThreshold, 2)); + DenominatorSum = DenominatorSum + weightfactor; + NumeratorSum = NumeratorSum + (NeighborIntensityValue* weightfactor); + } + if (DenominatorSum == 0) + { + //medianImageFunction->EvaluateAtIndex(currentindex); + //ImageIterator->SetPixel() + ImageIterator.SetPixel(ImageIterator.Size() - 1, medianImageFunction->EvaluateAtIndex(currentindex)); + } + else + { + // double previousval = image->GetPixel(currentindex); + double newval = std::round(NumeratorSum / DenominatorSum); + outputImageFilter->GetOutput()->SetPixel(currentindex, newval); + //ImageIterator->SetPixel(currentindex, newval); + } + } + //progressUpdate(100); + return outputImageFilter->GetOutput(); +} + +#endif // ! 
H_SUSAN_DENOISING \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/include/SvmSuite.h b/Modules/CaPTkInteractiveSegmentation/include/SvmSuite.h new file mode 100644 index 0000000..abbf19a --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/SvmSuite.h @@ -0,0 +1,8 @@ +#ifndef H_CBICA_SVM_SUITE +#define H_CBICA_SVM_SUITE + +#include "SvmSuiteDescription.h" +#include "SvmSuiteOperations.h" +#include "SvmSuiteManager.h" + +#endif \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/include/SvmSuiteDescription.h b/Modules/CaPTkInteractiveSegmentation/include/SvmSuiteDescription.h new file mode 100644 index 0000000..f61d392 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/SvmSuiteDescription.h @@ -0,0 +1,131 @@ +#ifndef H_CBICA_SVM_SUITE_DESCRIPTION +#define H_CBICA_SVM_SUITE_DESCRIPTION + +#include +#include +#include + +#include + +namespace SvmSuite +{ + // Each SVM in the configuration (there might be more than one) gets stored in one instance of SvmDescription + class SvmDescription + { + public: + /** + Constructor (initializes ParamGrids) + */ + SvmDescription(); + + ~SvmDescription() {} + + // Setters + + void SetKernelType(cv::ml::SVM::KernelTypes kernelType); + void SetKernelType(std::string kernelType); + void SetType(cv::ml::SVM::Types type); + void SetType(std::string type); + void SetKfold(int kfold); + void SetKfold(std::string kfold); + void SetNeighborhoodRadius(int neighborhoodRadius); + void SetNeighborhoodRadius(std::string neighborhoodRadius); + void SetConsiderWeights(bool considerWeights); + void SetConsiderWeights(std::string considerWeights); + void SetImportance(double importance); + void SetImportance(std::string importance); + void SetModelPath(std::string modelPath); + void SetModel(cv::Ptr model); + + // Note: setting a parameters to value X will also change the paramgrid to {X, X+1, 2} (that way only value X is tried in OpenCV's trainAuto) + + void SetParameter(cv::ml::SVM::ParamTypes param, double val); + void SetParameter(std::string param, double val); + void SetParameter(std::string param, std::string val); + + void SetC(double c); + void SetC(std::string c); + void SetGamma(double gamma); + void SetGamma(std::string gamma); + void SetP(double p); + void SetP(std::string p); + void SetNu(double nu); + void SetNu(std::string nu); + void SetCoef(double coef); + void SetCoef(std::string coef); + void SetDegree(double degree); + void SetDegree(std::string degree); + + void SetParameterRangeAuto(cv::ml::SVM::ParamTypes param); + void SetParameterRangeAuto(std::string param); + + void SetParameterRange(cv::ml::SVM::ParamTypes param, double minVal, double maxVal, double logStep); + void SetParameterRange(std::string param, double minVal, double maxVal, double logStep); + + void SetTermCriteria(int criteriaType, int maxCount, double epsilon); + void SetTermCriteria(std::string criteriaType, int maxCount, double epsilon); + void SetTermCriteria(std::string criteriaType, std::string maxCount, std::string epsilon); + + // Getters + + cv::ml::SVM::KernelTypes GetKernelType(); + std::string GetKernelTypeAsString(); + cv::ml::SVM::Types GetType(); + std::string GetTypeAsString(); + int GetKfold(); + int GetNeighborhoodRadius(); + bool GetConsiderWeights(); + double GetImportance(); + std::string GetModelPath(); + cv::Ptr GetModel(); + + double GetC(); + double GetGamma(); + double GetP(); + double GetNu(); + double GetCoef(); + double GetDegree(); + + double 
GetParameter(cv::ml::SVM::ParamTypes param); + double GetParameter(std::string param); + bool isParameterSetToSpecificValue(cv::ml::SVM::ParamTypes param); + bool isParameterSetToSpecificValue(std::string param); + + cv::ml::ParamGrid GetParamGridForParameter(cv::ml::SVM::ParamTypes param); + cv::ml::ParamGrid GetParamGridForParameter(std::string param); + cv::TermCriteria GetTermCriteria(); + std::string GetTermCriteriaTypeAsString(); + + /** + Get default ensemble of SvmDescriptions + @return Default vector of SvmDescriptions + */ + static std::vector GetDefaultSvmDescriptions(); + + private: + // Everything is initialized with OpenCV's default values + + cv::ml::SVM::KernelTypes kernelType = cv::ml::SVM::KernelTypes::RBF; + cv::ml::SVM::Types type = cv::ml::SVM::Types::C_SVC; + std::vector< cv::ml::ParamGrid > parametersRanges; + std::vector< bool > isParameterSetToSpecificValueVector; + cv::TermCriteria termCriteria = cv::TermCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 1000, FLT_EPSILON); + bool considerWeights = false; + int kfold = 10; + int neighborhoodRadius = 0; + double importance = 1.0; + std::string modelPath = ""; + cv::Ptr model; + double c = 1.0; + double gamma = 1.0; + double p = 0.0; + double nu = 0.0; + double coef = 0.0; + double degree = 0.0; + + void errorOccured(std::string msg); + }; + +} + +#endif \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/include/SvmSuiteManager.h b/Modules/CaPTkInteractiveSegmentation/include/SvmSuiteManager.h new file mode 100644 index 0000000..3861a75 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/SvmSuiteManager.h @@ -0,0 +1,129 @@ +#ifndef H_CBICA_SVM_SUITE_MANAGER +#define H_CBICA_SVM_SUITE_MANAGER + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "SvmSuiteDescription.h" +#include "SvmSuiteOperations.h" +#include "SvmSuiteUtil.h" + +namespace SvmSuite +{ + class Manager + { + public: + + typedef int LabelsType; + typedef float PseudoProbType; + + explicit Manager() {} + + virtual ~Manager() {} + + typedef struct Result { + cv::Mat posMat; + cv::Mat negMat; + LabelsType posLabel = 0; + LabelsType negLabel = 0; + cv::Mat labelsMat; + } Result; + + void GenerateConfigFromBestValues(std::string outputFileName); + + void GenerateConfigFromBestValues(); + + /** + Train the models specified in the svm descriptions + */ + void Train(); + + /** + Test using the ensemble of trained svm models + @param testingMat cv::Mat where the rows are the samples to test and columns are the features + @param pseudoProbMapResult true-> the result would be pos and neg pseudoprobability maps (only for n=2 classification) + false-> the result would be the predicted labels (n>=2 classification) + @return pointer to SvmSuite::Manager::Result object (which contains the result images) + */ + std::shared_ptr Test(cv::Mat &testingMat, bool pseudoProbMapResult = false); + + /** + Test using the ensemble of trained svm models + @param testingMat cv::Mat where the rows are the samples to test and columns are the features + @param skipZerosMat cv::Mat that has as many rows as testingMat, where for each row if skipZerosMat has zero the line will be skipped + and the label 0 will be set. 
In practice you can pass testingMat two times (extra columns would not matter) + @param pseudoProbMapResult true-> the result would be pos and neg pseudoprobability maps (only for n=2 classification) + false-> the result would be the predicted labels (n>=2 classification) + @param skipZeros whether to user skipZerosMat + @return pointer to SvmSuite::Manager::Result object (which contains the result images) + */ + std::shared_ptr Test(cv::Mat &testingMat, cv::Mat &skipZerosMat, bool pseudoProbMapResult = false, bool skipZeros = true); + + // Adders and Setters + + void AddSvmDescriptionToList(SvmDescription svmDesc); + void AddSvmDescriptions(std::vector< SvmDescription > svmDescs); + void AddPretrainedModel(std::string pretrainedModelPath, int neighborhoodRadius = 0); + void AddSvmsFromConfig(std::string configPath); + void SetTrainData(cv::Mat &trainingMat, cv::Mat &labelsMat, cv::Mat &weightsMat/*, cv::Mat sampleIdx = cv::Mat()*/); + void SetVerbose(bool verbose); + void SetOutputPath(std::string path); + void SetSavingModelsEnabled(bool modelsEnabled); + void SetTimerEnabled(bool timerEnabled); + void SetNumberOfThreads(int numberOfThreads); + void SetSubsampling(bool subsample, int maxSamples = 3000); + void SetInputNormalization(bool normalize); + + private: + std::vector< SvmDescription > m_svm_descriptions; + cv::Ptr m_traindata; + cv::Mat m_weights_mat; + std::set m_different_labels; + std::string m_output_path = "./"; + SvmSuiteUtil::Timer m_timer; + int m_number_of_threads = 32, m_max_samples = 3000; + bool m_timer_enabled = false, m_save_models = false, m_verbose = false, m_subsample = false, m_normalize = false; + + void testingLabelsThreadJob(cv::Mat &testingMat, int iStart, int interval, cv::Mat &resultLabelsMat, + bool skipZeros, cv::Mat &skipZerosMat, std::vector< double > &importanceValues); + + // For timer + + void startTimer(); + + void stopTimerAndReport(std::string desc); + + // Util + + void message(std::string message, bool overdraw = false, bool finished = false, int progress = -1); + + void errorOccured(std::string msg); + + template< typename U, typename V > + std::vector< U > getMapKeyset(std::map< U, V > map) + { + // Find a list of the different keys + std::vector< U > res; + + res.reserve(map.size()); + for (auto const& imap : map) { + res.push_back(imap.first); + } + + return res; + } + + }; + +} + +#endif \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/include/SvmSuiteOperations.h b/Modules/CaPTkInteractiveSegmentation/include/SvmSuiteOperations.h new file mode 100644 index 0000000..f7e0ea0 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/SvmSuiteOperations.h @@ -0,0 +1,45 @@ +#ifndef H_CBICA_SVM_SUITE_OPERATIONS +#define H_CBICA_SVM_SUITE_OPERATIONS + +#include +#include +#include + +#include +#include +#include + +#include "SvmSuiteDescription.h" + +namespace SvmSuite +{ + /** Normalizes input for the SVMs + @param mat the input matrix + */ + void NormalizeInput(cv::Mat& mat); + + std::vector< SvmSuite::SvmDescription > getSvmDescriptionsFromConfig(std::string configPath); + + SvmSuite::SvmDescription convertModelToSvmDescription(cv::Ptr svm_model, + int neighborhood_radius, double importance); + + SvmSuite::SvmDescription convertModelToSvmDescription(std::string model_path, + int neighborhood_radius, double importance); + + void generateConfig(std::vector< SvmSuite::SvmDescription > &svm_descriptions, + std::string outputFilePath, bool m_save_models = false); + + void generateConfig(SvmSuite::SvmDescription 
&svm_description, + std::string outputFilePath, bool m_save_models = false); + + void generateConfig(cv::Ptr model, int neighborhood_radius, + double importance, std::string outputFilePath, bool m_save_models = false); + + void generateConfig(std::vector< cv::Ptr > multiple_models, std::vector< int > neighborhood_radii, + std::vector< double > importance_values, std::string outputFilePath, bool m_save_models = false); + + void generateConfig(std::vector< std::string > multiple_models_paths, std::vector< int > neighborhood_radii, + std::vector< double > importance_values, std::string outputFilePath, bool m_save_models = false); +} + +#endif \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/include/SvmSuiteUtil.h b/Modules/CaPTkInteractiveSegmentation/include/SvmSuiteUtil.h new file mode 100644 index 0000000..d296401 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/SvmSuiteUtil.h @@ -0,0 +1,34 @@ +#ifndef H_CBICA_SVM_SUITE_UTIL +#define H_CBICA_SVM_SUITE_UTIL + +#include +#include + +namespace SvmSuiteUtil +{ + class Timer { + public: + /** + Timer Constructor + Also instantiates interval start. + Use Reset() to reset it again at another time. + */ + Timer(); + + /** + Resets the start of the interval + */ + void Reset(); + + /** + Calculate the difference between reset (or constructor called) and now + @return the difference in float seconds + */ + float Diff(); + private: + std::chrono::high_resolution_clock::time_point m_timestamp; + }; + +} + +#endif \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/include/UtilCvMatToImageGTS.h b/Modules/CaPTkInteractiveSegmentation/include/UtilCvMatToImageGTS.h new file mode 100644 index 0000000..246279f --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/UtilCvMatToImageGTS.h @@ -0,0 +1,40 @@ +#ifndef H_CBICA_CV_MAT_TO_IMAGE_GTS +#define H_CBICA_CV_MAT_TO_IMAGE_GTS + +#include "itkImage.h" +#include "itkImageFileWriter.h" +#include "itkImageFileReader.h" +#include "itkExceptionObject.h" +#include "itkCastImageFilter.h" +#include "itkRescaleIntensityImageFilter.h" +#include "itkNeighborhoodIterator.h" +#include "itkImageRegionIterator.h" + +#include +#include +#include + +namespace GeodesicTrainingSegmentation +{ + namespace CvMatToImageGTS + { + /** + Fills an image with the contents of a cv::Mat + @param image the itk image that will be filled + @param mat the input cv::mat, only the first item of each row will be used + */ + template + void FillImage(const typename itk::Image::Pointer &image, const cv::Mat &mat) + { + itk::ImageRegionIteratorWithIndex> iter_i(image, image->GetRequestedRegion()); + + int i; + for (iter_i.GoToBegin(), i = 0; !iter_i.IsAtEnd(); ++iter_i, i++) + { + iter_i.Set(mat.at(i, 0)); + } + } + } +} + +#endif // !H_CBICA_CV_MAT_TO_IMAGE_GTS diff --git a/Modules/CaPTkInteractiveSegmentation/include/UtilGTS.h b/Modules/CaPTkInteractiveSegmentation/include/UtilGTS.h new file mode 100644 index 0000000..309df71 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/UtilGTS.h @@ -0,0 +1,40 @@ +#ifndef H_CBICA_UTIL_GTS +#define H_CBICA_UTIL_GTS + +// #pragma warning(disable : 4996) //_CRT_SECURE_NO_WARNINGS + +#include +#include +#include +#include + +namespace GeodesicTrainingSegmentation +{ + namespace UtilGTS + { + /** + Get current date and time + @return current date and time as %Y-%m-%d %H.%M.%S + */ + std::string currentDateTime(); + + /** Find if a directory exists + @param dName the name of the directory + @return bool if the 
directory exists + */ + bool directoryExists(const std::string &dName); + + /**Creates a directory*/ + bool createDir(const std::string &dName); + + /**Splits a string into a list using a delimiter*/ + std::vector + split_string(const std::string &s, char delim); + + /**Find the file extension of the file*/ + std::string getFileExtension(const std::string filePath); + } + +} + +#endif \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/include/UtilImageToCvMatGTS.h b/Modules/CaPTkInteractiveSegmentation/include/UtilImageToCvMatGTS.h new file mode 100644 index 0000000..393ad63 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/UtilImageToCvMatGTS.h @@ -0,0 +1,346 @@ +#ifndef H_CBICA_UTIL_IMAGE_TO_CV_MAT_GTS +#define H_CBICA_UTIL_IMAGE_TO_CV_MAT_GTS + +#include "itkImage.h" +#include "itkImageFileWriter.h" +#include "itkImageFileReader.h" +#include "itkExceptionObject.h" +#include "itkCastImageFilter.h" +#include "itkRescaleIntensityImageFilter.h" +#include "itkNeighborhoodIterator.h" +#include "itkImageRegionIterator.h" + +#include +#include +#include + +#include +#include +#include + +namespace GeodesicTrainingSegmentation +{ + namespace ParserGTS + { + typedef int LabelsPixelType; + + typedef struct Result { + cv::Mat trainingMat; + cv::Mat labelsMat; + cv::Mat weightsMat; + cv::Mat testingMat; + cv::Mat skipZerosMat; + std::set differentLabels; + } Result; + + template + int getNumberOfPixels(typename TImageType::Pointer image) + { + // Get number of pixels of the images + typename TImageType::RegionType region = image->GetLargestPossibleRegion(); + typename TImageType::SizeType imageSize = region.GetSize(); + int numberOfPixels = 1; + + for (unsigned int i = 0; i < TImageType::ImageDimension; i++) { + // Multiply with image size in each dimension + numberOfPixels *= imageSize[i]; + } + + return numberOfPixels; + } + + template + int getNumberOfNotZerosFromLabels(typename itk::Image::Pointer labels, + typename itk::Image::Pointer oneOfInput, bool considerZeros) + { + typedef itk::Image TImageType; + typedef itk::Image LabelsImageType; + + itk::ImageRegionIteratorWithIndex iter_l(labels, labels->GetRequestedRegion()); + itk::ImageRegionIteratorWithIndex iter_i(oneOfInput, oneOfInput->GetRequestedRegion()); + int labeled = 0; + + for (iter_l.GoToBegin(), iter_i.GoToBegin(); !iter_l.IsAtEnd(); ++iter_l, ++iter_i) { + if (iter_l.Get() != 0) { + labeled++; + + if (!considerZeros && iter_i.Get() == 0) { + //std::cout << "One of the labels is at a voxel out of bounds and will not be counted." 
+ // << " Consider redrawing the mask\n"; + labeled--; + } + } + } + + return labeled; + } + + /** + Finds how many samples are there for each label used in an itk::Image + The PixelType of the input image should always be int + @param labels the input labels image + @return unordered_map with the different labels as keys and the counts for each label as values + */ + template + static std::unordered_map CountsOfEachLabel(const typename TImageType::Pointer labels) + { + std::unordered_map labelsCountMap; + + itk::ImageRegionIteratorWithIndex iter_l(labels, labels->GetRequestedRegion()); + + LabelsPixelType val; + + for (iter_l.GoToBegin(); !iter_l.IsAtEnd(); ++iter_l) { + val = iter_l.Get(); + + if (val != 0) { + if (labelsCountMap.find(val) == labelsCountMap.end()) { + labelsCountMap[val] = 1; + } + else { + labelsCountMap[val] += 1; + } + } + } + + return labelsCountMap; + } + + template + std::shared_ptr Parse(const std::vector< typename itk::Image::Pointer > &input_images, + const typename itk::Image::Pointer &input_labels, bool considerZeros = false) + { + typedef itk::Image InputImageType; + typedef itk::Image LabelsImageType; + + std::shared_ptr res(new Result()); + + // Initialize the labels iterator + itk::ImageRegionIteratorWithIndex iter_labels(input_labels, input_labels->GetRequestedRegion()); + + // Get number of pixels of the images + int numberOfPixels = getNumberOfPixels(input_images[0]); + + // Number of voxels in the labels image that are not zero (and are usable labels) + int labelsNumber = getNumberOfNotZerosFromLabels(input_labels, input_images[0], considerZeros); + + // Initialize result + res->testingMat = cv::Mat::zeros(numberOfPixels, input_images.size(), CV_32F); + res->skipZerosMat = cv::Mat::zeros(numberOfPixels, input_images.size(), CV_32F); + res->trainingMat = cv::Mat::zeros(labelsNumber, input_images.size(), CV_32F); + res->labelsMat = cv::Mat::zeros(labelsNumber, 1, CV_32S); + + // If there is a label for this voxel (use 1,2,... for labels, not zero) + cv::Mat features = cv::Mat::zeros(1, input_images.size(), CV_32F); + std::unordered_map< LabelsPixelType, int > weightSums; + unsigned long allWeightsSum = 0; + + float val; + LabelsPixelType label; + bool isInputInt = (typeid(TPixelType).name() == typeid(int).name()) ? true : false; + size_t inputImagesSize = input_images.size(); + bool skipPixel; + int row_i = 0, row_testing_i = 0; + size_t fi; + + for (iter_labels.GoToBegin(); !iter_labels.IsAtEnd(); ++iter_labels) + { + skipPixel = false; + + // Set the values of the feature vector + for (fi = 0; fi < inputImagesSize; fi++) { + val = input_images[fi]->GetPixel(iter_labels.GetIndex()); + + // Only the first image is used to skip zeros + if ((fi == 0) && (val == 0) && (!considerZeros)) { + skipPixel = true; + break; + } + + features.ptr< TPixelType >(0)[fi] = (isInputInt) ? 
std::lround(val) : val; + } + + // Populate matrices + if (!skipPixel || considerZeros) { + features.copyTo(res->testingMat.row(row_i++)); + + label = iter_labels.Get(); + if (label != 0) { + // Set training and labels data + + res->labelsMat.ptr< LabelsPixelType >(row_testing_i)[0] = label; + res->differentLabels.insert(label); + + features.copyTo(res->trainingMat.row(row_testing_i++)); + + // For weights calculation + if (weightSums.find(label) == weightSums.end()) { + weightSums[label] = 1; + } + else { + weightSums[label] += 1; + } + allWeightsSum++; + } + } + else { + row_i++; + } + } + + // Extra for weights + res->weightsMat = cv::Mat::zeros(res->differentLabels.size(), 1, CV_32F); + + int wi = 0; + for (LabelsPixelType label : res->differentLabels) + { + if (allWeightsSum != 0) { + res->weightsMat.at< float >(wi++, 0) = static_cast(allWeightsSum - weightSums[label]) / allWeightsSum; + } + } + + res->testingMat.copyTo(res->skipZerosMat); + + return res; + } + + template + std::shared_ptr NormalizedParse(const std::vector< typename itk::Image::Pointer > &input_images, + const typename itk::Image::Pointer &input_labels, bool considerZeros = false) + { + typedef itk::Image InputImageType; + typedef itk::Image LabelsImageType; + + std::shared_ptr res(new Result()); + + // Initialize the labels iterator + itk::ImageRegionIteratorWithIndex iter_labels(input_labels, input_labels->GetRequestedRegion()); + + // Get number of pixels of the images + int numberOfPixels = getNumberOfPixels(input_images[0]); + + // Number of voxels in the labels image that are not zero (and are usable labels) + int labelsNumber = getNumberOfNotZerosFromLabels(input_labels, input_images[0], considerZeros); + + // Initialize result + res->testingMat = cv::Mat::zeros(numberOfPixels, input_images.size(), CV_32F); + res->skipZerosMat = cv::Mat::zeros(numberOfPixels, input_images.size(), CV_32F); + res->trainingMat = cv::Mat::zeros(labelsNumber, input_images.size(), CV_32F); + res->labelsMat = cv::Mat::zeros(labelsNumber, 1, CV_32S); + + // If there is a label for this voxel (use 1,2,... for labels, not zero) + cv::Mat features = cv::Mat::zeros(1, input_images.size(), CV_32F); + std::unordered_map< LabelsPixelType, int > weightSums; + unsigned long allWeightsSum = 0; + + float val; + LabelsPixelType label; + bool isInputInt = (typeid(TPixelType).name() == typeid(int).name()) ? true : false; + int inputImagesSize = input_images.size(); + bool skipPixel; + int row_i = 0, row_testing_i = 0; + size_t fi; + + for (iter_labels.GoToBegin(); !iter_labels.IsAtEnd(); ++iter_labels) + { + skipPixel = false; + + // Set the values of the feature vector + for (fi = 0; fi < inputImagesSize; fi++) { + val = input_images[fi]->GetPixel(iter_labels.GetIndex()); + + // Only the first image is used to skip zeros + if ((fi == 0) && (val == 0) && (!considerZeros)) { + skipPixel = true; + break; + } + + features.ptr< TPixelType >(0)[fi] = (isInputInt) ? 
std::lround(val) : val; + } + + // Populate matrices + if (!skipPixel || considerZeros) { + features.copyTo(res->testingMat.row(row_i++)); + + label = iter_labels.Get(); + if (label != 0) { + // Set training and labels data + + res->labelsMat.ptr< LabelsPixelType >(row_testing_i)[0] = label; + res->differentLabels.insert(label); + + features.copyTo(res->trainingMat.row(row_testing_i++)); + + // For weights calculation + if (weightSums.find(label) == weightSums.end()) { + weightSums[label] = 1; + } + else { + weightSums[label] += 1; + } + allWeightsSum++; + } + } + else { + row_i++; + } + } + + // Extra for weights + res->weightsMat = cv::Mat::zeros(res->differentLabels.size(), 1, CV_32F); + + int wi = 0; + for (LabelsPixelType label : res->differentLabels) + { + if (allWeightsSum != 0) { + res->weightsMat.at< float >(wi++, 0) = static_cast(allWeightsSum - weightSums[label]) / allWeightsSum; + } + } + + res->testingMat.copyTo(res->skipZerosMat); + + // Normalization + + // Collect only the relevant samples + cv::Mat onlyRelevantSamples; + if (considerZeros) { + res->trainingMat.copyTo(onlyRelevantSamples); + } + else { + for (int row_i = 0; row_i < res->trainingMat.rows; row_i++) + { + if (!(res->trainingMat.at(row_i, 0) == 0)) { + onlyRelevantSamples.push_back(res->trainingMat.row(row_i)); + } + } + } + + // Normalize by subtracting mean and dividing by twice the standard deviation + cv::Scalar meanValue, stdValue; + double mean, std; + + for (int i_col = 0; i_col < onlyRelevantSamples.cols; i_col++) + { + cv::meanStdDev(onlyRelevantSamples.col(i_col), meanValue, stdValue); + + mean = meanValue[0]; + std = (stdValue[0] == 0) ? 0.001 : stdValue[0]; + + res->trainingMat.col(i_col).convertTo(res->trainingMat.col(i_col), CV_32F, 1, -1 * mean); + res->trainingMat.col(i_col).convertTo(res->trainingMat.col(i_col), CV_32F, 1 / (2 * std), 0); + + res->testingMat.col(i_col).convertTo(res->testingMat.col(i_col), CV_32F, 1, -1 * mean); + res->testingMat.col(i_col).convertTo(res->testingMat.col(i_col), CV_32F, 1 / (2 * std), 0); + } + + return res; + } + + void ScaleSomeOfTheColumns(cv::Mat& mat, int colStart, int colEnd, double ratio); + + } + +} + +#endif // !H_CBICA_UTIL_IMAGE_TO_CV_MAT_GTS + diff --git a/Modules/CaPTkInteractiveSegmentation/include/UtilItkGTS.h b/Modules/CaPTkInteractiveSegmentation/include/UtilItkGTS.h new file mode 100644 index 0000000..e010d5c --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/include/UtilItkGTS.h @@ -0,0 +1,655 @@ +#ifndef H_CBICA_ITK_UTIL_GTS +#define H_CBICA_ITK_UTIL_GTS + +#include "itkImage.h" +#include "itkImageFileWriter.h" +#include "itkImageFileReader.h" +#include "itkExceptionObject.h" +#include "itkCastImageFilter.h" +#include "itkRescaleIntensityImageFilter.h" +#include "itkInvertIntensityImageFilter.h" +#include "itkMinimumMaximumImageCalculator.h" +#include "itkNormalizeImageFilter.h" +#include "itkIdentityTransform.h" +#include "itkResampleImageFilter.h" +#include "itkCurvatureAnisotropicDiffusionImageFilter.h" +#include "itkBilateralImageFilter.h" +#include "itkGradientAnisotropicDiffusionImageFilter.h" +#include "itkNearestNeighborInterpolateImageFunction.h" +#include "itkShapedNeighborhoodIterator.h" +#include "itkBinaryBallStructuringElement.h" + +namespace GeodesicTrainingSegmentation +{ + namespace ItkUtilGTS + { + /**Normalizes image values to be inside [0,255]*/ + template + typename UImageType::Pointer normalizeImage(typename UImageType::Pointer image, typename UImageType::PixelType max = 255) + { + auto filter = 
itk::RescaleIntensityImageFilter< UImageType, UImageType >::New(); + filter->SetInput(image); + filter->SetOutputMinimum(0); + filter->SetOutputMaximum(max); + filter->Update(); + return filter->GetOutput(); + } + + /**Normalizes image values to be inside [0,255]*/ + template + void normalizeImageVector(std::vector& images) + { + for (size_t i = 0; i < images.size(); i++) { + images[i] = normalizeImage(images[i]); + } + } + + /**Normalizes image values to be inside [0,255]*/ + template + void normalizeImageMap(std::map< KeysType, typename UImageType::Pointer>& map) + { + if (map.size() == 0) { return; } + + // Find a list of the different keys + std::vector keys; + keys.reserve(map.size()); + for (auto const& imap : map) { + keys.push_back(imap.first); + } + + // Normalize + for (auto key : keys) { + map[key] = normalizeImage(map[key]); + } + } + + /**Calculates the maximum pixel value of an image*/ + template + typename UImageType::PixelType getImageMaximum(typename UImageType::Pointer image) + { + auto imageCalculatorFilter = itk::MinimumMaximumImageCalculator::New(); + imageCalculatorFilter->SetImage(image); + imageCalculatorFilter->ComputeMaximum(); + return imageCalculatorFilter->GetMaximum(); + } + + /**Changes the type of an image and rescales it to be inside [0,255]*/ + template + typename UImageType::Pointer castAndRescaleImage(typename TImageType::Pointer input) + { + typedef itk::CastImageFilter CastFilterType; + + typename CastFilterType::Pointer castFilter = CastFilterType::New(); + castFilter->SetInput(input); + + auto filter = itk::RescaleIntensityImageFilter< UImageType, UImageType >::New(); + filter->SetInput(castFilter->GetOutput()); + filter->SetOutputMinimum(0); + filter->SetOutputMaximum(255); + filter->Update(); + return filter->GetOutput(); + } + + /**Create a zero image with exactly the same dimensions as another image*/ + template + typename UImageType::Pointer initializeOutputImageBasedOn(typename TImageType::Pointer image) + { + typename UImageType::Pointer res = UImageType::New(); + + res->SetRegions(image->GetLargestPossibleRegion()); + res->SetRequestedRegion(image->GetRequestedRegion()); + res->SetBufferedRegion(image->GetBufferedRegion()); + res->Allocate(); + res->FillBuffer(0); + res->SetDirection(image->GetDirection()); + res->SetOrigin(image->GetOrigin()); + res->SetSpacing(image->GetSpacing()); + + return res; + } + + /**Normalize an image by setting its mean to 0 and variance to 1*/ + template + static typename VImageType::Pointer statisticalImageNormalization(typename UImageType::Pointer image, typename VImageType::PixelType normMax = 255) + { + auto normalizeFilter = itk::NormalizeImageFilter::New(); + normalizeFilter->SetInput(image); + normalizeFilter->Update(); + return normalizeImage(normalizeFilter->GetOutput(), normMax); + } + + /**Normalize an image by setting its mean to 0 and variance to 1*/ + template + static void statisticalImageVectorNormalization(std::vector& images, typename VImageType::PixelType normMax = 255) + { + for (size_t i = 0; i < images.size(); i++) { + images[i] = statisticalImageNormalization(images[i], normMax); + } + } + + /**Normalize an image by setting its mean to 0 and variance to 1*/ + template + static void statisticalImageMapNormalization(std::map< KeysType, typename UImageType::Pointer>& map, typename VImageType::PixelType normMax = 255) + { + if (map.size() == 0) { return; } + + // Find a list of the different keys + std::vector keys; + keys.reserve(map.size()); + for (auto const& imap : map) { + 
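+				// Collect the keys first; the corresponding map entries are replaced
+				// with their normalized versions in the loop that follows.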
keys.push_back(imap.first); + } + + // Statistical Normalize + for (auto key : keys) { + map[key] = statisticalImageNormalization(map[key], normMax); + } + } + + template + static typename TImageType::Pointer + changeImageSpacing(typename TImageType::Pointer input, + typename TImageType::SpacingType outputSpacing, + bool isLabelsImage = false, + bool forceCertainSize = false, + typename TImageType::SizeType forcedSize = typename TImageType::SizeType(), + bool forceCertainOrigin = false, + typename TImageType::PointType forcedOrigin = typename TImageType::PointType()) + { + typename TImageType::SizeType inputSize = input->GetLargestPossibleRegion().GetSize(); + typename TImageType::SpacingType inputSpacing = input->GetSpacing(); + const unsigned int dimensions = inputSize.GetSizeDimension(); + + forceCertainOrigin = forceCertainOrigin; // Bypass unused parameter warning for now + + bool isAnythingDifferent = false; + for (unsigned int i=0; i < dimensions; i++) { + if (inputSpacing[i] != outputSpacing[i]) { + isAnythingDifferent = true; + break; + } + } + if (!isAnythingDifferent) { return input; } + + // Print input information + std::cout << "\tInput image size: " << inputSize << "\n"; + std::cout << "\tInput image spacing: " << inputSpacing << "\n"; + + // Create the new image + std::cout << "\tOperating...\n"; + typename TImageType::SizeType outputSize; + + for (unsigned int i=0; i < dimensions; i++) { + outputSize[i] = (inputSpacing[i] / outputSpacing[i]) * inputSize[i]; + } + + typedef itk::IdentityTransform TransformType; + + typedef itk::ResampleImageFilter ResampleImageFilterType; + typename ResampleImageFilterType::Pointer resample = ResampleImageFilterType::New(); + resample->SetInput(input); + resample->SetOutputParametersFromImage(input); + resample->SetOutputStartIndex(input->GetLargestPossibleRegion().GetIndex()); + resample->SetOutputSpacing(outputSpacing); + resample->SetOutputDirection(input->GetDirection()); + resample->SetTransform(TransformType::New()); + + if (forceCertainSize) { + resample->SetSize(forcedSize); + } + else { + resample->SetSize(outputSize); + } + + if (forceCertainSize) { + resample->SetOutputOrigin(forcedOrigin); + } + else { + resample->SetOutputOrigin(input->GetOrigin()); + } + + if (isLabelsImage) { + typedef itk::NearestNeighborInterpolateImageFunction Interpolator; + resample->SetInterpolator(Interpolator::New()); + } + + resample->UpdateLargestPossibleRegion(); + + // Print output information + std::cout << "\tOutput image size: " << ((forceCertainSize)? forcedSize : outputSize) << "\n"; + std::cout << "\tOutput image spacing: " << outputSpacing << "\n"; + + return resample->GetOutput(); + } + + /**Change the image spacing to be equal in all directions. Picks the the smallest value + * from the original spacing for everything. 
Example [0.2,0.5,2] -> [0.2,0.2,0.2]*/ + template + static typename TImageType::Pointer + normalizeImageSpacing(typename TImageType::Pointer input, + bool isLabelsImage = false) + { + typename TImageType::SizeType inputSize = input->GetLargestPossibleRegion().GetSize(); + typename TImageType::SpacingType inputSpacing = input->GetSpacing(); + const unsigned int dimensions = inputSize.GetSizeDimension(); + + // Find the minimum value from input spacing + float minSpacing = inputSpacing[0]; + for (unsigned int i=1; i < dimensions; i++) { + if (inputSpacing[i] < minSpacing) { + minSpacing = inputSpacing[i]; + } + } + + // Set it as the output spacing + typename TImageType::SpacingType outputSpacing; + + for (unsigned int i=0; i < dimensions; i++) { + outputSpacing[i] = minSpacing; + } + + return changeImageSpacing(input, outputSpacing, isLabelsImage); + } + + template + static typename TImageType::Pointer + resizeImage( + typename TImageType::Pointer input, + typename TImageType::SizeType outputSize, + bool isLabelsImage = false) + { + typename TImageType::SizeType inputSize = input->GetLargestPossibleRegion().GetSize(); + const unsigned int dimensions = inputSize.GetSizeDimension(); + + // Print input information + std::cout << "Input image size: " << inputSize << "\n"; + + // Resize + typename TImageType::SpacingType outputSpacing; + + //outputSpacing = input->GetSpacing(); + for (unsigned int i=0; i < dimensions; i++) { + outputSpacing[i] = input->GetSpacing()[i] * + (static_cast(inputSize[i]) / static_cast(outputSize[i])); + } + + + std::cout << "Resizing...\n"; + typedef itk::IdentityTransform TransformType; + + typedef itk::ResampleImageFilter ResampleImageFilterType; + typename ResampleImageFilterType::Pointer resample = ResampleImageFilterType::New(); + resample->SetInput(input); + resample->SetOutputParametersFromImage(input); + resample->SetSize(outputSize); + resample->SetOutputSpacing(outputSpacing); + resample->SetOutputOrigin(input->GetOrigin()); + resample->SetOutputStartIndex(input->GetLargestPossibleRegion().GetIndex()); + resample->SetOutputDirection(input->GetDirection()); + resample->SetTransform(TransformType::New()); + + if (isLabelsImage) { + typedef itk::NearestNeighborInterpolateImageFunction Interpolator; + resample->SetInterpolator(Interpolator::New()); + } + + resample->UpdateLargestPossibleRegion(); + return resample->GetOutput(); + } + + /**Resize an image to have about requestedNumberOfPixels pixels*/ + template + static typename TImageType::Pointer + resizeImageMaximumPixelNumber( + typename TImageType::Pointer input, + unsigned int requestedNumberOfPixels, + bool isLabelsImage = false) + { + typename TImageType::SizeType inputSize = input->GetLargestPossibleRegion().GetSize(); + const unsigned int dimensions = inputSize.GetSizeDimension(); + + unsigned int inputPixelTotal = 1; + for (unsigned int i=0; i < dimensions; i++) { + inputPixelTotal *= inputSize[i]; + } + + if (inputPixelTotal <= requestedNumberOfPixels) { return input; } + + // Print input information + std::cout << "\tInput image size: " << inputSize << "\n"; + std::cout << "\tInput total # of pixels: " << inputPixelTotal << "\n"; + std::cout << "\tRequested # of pixels: " << requestedNumberOfPixels << "\n---\n"; + + double ratio = 1.0 * requestedNumberOfPixels / inputPixelTotal; + double ratioPerDimension = std::pow(ratio, 1.0 / dimensions); + + typename TImageType::SizeType outputSize; + + for (unsigned int i=0; i < dimensions; i++) { + outputSize[i] = std::lround(inputSize[i] * ratioPerDimension); 
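+				// Rounding can shrink a very thin dimension to 0, so clamp it to at least 1 below.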
+ if (outputSize[i] == 0) { outputSize[i] = 1; } + } + + unsigned int outputPixelTotal = 1; + for (unsigned int i=0; i < dimensions; i++) { + outputPixelTotal *= outputSize[i]; + } + std::cout << "\tOutput image size: " << outputSize << "\n"; + std::cout << "\tOutput total # of pixels: " << outputPixelTotal << "\n---\n"; + std::cout << "\tProposed ratio per dimensions: " << ratioPerDimension << "\n---\n"; + + // Resize + return resizeImage(input, outputSize, isLabelsImage); + } + + template + static typename UImageType::Pointer + curvatureAnisotropicDiffusionImageFilter(typename TImageType::Pointer input, + bool useImageSpacing = true, + bool rescaleAtTheEnd = false, + float rescaleMax = 255) + { + const unsigned int numberOfIterations = 5; + const double timeStep = ((TImageType::ImageDimension == 2) ? 0.12 : 0.05); + const double conductance = 3; + + using FilterType = itk::CurvatureAnisotropicDiffusionImageFilter< TImageType, UImageType >; + typename FilterType::Pointer filter = FilterType::New(); + filter->SetInput( input ); + filter->SetNumberOfIterations( numberOfIterations ); + filter->SetTimeStep( timeStep ); + filter->SetConductanceParameter( conductance ); + + if (useImageSpacing) + { + filter->UseImageSpacingOn(); + } + + filter->Update(); + + auto output = filter->GetOutput(); + + if (rescaleAtTheEnd) + { + using RescaleFilterType = itk::RescaleIntensityImageFilter; + typename RescaleFilterType::Pointer rescaler = RescaleFilterType::New(); + rescaler->SetOutputMinimum( 0 ); + rescaler->SetOutputMaximum( rescaleMax ); + rescaler->SetInput( output ); + rescaler->Update(); + output = rescaler->GetOutput(); + } + + return output; + } + + template + static typename UImageType::Pointer + gradientAnisotropicDiffusionImageFilter(typename TImageType::Pointer input, + bool rescaleAtTheEnd = false, + float rescaleMax = 255) + { + const unsigned int numberOfIterations = 15; + const double timeStep = ((TImageType::ImageDimension == 2) ? 
0.12 : 0.05); + const double conductance = 3; + + using FilterType = itk::GradientAnisotropicDiffusionImageFilter< TImageType, UImageType >; + typename FilterType::Pointer filter = FilterType::New(); + filter->SetInput( input ); + filter->SetNumberOfIterations( numberOfIterations ); + filter->SetTimeStep( timeStep ); + filter->SetConductanceParameter( conductance ); + + filter->Update(); + + typename UImageType::Pointer output = filter->GetOutput(); + + if (rescaleAtTheEnd) + { + using RescaleFilterType = itk::RescaleIntensityImageFilter; + typename RescaleFilterType::Pointer rescaler = RescaleFilterType::New(); + rescaler->SetOutputMinimum( 0 ); + rescaler->SetOutputMaximum( rescaleMax ); + rescaler->SetInput( output ); + rescaler->Update(); + output = rescaler->GetOutput(); + } + + return output; + } + + template + static typename UImageType::Pointer + BilateralImageFilter(typename TImageType::Pointer input, + bool rescaleAtTheEnd = false, float rescaleMax = 255) + { + using FilterType = itk::BilateralImageFilter< TImageType, UImageType >; + typename FilterType::Pointer filter = FilterType::New(); + filter->SetInput( input ); + filter->SetDomainSigma( 3.0 ); + filter->SetRangeSigma( 30.0 ); + + filter->Update(); + + typename UImageType::Pointer output = filter->GetOutput(); + + if (rescaleAtTheEnd) + { + std::cout << "Rescaling...\n"; + using RescaleFilterType = itk::RescaleIntensityImageFilter; + typename RescaleFilterType::Pointer rescaler = RescaleFilterType::New(); + rescaler->SetOutputMinimum( 0 ); + rescaler->SetOutputMaximum( rescaleMax ); + rescaler->SetInput( output ); + rescaler->Update(); + output = rescaler->GetOutput(); + } + + return output; + } + + + /** Increase the radius of points in a labels image. + * Obviously useful for hand drawings, rather than an image where labels cover everything. + * Think of it like increasing the "marker size" after drawing. + @param inputLabels the input labels image + @param boldRadius an array with size equal to the image dimensions. It has the desired bolding. 
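+   (e.g. a boldRadius of {1,1,1} expands each labeled voxel to its immediate neighborhood)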
+ @return the bolded labels image + */ + template + static typename TImageType::Pointer + boldLabelsImage(const typename TImageType::Pointer& inputLabels, + const int (&boldRadius)[TImageType::ImageDimension]) + { + typedef itk::ImageRegionIterator Iterator; + typedef itk::ShapedNeighborhoodIterator NIterator; + typedef itk::BinaryBallStructuringElement StructuringElementType; + + typename StructuringElementType::RadiusType elementRadius; + for (unsigned int i = 0; i < TImageType::ImageDimension; i++) { + elementRadius[i] = boldRadius[i]; + } + + // Structuring element is used to give shape to the neighborhood iterator (here a ellipsoid) + StructuringElementType structuringElement; + structuringElement.SetRadius(elementRadius); + structuringElement.CreateStructuringElement(); + + // Initialize the output image + typename TImageType::Pointer outputLabels = initializeOutputImageBasedOn(inputLabels); + + // Neighborhood iterators for input and output + // NIterator snIterInput(structuringElement.GetRadius(), inputLabels, inputLabels->GetRequestedRegion()); + // snIterInput.CreateActiveListFromNeighborhood(structuringElement); + // snIterInput.NeedToUseBoundaryConditionOff(); + Iterator iterInput(inputLabels, inputLabels->GetRequestedRegion()); + + NIterator snIterOutput(structuringElement.GetRadius(), outputLabels, outputLabels->GetRequestedRegion()); + snIterOutput.CreateActiveListFromNeighborhood(structuringElement); + snIterOutput.NeedToUseBoundaryConditionOff(); + + // Helper variables for the loop + typename TImageType::PixelType val; + // bool statusIgnore; + // typename NIterator::Iterator nIterInput; + typename NIterator::Iterator nIterOutput; + + // Iterate throughout the input/output images + for (/*snIterInput*/iterInput.GoToBegin(), snIterOutput.GoToBegin(); + !iterInput/*snIterInput*/.IsAtEnd(); + ++iterInput/*snIterInput*/, ++snIterOutput) + { + val = iterInput/*nIterInput*/.Get(); + if (val == 0) { continue; } // 0 means no label + + // Iterate through the neighborhood + for (nIterOutput = snIterOutput.Begin(); !nIterOutput.IsAtEnd(); ++nIterOutput) + { + nIterOutput.Set(val); + } + } + + return outputLabels; + } + + template + static typename TImageType::Pointer + resampleNormalImage(const typename TImageType::Pointer& image, + const typename TImageType::SizeType& desiredSize, + const typename TImageType::SpacingType& desiredSpacing) + { + // typename TImageType::SizeType inputSize = image->GetLargestPossibleRegion().GetSize(); + // typename TImageType::SpacingType inputSpacing = image->GetSpacing(); + + using TransformType = itk::IdentityTransform; + using ResampleFilterType = itk::ResampleImageFilter; + + // Instantiate the transform and specify it should be the identity transform. 
+ typename TransformType::Pointer transform = TransformType::New(); + transform->SetIdentity(); + + // Initiate the resampler + typename ResampleFilterType::Pointer resampleFilter = ResampleFilterType::New(); + resampleFilter->SetTransform(transform); + resampleFilter->SetDefaultPixelValue(0); + resampleFilter->SetOutputParametersFromImage(image); + resampleFilter->SetOutputStartIndex(image->GetLargestPossibleRegion().GetIndex()); + resampleFilter->SetOutputOrigin(image->GetOrigin()); + resampleFilter->SetOutputDirection(image->GetDirection()); + + resampleFilter->SetSize(desiredSize); + resampleFilter->SetOutputSpacing(desiredSpacing); + + // Execute the filter + resampleFilter->SetInput(image); + resampleFilter->UpdateLargestPossibleRegion(); + + return resampleFilter->GetOutput(); + } + + template + static typename TImageType::Pointer + resampleLabelsImage(const typename TImageType::Pointer& inputLabels, + const typename TImageType::SizeType& desiredSize, + const typename TImageType::SpacingType& desiredSpacing, + bool verbose = false) + { + typename TImageType::SizeType inputSize = inputLabels->GetLargestPossibleRegion().GetSize(); + // typename TImageType::SpacingType inputSpacing = inputLabels->GetSpacing(); + + using TransformType = itk::IdentityTransform; + // using GaussianInterpolatorType = + // itk::LabelImageGaussianInterpolateImageFunction; + using NearestNeighborInterpolatorType = + itk::NearestNeighborInterpolateImageFunction; + using ResampleFilterType = itk::ResampleImageFilter; + + // Instantiate the transform and specify it should be the identity transform. + typename TransformType::Pointer transform = TransformType::New(); + transform->SetIdentity(); + + // Initiate the interpolator + typename NearestNeighborInterpolatorType::Pointer nearestNeighborInterpolator = + NearestNeighborInterpolatorType::New(); + + // Initiate the resampler + typename ResampleFilterType::Pointer resampleFilter = ResampleFilterType::New(); + resampleFilter->SetTransform(transform); + resampleFilter->SetInterpolator(nearestNeighborInterpolator); + resampleFilter->SetDefaultPixelValue(0); + resampleFilter->SetOutputParametersFromImage(inputLabels); + resampleFilter->SetOutputStartIndex(inputLabels->GetLargestPossibleRegion().GetIndex()); + resampleFilter->SetOutputOrigin(inputLabels->GetOrigin()); + resampleFilter->SetOutputDirection(inputLabels->GetDirection()); + + resampleFilter->SetSize(desiredSize); + resampleFilter->SetOutputSpacing(desiredSpacing); + + // The input labels might need to be bolded before rescaling + // Find the bold radius for each dimension + int boldRadius[TImageType::ImageDimension]; + bool shouldBold = false; + if (verbose) { std::cout << "\tboldRadius: ["; } + for (unsigned int i=0; i= 1) { shouldBold = true; } + if (boldRadius[i] < 1) { boldRadius[i] = 0; } + if (verbose) { std::cout << boldRadius[i] << ((i+1==TImageType::ImageDimension)?"":", "); } + } + if (verbose) { std::cout << "]\n"; } + + // Bolding to more that 1 is probably pointless, though. + // It's better to keep the above code though, as it might be useful in the future. + // So this is a hack to use a maximum of 1 if there is no zeros, or 2 otherwise + { + if (shouldBold) + { + // Find if there are zeros + bool areThereZeros = false; + bool areThereValuesBiggerThanOne = false; + for (unsigned int i=0; i 1) { + areThereValuesBiggerThanOne = true; + } + } + + int maxRadius = ((areThereValuesBiggerThanOne) ? 2 : 1); + maxRadius = ((areThereZeros) ? 
maxRadius : 1); + + if (verbose) { std::cout << "\tActually used boldRadius: ["; } + for (unsigned int i=0; i 1) { + boldRadius[i] = maxRadius; + } + if (verbose) { std::cout << boldRadius[i] << ((i+1==TImageType::ImageDimension)?"":", "); } + } + if (verbose) { std::cout << "]\n"; } + } + } + + // Bold the labels image if necessary + if (shouldBold) + { + if (verbose) { std::cout << "\tBolding...\n"; } + resampleFilter->SetInput(boldLabelsImage(inputLabels, boldRadius)); + } + else { + resampleFilter->SetInput(inputLabels); + } + + // Execute the filter + std::cout << "\tUpdating Nearest Neighbor Interpolator Rescaling...\n"; + resampleFilter->UpdateLargestPossibleRegion(); + + return resampleFilter->GetOutput(); + } + + } +} + +#endif // !H_CBICA_ITK_UTIL_GTS \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/resource/cbica-logo.jpg b/Modules/CaPTkInteractiveSegmentation/resource/cbica-logo.jpg new file mode 100644 index 0000000..f45f5e7 Binary files /dev/null and b/Modules/CaPTkInteractiveSegmentation/resource/cbica-logo.jpg differ diff --git a/Modules/CaPTkInteractiveSegmentation/resource/mll_icon2.svg b/Modules/CaPTkInteractiveSegmentation/resource/mll_icon2.svg new file mode 100644 index 0000000..c57f68e --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/resource/mll_icon2.svg @@ -0,0 +1,79 @@ + + + + + + + + + + image/svg+xml + + + + + + + + + + + diff --git a/Modules/CaPTkInteractiveSegmentation/src/AdaptiveGeodesicDistance.cpp b/Modules/CaPTkInteractiveSegmentation/src/AdaptiveGeodesicDistance.cpp new file mode 100644 index 0000000..4acee8c --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/AdaptiveGeodesicDistance.cpp @@ -0,0 +1,3 @@ +#include "AdaptiveGeodesicDistance.h" + +// No implementation for templated class \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/src/CaPTkInteractiveSegmentation.cpp b/Modules/CaPTkInteractiveSegmentation/src/CaPTkInteractiveSegmentation.cpp new file mode 100644 index 0000000..ec39aee --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/CaPTkInteractiveSegmentation.cpp @@ -0,0 +1,575 @@ +#include "CaPTkInteractiveSegmentation.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "itkExtractImageFilter.h" + +#include +#include + +#include +#include + +CaPTkInteractiveSegmentation::CaPTkInteractiveSegmentation( + mitk::DataStorage::Pointer dataStorage, + QObject *parent) + : QObject(parent) +{ + m_DataStorage = dataStorage; + + connect(&m_Watcher, SIGNAL(finished()), this, SLOT(OnAlgorithmFinished())); +} + +void CaPTkInteractiveSegmentation::Run(std::vector &images, + mitk::LabelSetImage::Pointer &seeds) +{ + std::cout << "[CaPTkInteractiveSegmentation::Run] " + << "Number of images: " << std::to_string(images.size()) << "\n"; + + /* ---- Check if it's already running ---- */ + + if (m_IsRunning) + { + QMessageBox msgError; + msgError.setText( + "The algorithm is already running!\nPlease wait for it to finish." + ); + msgError.setIcon(QMessageBox::Critical); + msgError.setWindowTitle("Please wait"); + msgError.exec(); + return; + } + m_IsRunning = true; + + /* ---- Check requirements ---- */ + + bool ok = true; // Becomes false if there is an issue + std::string problemStr = ""; // Populated if there is an issue + + // Check if there is at least one image + if (images.size() == 0) + { + ok = false; + problemStr = std::string("No input images. 
At least one image") + + std::string(" should be loaded ") + + std::string(" in the data manager."); + } + + // Check if the seeds image exists + if (ok && seeds == nullptr) + { + ok = false; + problemStr = "Please create a seeds image"; + } + + // Check if all the input images and the seeds have the same dimensionality + if (ok) + { + auto refDim = images[0]->GetDimension(); + + // Check if the other images are on par with the ref + for (size_t i = 1; i < images.size(); i++) + { + auto dim = images[i]->GetDimension(); + + if (dim != refDim) + { + ok = false; + } + + if (!ok) + { + problemStr = "All the images should have the same dimension."; + break; + } + } + + // Check if the seeds are on par with the ref + if (ok) + { + if (refDim == 3 && refDim != seeds->GetDimension()) + { + ok = false; + problemStr = "The seeds should have the same dimension as the images."; + } + } + } + + // Check if the images and the seeds have the same size, spacing, origin, direction + if (ok) + { + auto ref = images[0]; + + // Check if the other images are on par with the ref + for (size_t i = 1; i < images.size(); i++) + { + auto im = images[i]; + + // TODO + + if (!ok) + { + problemStr = std::string("All the images should have the same ") + + std::string("size, spacing, origin, direction."); + break; + } + } + + // Check if the seeds are on par with the ref + if (ok) + { + // TODO + + if (!ok) + { + problemStr = std::string("The seeds should have the same size, ") + + std::string("spacing, origin, direction as the images."); + } + } + } + + // Check if there are at least two labels present in the seeds + if (ok) + { + bool foundOne = false; + bool foundTwo = false; + // Iterate through the labels in the seeds + // TODO: ! + foundTwo = foundOne = true; // TODO: Delete this + + if (!foundTwo) + { + ok = false; + problemStr = std::string("Please draw with at least two labels.") + + std::string(" One has to always be for the background tissue."); + } + } + + // Return if there is an issue + if (!ok) + { + QMessageBox msgError; + msgError.setText(problemStr.c_str()); + msgError.setIcon(QMessageBox::Critical); + msgError.setWindowTitle("Incorrect state."); + msgError.exec(); + m_IsRunning = false; + return; + } + + /* ---- Run ---- */ + + m_FutureResult = QtConcurrent::run(this, &CaPTkInteractiveSegmentation::RunThread, + images, seeds); + m_Watcher.setFuture(m_FutureResult); +} + +// void CaPTkInteractiveSegmentation::Run(Json::Value& task_json, Json::Value& cohort_json) +// { +// /* ---- Parse the task ---- */ + +// std::string task_name = task_json.get("task_name", "UTF-8" ).asString(); +// std::string application = task_json.get("application", "UTF-8" ).asString(); +// std::string task_type = task_json.get("task_type", "UTF-8" ).asString(); + +// std::string results_dir = task_json.get("results_dir", "UTF-8" ).asString(); + +// /* ---- Run for each subject of the cohort ---- */ + +// std::string cohort_name = cohort_json.get("cohort_name", "UTF-8" ).asString(); + +// for (auto& subj : cohort_json["subjects"]) +// { +// auto name = subj["name"]; +// std::cout << name << std::endl; +// // std::vector images; +// // mitk::LabelSetImage::Pointer seeds; + +// // Find all the images +// for (auto& image : subj["images"]) +// { +// auto modality = image["modality"]; +// std::cout << modality << std::endl; +// } +// } + +// std::cout << "cohort name: " << cohort_name << std::endl; +// } + +// void CaPTkInteractiveSegmentation::Run(std::string task_json_path, std::string cohort_json_path) +// { +// try +// { +// 
Json::Value taskRoot, cohortRoot; + +// // Read the two JSON from file +// std::ifstream taskStream(task_json_path, std::ifstream::binary); +// taskStream >> taskRoot; +// std::ifstream cohortStream(cohort_json_path, std::ifstream::binary); +// cohortStream >> cohortRoot; + +// this->Run(taskRoot, cohortRoot); +// } +// catch (const std::exception &e) +// { +// MITK_ERROR << e.what(); +// } +// catch (...) +// { +// MITK_ERROR << "Unexpected error!"; +// } +// } + +void CaPTkInteractiveSegmentation::SetProgressBar(QProgressBar* progressBar) +{ + m_ProgressBar = progressBar; + // if (m_ProgressBar) { m_ProgressBar->setValue(42); } +} + +void CaPTkInteractiveSegmentation::OnAlgorithmFinished() +{ + std::cout << "[CaPTkInteractiveSegmentation::OnAlgorithmFinished]\n"; + + mitk::DataNode::Pointer node; + + if (m_FutureResult.result().ok) + { + /* ---- Make seeds invisible ---- */ + mitk::DataStorage::SetOfObjects::ConstPointer all = + m_DataStorage->GetAll(); + for (mitk::DataStorage::SetOfObjects::ConstIterator it = all->Begin(); + it != all->End(); ++it) + { + if (it->Value().IsNotNull()) + { + std::string name = it->Value()->GetName(); + if (name.rfind("Seeds", 0) == 0) // Starts with + { + it->Value()->SetVisibility(false); + } + else if (name.rfind("Segmentation", 0) == 0) // Starts with + { + it->Value()->SetVisibility(false); + } + } + } + + /* ---- Add segmentation ---- */ + node = mitk::DataNode::New(); + node->SetData(m_FutureResult.result().segmentation); + node->SetName(FindNextAvailableSegmentationName()); + node->SetBoolProperty("captk.interactive.segmentation.output", true); + m_DataStorage->Add(node); + node->SetVisibility(true); + } + else + { + // Something went wrong + QMessageBox msgError; + msgError.setText(m_FutureResult.result().errorMessage.c_str()); + msgError.setIcon(QMessageBox::Critical); + msgError.setWindowTitle("CaPTk Interactive Segmentation Error!"); + msgError.exec(); + } + + m_FutureResult = QFuture(); // Don't keep the results indefinitely + + // Change the layer of every image to 1 + auto predicateIsImage = // Predicate to find if node is mitk::Image + mitk::TNodePredicateDataType::New(); + auto predicatePropertyIsHelper = // Predicate property to find if node is a helper object + mitk::NodePredicateProperty::New("helper object"); + auto predicateFinal = mitk::NodePredicateAnd::New(); + predicateFinal->AddPredicate(predicateIsImage); + predicateFinal->AddPredicate( + mitk::NodePredicateNot::New(predicatePropertyIsHelper)); + mitk::DataStorage::SetOfObjects::ConstPointer all = + m_DataStorage->GetSubset(predicateFinal); + for (mitk::DataStorage::SetOfObjects::ConstIterator it = all->Begin(); + it != all->End(); + ++it) + { + if (it->Value().IsNotNull()) + { + it->Value()->SetProperty("layer", mitk::IntProperty::New(1)); + } + } + + mitk::RenderingManager::GetInstance()->RequestUpdateAll(); + mitk::RenderingManager::GetInstance()->ForceImmediateUpdateAll(); + + node->SetProperty("layer", mitk::IntProperty::New(10)); + mitk::RenderingManager::GetInstance()->RequestUpdateAll(); + + m_IsRunning = false; +} + +CaPTkInteractiveSegmentation::Result +CaPTkInteractiveSegmentation::RunThread(std::vector &images, + mitk::LabelSetImage::Pointer &seeds) +{ + std::cout << "[CaPTkInteractiveSegmentation::RunThread]\n"; + + CaPTkInteractiveSegmentation::Result runResult; + runResult.seeds = seeds; + + mitk::LabelSetImage::Pointer segm = mitk::LabelSetImage::New(); + + if (images[0]->GetDimension() == 3) + { + // [ 3D ] + + /* ---- Convert images from mitk to itk ---- */ + + 
std::vector::Pointer> imagesItk; + for (auto &image : images) + { + typename itk::Image::Pointer imageItk; + mitk::CastToItkImage(image, imageItk); + imagesItk.push_back(imageItk); + } + + /* ---- Convert seeds from mitk to itk ---- */ + + typedef itk::Image LabelsImageType3D; + typename LabelsImageType3D::Pointer seedsItk; + mitk::CastToItkImage(seeds, seedsItk); + + /* ---- Run algorithm ---- */ + + CaPTkInteractiveSegmentationAdapter<3> *algorithm = + new CaPTkInteractiveSegmentationAdapter<3>(); + if (m_ProgressBar) + { + std::cout << "[CaPTkInteractiveSegmentation::RunThread] " + << "Connecting Progress Bar\n"; + connect(algorithm, SIGNAL(ProgressUpdate(int)), + m_ProgressBar, SLOT(setValue(int))); + } + algorithm->SetInputImages(imagesItk); + algorithm->SetLabels(seedsItk); + auto result = algorithm->Execute(); + + /* ---- Parse result ---- */ + + if (result->ok) + { + mitk::Image::Pointer segmNormal; + mitk::CastToMitkImage(result->labelsImage, segmNormal); + segm->InitializeByLabeledImage(segmNormal); + runResult.segmentation = segm; + } + + delete algorithm; + runResult.ok = result->ok; + runResult.errorMessage = result->errorMessage; + } + else + { + // [ 2D ] + + // QMessageBox msgError; + // msgError.setText("Please use a 3D image"); + // msgError.setIcon(QMessageBox::Critical); + // msgError.setWindowTitle("2D is not supported yet"); + // msgError.exec(); + + /* ---- Convert images from mitk to itk ---- */ + + std::cout << "Transforming images...\n"; + std::vector::Pointer> imagesItk; + for (auto &image : images) + { + typename itk::Image::Pointer imageItk; + try + { + mitk::CastToItkImage(image, imageItk); + } + catch(const std::exception& e) + { + // Image type is not supported (probably a png or something) + std::cerr << e.what() << '\n'; + runResult.ok = false; + runResult.errorMessage = "Image type is not supported"; + return runResult; + } + + imagesItk.push_back(imageItk); + } + std::cout << "Transforming images finished.\n"; + + /* ---- Convert seeds from mitk to itk ---- */ + + typename itk::Image::Pointer seedsItk; + // (mitk::LabelSetImage is always 3D) + { + typedef itk::Image LabelsImageType2D; + typedef itk::Image LabelsImageType3D; + typename LabelsImageType3D::Pointer seedsItk3D; + mitk::CastToItkImage(seeds, seedsItk3D); + auto regionSize = seedsItk3D->GetLargestPossibleRegion().GetSize(); + regionSize[2] = 0; // Only 2D image is needed + LabelsImageType3D::IndexType regionIndex; + regionIndex.Fill(0); + LabelsImageType3D::RegionType desiredRegion(regionIndex, regionSize); + auto extractor = + itk::ExtractImageFilter< LabelsImageType3D, LabelsImageType2D >::New(); + extractor->SetExtractionRegion(desiredRegion); + extractor->SetInput(seedsItk3D); + extractor->SetDirectionCollapseToIdentity(); + extractor->Update(); + seedsItk = extractor->GetOutput(); + seedsItk->DisconnectPipeline(); + } + std::cout << "Transformed seeds.\n"; + + /* ---- Run algorithm ---- */ + + CaPTkInteractiveSegmentationAdapter<2> *algorithm = + new CaPTkInteractiveSegmentationAdapter<2>(); + if (m_ProgressBar) + { + std::cout << "[CaPTkInteractiveSegmentation::RunThread] " + << "Connecting Progress Bar\n"; + connect(algorithm, SIGNAL(ProgressUpdate(int)), + m_ProgressBar, SLOT(setValue(int))); + } + algorithm->SetInputImages(imagesItk); + algorithm->SetLabels(seedsItk); + auto result = algorithm->Execute(); + + /* ---- Parse result ---- */ + + if (result->ok) + { + mitk::Image::Pointer segmNormal; + + // Convert to 3D + mitk::CastToMitkImage(result->labelsImage, segmNormal); + 
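+            // mitk::LabelSetImage is always 3D, so lift the 2D result into a single-slice 3D image before initializing the segmentation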
mitk::Convert2Dto3DImageFilter::Pointer filter = + mitk::Convert2Dto3DImageFilter::New(); + filter->SetInput(segmNormal); + filter->Update(); + segmNormal = filter->GetOutput(); + + segm->InitializeByLabeledImage(segmNormal); + runResult.segmentation = segm; + } + + delete algorithm; + runResult.ok = result->ok; + runResult.errorMessage = result->errorMessage; + } + + // Copy the labels from seeds image (same for 2D and 3D) + { + mitk::LabelSet::Pointer referenceLabelSet = seeds->GetActiveLabelSet(); + mitk::LabelSet::Pointer outputLabelSet = segm->GetActiveLabelSet(); + + mitk::LabelSet::LabelContainerConstIteratorType itR; + mitk::LabelSet::LabelContainerConstIteratorType it; + + for (itR = referenceLabelSet->IteratorConstBegin(); + itR != referenceLabelSet->IteratorConstEnd(); + ++itR) + { + for (it = outputLabelSet->IteratorConstBegin(); + it != outputLabelSet->IteratorConstEnd(); + ++it) + { + if (itR->second->GetValue() == it->second->GetValue()) + { + it->second->SetColor(itR->second->GetColor()); + it->second->SetName(itR->second->GetName()); + + segm->GetActiveLabelSet()->UpdateLookupTable(it->second->GetValue()); // Update it + } + } + } + } + + return runResult; +} + +std::string CaPTkInteractiveSegmentation::FindNextAvailableSegmentationName() +{ + // Predicate to find if node is mitk::LabelSetImage + auto predicateIsLabelSetImage = + mitk::TNodePredicateDataType::New(); + + // Predicate property to find if node is a helper object + auto predicatePropertyIsHelper = + mitk::NodePredicateProperty::New("helper object"); + + // The images we want are but mitk::LabelSetImage and not helper obj + auto predicateFinal = mitk::NodePredicateAnd::New(); + predicateFinal->AddPredicate(predicateIsLabelSetImage); + predicateFinal->AddPredicate( + mitk::NodePredicateNot::New(predicatePropertyIsHelper) + ); + + int lastFound = 0; + + // Get those images + mitk::DataStorage::SetOfObjects::ConstPointer all = + m_DataStorage->GetSubset(predicateFinal); + for (mitk::DataStorage::SetOfObjects::ConstIterator it = all->Begin(); + it != all->End(); ++it) + { + if (it->Value().IsNotNull()) + { + std::string name = it->Value()->GetName(); + if (name.rfind("Segmentation", 0) == 0) // Starts with + { + if (name.length() == std::string("Segmentation").length()) + { + // Special case + if (lastFound < 1) + { + lastFound = 1; + } + } + else + { + if (name.rfind("Segmentation-", 0) == 0) // Starts with + { + std::string numStr = name.erase( + 0, std::string("Segmentation-").length() + ); + if (IsNumber(numStr)) + { + int num = std::stoi(numStr); + if (lastFound < num) + { + lastFound = num; + } + } + } + } + } + } + } + + // Construct and return the correct string + if (lastFound == 0) + { + return "Segmentation"; + } + else + { + return std::string("Segmentation-") + std::to_string(lastFound + 1); + } +} + +bool CaPTkInteractiveSegmentation::IsNumber(const std::string &s) +{ + return !s.empty() && std::find_if(s.begin(), + s.end(), [](char c) { return !std::isdigit(c); }) == s.end(); +} \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/src/CaPTkInteractiveSegmentationAdapter.cpp b/Modules/CaPTkInteractiveSegmentation/src/CaPTkInteractiveSegmentationAdapter.cpp new file mode 100644 index 0000000..db27100 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/CaPTkInteractiveSegmentationAdapter.cpp @@ -0,0 +1,3 @@ +#include "CaPTkInteractiveSegmentationAdapter.h" + +//templated class \ No newline at end of file diff --git 
a/Modules/CaPTkInteractiveSegmentation/src/CaPTkInteractiveSegmentationQtPart.cpp b/Modules/CaPTkInteractiveSegmentation/src/CaPTkInteractiveSegmentationQtPart.cpp new file mode 100644 index 0000000..98a9fde --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/CaPTkInteractiveSegmentationQtPart.cpp @@ -0,0 +1,2 @@ +#include "CaPTkInteractiveSegmentationQtPart.h" + diff --git a/Modules/CaPTkInteractiveSegmentation/src/ConfigParserRF.cpp b/Modules/CaPTkInteractiveSegmentation/src/ConfigParserRF.cpp new file mode 100644 index 0000000..9a9a07d --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/ConfigParserRF.cpp @@ -0,0 +1,55 @@ +#include "ConfigParserRF.h" + +void ConfigParserRF::Parse(std::string filePath, double & trainingSamplePercentage, int & maxDepth, double & minSampleCountPercentage, int & maxCategories, int & activeVarCount, int & numberOfTrees, cv::Mat &priors) +{ + std::ifstream infile(filePath); + + infile >> trainingSamplePercentage >> maxDepth >> minSampleCountPercentage >> maxCategories >> activeVarCount >> numberOfTrees; + + float val; + while (infile >> val) { + if (val == 0) { + break; // No priors will be used + } + priors.push_back(val); + } +} + +void ConfigParserRF::PrintParseResult(double trainingSamplePercentage, int maxDepth, double minSampleCountPercentage, int maxCategories, int activeVarCount, int numberOfTrees, cv::Mat & priors) +{ + std::cout << "\nRF CONFIG:"; + std::cout << "\n\tTRAINING SAMPLE %: " << trainingSamplePercentage; + std::cout << "\n\tMAX DEPTH: " << maxDepth; + std::cout << "\n\tMIN SAMPLE COUNT %: " << minSampleCountPercentage; + std::cout << "\n\tMAX CATEGORIES: " << maxCategories; + std::cout << "\n\tACTIVE VAR COUNT: " << activeVarCount; + std::cout << "\n\tNUMBER OF TREES: " << numberOfTrees; + std::cout << "\n\tPRIORS:"; + + for (int i = 0; i < priors.rows; i++) + { + std::cout << " " << priors.ptr(i)[0]; + } + std::cout << "\n\n"; +} + +void ConfigParserRF::PrintParseResultToFile(std::string filePath, double trainingSamplePercentage, int maxDepth, double minSampleCountPercentage, int maxCategories, int activeVarCount, int numberOfTrees, cv::Mat & priors) +{ + std::ofstream rfReportFile; + rfReportFile.open(filePath, std::ios_base::app); //append file + + rfReportFile << "\nRF CONFIG:"; + rfReportFile << "\n\tTRAINING SAMPLE %: " << trainingSamplePercentage; + rfReportFile << "\n\tMAX DEPTH: " << maxDepth; + rfReportFile << "\n\tMIN SAMPLE COUNT %: " << minSampleCountPercentage; + rfReportFile << "\n\tMAX CATEGORIES: " << maxCategories; + rfReportFile << "\n\tACTIVE VAR COUNT: " << activeVarCount; + rfReportFile << "\n\tNUMBER OF TREES: " << numberOfTrees; + rfReportFile << "\n\tPRIORS:"; + + for (int i = 0; i < priors.rows; i++) + { + rfReportFile << " " << priors.ptr(i)[0]; + } + rfReportFile << "\n\n"; +} \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/src/ConvertionsOpenCV.cpp b/Modules/CaPTkInteractiveSegmentation/src/ConvertionsOpenCV.cpp new file mode 100644 index 0000000..0659297 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/ConvertionsOpenCV.cpp @@ -0,0 +1,160 @@ +#include "ConvertionsOpenCV.h" + +cv::ml::SVM::Types SvmSuiteConvertions::TypeFromString(std::string typeString) { + if (typeString == "C_SVC") { + return cv::ml::SVM::Types::C_SVC; + } + else if (typeString == "NU_SVC") { + return cv::ml::SVM::Types::NU_SVC; + } + else if (typeString == "ONE_CLASS") { + return cv::ml::SVM::Types::ONE_CLASS; + } + else if (typeString == "EPS_SVR") { + return 
cv::ml::SVM::Types::EPS_SVR; + } + else if (typeString == "NU_SVR") { + return cv::ml::SVM::Types::NU_SVR; + } + else { + //invalid + return cv::ml::SVM::Types::C_SVC; + } +} + +std::string SvmSuiteConvertions::StringFromType(cv::ml::SVM::Types type) { + switch (type) { + case cv::ml::SVM::Types::C_SVC: + return "C_SVC"; + case cv::ml::SVM::Types::NU_SVC: + return "NU_SVC"; + case cv::ml::SVM::Types::ONE_CLASS: + return "ONE_CLASS"; + case cv::ml::SVM::Types::EPS_SVR: + return "EPS_SVR"; + case cv::ml::SVM::Types::NU_SVR: + return "NU_SVR"; + default: + return "invalid"; + } +} + +cv::ml::SVM::KernelTypes SvmSuiteConvertions::KernelTypeFromString(std::string kernelTypeString) { + if (kernelTypeString == "linear") { + return cv::ml::SVM::KernelTypes::LINEAR; + } + else if (kernelTypeString == "rbf") { + return cv::ml::SVM::KernelTypes::RBF; + } + else if (kernelTypeString == "poly") { + return cv::ml::SVM::KernelTypes::POLY; + } + else if (kernelTypeString == "sigmoid") { + return cv::ml::SVM::KernelTypes::SIGMOID; + } + else if (kernelTypeString == "chi2") { + return cv::ml::SVM::KernelTypes::CHI2; + } + else if (kernelTypeString == "inter") { + return cv::ml::SVM::KernelTypes::INTER; + } + else { + //invalid + return cv::ml::SVM::KernelTypes::RBF; + } +} + +std::string SvmSuiteConvertions::StringFromKernelType(cv::ml::SVM::KernelTypes kernelType) { + switch (kernelType) + { + case cv::ml::SVM::LINEAR: + return "linear"; + case cv::ml::SVM::RBF: + return "rbf"; + case cv::ml::SVM::POLY: + return "poly"; + case cv::ml::SVM::SIGMOID: + return "sigmoid"; + case cv::ml::SVM::CHI2: + return "chi2"; + case cv::ml::SVM::INTER: + return "inter"; + default: + return "invalid"; + } +} + +cv::ml::SVM::ParamTypes SvmSuiteConvertions::ParamTypeFromString(std::string paramTypeString) { + if (paramTypeString == "c") { + return cv::ml::SVM::ParamTypes::C; + } + else if (paramTypeString == "gamma") { + return cv::ml::SVM::ParamTypes::GAMMA; + } + else if (paramTypeString == "p") { + return cv::ml::SVM::ParamTypes::P; + } + else if (paramTypeString == "nu") { + return cv::ml::SVM::ParamTypes::NU; + } + else if (paramTypeString == "coef") { + return cv::ml::SVM::ParamTypes::COEF; + } + else if (paramTypeString == "degree") { + return cv::ml::SVM::ParamTypes::DEGREE; + } + else { + //invalid + return cv::ml::SVM::ParamTypes::GAMMA; + } +} + +std::string SvmSuiteConvertions::StringFromParamType(cv::ml::SVM::ParamTypes paramType) { + switch (paramType) + { + case cv::ml::SVM::C: + return "c"; + case cv::ml::SVM::GAMMA: + return "gamma"; + case cv::ml::SVM::P: + return "p"; + case cv::ml::SVM::NU: + return "nu"; + case cv::ml::SVM::COEF: + return "coef"; + case cv::ml::SVM::DEGREE: + return "degree"; + default: + return "invalid"; + } +} + +int SvmSuiteConvertions::TermCriteriaTypeFromString(std::string termCriteriaTypeString) { + if (termCriteriaTypeString == "MAX_ITER") { + return cv::TermCriteria::MAX_ITER; + } + else if (termCriteriaTypeString == "EPS") { + return cv::TermCriteria::EPS; + } + else if (termCriteriaTypeString == "MAX_ITER+EPS") { + return cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS; + } + else { + //invalid + return cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS; + } +} + +std::string SvmSuiteConvertions::StringFromTermCriteriaType(int termCriteriaType) { + switch (termCriteriaType) + { + case cv::TermCriteria::MAX_ITER: + return "MAX_ITER"; + case cv::TermCriteria::EPS: + return "EPS"; + case (cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS): + return "MAX_ITER+EPS"; + default: 
+ return "invalid"; + } +} \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/src/ConvertionsYAML.cpp b/Modules/CaPTkInteractiveSegmentation/src/ConvertionsYAML.cpp new file mode 100644 index 0000000..cabac1c --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/ConvertionsYAML.cpp @@ -0,0 +1,108 @@ +#include "ConvertionsYAML.h" + +#include "ConvertionsOpenCV.h" + +YAML::Node SvmSuiteConvertions::yamlConvertSvmDescriptionToNode(SvmSuite::SvmDescription& svm_description) +{ + YAML::Node node; + node[YAML_KERNEL_TYPE] = svm_description.GetKernelTypeAsString(); + node[YAML_TYPE] = svm_description.GetTypeAsString(); + + if (svm_description.GetKfold() != 10) { + node[YAML_KFOLD] = svm_description.GetKfold(); + } + if (svm_description.GetNeighborhoodRadius() != 0) { + node[YAML_NEIGHBORHOOD_RADIUS] = svm_description.GetNeighborhoodRadius(); + } + if (svm_description.GetConsiderWeights()) { + node[YAML_CONSIDER_WEIGHTS] = svm_description.GetConsiderWeights(); + } + if (svm_description.GetImportance() != 1.0) { + node[YAML_IMPORTANCE] = svm_description.GetImportance(); + } + if (svm_description.GetModelPath() != "") { + node[YAML_MODEL_PATH] = svm_description.GetModelPath(); + } + + for (const auto param : { cv::ml::SVM::ParamTypes::C, cv::ml::SVM::ParamTypes::GAMMA, cv::ml::SVM::ParamTypes::P, + cv::ml::SVM::ParamTypes::NU, cv::ml::SVM::ParamTypes::COEF, cv::ml::SVM::ParamTypes::DEGREE }) + { + if (svm_description.isParameterSetToSpecificValue(param)) + { + switch (param) + { + case cv::ml::SVM::ParamTypes::C: { + double c = svm_description.GetC(); + if (c != 1) { + node[YAML_C] = c; + } + } break; + case cv::ml::SVM::ParamTypes::GAMMA: { + double gamma = svm_description.GetGamma(); + if (gamma != 1) { + node[YAML_GAMMA] = gamma; + } + } break; + case cv::ml::SVM::ParamTypes::P: { + double p = svm_description.GetP(); + if (p != 0) { + node[YAML_P] = p; + } + } break; + case cv::ml::SVM::ParamTypes::NU: { + double nu = svm_description.GetNu(); + if (nu != 0) { + node[YAML_NU] = nu; + } + } break; + case cv::ml::SVM::ParamTypes::COEF: { + double coef = svm_description.GetCoef(); + if (coef != 0) { + node[YAML_COEF] = coef; + } + } break; + case cv::ml::SVM::ParamTypes::DEGREE: { + double degree = svm_description.GetDegree(); + if (degree != 0) { + node[YAML_DEGREE] = degree; + } + } break; + default: + //invalid + break; + } + } + else { + auto grid = svm_description.GetParamGridForParameter(param); + auto default_grid = cv::ml::SVM::getDefaultGrid(param); + + if ((grid.minVal == default_grid.minVal) && (grid.maxVal == default_grid.maxVal) && (grid.logStep == default_grid.logStep)) { + node[StringFromParamType(param)] = YAML_AUTO; + } + else { + YAML::Node map; + map[YAML_MIN_VAL] = grid.minVal; + map[YAML_MAX_VAL] = grid.maxVal; + map[YAML_LOG_STEP] = grid.logStep; + + node[StringFromParamType(param)] = map; + } + } + } + + // For Term Criteria + + std::string termCriteriaType = StringFromTermCriteriaType(svm_description.GetTermCriteria().type); + int termCriteriaMax = svm_description.GetTermCriteria().maxCount; + double termCriteriaEps = svm_description.GetTermCriteria().epsilon; + + if ((termCriteriaType != "MAX_ITER+EPS") && (termCriteriaMax != 1000) && (termCriteriaEps != FLT_EPSILON)) { + YAML::Node map; + map[YAML_TERM_CRITERIA_TYPE] = termCriteriaType; + map[YAML_TERM_CRITERIA_MAX] = termCriteriaMax; + map[YAML_TERM_CRITERIA_EPS] = termCriteriaEps; + node[YAML_TERM_CRITERIA] = map; + } + + return node; +} \ No newline at end of file diff --git 
a/Modules/CaPTkInteractiveSegmentation/src/EXTRA_ADVANCED.md b/Modules/CaPTkInteractiveSegmentation/src/EXTRA_ADVANCED.md new file mode 100644 index 0000000..ddfe4a9 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/EXTRA_ADVANCED.md @@ -0,0 +1,219 @@
+# Advanced Usage
+
+### Contents
+- [Use different labels](#use-different-labels)
+- [Change labels in a labels image](#change-labels-in-a-labels-image)
+- [Compare segmentation to ground truth](#compare-segmentation-to-ground-truth)
+- [More optional parameters](#more-optional-parameters)
+- [Isolate an area in an image](#isolate-an-area-in-an-image)
+- [Custom SVM ensemble](#custom-svm-ensemble)
+
+
+## Use different labels
+
+If you want to use different labels than the ones specified [here](README.md#input-labels), use the methods:
+```cpp
+void SetTumorCoreLabelMRI(int labelTC);
+```
+```cpp
+void SetEnhancedTumorLabelMRI(int labelET);
+```
+```cpp
+void SetEdemaLabelMRI(int labelED);
+```
+```cpp
+void SetHealthyTissueLabelMRI(int labelHT);
+```
+
+where labelTC/labelET/labelED/labelHT are non-zero integers.
+
+
+## Change labels in a labels image
+
+```cpp
+#include "GeodesicTrainingSegmentation.h"
+...
+typedef typename itk::Image< int, 3 >::Pointer LabelsImagePointer;
+LabelsImagePointer labelsPtr = ...;
+
+std::unordered_map< int, int > changeLabelsMap;
+
+changeLabelsMap[2] = 1; // Will change label 2 to 1
+changeLabelsMap[3] = 0; // Will change label 3 to 0
+
+GeodesicTrainingSegmentation::Coordinator<> gts;
+gts.SetMode(GeodesicTrainingSegmentation::MODE::CHANGE_LABELS);
+gts.SetLabels(labelsPtr);
+gts.SetChangeLabelsMap(changeLabelsMap);
+
+auto executeResult = gts.Execute();
+
+if (executeResult->ok) {
+  LabelsImagePointer labelsRenamedPtr = executeResult->labelsImage;
+}
+else {
+  std::cout << executeResult->errorMessage << "\n";
+}
+
+```
+
+```SetChangeLabelsMap``` can also be used in all the modes, like the code described [here](README.md#run). Note that if ```SetChangeLabelsMap``` is not used, labels for healthy tissue will be removed. Pass an empty changeLabelsMap if you don't want that.
+
+
+## Compare segmentation to ground truth
+
+```cpp
+#include "GeodesicTrainingSegmentation.h"
+...
+typedef typename itk::Image< int, 3 >::Pointer LabelsImagePointer;
+LabelsImagePointer labelsPtr = ...;
+LabelsImagePointer groundTruthPtr = ...;
+
+GeodesicTrainingSegmentation::Coordinator<> gts;
+gts.SetMode(GeodesicTrainingSegmentation::MODE::CHECK_ACCURACY);
+gts.SetLabels(labelsPtr); // The segmentation to be compared to ground truth
+gts.SetGroundTruth(groundTruthPtr);
+
+auto executeResult = gts.Execute();
+
+
+if (executeResult->ok) {
+  double diceScore = executeResult->diceScoreAll;
+  double sensitivity = executeResult->sensitivityAll;
+  int falsePositives = executeResult->falsePositiveCountAll;
+}
+else {
+  std::cout << executeResult->errorMessage << "\n";
+}
+```
+
+If you want some labels to be skipped when comparing with a ground truth segmentation, set the following before calling ```Execute```:
+
+```cpp
+std::vector< int > groundTruthSkipLabels;
+groundTruthSkipLabels.push_back(1); // Will skip label 1 when comparing
+
+gts.SetGroundTruth(groundTruthPtr, groundTruthSkipLabels);
+
+```
+
+Please note that label 0 is the null label and is not used in the calculations regardless.
+
+Additionally, the ```executeResult``` contains dice scores, sensitivity and false positives for each individual label used.
+ +```cpp +std::map< int, double> diceScoreForLabel = executeResult->diceScore; +std::map< int, double> sensitivityForLabel = executeResult->sensitivity; +std::map< int, int> falsePositives = executeResult->falsePositivesCount; + +// For example the diceScore for label 2 can be found doing: +double diceScoreForLabel2 = diceScoreForLabel[2]; +``` + +Comparing to a ground truth segmentation can also be used in all the modes, like the code described [here](README.md/#run). + + +## More optional parameters + +* ```SetTimerEnabled(bool)``` Records duration and saves it to "time_report.txt" +* ```SetNumberOfThreads(int)``` Sets the number of threads manually. Default is 16. + +To also save the results to a file use both parameters: + +```cpp +gts.SetOutputPath(fullPathToOutputFolderAsStdString); +gts.SetSaveAll(true); +``` + + +## Isolate an area in an image + +```cpp +#include "GeodesicTrainingSegmentation.h" +... +typedef typename itk::Image< float, 3 >::Pointer InputImagePointer; +typedef typename itk::Image< int, 3 >::Pointer LabelsImagePointer; +LabelsImagePointer labelsPtr = ...; // The labels here will be used +InputImagePointer inputImagePtr = ...; // The MRI image + +// This will keep only the voxels from inputImagePtr +// that have the label 2 in labelsPtr +int labelOfInterest = 2; + +GeodesicTrainingSegmentation::Coordinator<> gts(); +gts.SetMode(GeodesicTrainingSegmentation::MODE::SEGMENT); +gts.SetInputImage(inputImagePtr); +gts.SetLabels(labelsPtr); +gts.SetLabelOfInterest(labelOfInterest); + +auto executeResult = gts.Execute(); + +if (executeResult->ok) { + InputImagePointer isolatedImage = executeResult->segmentedFloatImage; +} +else { + std::cout << executeResult->errorMessage << "\n"; +} +``` + +The output will be the input image where the voxels that *don't* have the label specified in variable labelOfInterest become zero. + + +## Custom SVM ensemble + +By default no configuration file need to be provided and an ensemble of SVMs with RBF, chi2 and histogram intersection kernels are trained, but a custom YAML configuration file can be provided using ```void SetConfigFile(std::string);``` + + +##### The different keywords are: + +* __svms__ The root node should always be "svms" +* __kernel_type__ Different kernels available are: linear, rbf, poly, sigmoid, chi2, inter +* __type__ If unsure of what SVM type is remove it, or leave it as C_SVC. See OpenCV's docs for more info. +* __kfold__ If unsure of what kfold is remove it, or leave it at 10 (default) or 5 (faster) +* __consider_weights__ Misclassifying a class that has less label samples is penalized more during training. C_SVC only. +* __importance__ Remove it for using importance=1 (default) + * In case of normal classification, each vote from a SVM is weighted by its importance value. For example in case of 3 SVMs if SVM_A has importance 0.4 and SVM_B, SVM_C both have 0.3 then the vote of SVM_A is always used as output, except if SVM_B and SVM_C both vote for the same different class. + * In the modes that produce pseudoprobability maps the average of all the pseudoprobabilities of the different SVMs is taken but it can also be weighted using importance so that higher importance impacts the result more. 
+* __term_criteria__ Remove it for default term criteria + * __criteria_type__ Can be "MAX_ITER", or "EPS" or "MAX_ITER+EPS" + * __max_count__ Max iterations + * __epsilon__ Max error +* __pretrained__ Use a pretrained OpenCV svm model that was saved to a file +* __c/gamma/p/nu/coef/degree__ Values/Ranges for different parameters (See OpenCV docs for more info). A default value is used if one of this keywords is missing. Different options are: + * *Specific value* + * __auto__ For using the default grid of the parameter for optimization. + * __min_value,max_value,log_step__ {min_value, min_value * log_step, min_value * log_step^2, ...} will be tried, while min_value * log_step ^ n < max_value. + +The different SVM configurations start with "-" (list items). Example configuration files can be found at the [extra/configurations/svm](../extra/configurations/svm) directory. The following configuration is for demonstration purposes and not to be used as input. + +```yaml +--- +# This is a comment +svms: + - kernel_type: linear + kfold: 5 + consider_weights: false + c: 0.8 + - kernel_type: rbf + type: C_SVC + kfold: 10 + importance: 0.4 + consider_weights: true + c: 0.7 + gamma: + min_value: 0.001 + max_value: 1.0 + log_step: 2.0 + - kernel_type: sigmoid + consider_weights: true + term_criteria: + criteria_type: MAX_ITER + max_count: 900 + epsilon: 0.01 + c: 0.9 + gamma: auto + coef: auto + - pretrained: C:\GeodesicTraining\model_linear1.xml + importance: 0.2 +... +``` diff --git a/Modules/CaPTkInteractiveSegmentation/src/EXTRA_MODES.md b/Modules/CaPTkInteractiveSegmentation/src/EXTRA_MODES.md new file mode 100644 index 0000000..5f85aed --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/EXTRA_MODES.md @@ -0,0 +1,239 @@ +# Alternative modes + +This section covers methods for segmentation different than the default one. Please note that the default mode was chosen for producing the best results. Despite that, [geotrainfull](#geotrainfull) mode can also produce good results but is useful only for 2-class classification and requires thresholding to produce labels. Also, [agd](#agd) mode [(related paper)](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4395536/) can produce some fast segmentations of a single region. The rest of the modes are provided mostly for documentation purposes. + +### Contents +- [Methods using just SVM](#methods-using-just-svm) +- [Methods using just AGD](#methods-using-just-agd) +- [Methods using SVM and AGD](#methods-using-svm-and-agd) +- [Methods using random forests](#methods-using-random-forests) + +To specify a mode, before using ```gts.Execute();``` use +```cpp +void SetMode(GeodesicTrainingSegmentation::MODE); +``` + +For example for mode SVM_LABELS, do: + +```cpp +gts.SetMode(GeodesicTrainingSegmentation::MODE::SVM_LABELS); +``` + + +## Methods using just SVM + +###### SVM_LABELS + +Uses an ensemble of SVMs to segment an image. Apart from ```SetMode```, usage is the same as the default mode. In general, as with all the methods not using some sort of AGD map which provides locality to the algorithm, results tend to contain a lot of false positives. + +###### SVM_PSEUDO + +Only for 2 classes. There should be one label for the voxels inside the area of interest and one label for the voxels outside of it. Use whichever 2 non-zeros labels you want and provide all the images through either ```void SetInputImages(std::vector);``` or ```void SetInputImages(std::vector);```. 
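+
+A minimal setup sketch is shown below (the ```MODE::SVM_PSEUDO``` enum value is assumed here by analogy with ```MODE::SVM_LABELS``` above; the typedefs and variable names are placeholders):
+
+```cpp
+#include "GeodesicTrainingSegmentation.h"
+...
+typedef typename itk::Image< float, 3 >::Pointer InputImagePointer;
+typedef typename itk::Image< int, 3 >::Pointer LabelsImagePointer;
+std::vector< InputImagePointer > inputImages = ...; // The co-registered input images
+LabelsImagePointer labelsPtr = ...;                 // Contains exactly two non-zero labels
+
+GeodesicTrainingSegmentation::Coordinator<> gts;
+gts.SetMode(GeodesicTrainingSegmentation::MODE::SVM_PSEUDO); // assumed enum name
+gts.SetInputImages(inputImages);
+gts.SetLabels(labelsPtr);
+
+auto executionResult = gts.Execute();
+```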
+ +The output (if saving to file is enabled) is two images, one for each class, containing high values at the voxels with a high probability for being part of that class and low values for the opposite. The values range is \[0,1\]. + +Programmatically, the results can be obtained using (after ```auto executionResult = gts.Execute();```): + +```cpp +typename itk::Image::Pointer PseudoProbImagePointer; +PseudoProbImagePointer posPseudoMap = executionResult->posImage; +PseudoProbImagePointer negPseudoMap = executionResult->negImage; +int posLabel = executionResult->posLabel; // For which label is the posPseudoMap +int negLabel = executionResult->negLabel; // For which label is the negPseudoMap +``` + +This method is not recommended and converting this pseudoprobability images to labels is not supported. + + +## Methods using just AGD + + +###### agd + +This method takes *only one* image and the "sample of labels" image as input. Samples should exist only for one label (or if there are more and you don't want to transform it, pass through ```void SetLabelOfInterest(int);``` which label to use for AGD). In other words, pass samples only for the area of interest and not for anything else. Use ```void SetThreshold(double);``` to set a custom threshold (Default is 25). + +The method (if saving to file is enabled) outputs a distance map (named agd_X_classY.nii.gz). This map is then thresholded and the output is named labels_thres_classY_tZ.nii.gz. + +Programmatically, the results can be obtained using (after ```auto executionResult = gts.Execute();```): + +```cpp +typename itk::Image::Pointer AgdImagePointer; +typename itk::Image::Pointer LabelsImagePointer; +AgdImagePointer agdMapImage = executionResult->agdMapImage; +LabelsImagePointer labelsImage = executionResult->labelsImage; +``` + +A lot of the times, the threshold value used might not be the optimal one (because it is case dependent). The user can look at the output distance map image and decide that a different value should be used instead. In that case there is no need to run the algorithm again. Instead you can use: + +```cpp +#include "GeodesicTrainingSegmentation.h" + +... + +GeodesicTrainingSegmentation::Coordinator<> gts; + +std::string fullPathToDistanceMap = "C:/GeodesicTraining/agd_X_classY.nii.gz"; +gts.SetInputImage(fullPathToDistanceMap); // You can also pass an itk image pointer + +int newThreshold = 30; // The better threshold that was decided +gts.SetThreshold(newThreshold); + +int labelToUse = 2; // Which label to put at the voxels lower than the threshold (0 is used elsewhere) +gts.SetLabelOfInterest(labelToUse); + +// Optionally you can save the result to a file using SetOutputPath(std::string), SetSaveAll(bool) + +// Executing +auto executeResult = gts.Execute(); + +if (executeResult->ok) { + // You can ignore labelsRenamedPtr if you only want the results saved to file + typedef typename itk::Image< int, 3 >::Pointer LabelsImagePointer; + LabelsImagePointer newLabelsImage = executeResult->labelsImage; // The result segmantation mask +} +else { + std::cout << executeResult->errorMessage << "\n"; +} +``` + + +## Methods using SVM and AGD + +###### geotrain + +This method is not recommended. For distance maps it is recommended to use [geotrainfull](#geotrainfull) for better results or [agd](#agd) for faster results. + +Only for 2 classes. There should be one label for the voxels inside the area of interest and one label for the voxels outside of it. 
Provide all the images through either ```void SetInputImages(std::vector);``` or ```void SetInputImages(std::vector);``` and use whichever 2 non-zeros labels you want. Specify which of the two labels is the label for the area of interest using ```void SetLabelOfInterest(int);```. Default label of interest is 1. + +The methods (if saving to file is enabled) outputs two distance maps (named agd_X_classY.nii.gz). The map for the LABEL_OF_INTEREST is then thresholded with the value provided in ```void SetThreshold(double);``` (the output labels image is named labels_thres_classY_tZ.nii.gz). Default threshold value if the parameter is not provided is 25, but 25 is not recommended for this mode, a value like 100 is better. + +Programmatically, the results can be obtained using (after ```auto executionResult = gts.Execute();```): + +```cpp +typename itk::Image::Pointer AgdImagePointer; +typename itk::Image::Pointer LabelsImagePointer; +AgdImagePointer agdMapImage = executionResult->agdMapImage; +LabelsImagePointer labelsImage = executionResult->labelsImage; +``` + +A lot of the times, the threshold value used might not be the optimal one (because it is case dependent). The user can look at the output distance map image and decide that a different value should be used instead. In that case there is no need to run the algorithm again. Instead you can use: + +```cpp +#include "GeodesicTrainingSegmentation.h" + +... + +GeodesicTrainingSegmentation::Coordinator<> gts; + +std::string fullPathToDistanceMap = "C:/GeodesicTraining/agd_X_classY.nii.gz"; +gts.SetInputImage(fullPathToDistanceMap); // You can also pass an itk image pointer + +int newThreshold = 30; // The better threshold that was decided +gts.SetThreshold(newThreshold); + +int labelToUse = 2; // Which label to put at the voxels lower than the threshold (0 is used elsewhere) +gts.SetLabelOfInterest(labelToUse); + +// Optionally you can save the result to a file using SetOutputPath(std::string), SetSaveAll(bool) + +// Executing +auto executeResult = gts.Execute(); + +if (executeResult->ok) { + // You can ignore labelsRenamedPtr if you only want the results saved to file + typedef typename itk::Image< int, 3 >::Pointer LabelsImagePointer; + LabelsImagePointer newLabelsImage = executeResult->labelsImage; // The result segmantation mask +} +else { + std::cout << executeResult->errorMessage << "\n"; +} +``` + + +where LABEL_OF_INTEREST is which value to put where the distance map has values lower than the threshold (0 is used elsewhere) + + +###### geotrainfull + +Only for 2 classes. There should be one label for the voxels inside the area of interest and one label for the voxels outside of it. Provide all the images through either ```void SetInputImages(std::vector);``` or ```void SetInputImages(std::vector);``` and use whichever 2 non-zeros labels you want. Specify which of the two labels is the label for the area of interest using ```void SetLabelOfInterest(int);```. Default label of interest is 1. + +The methods (if saving to file is enabled) outputs two distance maps (named agd_X_classY.nii.gz). The map for the LABEL_OF_INTEREST is then thresholded with the value provided in ```void SetThreshold(double);``` (the output labels image is named labels_thres_classY_tZ.nii.gz). Default threshold value if the parameter is not provided is 25, but 25 is not recommended for this mode, a value like 100 is better. 
+
+Programmatically, the results can be obtained using (after ```auto executionResult = gts.Execute();```):
+
+```cpp
+typedef typename itk::Image< float, 3 >::Pointer AgdImagePointer;
+typedef typename itk::Image< int, 3 >::Pointer LabelsImagePointer;
+AgdImagePointer agdMapImage = executionResult->agdMapImage;
+LabelsImagePointer labelsImage = executionResult->labelsImage;
+```
+
+Often the threshold value that was used is not the optimal one (because it is case dependent). The user can look at the output distance map image and decide that a different value should be used instead. In that case there is no need to run the algorithm again. Instead you can use:
+
+```cpp
+#include "GeodesicTrainingSegmentation.h"
+
+...
+
+GeodesicTrainingSegmentation::Coordinator<> gts;
+
+std::string fullPathToDistanceMap = "C:/GeodesicTraining/agd_X_classY.nii.gz";
+gts.SetInputImage(fullPathToDistanceMap); // You can also pass an itk image pointer
+
+int newThreshold = 30; // The better threshold that was decided
+gts.SetThreshold(newThreshold);
+
+int labelToUse = 2; // Which label to put at the voxels lower than the threshold (0 is used elsewhere)
+gts.SetLabelOfInterest(labelToUse);
+
+// Optionally you can save the result to a file using SetOutputPath(std::string), SetSaveAll(bool)
+
+// Executing
+auto executeResult = gts.Execute();
+
+if (executeResult->ok) {
+  // You can ignore newLabelsImage if you only want the results saved to file
+  typedef typename itk::Image< int, 3 >::Pointer LabelsImagePointer;
+  LabelsImagePointer newLabelsImage = executeResult->labelsImage; // The resulting segmentation mask
+}
+else {
+  std::cout << executeResult->errorMessage << "\n";
+}
+```
+
+where LABEL_OF_INTEREST (```labelToUse``` above) is the value assigned to the voxels whose distance-map value is below the threshold (0 is used elsewhere).
+
+
+## Methods using random forests
+
+In general, Random Forests were found to perform worse than SVMs for this task.
+
+
+###### rf
+
+The simplest random forest mode.
+
+Usage is the same as the default mode, except that you can optionally pass custom parameters for the random forest using ```void SetRfConfigFile(std::string);```. [See here](../extra/configurations/rf/rf_example_conf.config) for an example custom RF configuration.
+
+In general, as with all the methods not using some sort of AGD map which provides locality to the algorithm, results tend to contain a lot of false positives.
+
+###### rfauto
+
+Usage is the same as the default mode. The difference with [rf](#rf) is that the parameters are tuned automatically.
+
+In general, as with all the methods not using some sort of AGD map which provides locality to the algorithm, results tend to contain a lot of false positives.
+
+
+###### agdrf
+
+Uses AGD maps alongside the (unprocessed) input images as input to the RF.
+
+Usage is the same as the default mode, except that you can optionally pass custom parameters for the random forest using ```void SetRfConfigFile(std::string);```. [See here](../extra/configurations/rf/rf_example_conf.config) for an example custom RF configuration.
+
+###### agdrfauto
+
+Uses AGD maps alongside the (unprocessed) input images as input to the RF.
+
+Usage is the same as the default mode. The difference with [agdrf](#agdrf) is that the parameters are tuned automatically.
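+
+For the RF-based modes the workflow is the same as the default mode; the following is a minimal sketch (the ```MODE::RF``` enum value and the configuration-file path are assumptions for illustration; only ```SetRfConfigFile``` itself is documented above):
+
+```cpp
+#include "GeodesicTrainingSegmentation.h"
+...
+GeodesicTrainingSegmentation::Coordinator<> gts;
+gts.SetMode(GeodesicTrainingSegmentation::MODE::RF); // assumed enum name for the "rf" mode
+gts.SetInputImages(inputImages);
+gts.SetLabels(labelsPtr);
+gts.SetRfConfigFile("C:/GeodesicTraining/rf_custom.config"); // optional: custom RF parameters
+
+auto executeResult = gts.Execute();
+
+if (executeResult->ok) {
+  auto segmentationMask = executeResult->labelsImage; // The resulting segmentation mask
+}
+else {
+  std::cout << executeResult->errorMessage << "\n";
+}
+```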
diff --git a/Modules/CaPTkInteractiveSegmentation/src/GeodesicTrainingSegmentation.cpp b/Modules/CaPTkInteractiveSegmentation/src/GeodesicTrainingSegmentation.cpp new file mode 100644 index 0000000..4098e0f --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/GeodesicTrainingSegmentation.cpp @@ -0,0 +1,3 @@ +#include "GeodesicTrainingSegmentation.h" + +// Coordinator is a templated class and thereby has no implementation diff --git a/Modules/CaPTkInteractiveSegmentation/src/OperationsSvmGTS.cpp b/Modules/CaPTkInteractiveSegmentation/src/OperationsSvmGTS.cpp new file mode 100644 index 0000000..fb5f432 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/OperationsSvmGTS.cpp @@ -0,0 +1,84 @@ +#include "OperationsSvmGTS.h" + +bool GeodesicTrainingSegmentation::CreateBalancedSubsample(std::shared_ptr& data, + std::string& errorMessageIfApplicable, + std::unordered_map labelsCountMap, int maxSamples) + { + typedef int LabelsPixelType; + + // Find how many samples to keep for each label + std::unordered_map maxForEachLabel; + double totalLabels = 0; + + for (std::set::iterator it = data->differentLabels.begin(); it != data->differentLabels.end(); it++) + { + // Initialize + maxForEachLabel[*it] = 0; + } + + for (std::set::iterator it = data->differentLabels.begin(); it != data->differentLabels.end(); it++) + { + totalLabels += labelsCountMap[*it]; + } + + std::cout << "\n"; + for (std::set::iterator it = data->differentLabels.begin(); it != data->differentLabels.end(); it++) + { + maxForEachLabel[*it] = std::lround(maxSamples * labelsCountMap[*it] / totalLabels); + std::cout << "MAX FOR " << *it << ": " << maxForEachLabel[*it] << "\n"; + if (maxForEachLabel[*it] < 3) { + errorMessageIfApplicable = std::string("Cannot create a balanced subsample, ") + + std::string("because one (or more) labels is outweighted by the others a lot. 
") + + std::string("Draw more for the labels that you haven't drawn a lot for, or remove ") + + std::string("some samples from the ones that are large."); + return false; + } + } + + // Shuffle training matrix and labels (with the same seed) + std::vector seeds; + for (int cont = 0; cont < data->trainingMat.rows; cont++) { + seeds.push_back(cont); + } + + cv::randShuffle(seeds); + + cv::Mat randTrainingMat, randLabelsMat; + for (int cont = 0; cont < data->trainingMat.rows; cont++) { + randTrainingMat.push_back(data->trainingMat.row(seeds[cont])); + randLabelsMat.push_back(data->labelsMat.row(seeds[cont])); + } + + data->trainingMat = randTrainingMat; + data->labelsMat = randLabelsMat; + + // // Create a mask for which samples to keep for training + // cv::Mat sampleIdx = cv::Mat::zeros(data->trainingMat.rows, 1, CV_8U); + + std::unordered_map keptForEachLabel; + + for (std::set::iterator it = data->differentLabels.begin(); it != data->differentLabels.end(); it++) + { + // Initialize + keptForEachLabel[*it] = 0; + } + + cv::Mat trainingMatNew, labelsMatNew; + + for (int row_i = 0; row_i < data->trainingMat.rows; row_i++) + { + LabelsPixelType label = data->labelsMat.ptr(row_i)[0]; + + if (keptForEachLabel[label] <= maxForEachLabel[label]) { + //sampleIdx.ptr(row_i)[0] = 1; + keptForEachLabel[label] += 1; + trainingMatNew.push_back(data->trainingMat.row(row_i).clone()); + labelsMatNew.push_back(data->labelsMat.row(row_i).clone()); + } + } + data->trainingMat = trainingMatNew; + data->labelsMat = labelsMatNew; + + return true; + //return sampleIdx; + } \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/src/README.md b/Modules/CaPTkInteractiveSegmentation/src/README.md new file mode 100644 index 0000000..8bcb61c --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/README.md @@ -0,0 +1,245 @@ +# Geodesic Training - Tumor Segmentation (Library) + +This library is developed and maintained by the [Center for Biomedical Image Computing and Analytics (CBICA)](https://www.cbica.upenn.edu/) at the University of Pennsylvania. + +### Contents +- [What can it do?](#what-can-it-do) +- [How to build?](#how-to-build) +- [How to use?](#how-to-use) + + + +## What can it do? + +Produce masks for the different areas specified in the "sample of labels" image. + +For brain tumors specifically, produce masks for: +- necrotic and non-enhancing tumor core +- GD-enhancing tumor +- peritumoral edema +- other areas + +See [advanced usage](EXTRA_ADVANCED.md) section for complementary operations. + +See [alternative modes](EXTRA_MODES.md) section for alternative methods for segmentation. + + +## How to build + +Depends on ITK, OpenCV. Requires a C++11 compliant compiler and cmake. 
+
+Assuming the GeodesicTrainingSegmentation directory has been copied inside your project's root directory and your sources are in the src directory, an example CMakeLists.txt file at the root directory of your project could look like this:
+
+```cmake
+CMAKE_MINIMUM_REQUIRED(VERSION 3.0)
+
+SET( PROJECT_NAME YourProjectName )
+PROJECT( ${PROJECT_NAME} )
+
+# For ITK
+FIND_PACKAGE( ITK REQUIRED )
+SET(ITK_NO_IO_FACTORY_REGISTER_MANAGER "OFF")
+INCLUDE( ${ITK_USE_FILE} )
+
+# For OpenCV
+FIND_PACKAGE( OpenCV REQUIRED )
+#INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS})
+
+# For C++11
+SET(CMAKE_CXX_STANDARD 11)
+SET(CMAKE_CXX_STANDARD_REQUIRED YES)
+
+# For OpenMP
+FIND_PACKAGE(OpenMP REQUIRED)
+SET( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}" )
+SET( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}" )
+
+# Finding your source files
+FILE( GLOB_RECURSE sources "${PROJECT_SOURCE_DIR}/src/*.*" )
+
+# Including the paths for headers
+INCLUDE_DIRECTORIES(
+  ${PROJECT_SOURCE_DIR}/GeodesicTrainingSegmentation/include/GeodesicTrainingSegmentation
+)
+
+# Adding the subproject
+add_subdirectory(GeodesicTrainingSegmentation)
+
+# Add your sources to the executable
+ADD_EXECUTABLE( ${PROJECT_NAME}
+  ${sources}
+)
+
+# Link the libraries to be used
+TARGET_LINK_LIBRARIES( ${PROJECT_NAME}
+  ${ITK_LIBRARIES}
+  ${OpenCV_LIBRARIES}
+  GeodesicTrainingSegmentation
+)
+```
+
+ITK and OpenCV should be installed on your system. You have to pass the paths to the ITK and OpenCV include directories to CMake.
+
+###### Linux
+
+An example CMake command on Linux could look like this (but the CMAKE_INSTALL_PREFIX and the ITK version might be different):
+
+```console
+cmake -DCMAKE_INSTALL_PREFIX="/usr/local" -DCMAKE_PREFIX_PATH="${CMAKE_INSTALL_PREFIX}/include/ITK-4.13;${CMAKE_INSTALL_PREFIX}/include/opencv4" -DCMAKE_MODULE_PATH="${CMAKE_INSTALL_PREFIX}/include/opencv4" ..
+```
+
+For more information on installing the dependencies on Linux you can see [here for ITK](https://itk.org/Wiki/ITK_Configuring_and_Building_for_Ubuntu_Linux) and [here for OpenCV](https://docs.opencv.org/3.4/d7/d9f/tutorial_linux_install.html), but you can also try to look for newer versions.
+
+###### Windows
+
+This library can be built on Windows by providing the paths to ITK and OpenCV. See [here for ITK](https://itk.org/ITK/resources/software.html) and [here for OpenCV](https://docs.opencv.org/2.4/doc/tutorials/introduction/windows_install/windows_install.html), but you can also try to look for newer versions.
+
+In general, the ```CMAKE_PREFIX_PATH``` should look something like this ```"C:/Libraries/InsightToolkit-4.13.1/build;C:/Libraries/opencv/build/x64/vc15/lib"``` and ```CMAKE_MODULE_PATH``` something like this ```"C:/Libraries/InsightToolkit-4.13.1/build/lib/cmake/ITK-4.13"```.
+
+If, when trying to run your application, you get prompted that ```opencv_world343.dll``` or ```opencv_world343d.dll``` is missing, manually copy it from OpenCV's build path to the directory where your executable is located. (The path to the dll will look something like this: ```C:\Libraries\opencv\build\x64\vc15\bin```)
+
+## How to use?
+
+#### Input files
+
+Supports 3D NIfTI and DICOM images (.nii, .nii.gz, .dcm, .dicom files). Input files can be set either with a std::string to the full path of the image, or by a pointer to an ```itk::Image``` (see the [run section](#run) below for an example). Only 3D images are supported for now.
+
+###### Input Images
+
+The images can be MR, CT, PET or anything else (or a combination of all).
The restriction is that all images should have exactly the same size and be co-registered images of one subject taken during a single session.
+
+Either set by using a std::string to the full path of the image or by a pointer to an ```itk::Image< float, 3 >```. (If your input image is of another type, like ```itk::Image< int, 3 >```, you can use ITK's CastImageFilter to convert it to float, or pass it using its path.)
+
+Special operations are performed for the modalities FLAIR, T1, T1CE (Gd) and T2 when segmenting brain tumors. However, none of them is mandatory and other modalities can be provided too.
+
+
+###### Input Labels
+
+The sample of labels image should also be an image that has exactly the same size as the different input images.
+
+The unlabeled voxels should have the value 0. Any other non-zero integer label can be used for the different areas. There should be at least two different labels.
+
+
+###### Brain tumor
+
+In the special case of brain tumors, the values of the voxels of this image should be:
+* __0__ At the unlabeled voxels
+* __1__ At the necrotic and non-enhancing tumor core samples
+* __2__ At the peritumoral edema samples
+* __3__ At the healthy tissue samples
+* __4__ At the GD-enhancing tumor samples
+* __5+__ At other areas the user wants to mark
+
+Please note that *none* of the brain tumor specific classes (1, 2, 3, 4) is mandatory even when segmenting brain tumors, but there should be at least 2 different classes in the image (they could all be 5+). Also note that, apart from the expected output mask, a second one will be created where the label for healthy tissue is removed.
+
+Either set by using a std::string to the full path of the image or by a pointer to an ```itk::Image< int, 3 >```. Please note that the input images should have __float__ pixel type, while the labels image should have __int__. (If your input labels are of another type, like ```itk::Image< short, 3 >```, you can use ITK's CastImageFilter to convert it to int, or pass it using its path.)
+
+See [here](EXTRA_ADVANCED.md#use-different-labels) for instructions on how to use different labels for the tumor-specific classes.
+
+Please also note that the quality of the segmentation is highly correlated with the quality of the provided "sample of labels" image. Also, especially in the more difficult cases, there might be misclassified regions in the output segmentation. If that is the case, the user can add more labels within these regions in the "sample of labels" image and run the tool again.
+
+[CaPTk](https://www.med.upenn.edu/cbica/captk/) can be used to draw the labels image. Drag and drop the MRI images in the "Images" area, draw labels through the "Drawing" tab and export using File>Save>ROI. To add more labels to an existing labels image use File>Load>ROI, draw the new labels and export.
+
+
+#### Run
+
+All the interaction is done using the class
+```
+GeodesicTrainingSegmentation::Coordinator
+```
+
+Please note that the template refers to the input images. Right now only the float PixelType is supported, but the template parameter is kept in case there is reason to support more in the future. Dimensions can be 2 or 3. The labels image should be an ```itk::Image``` with __int__ pixel type and the same dimensionality.
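+
+A minimal sketch of preparing an input image, using the default coordinator instantiation and plain ITK for the cast mentioned above (```someIntImage``` is a hypothetical image pointer you already have):
+
+```cpp
+#include "GeodesicTrainingSegmentation.h"
+#include "itkCastImageFilter.h"
+
+// Default template arguments (float pixel type, 3D), as in the example further below
+GeodesicTrainingSegmentation::Coordinator<> gts;
+
+// Plain ITK: cast an int image to the float pixel type expected for the input images
+typedef itk::Image< int, 3 >   IntImageType;
+typedef itk::Image< float, 3 > FloatImageType;
+typedef itk::CastImageFilter< IntImageType, FloatImageType > CastFilterType;
+
+CastFilterType::Pointer caster = CastFilterType::New();
+caster->SetInput(someIntImage); // hypothetical itk::Image< int, 3 >::Pointer
+caster->Update();
+FloatImageType::Pointer floatInputImage = caster->GetOutput();
+```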
+
+*Note:* If you are segmenting brain tumors and at least one of the FLAIR/T1/T1CE/T2 MRI modalities is present, pass these modalities using one of the following:
+
+```
+// For 2D
+void GeodesicTrainingSegmentation::Coordinator< float, 2 >::SetInputImageMRI(GeodesicTrainingSegmentation::MODALITY_MRI, std::string);
+void GeodesicTrainingSegmentation::Coordinator< float, 2 >::SetInputImageMRI(GeodesicTrainingSegmentation::MODALITY_MRI, typename itk::Image< float, 2 >::Pointer);
+
+// For 3D
+void GeodesicTrainingSegmentation::Coordinator< float, 3 >::SetInputImageMRI(GeodesicTrainingSegmentation::MODALITY_MRI, std::string);
+void GeodesicTrainingSegmentation::Coordinator< float, 3 >::SetInputImageMRI(GeodesicTrainingSegmentation::MODALITY_MRI, typename itk::Image< float, 3 >::Pointer);
+```
+
+Pass other images (either in the general non-brain-tumor case, or extra images that are not FLAIR/T1/T1CE/T2 for brain tumors) using:
+
+```
+// For 2D
+void GeodesicTrainingSegmentation::Coordinator< float, 2 >::SetInputImages(std::vector< std::string >);
+void GeodesicTrainingSegmentation::Coordinator< float, 2 >::SetInputImages(std::vector< typename itk::Image< float, 2 >::Pointer >);
+
+// For 3D
+void GeodesicTrainingSegmentation::Coordinator< float, 3 >::SetInputImages(std::vector< std::string >);
+void GeodesicTrainingSegmentation::Coordinator< float, 3 >::SetInputImages(std::vector< typename itk::Image< float, 3 >::Pointer >);
+```
+
+You can supply some FLAIR/T1/T1CE/T2 images using paths and some using pointers. ```SetInputImages``` will not override the changes made by ```SetInputImageMRI```.
+
+Either supply all the other images using a vector of paths or a vector of pointers. Don't call ```SetInputImages``` twice.
+
+In the following example all images are 3D and
+- the flair image is passed by its full path
+- the t1ce image is passed using a pointer to an itk::Image
+- 2 other images are passed using their full paths
+- the "sample of labels" image is passed using a pointer to an itk::Image
+
+```cpp
+#include "GeodesicTrainingSegmentation.h"
+...
+
+typedef itk::Image< float, 3 > InputImageType;
+typedef itk::Image< int, 3 > LabelsImageType;
+typedef typename InputImageType::Pointer InputImagePointer;
+typedef typename LabelsImageType::Pointer LabelsImagePointer;
+
+std::string flairImageFullPath = "C:/GeodesicTraining/flair.nii.gz"; // The flair MRI image
+std::string otherImage1FullPath = "C:/GeodesicTraining/other_image1.nii.gz"; // Another image
+std::string otherImage2FullPath = "C:/GeodesicTraining/other_image2.nii.gz"; // Another image
+
+std::string t1ceImageFullPath = "C:/GeodesicTraining/t1ce.nii.gz"; // The t1ce MRI image
+std::string labelsFullPath = "C:/GeodesicTraining/mask.nii.gz"; // The sample of labels image
+
+typedef itk::ImageFileReader< InputImageType > InputReaderType;
+typedef itk::ImageFileReader< LabelsImageType > LabelsReaderType;
+
+typename InputReaderType::Pointer inputReader = InputReaderType::New();
+typename LabelsReaderType::Pointer labelsReader = LabelsReaderType::New();
+
+inputReader->SetFileName(t1ceImageFullPath);
+inputReader->Update(); // Actually read the image from disk
+InputImagePointer t1ceImage = inputReader->GetOutput();
+
+labelsReader->SetFileName(labelsFullPath);
+labelsReader->Update(); // Actually read the image from disk
+LabelsImagePointer labelsImage = labelsReader->GetOutput();
+
+std::vector< std::string > otherInputImagesPaths = { otherImage1FullPath, otherImage2FullPath };
+
+
+// All the interaction happens through the coordinator class
+GeodesicTrainingSegmentation::Coordinator<> gts;
+
+// Setting the parameters
+gts.SetInputImageMRI(GeodesicTrainingSegmentation::MODALITY_MRI::FLAIR, flairImageFullPath);
+gts.SetInputImageMRI(GeodesicTrainingSegmentation::MODALITY_MRI::T1CE, t1ceImage);
+gts.SetInputImages(otherInputImagesPaths); // Will not override FLAIR, T1, T1CE or T2
+gts.SetLabels(labelsImage);
+
+// Also optionally you can save the result to a file using SetOutputPath(std::string), SetSaveAll(bool)
+std::string outputDirWhichWillContainTheResult = "C:/GeodesicTraining/output";
+gts.SetOutputPath(outputDirWhichWillContainTheResult);
+gts.SetSaveAll(true);
+
+// Executing
+auto executeResult = gts.Execute();
+
+if (executeResult->ok) {
+    // You can ignore labelsRenamedPtr if you only want the results saved to file
+    LabelsImagePointer labelsRenamedPtr = executeResult->labelsImage; // The resulting segmentation mask
+}
+else {
+    std::cerr << executeResult->errorMessage << "\n";
+}
+```
+
+For more advanced usage (other optional parameters, using different labels, comparing a segmentation to ground truth, isolating areas in an image and custom SVM configurations) see the [advanced usage](EXTRA_ADVANCED.md) section.
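+
+As a final note on the example above: if you prefer to write the returned mask to disk yourself (instead of, or in addition to, relying on ```SetOutputPath```/```SetSaveAll```), plain ITK is enough. A minimal sketch, assuming the 3D __int__ labels image from the example and a hypothetical output path:
+
+```cpp
+#include "itkImageFileWriter.h"
+
+typedef itk::ImageFileWriter< LabelsImageType > LabelsWriterType;
+
+LabelsWriterType::Pointer writer = LabelsWriterType::New();
+writer->SetFileName("C:/GeodesicTraining/output/labels_result.nii.gz"); // hypothetical output path
+writer->SetInput(labelsRenamedPtr); // the mask obtained from executeResult->labelsImage above
+writer->Update(); // writes the file; may throw an itk::ExceptionObject on failure
+```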
diff --git a/Modules/CaPTkInteractiveSegmentation/src/RFPrepareTrainData.cpp b/Modules/CaPTkInteractiveSegmentation/src/RFPrepareTrainData.cpp new file mode 100644 index 0000000..2cf063c --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/RFPrepareTrainData.cpp @@ -0,0 +1,52 @@ +#include "RFPrepareTrainData.h" + +cv::Ptr RFSuiteTrainData::PrepareTrainData(const cv::Mat &data, const cv::Mat &responses, int ntrain_samples) +{ + // Mask for which of the first ntrain_samples of the data to be chosen as training data + cv::Mat sample_idx = cv::Mat::zeros(1, data.rows, CV_8U); + cv::Mat train_samples = sample_idx.colRange(0, ntrain_samples); + train_samples.setTo(cv::Scalar::all(1)); + + // The types (see definition of TrainData) + int nvars = data.cols; + cv::Mat var_type(nvars + 1, 1, CV_8U); + var_type.setTo(cv::Scalar::all(cv::ml::VAR_ORDERED)); + var_type.at(nvars) = cv::ml::VAR_CATEGORICAL; + + // Randomize data with the same seed + cv::Mat randData, randResp; + shuffleDataAndResponses(data, randData, responses, randResp); + + return cv::ml::TrainData::create(randData, cv::ml::ROW_SAMPLE, randResp, + cv::noArray(), sample_idx, cv::noArray(), var_type); +} + +void RFSuiteTrainData::shuffleDataAndResponses(const cv::Mat &matrix, cv::Mat &resRandMatrix, const cv::Mat &responses, cv::Mat &resRandResponses) +{ + std::vector seeds; + for (int cont = 0; cont < matrix.rows; cont++) { + seeds.push_back(cont); + } + + cv::randShuffle(seeds); + + for (int cont = 0; cont < matrix.rows; cont++) { + resRandMatrix.push_back(matrix.row(seeds[cont])); + resRandResponses.push_back(responses.row(seeds[cont])); + } +} + +cv::Mat RFSuiteTrainData::shuffleRows(const cv::Mat &matrix) +{ + std::vector seeds; + for (int cont = 0; cont < matrix.rows; cont++) + seeds.push_back(cont); + + cv::randShuffle(seeds); + + cv::Mat output; + for (int cont = 0; cont < matrix.rows; cont++) + output.push_back(matrix.row(seeds[cont])); + + return output; +} \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/src/RFSuiteManager.cpp b/Modules/CaPTkInteractiveSegmentation/src/RFSuiteManager.cpp new file mode 100644 index 0000000..aab4a9b --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/RFSuiteManager.cpp @@ -0,0 +1,379 @@ +#include "RFSuiteManager.h" + +float RFSuite::Manager::TrainAuto() +{ + cv::ml::ParamGrid maxDepthGrid = cv::ml::ParamGrid(2, 19, 1.75); + cv::ml::ParamGrid minSampleCountPercentageGrid = cv::ml::ParamGrid(10, 11, 2); //5,27,1.75 + cv::ml::ParamGrid maxCategoriesGrid = cv::ml::ParamGrid(16, 17, 2); + cv::ml::ParamGrid activeVarCountGrid = cv::ml::ParamGrid(1, m_traindata->getTrainSamples().cols + 1, 1); + + return TrainAuto(maxDepthGrid, minSampleCountPercentageGrid, maxCategoriesGrid, activeVarCountGrid); +} + +float RFSuite::Manager::TrainAuto(cv::ml::ParamGrid maxDepthGrid, cv::ml::ParamGrid minSampleCountPercentageGrid, + cv::ml::ParamGrid maxCategoriesGrid, cv::ml::ParamGrid activeVarCountGrid) +{ + if (m_verbose) { + std::cout << "RF Manager:\t Training random forest while tuning parameters...\n"; + } + + if (maxDepthGrid.logStep < 1 || minSampleCountPercentageGrid.logStep < 1 || + maxCategoriesGrid.logStep < 1 || activeVarCountGrid.logStep < 1) + { + std::cerr << "logStep should be >= 1. (if 1 then the values: minVal, minVal+1, minVal+2... 
are tried, else see ParamGrid definition)\n"; + return -1; + } + + float minError = 100; + cv::Ptr best_rforest; // = cv::ml::RTrees::create(); + + int i_maxDepth = 0; + + for (double maxDepth = maxDepthGrid.minVal; + ((maxDepthGrid.logStep == 1) ? + (maxDepth < maxDepthGrid.maxVal) : + (maxDepthGrid.minVal * std::pow(maxDepthGrid.logStep, i_maxDepth) < maxDepthGrid.maxVal)); + i_maxDepth++) + { + int i_minSampleCountPercentage = 0; + for (double minSampleCountPercentage = minSampleCountPercentageGrid.minVal; + ((minSampleCountPercentageGrid.logStep == 1) ? + (minSampleCountPercentage < minSampleCountPercentageGrid.maxVal) : + (minSampleCountPercentageGrid.minVal * std::pow(minSampleCountPercentageGrid.logStep, i_minSampleCountPercentage) < + minSampleCountPercentageGrid.maxVal)); + i_minSampleCountPercentage++) + { + int i_maxCategories = 0; + + for (double maxCategories = maxCategoriesGrid.minVal; + ((maxCategoriesGrid.logStep == 1) ? + (maxCategories < maxCategoriesGrid.maxVal) : + (maxCategoriesGrid.minVal * std::pow(maxCategoriesGrid.logStep, i_maxCategories) < maxCategoriesGrid.maxVal)); + i_maxCategories++) + { + int i_activeVarCount = 0; + + for (double activeVarCount = activeVarCountGrid.minVal; + ((activeVarCountGrid.logStep == 1) ? + (activeVarCount < activeVarCountGrid.maxVal) : + (activeVarCountGrid.minVal * std::pow(activeVarCountGrid.logStep, i_activeVarCount) < activeVarCountGrid.maxVal)); + i_activeVarCount++) + { + if (m_verbose) { + std::cout << "---\n" << maxDepth << "," << minSampleCountPercentage << "," << + maxCategories << "," << activeVarCount << "\n---\n"; + } + if (m_save_all) { + std::ofstream rfReportFile; + rfReportFile.open(m_output_path + "/rf_report.txt", std::ios_base::app); //append file + + rfReportFile << "---\n" << maxDepth << "," << minSampleCountPercentage << "," << + maxCategories << "," << activeVarCount << "\n---\n"; + } + + m_rtrees = cv::ml::RTrees::create(); + float val = Train(static_cast(std::floor(maxDepth)), + minSampleCountPercentage, + static_cast(std::floor(maxCategories)), + static_cast(std::floor(activeVarCount)), + static_cast(std::floor(m_number_of_trees))); + + if (val < minError) { + minError = val; + best_rforest = m_rtrees; + + if (m_verbose) { + std::cout << "Last model is current best with error: " << minError << "\n"; + } + if (m_save_all) { + std::ofstream rfReportFile; + rfReportFile.open(m_output_path + "/rf_report.txt", std::ios_base::app); //append file + + rfReportFile << "Last model is current best with error: " << minError << "\n"; + } + } + + if (activeVarCountGrid.logStep == 1) { + activeVarCount++; + } + else { + activeVarCount *= activeVarCountGrid.logStep; + } + } + + if (maxCategoriesGrid.logStep == 1) { + maxCategories++; + } + else { + maxCategories *= maxCategoriesGrid.logStep; + } + } + + if (minSampleCountPercentageGrid.logStep == 1) { + minSampleCountPercentage++; + } + else { + minSampleCountPercentage *= minSampleCountPercentageGrid.logStep; + } + } + + if (maxDepthGrid.logStep == 1) { + maxDepth++; + } + else { + maxDepth *= maxDepthGrid.logStep; + } + } + + if (m_verbose) { + std::cout << "Best model has error: " << minError; + std::cout << "\n\t MAX DEPTH = " << best_rforest->getMaxDepth(); + std::cout << "\n\t MIN SAMPLE COUNT % = " << 100 * best_rforest->getMinSampleCount() / m_traindata->getTrainSamples().rows; + std::cout << "\n\t MAX CATEGORIES = " << best_rforest->getMaxCategories(); + std::cout << "\n\t ACTIVE VAR COUNT = " << best_rforest->getActiveVarCount() << "\n"; + } + if (m_save_all) { + 
std::ofstream rfReportFile; + rfReportFile.open(m_output_path + "/rf_report.txt", std::ios_base::app); //append file + + rfReportFile << "\n[Best model has error: " << minError << "]"; + rfReportFile << "\n\t MAX DEPTH = " << best_rforest->getMaxDepth(); + rfReportFile << "\n\t MIN SAMPLE COUNT % = " << 100 * best_rforest->getMinSampleCount() / m_traindata->getTrainSamples().rows; + rfReportFile << "\n\t MAX CATEGORIES = " << best_rforest->getMaxCategories(); + rfReportFile << "\n\t ACTIVE VAR COUNT = " << best_rforest->getActiveVarCount() << "\n"; + } + m_rtrees = best_rforest; + + return minError; +} + +float RFSuite::Manager::Train() +{ + return Train(m_max_depth, m_min_sample_count_percentage, m_max_categories, m_active_var_count, m_number_of_trees); +} + +float RFSuite::Manager::Train(int maxDepth, double minSampleCountPercentage, int maxCategories, int activeVarCount, int numberOfTrees) +{ + /*if (m_verbose) { + ConfigParserRF::PrintParseResult(maxDepth, 0, true, minSampleCountPercentage, + maxCategories, m_active_var_count, m_number_of_trees, m_priors_mat); + }*/ + + if (maxDepth != 0) { + m_rtrees->setMaxDepth(maxDepth); + } + m_rtrees->setMinSampleCount(std::lround((minSampleCountPercentage / 100) * m_traindata->getTrainSamples().rows)); + m_rtrees->setRegressionAccuracy(0); // Not useful for classification + m_rtrees->setUseSurrogates(false); // Probably not useful + m_rtrees->setMaxCategories(maxCategories); + m_rtrees->setPriors(m_priors_mat); + m_rtrees->setCalculateVarImportance(true); + m_rtrees->setActiveVarCount(activeVarCount); + //m_rtrees->setTermCriteria(cv::TermCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 100, FLT_EPSILON)); + m_rtrees->setTermCriteria(cv::TermCriteria(cv::TermCriteria::MAX_ITER, numberOfTrees, 0)); + + return train_and_print_errs(m_rtrees, m_traindata); +} + +std::shared_ptr RFSuite::Manager::Test(cv::Mat &testingMat) +{ + cv::Mat empty; // unnecessary but can't provide argument cv::Mat() in linux + return Test(testingMat, empty, false); +} + +std::shared_ptr RFSuite::Manager::Test(cv::Mat &testingMat, cv::Mat &skipZerosMat, bool skipZeros) +{ + int testSize = testingMat.rows; + + std::shared_ptr res(new cv::Mat()); + //std::shared_ptr res(cv::Mat::zeros(testSize, 1, CV_32S)); + *res = cv::Mat::zeros(testSize, 1, CV_32S); + //cv::Mat predictLabels; + + int progress = 0; + int realProgress; + int val; + + if (m_verbose) { + std::cout << "RF Manager:\t Testing...0%"; + } + for (int i = 0; i < testSize; i++) + { + if (skipZeros && skipZerosMat.ptr(i)[0] != 0) + { + //val = m_rtrees->predict(testingMat.row(i), predictLabels); + val = std::lround(m_rtrees->predict(testingMat.row(i))); + res->ptr< int >(i)[0] = val; + } + + if (m_verbose) { + realProgress = 100 * i / testSize; + + if (realProgress >= progress + 5) { + progress = realProgress; + std::cout << "\r" << "RF Manager:\t Testing..." 
<< progress << "%"; + } + } + } + //m_rtrees->predict(testingMat, predictLabels); + + if (m_verbose) { + std::cout << "\r" << "RF Manager:\t Testing...finished\n"; + } + + if (m_verbose || m_save_all) { + cv::Mat variable_importance = m_rtrees->getVarImportance(); + + if (m_verbose) { + std::cout << "RF Manager:\t \tEstimated variable importance:\n"; + + for (int i = 0; i < variable_importance.rows; i++) { + std::cout << "RF Manager:\t \t\tVariable " << i + 1 << ": " << variable_importance.at(i, 0) << "\n"; + } + } + if (m_save_all) { + std::ofstream rfReportFile; + rfReportFile.open(m_output_path + "/rf_report.txt", std::ios_base::app); //append file + + rfReportFile << "Estimated variable importance:\n"; + + for (int i = 0; i < variable_importance.rows; i++) { + rfReportFile << "\tVariable " << i + 1 << ": " << variable_importance.at(i, 0) << "\n"; + } + rfReportFile << "\n"; + } + } + + return res; +} + +void RFSuite::Manager::SaveModel(const std::string filename) +{ + m_rtrees->save(filename); +} + +float RFSuite::Manager::train_and_print_errs(cv::Ptr model, const cv::Ptr& data) +{ + if (m_verbose) { + std::cout << "RF Manager:\t Training..."; + } + + bool ok = model->train(data); + if (!ok) + { + if (m_verbose) { + std::cout << "FAILED\n"; + } + + return -1; + } + else + { + if (m_verbose) { + std::cout << "finished\n"; + + //std::cout << "# of train: " << data->getTrainSamples().rows << "\n"; + } + + float calcErrorTrain, calcErrorTest; + + calcErrorTest = model->calcError(data, true, cv::noArray()); + + if (m_verbose || m_save_all) { + calcErrorTrain = model->calcError(data, false, cv::noArray()); + + if (m_verbose) { + printf("RF Manager:\t \tTrain error (train part): %f\n", calcErrorTrain); + printf("RF Manager:\t \tTrain error (test part): %f\n", calcErrorTest); + } + if (m_save_all) { + std::ofstream rfReportFile; + rfReportFile.open(m_output_path + "/rf_report.txt", std::ios_base::app); //append file + rfReportFile << "Train error (train part): " << calcErrorTrain << "\n"; + rfReportFile << "Train error (test part): " << calcErrorTest << "\n\n"; + } + } + + return calcErrorTest; + } +} + +void RFSuite::Manager::SetTrainDataFromMats(cv::Mat &trainingMat, cv::Mat &labelsMat) +{ + if (m_verbose) { + std::cout << "RF Manager:\t Number of samples: " << trainingMat.rows << "\n"; + } + if (m_save_all) { + std::ofstream rfReportFile; + rfReportFile.open(m_output_path + "/rf_report.txt", std::ios_base::app); //append file + rfReportFile << "Number of samples: " << trainingMat.rows << "\n"; + } + + m_traindata = cv::ml::TrainData::create(trainingMat, cv::ml::ROW_SAMPLE, labelsMat); + m_traindata->setTrainTestSplitRatio(m_training_sample_percentage / 100, true); +} + +void RFSuite::Manager::SetPriorsMat(cv::Mat &priorsMat) +{ + m_priors_mat = priorsMat; +} + +void RFSuite::Manager::SetOutputPath(std::string path) { + m_output_path = path; +} +void RFSuite::Manager::SetSaveAll(bool saveAll) { + m_save_all = saveAll; +} +void RFSuite::Manager::SetVerbose(bool verbose) { + m_verbose = verbose; +} + +void RFSuite::Manager::SetParametersFromConfig(std::string filePath) { + if (filePath != "") { + ConfigParserRF::Parse(filePath, m_training_sample_percentage, m_max_depth, m_min_sample_count_percentage, + m_max_categories, m_active_var_count, m_number_of_trees, m_priors_mat); + + if (m_verbose) { + ConfigParserRF::PrintParseResult(m_training_sample_percentage, m_max_depth, m_min_sample_count_percentage, + m_max_categories, m_active_var_count, m_number_of_trees, m_priors_mat); + } + if 
(m_save_all) { + ConfigParserRF::PrintParseResultToFile(m_output_path + "/rf_report.txt", + m_training_sample_percentage, m_max_depth, m_min_sample_count_percentage, + m_max_categories, m_active_var_count, m_number_of_trees, m_priors_mat); + } + + if (m_traindata != nullptr) { + m_traindata->setTrainTestSplitRatio(m_training_sample_percentage / 100, true); + } + } +} + +void RFSuite::Manager::SetTrainingSamplePercentage(double trainingSamplePercentage) { + m_training_sample_percentage = trainingSamplePercentage; + + if (m_traindata != nullptr) { + m_traindata->setTrainTestSplitRatio(m_training_sample_percentage / 100, true); + } +} + +void RFSuite::Manager::SetMaxDepth(int maxDepth) { + m_max_depth = maxDepth; +} +void RFSuite::Manager::SetMinSampleCountPercentage(double minSampleCountPercentage) { + if (minSampleCountPercentage >= 0 && minSampleCountPercentage <= 100) { + m_min_sample_count_percentage = minSampleCountPercentage; + } + else { + std::cerr << "Min sample percentage should be in [0,100]\n"; + } +} +void RFSuite::Manager::SetMaxCategories(int maxCategories) { + m_max_categories = maxCategories; +} +void RFSuite::Manager::SetActiveVarCount(int activeVarCount) { + m_active_var_count = activeVarCount; +} +void RFSuite::Manager::SetNumberOfTrees(int numberOfTrees) { + m_number_of_trees = numberOfTrees; +} \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/src/RandomForestSuite.cpp b/Modules/CaPTkInteractiveSegmentation/src/RandomForestSuite.cpp new file mode 100644 index 0000000..cae41d4 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/RandomForestSuite.cpp @@ -0,0 +1 @@ +#include "RandomForestSuite.h" \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/src/SvmSuite.cpp b/Modules/CaPTkInteractiveSegmentation/src/SvmSuite.cpp new file mode 100644 index 0000000..932e05f --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/SvmSuite.cpp @@ -0,0 +1 @@ +#include "SvmSuite.h" \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/src/SvmSuiteDescription.cpp b/Modules/CaPTkInteractiveSegmentation/src/SvmSuiteDescription.cpp new file mode 100644 index 0000000..ada8916 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/SvmSuiteDescription.cpp @@ -0,0 +1,407 @@ +#include "SvmSuiteDescription.h" + +#include "ConvertionsOpenCV.h" + +SvmSuite::SvmDescription::SvmDescription() +{ + // Initialize the ParamGrid for each parameter + parametersRanges.push_back(cv::ml::ParamGrid(1, 1, 1)); //C + parametersRanges.push_back(cv::ml::ParamGrid(1, 1, 1)); //GAMMA + parametersRanges.push_back(cv::ml::ParamGrid(0, 0, 1)); //P + parametersRanges.push_back(cv::ml::ParamGrid(0, 0, 1)); //NU + parametersRanges.push_back(cv::ml::ParamGrid(0, 0, 1)); //COEF + parametersRanges.push_back(cv::ml::ParamGrid(0, 0, 1)); //DEGREE + + // Initialize the flag vector for setting specific value to a parameter + isParameterSetToSpecificValueVector.resize(6, true); +} + +void SvmSuite::SvmDescription::SetParameterRangeAuto(cv::ml::SVM::ParamTypes param) +{ + isParameterSetToSpecificValueVector[param] = false; + parametersRanges[param] = cv::ml::SVM::getDefaultGrid(param); +} + +void SvmSuite::SvmDescription::SetParameterRangeAuto(std::string param) +{ + SvmDescription::SetParameterRangeAuto(SvmSuiteConvertions::ParamTypeFromString(param)); +} + +void SvmSuite::SvmDescription::SetParameterRange(cv::ml::SVM::ParamTypes param, double minVal, double maxVal, double logStep) +{ + isParameterSetToSpecificValueVector[param] = 
false; + parametersRanges[param].minVal = minVal; + parametersRanges[param].maxVal = maxVal; + parametersRanges[param].logStep = logStep; +} + +void SvmSuite::SvmDescription::SetParameterRange(std::string param, double minVal, double maxVal, double logStep) +{ + SvmDescription::SetParameterRange(SvmSuiteConvertions::ParamTypeFromString(param), minVal, maxVal, logStep); +} + +void SvmSuite::SvmDescription::SetTermCriteria(int criteriaType, int maxCount, double epsilon) +{ + termCriteria = cv::TermCriteria(criteriaType, maxCount, epsilon); +} + +void SvmSuite::SvmDescription::SetTermCriteria(std::string criteriaType, int maxCount, double epsilon) +{ + termCriteria = cv::TermCriteria(SvmSuiteConvertions::TermCriteriaTypeFromString(criteriaType), maxCount, epsilon); +} + +void SvmSuite::SvmDescription::SetTermCriteria(std::string criteriaType, std::string maxCount, std::string epsilon) +{ + termCriteria = cv::TermCriteria(SvmSuiteConvertions::TermCriteriaTypeFromString(criteriaType), std::stoi(maxCount), std::stod(epsilon)); +} + +cv::ml::SVM::KernelTypes SvmSuite::SvmDescription::GetKernelType() +{ + return kernelType; +} + +std::string SvmSuite::SvmDescription::GetKernelTypeAsString() { + return SvmSuiteConvertions::StringFromKernelType(this->kernelType); +} + +cv::ml::SVM::Types SvmSuite::SvmDescription::GetType() +{ + return type; +} + +std::string SvmSuite::SvmDescription::GetTypeAsString() { + return SvmSuiteConvertions::StringFromType(this->type); +} + +int SvmSuite::SvmDescription::GetKfold() +{ + return kfold; +} + +int SvmSuite::SvmDescription::GetNeighborhoodRadius() +{ + return neighborhoodRadius; +} + +bool SvmSuite::SvmDescription::GetConsiderWeights() +{ + return considerWeights; +} + +double SvmSuite::SvmDescription::GetImportance() +{ + return importance; +} + +std::string SvmSuite::SvmDescription::GetModelPath() +{ + return this->modelPath; +} + +cv::Ptr SvmSuite::SvmDescription::GetModel() +{ + return this->model; +} + +double SvmSuite::SvmDescription::GetC() +{ + return c; +} + +double SvmSuite::SvmDescription::GetGamma() +{ + return gamma; +} + +double SvmSuite::SvmDescription::GetP() +{ + return p; +} + +double SvmSuite::SvmDescription::GetNu() +{ + return nu; +} + +double SvmSuite::SvmDescription::GetCoef() +{ + return coef; +} + +double SvmSuite::SvmDescription::GetDegree() +{ + return degree; +} + +double SvmSuite::SvmDescription::GetParameter(cv::ml::SVM::ParamTypes param) +{ + switch (param) + { + case cv::ml::SVM::C: + return c; + break; + case cv::ml::SVM::GAMMA: + return gamma; + break; + case cv::ml::SVM::P: + return p; + break; + case cv::ml::SVM::NU: + return nu; + break; + case cv::ml::SVM::COEF: + return coef; + break; + case cv::ml::SVM::DEGREE: + return degree; + break; + default: + //invalid + return c; + break; + } +} + +double SvmSuite::SvmDescription::GetParameter(std::string param) +{ + return SvmDescription::GetParameter(SvmSuiteConvertions::ParamTypeFromString(param)); +} + +bool SvmSuite::SvmDescription::isParameterSetToSpecificValue(cv::ml::SVM::ParamTypes param) +{ + return isParameterSetToSpecificValueVector[param]; +} + +bool SvmSuite::SvmDescription::isParameterSetToSpecificValue(std::string param) +{ + return isParameterSetToSpecificValueVector[SvmSuiteConvertions::ParamTypeFromString(param)]; +} + +cv::ml::ParamGrid SvmSuite::SvmDescription::GetParamGridForParameter(cv::ml::SVM::ParamTypes param) +{ + return parametersRanges[param]; +} + +cv::ml::ParamGrid SvmSuite::SvmDescription::GetParamGridForParameter(std::string param) +{ + return 
SvmDescription::GetParamGridForParameter(SvmSuiteConvertions::ParamTypeFromString(param)); +} + +cv::TermCriteria SvmSuite::SvmDescription::GetTermCriteria() +{ + return termCriteria; +} + +std::string SvmSuite::SvmDescription::GetTermCriteriaTypeAsString() { + return SvmSuiteConvertions::StringFromTermCriteriaType(this->termCriteria.type); +} + +void SvmSuite::SvmDescription::SetKernelType(cv::ml::SVM::KernelTypes kernelType) +{ + this->kernelType = kernelType; +} + +void SvmSuite::SvmDescription::SetKernelType(std::string kernelType) { + this->kernelType = SvmSuiteConvertions::KernelTypeFromString(kernelType); +} + +void SvmSuite::SvmDescription::SetType(cv::ml::SVM::Types type) +{ + this->type = type; +} + +void SvmSuite::SvmDescription::SetType(std::string type) +{ + this->type = SvmSuiteConvertions::TypeFromString(type); +} + +void SvmSuite::SvmDescription::SetKfold(int kfold) +{ + this->kfold = kfold; +} + +void SvmSuite::SvmDescription::SetKfold(std::string kfold) +{ + this->kfold = std::stoi(kfold); +} + +void SvmSuite::SvmDescription::SetNeighborhoodRadius(int neighborhoodRadius) +{ + this->neighborhoodRadius = neighborhoodRadius; +} + +void SvmSuite::SvmDescription::SetNeighborhoodRadius(std::string neighborhoodRadius) +{ + this->neighborhoodRadius = std::stoi(neighborhoodRadius); +} + +void SvmSuite::SvmDescription::SetConsiderWeights(bool considerWeights) +{ + this->considerWeights = considerWeights; +} + +void SvmSuite::SvmDescription::SetConsiderWeights(std::string considerWeights) +{ + this->considerWeights = (considerWeights == "true") ? true : false; +} + +void SvmSuite::SvmDescription::SetImportance(double importance) +{ + this->importance = importance; +} + +void SvmSuite::SvmDescription::SetImportance(std::string importance) +{ + this->importance = std::stod(importance); +} + +void SvmSuite::SvmDescription::SetModelPath(std::string modelPath) +{ + this->modelPath = modelPath; +} + +void SvmSuite::SvmDescription::SetModel(cv::Ptr model) +{ + this->model = model; +} + +void SvmSuite::SvmDescription::SetParameter(cv::ml::SVM::ParamTypes param, double val) +{ + this->isParameterSetToSpecificValueVector[param] = true; + + // Set the ParamGrid values for trainAuto (don't use SetParameterRanges(), it changes isParameterSetToSpecificValueVector) + this->parametersRanges[param].minVal = val; + this->parametersRanges[param].maxVal = val + 1; + this->parametersRanges[param].logStep = 2; + + switch (param) { + case cv::ml::SVM::C: + c = val; + break; + case cv::ml::SVM::GAMMA: + gamma = val; + break; + case cv::ml::SVM::P: + p = val; + break; + case cv::ml::SVM::NU: + nu = val; + break; + case cv::ml::SVM::COEF: + coef = val; + break; + case cv::ml::SVM::DEGREE: + degree = val; + break; + default: + //invalid + break; + } +} + +void SvmSuite::SvmDescription::SetParameter(std::string param, double val) +{ + this->SetParameter(SvmSuiteConvertions::ParamTypeFromString(param), val); +} + +void SvmSuite::SvmDescription::SetParameter(std::string param, std::string val) +{ + this->SetParameter(SvmSuiteConvertions::ParamTypeFromString(param), std::stod(val)); +} + +void SvmSuite::SvmDescription::SetC(double c) +{ + this->SetParameter(cv::ml::SVM::C, c); +} + +void SvmSuite::SvmDescription::SetC(std::string c) +{ + this->SetParameter(cv::ml::SVM::C, std::stod(c)); +} + +void SvmSuite::SvmDescription::SetGamma(double gamma) +{ + this->SetParameter(cv::ml::SVM::GAMMA, gamma); +} + +void SvmSuite::SvmDescription::SetGamma(std::string gamma) +{ + this->SetParameter(cv::ml::SVM::GAMMA, 
std::stod(gamma)); +} + +void SvmSuite::SvmDescription::SetP(double p) +{ + this->SetParameter(cv::ml::SVM::P, p); +} + +void SvmSuite::SvmDescription::SetP(std::string p) +{ + this->SetParameter(cv::ml::SVM::P, std::stod(p)); +} + +void SvmSuite::SvmDescription::SetNu(double nu) +{ + this->SetParameter(cv::ml::SVM::NU, nu); +} + +void SvmSuite::SvmDescription::SetNu(std::string nu) +{ + this->SetParameter(cv::ml::SVM::NU, std::stod(nu)); +} + +void SvmSuite::SvmDescription::SetCoef(double coef) +{ + this->SetParameter(cv::ml::SVM::COEF, coef); +} + +void SvmSuite::SvmDescription::SetCoef(std::string coef) +{ + this->SetParameter(cv::ml::SVM::COEF, std::stod(coef)); +} + +void SvmSuite::SvmDescription::SetDegree(double degree) +{ + this->SetParameter(cv::ml::SVM::DEGREE, degree); +} + +void SvmSuite::SvmDescription::SetDegree(std::string degree) +{ + this->SetParameter(cv::ml::SVM::DEGREE, std::stod(degree)); +} + +std::vector SvmSuite::SvmDescription::GetDefaultSvmDescriptions() +{ + SvmDescription rbfDesc; + rbfDesc.SetKernelType(cv::ml::SVM::KernelTypes::RBF); + // rbfDesc.SetParameterRangeAuto(cv::ml::SVM::C); + rbfDesc.SetParameterRange(cv::ml::SVM::C, 1.0, 400, 2.5); + rbfDesc.SetParameterRangeAuto(cv::ml::SVM::GAMMA); + rbfDesc.SetConsiderWeights(true); + rbfDesc.SetImportance(0.4); + + SvmDescription chi2Desc; + chi2Desc.SetKernelType(cv::ml::SVM::KernelTypes::CHI2); + chi2Desc.SetParameterRange(cv::ml::SVM::C, 1.0, 400, 2.5); + // chi2Desc.SetParameterRangeAuto(cv::ml::SVM::C); + chi2Desc.SetParameterRangeAuto(cv::ml::SVM::GAMMA); + chi2Desc.SetConsiderWeights(true); + chi2Desc.SetImportance(0.3); + + SvmDescription interDesc; + interDesc.SetKernelType(cv::ml::SVM::KernelTypes::INTER); + interDesc.SetParameterRangeAuto(cv::ml::SVM::C); + interDesc.SetConsiderWeights(true); + interDesc.SetImportance(0.3); + + std::vector defDescs; + defDescs.push_back(rbfDesc); + defDescs.push_back(chi2Desc); + defDescs.push_back(interDesc); + + return defDescs; +} + +void SvmSuite::SvmDescription::errorOccured(std::string msg) +{ + std::cerr << "Manager error: " << msg << std::endl; +} \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/src/SvmSuiteManager.cpp b/Modules/CaPTkInteractiveSegmentation/src/SvmSuiteManager.cpp new file mode 100644 index 0000000..5b4e90a --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/SvmSuiteManager.cpp @@ -0,0 +1,428 @@ +#include "SvmSuiteManager.h" + +void SvmSuite::Manager::GenerateConfigFromBestValues(std::string outputFileName) { + generateConfig(m_svm_descriptions, outputFileName, m_save_models); +} + +void SvmSuite::Manager::GenerateConfigFromBestValues() { + this->GenerateConfigFromBestValues(m_output_path + "/config.yaml"); +} + +void SvmSuite::Manager::Train() +{ + // Normalize + + if (m_normalize) { + startTimer(); + cv::Mat trainSamples = m_traindata->getTrainSamples(); + NormalizeInput(trainSamples); + stopTimerAndReport("Normalizing SVM input (Training)"); + } + + // Subsampling + + if (m_subsample && m_traindata->getTrainSamples().rows > m_max_samples) { + m_traindata->setTrainTestSplit(m_max_samples); + message("Number of samples after subsampling: " + std::to_string(m_traindata->getTrainSamples().rows) + "\n"); + } + + // Training + + int counterForFileName = 1; + + for (SvmDescription& svm_desc : m_svm_descriptions) + { + if (svm_desc.GetModel() != nullptr) { + // A pretrained model is used + continue; + } + else if (svm_desc.GetModelPath() != "") { + // A pretrained model is used + 
svm_desc.SetModel(cv::ml::SVM::load(svm_desc.GetModelPath())); + } + else { + startTimer(); + + // Train + auto svm = cv::ml::SVM::create(); + svm->setKernel(svm_desc.GetKernelType()); + svm->setType(svm_desc.GetType()); + svm->setTermCriteria(svm_desc.GetTermCriteria()); + if (svm_desc.GetConsiderWeights()) { + svm->setClassWeights(m_weights_mat); + } + + bool res = false; + try + { + message("Training " + svm_desc.GetKernelTypeAsString() + std::string(" kernel...")); + + res = svm->trainAuto( + m_traindata, + svm_desc.GetKfold(), + svm_desc.GetParamGridForParameter(cv::ml::SVM::ParamTypes::C), + svm_desc.GetParamGridForParameter(cv::ml::SVM::ParamTypes::GAMMA), + svm_desc.GetParamGridForParameter(cv::ml::SVM::ParamTypes::P), + svm_desc.GetParamGridForParameter(cv::ml::SVM::ParamTypes::NU), + svm_desc.GetParamGridForParameter(cv::ml::SVM::ParamTypes::COEF), + svm_desc.GetParamGridForParameter(cv::ml::SVM::ParamTypes::DEGREE), + false + ); + message("", false, true); + } + catch (cv::Exception ex) { + errorOccured(ex.what()); + } + + stopTimerAndReport("Training of " + svm_desc.GetKernelTypeAsString() + + std::string(" (n=") + std::to_string(m_traindata->getResponses().rows) + std::string(")") + ); + + // Set and Save results of training + if (res) { + // Save svm model + if (m_save_models) { + // Create directory for this neighborhood radius if it doesn't exist + //auto extraDir = m_output_path + "/NRadius" + std::to_string(svm_desc.GetNeighborhoodRadius()); + + //auto extraDir = m_output_path + "/models"; + auto extraDir = m_output_path; + + // Save model + std::string savePath = extraDir + "/model" + std::to_string(counterForFileName) + "_" + + svm_desc.GetKernelTypeAsString() + ".xml"; + + svm->save(savePath); + svm_desc.SetModelPath(savePath); + } + + // Set svm to SvmDescription + svm_desc.SetModel(svm); + + // Set svm parameters to SvmDescription + svm_desc.SetC(svm->getC()); + svm_desc.SetGamma(svm->getGamma()); + svm_desc.SetP(svm->getP()); + svm_desc.SetNu(svm->getNu()); + svm_desc.SetCoef(svm->getCoef0()); + svm_desc.SetDegree(svm->getDegree()); + } + counterForFileName++; + } + } +} + +std::shared_ptr SvmSuite::Manager::Test(cv::Mat &testingMat, bool pseudoProbMapResult) { + cv::Mat empty; // unnecessary but can't provide argument cv::Mat() in linux + return Test(testingMat, empty, pseudoProbMapResult, false); +} + +std::shared_ptr SvmSuite::Manager::Test(cv::Mat &testingMat, cv::Mat &skipZerosMat, bool pseudoProbMapResult, bool skipZeros) +{ + message("Testing..."); + + + // Normalize + + if (m_normalize) { + startTimer(); + NormalizeInput(testingMat); + stopTimerAndReport("Normalizing SVM input (Testing)"); + } + + startTimer(); + + // Save importance values for each svm used and the sum + double importanceSum = 0; + std::vector< double > importanceValues; + + for (SvmDescription& svm_desc : m_svm_descriptions) + { + double importance = svm_desc.GetImportance(); + importanceSum += importance; + importanceValues.push_back(importance); + } + + // Initialize output + std::shared_ptr res(new Result()); + + // pseudoProbMapResult==true -> Pseudoprobability maps for 2-class, false -> n-class classification + + if (pseudoProbMapResult) { + res->posMat = cv::Mat::zeros(testingMat.rows, 1, CV_32F); + res->negMat = cv::Mat::zeros(testingMat.rows, 1, CV_32F); + + bool posLabelSet = false, negLabelSet = false; + + cv::Mat predicted(1, 1, CV_32F); + std::vector outputPseudo(m_svm_descriptions.size()); + + PseudoProbType decisionAccu, decision, pos, neg; + float dist; + + // Iterate 
through the images and make predictions for each pixel/voxel + int isvm; + + for (int iTest = 0; iTest < testingMat.rows; iTest++) + { + if (skipZeros && (skipZerosMat.row(iTest).at(0, 0) == 0)) { + continue; + } + + isvm = 0; + // For each svm + for (SvmDescription& svm_desc : m_svm_descriptions) + { + // Predict + if (svm_desc.GetModel() == nullptr) { + errorOccured("Trying to make predictions on untrained SVM"); + return res; + } + else { + svm_desc.GetModel()->predict(testingMat.row(iTest), predicted, pseudoProbMapResult); + + dist = predicted.at(0, 0); + if ((!posLabelSet) && (dist > 0)) { + // The label hasn't been set for pos image + svm_desc.GetModel()->predict(testingMat.row(iTest), predicted, false); + res->posLabel = std::lround(predicted.at(0, 0)); + posLabelSet = true; + } + else if ((!negLabelSet) && (dist < 0)) { + // The label hasn't been set for neg image + svm_desc.GetModel()->predict(testingMat.row(iTest), predicted, false); + res->negLabel = std::lround(predicted.at(0, 0)); + negLabelSet = true; + } + + // The value is distance to the hyperplane (signed positive for one label, negative for the other) + outputPseudo[isvm++] = dist; + } + + decisionAccu = 0; + decision = 0; + + for (size_t i = 0; i < outputPseudo.size(); i++) { + decisionAccu += static_cast(outputPseudo[i] * importanceValues[i]); + } + + if (importanceSum != 0) { + // Average of the each different svm's decision + decision = static_cast(decisionAccu / importanceSum); + } + + // By using the sigmoid function: f(x) = 1 / (1+e^(-x)) the values are normalized to be doubles between [0,1] + pos = static_cast(1.0 / (1.0 + std::exp(-decision))); // Accounts for the distances for one label (the one with pos distances) + neg = static_cast(1.0 / (1.0 + std::exp(decision))); // Accounts for the distances for the other label (the one with neg distances) + + // Set values to output + res->posMat.ptr(iTest)[0] = pos; + res->negMat.ptr(iTest)[0] = neg; + } + } + } + else { + //Create output image with the same dimensions as the input images for this subject + res->labelsMat = cv::Mat::zeros(testingMat.rows, 1, CV_32S); + + size_t totalThreadsNumber = (m_number_of_threads > testingMat.rows) ? 
1 : m_number_of_threads; + int counterForThreadsVec = 0; + std::vector threads(totalThreadsNumber); + + for (size_t iStart = 0; iStart < totalThreadsNumber; iStart++) + { + threads[counterForThreadsVec++] = std::thread(&SvmSuite::Manager::testingLabelsThreadJob, this, + std::ref(testingMat), static_cast(iStart), static_cast(totalThreadsNumber), + std::ref(res->labelsMat), skipZeros, std::ref(skipZerosMat), std::ref(importanceValues)); + } + + for (size_t i = 0; i < totalThreadsNumber; i++) { + threads[i].join(); + } + } + + stopTimerAndReport("SVM predictions"); + + message("", false, true); + + return res; +} + +void SvmSuite::Manager::AddSvmDescriptionToList(SvmDescription svmDesc) { + m_svm_descriptions.push_back(svmDesc); +} + +void SvmSuite::Manager::AddSvmDescriptions(std::vector< SvmDescription > svmDescs) { + for (size_t i = 0; i < svmDescs.size(); i++) { + m_svm_descriptions.push_back(svmDescs[i]); + } +} + +void SvmSuite::Manager::AddPretrainedModel(std::string pretrainedModelPath, int neighborhoodRadius) { + SvmDescription svm_desc; + svm_desc.SetModelPath(pretrainedModelPath); + svm_desc.SetNeighborhoodRadius(neighborhoodRadius); + m_svm_descriptions.push_back(svm_desc); +} + +void SvmSuite::Manager::AddSvmsFromConfig(std::string configPath) { + this->AddSvmDescriptions(getSvmDescriptionsFromConfig(configPath)); +} + +void SvmSuite::Manager::SetTrainData(cv::Mat &trainingMat, cv::Mat &labelsMat, cv::Mat &weightsMat/*, cv::Mat sampleIdx*/) +{ + message("Number of samples: " + std::to_string(trainingMat.rows) + std::string("\n")); + message("Number of features: " + std::to_string(trainingMat.cols) + std::string("\n")); + + m_traindata = cv::ml::TrainData::create(trainingMat, cv::ml::ROW_SAMPLE, labelsMat/*, cv::Mat(), sampleIdx*/); + + m_weights_mat = weightsMat; + + for (int i = 0; i < m_traindata->getClassLabels().rows; i++) { + m_different_labels.insert(m_traindata->getClassLabels().at(i, 0)); + } +} + +void SvmSuite::Manager::SetVerbose(bool verbose) { + m_verbose = verbose; +} + +void SvmSuite::Manager::SetOutputPath(std::string path) { + m_output_path = path; +} + +void SvmSuite::Manager::SetSavingModelsEnabled(bool modelsEnabled) { + m_save_models = modelsEnabled; +} + +void SvmSuite::Manager::SetTimerEnabled(bool timerEnabled) { + m_timer_enabled = timerEnabled; +} + +void SvmSuite::Manager::SetNumberOfThreads(int numberOfThreads) { + m_number_of_threads = numberOfThreads; +} + +void SvmSuite::Manager::SetSubsampling(bool subsample, int maxSamples) { + m_subsample = subsample; + m_max_samples = maxSamples; +} + +void SvmSuite::Manager::SetInputNormalization(bool normalize) { + m_normalize = normalize; +} + +void SvmSuite::Manager::testingLabelsThreadJob(cv::Mat& testingMat, int iStart, int interval, cv::Mat& resultLabelsMat, + bool skipZeros, cv::Mat& skipZerosMat, std::vector< double >& importanceValues) +{ + std::map< LabelsType, double > decisionImportanceValues; + + cv::Mat predicted(1, 1, CV_32F); + std::vector outputLabels(m_svm_descriptions.size()); + int isvm; + + // Iterate through the images and make predictions for each pixel/voxel + for (int iTest = iStart; iTest < testingMat.rows; iTest += interval) + { + if (skipZeros && (skipZerosMat.row(iTest).at(0, 0) == 0)) { + continue; + } + + isvm = 0; + + // For each svm + for (SvmDescription& svm_desc : m_svm_descriptions) + { + // Predict + if (svm_desc.GetModel() == nullptr) { + errorOccured("Trying to make predictions on untrained SVM"); + //return res; + } + else { + 
svm_desc.GetModel()->predict(testingMat.row(iTest), predicted, false); + + // The value is the predicted label + outputLabels[isvm++] = static_cast(std::lround(predicted.at(0, 0))); + //outputLabels[isvm++] = std::round(predicted.at(0, 0)); + } + } + + // The values are the predicted labels + // In case of different svms predicting a different label + // then the prediction with the most accumulated importance will be used + // In case of a draw the decision from the first in order svm is used (draws should be avoided in the configuration) + for (LabelsType label : m_different_labels) { + // This is done for time optimization (not creating a new object each time) + decisionImportanceValues[label] = 0.0; + } + + // For each different label in the predictions, find the sum of the importance values of the svms that predicted it + for (size_t i = 0; i < outputLabels.size(); i++) { + //if (decisionImportanceValues.find(outputLabels[i]) == decisionImportanceValues.end()) + //{ + // decisionImportanceValues[outputLabels[i]] = importanceValues[i]; + //} + //else { + decisionImportanceValues[outputLabels[i]] += importanceValues[i]; + //} + } + + // Find the label with the most importance value + //std::vector< int > keys = getMapKeyset(decisionImportanceValues); + int decision = 0; + double bestDecisionImportance = 0.0; + + for (size_t decisionCandidate : m_different_labels) + { + if (decisionImportanceValues[static_cast(decisionCandidate)] > bestDecisionImportance) + { + decision = static_cast(decisionCandidate); + bestDecisionImportance = static_cast(decisionImportanceValues[static_cast(decisionCandidate)]); + } + } + + // Set value to output + resultLabelsMat.ptr(iTest)[0] = static_cast(decision); + } +} + +void SvmSuite::Manager::startTimer() { + if (m_timer_enabled) { + m_timer.Reset(); + } +} + +void SvmSuite::Manager::stopTimerAndReport(std::string desc) { + if (m_timer_enabled) { + float diff = m_timer.Diff(); + + std::ofstream timerFile; + timerFile.open(m_output_path + "/time_report.txt", std::ios_base::app); //append file + timerFile << desc << ": " << diff << "s\n"; + } +} + +void SvmSuite::Manager::message(std::string message, bool overdraw, bool finished, int progress) +{ + if (m_verbose) { + if (overdraw) { + std::cout << "\r"; + } + + if (message != "") { + std::cout << "SVM Manager:\t" << message; + + if (progress != -1) { + std::cout << " [" << progress << "%]"; + } + } + + if (finished) { + std::cout << "finished\n"; + } + } +} + +void SvmSuite::Manager::errorOccured(std::string msg) { + std::cerr << "SVM Manager error: " << msg << std::endl; +} diff --git a/Modules/CaPTkInteractiveSegmentation/src/SvmSuiteOperations.cpp b/Modules/CaPTkInteractiveSegmentation/src/SvmSuiteOperations.cpp new file mode 100644 index 0000000..8f4a64e --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/SvmSuiteOperations.cpp @@ -0,0 +1,266 @@ +#include "SvmSuiteOperations.h" + +#include "ConvertionsYAML.h" + +void SvmSuite::NormalizeInput(cv::Mat& mat) +{ + //cv::Mat meanValue = cv::Mat::zeros(1,1,CV_32F), stdValue = cv::Mat::zeros(1, 1, CV_32F); + cv::Scalar meanValue, stdValue; + double mean, std; + + for (int i_col = 0; i_col < mat.cols; i_col++) + { + cv::meanStdDev(mat.col(i_col), meanValue, stdValue); + + //mean = meanValue.at(0,0); + //std = (stdValue.at(0, 0) == 0) ? 0.001 : stdValue.at(0, 0); + mean = meanValue[0]; + std = (stdValue[0] == 0) ? 
0.001 : stdValue[0]; + + mat.col(i_col).convertTo(mat.col(i_col), CV_32F, 1, -1 * mean); + mat.col(i_col).convertTo(mat.col(i_col), CV_32F, 1 / (2 * std), 0); + } +} + +std::vector< SvmSuite::SvmDescription > SvmSuite::getSvmDescriptionsFromConfig(std::string configPath) +{ + std::vector< SvmSuite::SvmDescription > svm_descriptions; + YAML::Node root = YAML::LoadFile(configPath); + + // Check if the root node of the document is named as stated in YAML_ROOT_NODE + if (root[YAML_ROOT_NODE]) + { + // Iterate through the different SVMs in the config + for (YAML::const_iterator it_svms = root[YAML_ROOT_NODE].begin(); it_svms != root[YAML_ROOT_NODE].end(); ++it_svms) + { + SvmSuite::SvmDescription svm; + + // Iterate through the different options for each SVM in the config + for (YAML::const_iterator it_single_svm = it_svms->begin(); it_single_svm != it_svms->end(); ++it_single_svm) + { + std::string configOption = it_single_svm->first.as(); + + if (configOption == YAML_KERNEL_TYPE) { + std::string val = it_single_svm->second.as(); + svm.SetKernelType(val); + } + else if (configOption == YAML_TYPE) { + std::string val = it_single_svm->second.as(); + svm.SetType(val); + } + else if (configOption == YAML_KFOLD) { + int val = it_single_svm->second.as(); + svm.SetKfold(val); + } + else if (configOption == YAML_NEIGHBORHOOD_RADIUS) { + int val = it_single_svm->second.as(); + svm.SetNeighborhoodRadius(val); + } + else if (configOption == YAML_IMPORTANCE) { + double val = it_single_svm->second.as(); + svm.SetImportance(val); + } + else if (configOption == YAML_CONSIDER_WEIGHTS) { + bool val = it_single_svm->second.as(); + svm.SetConsiderWeights(val); + } + else if (configOption == YAML_MODEL_PATH) { + std::string val = it_single_svm->second.as(); + svm.SetModelPath(val); + } + else if (configOption == YAML_TERM_CRITERIA) { + // Term criteria must have the following variables + std::string criteria_type = ""; + std::string max_count = ""; + std::string epsilon = ""; + + for (YAML::const_iterator it_term = it_single_svm->second.begin(); it_term != it_single_svm->second.end(); ++it_term) + { + std::string term_param_name = it_term->first.as(); + std::string term_param_val = it_term->second.as(); + + if (term_param_name == YAML_TERM_CRITERIA_TYPE) { + criteria_type = term_param_val; + } + else if (term_param_name == YAML_TERM_CRITERIA_MAX) { + max_count = term_param_val; + } + else if (term_param_name == YAML_TERM_CRITERIA_EPS) { + epsilon = term_param_val; + } + else { + //invalid + } + } + + svm.SetTermCriteria(criteria_type, max_count, epsilon); + } + else if ((configOption == YAML_C) || (configOption == YAML_GAMMA) || (configOption == YAML_P) || + (configOption == YAML_NU) || (configOption == YAML_COEF) || (configOption == YAML_DEGREE)) + { + if (it_single_svm->second.IsScalar()) + { + std::string val = it_single_svm->second.as(); + if (val == YAML_AUTO) { + // Parameter was set to auto automatically + svm.SetParameterRangeAuto(configOption); + } + else { + // Specific value for the parameter was given + svm.SetParameter(configOption, val); + } + } + else + { + // Parameter range must have the following variables + double minVal = 0.0; + double maxVal = 0.0; + double logStep = 0.0; + + for (YAML::const_iterator it_param = it_single_svm->second.begin(); it_param != it_single_svm->second.end(); ++it_param) + { + std::string range_param_name = it_param->first.as(); + double range_param_val = it_param->second.as(); + + if (range_param_name == YAML_MIN_VAL) { + minVal = range_param_val; + } + else if 
(range_param_name == YAML_MAX_VAL) { + maxVal = range_param_val; + } + else if (range_param_name == YAML_LOG_STEP) { + logStep = range_param_val; + } + else { + //invalid + } + } + + svm.SetParameterRange(configOption, minVal, maxVal, logStep); + } + } + } + + svm_descriptions.push_back(svm); + } + } + else { + //invalid + } + + return svm_descriptions; +} + +SvmSuite::SvmDescription SvmSuite::convertModelToSvmDescription(cv::Ptr svm_model, int neighborhood_radius, double importance) +{ + SvmSuite::SvmDescription svm_description; + svm_description.SetKernelType(static_cast(svm_model->getKernelType())); + svm_description.SetType(static_cast(svm_model->getType())); + + if (svm_model->getC() != 1.0) { + svm_description.SetC(svm_model->getC()); + } + if (svm_model->getGamma() != 1.0) { + svm_description.SetGamma(svm_model->getGamma()); + } + if (svm_model->getP() != 0.0) { + svm_description.SetP(svm_model->getP()); + } + if (svm_model->getNu() != 0.0) { + svm_description.SetNu(svm_model->getNu()); + } + if (svm_model->getCoef0() != 0.0) { + svm_description.SetCoef(svm_model->getCoef0()); + } + if (svm_model->getDegree() != 0) { + svm_description.SetDegree(svm_model->getDegree()); + } + + if (neighborhood_radius != 0) { + svm_description.SetNeighborhoodRadius(neighborhood_radius); + } + if (importance != 1.0) { + svm_description.SetImportance(importance); + } + + svm_description.SetTermCriteria(svm_model->getTermCriteria().type, svm_model->getTermCriteria().maxCount, svm_model->getTermCriteria().epsilon); + + return svm_description; +} + +SvmSuite::SvmDescription SvmSuite::convertModelToSvmDescription(std::string model_path, int neighborhood_radius, double importance) +{ + SvmSuite::SvmDescription svm_desc = convertModelToSvmDescription(cv::ml::SVM::load(model_path), neighborhood_radius, importance); + svm_desc.SetModelPath(model_path); + return svm_desc; +} + +void SvmSuite::generateConfig(std::vector< SvmSuite::SvmDescription > &svm_descriptions, std::string outputFilePath, bool m_save_models) +{ + YAML::Node node; + + for (SvmSuite::SvmDescription& svm_desc : svm_descriptions) { + YAML::Node toAdd = SvmSuiteConvertions::yamlConvertSvmDescriptionToNode(svm_desc); + node[YAML_ROOT_NODE].push_back(toAdd); + } + + if (m_save_models) { + std::ofstream out(outputFilePath); + out << "---\n" << node << "\n..."; + out.close(); + } +} + +void SvmSuite::generateConfig(SvmSuite::SvmDescription &svm_description, std::string outputFilePath, bool m_save_models) +{ + m_save_models = m_save_models; // So it's not unused (sorry) + + std::vector< SvmSuite::SvmDescription > svm_descriptions; + svm_descriptions.push_back(svm_description); + + generateConfig(svm_descriptions, outputFilePath); +} + +void SvmSuite::generateConfig(cv::Ptr model, int neighborhood_radius, double importance, std::string outputFilePath, bool m_save_models) +{ + m_save_models = m_save_models; // So it's not unused (sorry) + + std::vector< SvmSuite::SvmDescription > svm_descriptions; + svm_descriptions.push_back(convertModelToSvmDescription(model, neighborhood_radius, importance)); + + generateConfig(svm_descriptions, outputFilePath); +} + +void SvmSuite::generateConfig(std::vector< cv::Ptr > multiple_models, std::vector< int > neighborhood_radii, + std::vector< double > importance_values, std::string outputFilePath, bool m_save_models) +{ + m_save_models = m_save_models; // So it's not unused (sorry) + + std::vector< SvmSuite::SvmDescription > svm_descriptions; + + for (size_t i = 0; i < multiple_models.size(); i++) { + 
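// Note: convertModelToSvmDescription() only copies the neighborhood radius and importance when they differ from their defaults (0 and 1.0), so both are re-applied explicitly below to guarantee every description carries the requested values.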
SvmSuite::SvmDescription svm_desc = convertModelToSvmDescription(multiple_models[i], neighborhood_radii[i], importance_values[i]); + svm_desc.SetNeighborhoodRadius(neighborhood_radii[i]); + svm_desc.SetImportance(importance_values[i]); + svm_descriptions.push_back(svm_desc); + } + + generateConfig(svm_descriptions, outputFilePath); +} + +void SvmSuite::generateConfig(std::vector< std::string > multiple_models_paths, std::vector< int > neighborhood_radii, + std::vector< double > importance_values, std::string outputFilePath, bool m_save_models) +{ + m_save_models = m_save_models; // So it's not unused (sorry) + + std::vector< SvmSuite::SvmDescription > svm_descriptions; + + for (size_t i = 0; i < multiple_models_paths.size(); i++) { + SvmSuite::SvmDescription svm_desc = convertModelToSvmDescription(multiple_models_paths[i], neighborhood_radii[i], importance_values[i]); + svm_desc.SetNeighborhoodRadius(neighborhood_radii[i]); + svm_desc.SetImportance(importance_values[i]); + svm_descriptions.push_back(svm_desc); + } + + generateConfig(svm_descriptions, outputFilePath); +} diff --git a/Modules/CaPTkInteractiveSegmentation/src/SvmSuiteUtil.cpp b/Modules/CaPTkInteractiveSegmentation/src/SvmSuiteUtil.cpp new file mode 100644 index 0000000..32cb8c8 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/SvmSuiteUtil.cpp @@ -0,0 +1,14 @@ +#include "SvmSuiteUtil.h" + +SvmSuiteUtil::Timer::Timer() { + Reset(); +} + +void SvmSuiteUtil::Timer::Reset() { + m_timestamp = std::chrono::high_resolution_clock::now(); +} + +float SvmSuiteUtil::Timer::Diff() { + std::chrono::duration fs = std::chrono::high_resolution_clock::now() - m_timestamp; + return fs.count(); +} \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/src/UtilCvMatToImageGTS.cpp b/Modules/CaPTkInteractiveSegmentation/src/UtilCvMatToImageGTS.cpp new file mode 100644 index 0000000..bbcf14b --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/UtilCvMatToImageGTS.cpp @@ -0,0 +1 @@ +#include "UtilCvMatToImageGTS.h" \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/src/UtilGTS.cpp b/Modules/CaPTkInteractiveSegmentation/src/UtilGTS.cpp new file mode 100644 index 0000000..45307ef --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/UtilGTS.cpp @@ -0,0 +1,117 @@ +#include "UtilGTS.h" + +#include +#include +#include +#include +#include + +// Some of the headers below are not needed +#if (_WIN32) +#define NOMINMAX +#include +#include +#include +#include +#include +#include +#else +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif + +std::string GeodesicTrainingSegmentation::UtilGTS::currentDateTime() +{ + // Get current time + std::chrono::time_point time_now = std::chrono::system_clock::now(); + + // Convert to time_t for + std::time_t time_now_t = std::chrono::system_clock::to_time_t(time_now); + + // Format to datetime + std::tm now_tm = *std::localtime(&time_now_t); + char buf[512]; + std::strftime(buf, 512, "%Y-%m-%d %H.%M.%S", &now_tm); + std::string dateTime(buf); + + return dateTime; +} + +bool GeodesicTrainingSegmentation::UtilGTS::directoryExists(const std::string &dName) +{ + struct stat info; + std::string dName_Wrap = dName; + + if (dName_Wrap[dName_Wrap.length() - 1] == '/') + { + dName_Wrap.erase(dName_Wrap.end() - 1); + } + + if (stat(dName_Wrap.c_str(), &info) != 0) + return false; + else if (info.st_mode & S_IFDIR) // S_ISDIR() doesn't exist on windows + return true; + else + return false; +} + +bool 
GeodesicTrainingSegmentation::UtilGTS::createDir(const std::string &dName) +{ + //! Pure c++ based directory creation +#if defined(_WIN32) + DWORD ftyp = GetFileAttributesA(dName.c_str()); // check if directory exists or not + if (ftyp == INVALID_FILE_ATTRIBUTES) + _mkdir(dName.c_str()); + return true; +#else + DIR *pDir; + pDir = opendir(dName.c_str()); // check if directory exists or not + if (pDir == NULL) + mkdir(dName.c_str(), 0777); + return true; +#endif + return false; +} + +/**Splits a string into a list using a delimiter*/ +std::vector +GeodesicTrainingSegmentation::UtilGTS::split_string(const std::string &s, char delim) +{ + std::stringstream ss(s); + std::string item; + std::vector elems; + while (std::getline(ss, item, delim)) { + elems.push_back(std::move(item)); + } + return elems; +} + +std::string GeodesicTrainingSegmentation::UtilGTS::getFileExtension(std::string filePath) +{ + std::replace( filePath.begin(), filePath.end(), '\\', '/'); // replace all '\\' to '/' + auto fSplitVec = split_string(filePath, '/'); // Split based on '/' + auto fLastComp = fSplitVec[fSplitVec.size()-1]; // The last component (aka filename) + + auto fSplitDotsVec = split_string(fLastComp, '.'); // Split filename based on '.' + + // We basically keep the last component as file extension, except for nii.gz + if (fSplitDotsVec.size() > 2 && fSplitDotsVec[fSplitDotsVec.size()-2] == "nii" && + fSplitDotsVec[fSplitDotsVec.size()-1] == "gz") + { + return ".nii.gz"; // Special case + } + else if (fSplitDotsVec.size() == 1) + { + return ""; + } + else { + return fSplitDotsVec[fSplitDotsVec.size()-1]; // Last component + } +} \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/src/UtilImageToCvMatGTS.cpp b/Modules/CaPTkInteractiveSegmentation/src/UtilImageToCvMatGTS.cpp new file mode 100644 index 0000000..d0c191d --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/UtilImageToCvMatGTS.cpp @@ -0,0 +1,9 @@ +#include "UtilImageToCvMatGTS.h" + +void GeodesicTrainingSegmentation::ParserGTS::ScaleSomeOfTheColumns(cv::Mat& mat, int colStart, int colEnd, double ratio) +{ + for (int i_col = colStart; i_col <= colEnd; i_col++) + { + mat.col(i_col).convertTo(mat.col(i_col), CV_32F, ratio); + } +} \ No newline at end of file diff --git a/Modules/CaPTkInteractiveSegmentation/src/UtilItkGTS.cpp b/Modules/CaPTkInteractiveSegmentation/src/UtilItkGTS.cpp new file mode 100644 index 0000000..fd58dc1 --- /dev/null +++ b/Modules/CaPTkInteractiveSegmentation/src/UtilItkGTS.cpp @@ -0,0 +1 @@ +#include "UtilItkGTS.h" \ No newline at end of file diff --git a/Modules/ModuleList.cmake b/Modules/ModuleList.cmake new file mode 100644 index 0000000..223422a --- /dev/null +++ b/Modules/ModuleList.cmake @@ -0,0 +1,3 @@ +set(MITK_MODULES + CaPTkInteractiveSegmentation +) diff --git a/Plugins/PluginList.cmake b/Plugins/PluginList.cmake new file mode 100644 index 0000000..d2240d6 --- /dev/null +++ b/Plugins/PluginList.cmake @@ -0,0 +1,6 @@ +### Add upenn.cbica.* to the list of allowed naming schemes +list(APPEND MITK_PLUGIN_REGEX_LIST "^upenn_cbica_[a-zA-Z0-9_]+$") + +set(MITK_PLUGINS + upenn.cbica.captk.interactivesegmentation:ON +) diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/CMakeLists.txt b/Plugins/upenn.cbica.captk.interactivesegmentation/CMakeLists.txt new file mode 100644 index 0000000..0a946da --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/CMakeLists.txt @@ -0,0 +1,10 @@ +project(upenn_cbica_captk_interactivesegmentation) + 
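# The CTK include directories are needed because the view and QmitkDataSelectionWidget use ctkPluginContext / ctkServiceReference directly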
+include_directories(${CTK_INCLUDE_DIRS}) + +mitk_create_plugin( + EXPORT_DIRECTIVE MITK_CAPTK_INTERACTIVE_SEGMENTATION + EXPORTED_INCLUDE_SUFFIXES src + PACKAGE_DEPENDS ITK OpenCV + MODULE_DEPENDS MitkSegmentation MitkSegmentationUI MitkCaPTkInteractiveSegmentation +) \ No newline at end of file diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/README.md b/Plugins/upenn.cbica.captk.interactivesegmentation/README.md new file mode 100644 index 0000000..905451b --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/README.md @@ -0,0 +1,3 @@ +## upenn.cbica.captk.interactivesegmentation + +UI and invoking for CaPTkInteractiveSegmentation \ No newline at end of file diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/files.cmake b/Plugins/upenn.cbica.captk.interactivesegmentation/files.cmake new file mode 100644 index 0000000..85b765c --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/files.cmake @@ -0,0 +1,48 @@ +set(SRC_CPP_FILES + +) + +set(INTERNAL_CPP_FILES + mitkPluginActivator.cpp + QmitkCaPTkInteractiveSegmentationView.cpp + Common/QmitkDataSelectionWidget.cpp +) + +set(UI_FILES + src/internal/QmitkCaPTkInteractiveSegmentationControls.ui + src/internal/Common/QmitkDataSelectionWidgetControls.ui +) + +set(MOC_H_FILES + src/internal/mitkPluginActivator.h + src/internal/QmitkCaPTkInteractiveSegmentationView.h + src/internal/Common/QmitkDataSelectionWidget.h +) + +set(CACHED_RESOURCE_FILES + resources/NewSegmentationSession_48x48.png + resources/mll_icon2.svg + plugin.xml +) + +set(QRC_FILES + resources/captkinteractivesegmentation.qrc +) + +set(CPP_FILES) + +foreach(file ${SRC_CPP_FILES}) + set(CPP_FILES ${CPP_FILES} src/${file}) +endforeach(file ${SRC_CPP_FILES}) + +#usFunctionEmbedResources( +#CPP_FILES +# LIBRARY_NAME "liborg_mitk_gui_qt_multilabelsegmentation" +#ROOT_DIR resources +#FILES Interactions/SegmentationInteraction.xml +# Interactions/ConfigSegmentation.xml +#) + +foreach(file ${INTERNAL_CPP_FILES}) + set(CPP_FILES ${CPP_FILES} src/internal/${file}) +endforeach(file ${INTERNAL_CPP_FILES}) diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/manifest_headers.cmake b/Plugins/upenn.cbica.captk.interactivesegmentation/manifest_headers.cmake new file mode 100644 index 0000000..3bab34f --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/manifest_headers.cmake @@ -0,0 +1,5 @@ +set(Plugin-Name "CaPTk Interactive Segmentation") +set(Plugin-Version "1.0.0") +set(Plugin-Vendor "CBICA") +set(Plugin-ContactAddress "https://www.med.upenn.edu/cbica/") +set(Require-Plugin org.mitk.gui.qt.common org.mitk.gui.qt.datamanager) diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/plugin.xml b/Plugins/upenn.cbica.captk.interactivesegmentation/plugin.xml new file mode 100644 index 0000000..ab70600 --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/plugin.xml @@ -0,0 +1,15 @@ + + + + + + + + + + + diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/resources/NewSegmentationSession_48x48.png b/Plugins/upenn.cbica.captk.interactivesegmentation/resources/NewSegmentationSession_48x48.png new file mode 100644 index 0000000..8a6b132 Binary files /dev/null and b/Plugins/upenn.cbica.captk.interactivesegmentation/resources/NewSegmentationSession_48x48.png differ diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/resources/captkinteractivesegmentation.qrc b/Plugins/upenn.cbica.captk.interactivesegmentation/resources/captkinteractivesegmentation.qrc new file mode 100644 
index 0000000..f832804 --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/resources/captkinteractivesegmentation.qrc @@ -0,0 +1,5 @@ + + + NewSegmentationSession_48x48.png + + diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/resources/mll_icon2.svg b/Plugins/upenn.cbica.captk.interactivesegmentation/resources/mll_icon2.svg new file mode 100644 index 0000000..c57f68e --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/resources/mll_icon2.svg @@ -0,0 +1,79 @@ + + + + + + + + + + image/svg+xml + + + + + + + + + + + diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/Common/QmitkDataSelectionWidget.cpp b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/Common/QmitkDataSelectionWidget.cpp new file mode 100644 index 0000000..6bfa407 --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/Common/QmitkDataSelectionWidget.cpp @@ -0,0 +1,207 @@ +/*=================================================================== + +The Medical Imaging Interaction Toolkit (MITK) + +Copyright (c) German Cancer Research Center, +Division of Medical and Biological Informatics. +All rights reserved. + +This software is distributed WITHOUT ANY WARRANTY; without +even the implied warranty of MERCHANTABILITY or FITNESS FOR +A PARTICULAR PURPOSE. + +See LICENSE.txt or http://www.mitk.org for details. + +===================================================================*/ + +#include "QmitkDataSelectionWidget.h" +#include "internal/mitkPluginActivator.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static mitk::NodePredicateBase::Pointer CreatePredicate(QmitkDataSelectionWidget::PredicateType predicateType) +{ + + mitk::NodePredicateAnd::Pointer seedsPredicate = mitk::NodePredicateAnd::New(); + seedsPredicate->AddPredicate(mitk::TNodePredicateDataType::New()); + seedsPredicate->AddPredicate(mitk::NodePredicateNot::New(mitk::NodePredicateProperty::New("helper object"))); + + // CaPTk: To not detect output segmentations as seeds + seedsPredicate->AddPredicate( + mitk::NodePredicateNot::New( + mitk::NodePredicateProperty::New( + "captk.interactive.segmentation.output", + mitk::BoolProperty::New(true) + ) + ) + ); + + mitk::NodePredicateAnd::Pointer maskPredicate = mitk::NodePredicateAnd::New(); + maskPredicate->AddPredicate(mitk::NodePredicateProperty::New("binary", mitk::BoolProperty::New(true))); + maskPredicate->AddPredicate(mitk::NodePredicateNot::New(mitk::NodePredicateProperty::New("helper object", mitk::BoolProperty::New(true)))); + + mitk::NodePredicateDataType::Pointer isDwi = mitk::NodePredicateDataType::New("DiffusionImage"); + mitk::NodePredicateDataType::Pointer isDti = mitk::NodePredicateDataType::New("TensorImage"); + mitk::NodePredicateDataType::Pointer isOdf = mitk::NodePredicateDataType::New("OdfImage"); + mitk::TNodePredicateDataType::Pointer isImage = mitk::TNodePredicateDataType::New(); + + mitk::NodePredicateOr::Pointer validImages = mitk::NodePredicateOr::New(); + validImages->AddPredicate(isImage); + validImages->AddPredicate(isDwi); + validImages->AddPredicate(isDti); + validImages->AddPredicate(isOdf); + + mitk::NodePredicateAnd::Pointer imagePredicate = mitk::NodePredicateAnd::New(); + imagePredicate->AddPredicate(validImages); + imagePredicate->AddPredicate(mitk::NodePredicateNot::New(seedsPredicate)); + 
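// At this point imagePredicate accepts plain, diffusion, tensor and ODF images that are not seeds; the predicates added below additionally exclude binary masks, helper objects and CaPTk output segmentations.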
imagePredicate->AddPredicate(mitk::NodePredicateNot::New(mitk::NodePredicateProperty::New("binary", mitk::BoolProperty::New(true)))); + imagePredicate->AddPredicate(mitk::NodePredicateNot::New(mitk::NodePredicateProperty::New("helper object", mitk::BoolProperty::New(true)))); + + // CaPTk: To not detect output segmentations as normal images + imagePredicate->AddPredicate( + mitk::NodePredicateNot::New( + mitk::NodePredicateProperty::New( + "captk.interactive.segmentation.output", + mitk::BoolProperty::New(true) + ) + ) + ); + + mitk::NodePredicateAnd::Pointer surfacePredicate = mitk::NodePredicateAnd::New(); + surfacePredicate->AddPredicate(mitk::TNodePredicateDataType::New()); + surfacePredicate->AddPredicate(mitk::NodePredicateNot::New(mitk::NodePredicateProperty::New("helper object", mitk::BoolProperty::New(true)))); + + switch(predicateType) + { + case QmitkDataSelectionWidget::ImagePredicate: + return imagePredicate.GetPointer(); + + case QmitkDataSelectionWidget::MaskPredicate: + return maskPredicate.GetPointer(); + + case QmitkDataSelectionWidget::SegmentationPredicate: + return seedsPredicate.GetPointer(); + + case QmitkDataSelectionWidget::SurfacePredicate: + return surfacePredicate.GetPointer(); + + default: + assert(false && "Unknown predefined predicate!"); + return nullptr; + } +} + +QmitkDataSelectionWidget::QmitkDataSelectionWidget(QWidget* parent) + : QWidget(parent) +{ + m_Controls.setupUi(this); + m_Controls.helpLabel->hide(); +} + +QmitkDataSelectionWidget::~QmitkDataSelectionWidget() +{ +} + +unsigned int QmitkDataSelectionWidget::AddDataStorageComboBox(QmitkDataSelectionWidget::PredicateType predicate) +{ + return this->AddDataStorageComboBox("", predicate); +} + +unsigned int QmitkDataSelectionWidget::AddDataStorageComboBox(mitk::NodePredicateBase* predicate) +{ + return this->AddDataStorageComboBox("", predicate); +} + +unsigned int QmitkDataSelectionWidget::AddDataStorageComboBox(const QString &labelText, QmitkDataSelectionWidget::PredicateType predicate) +{ + return this->AddDataStorageComboBox(labelText, CreatePredicate(predicate)); +} + +unsigned int QmitkDataSelectionWidget::AddDataStorageComboBox(const QString &labelText, mitk::NodePredicateBase* predicate) +{ + int row = m_Controls.gridLayout->rowCount(); + + if (!labelText.isEmpty()) + { + QLabel* label = new QLabel(labelText, m_Controls.dataSelectionWidget); + label->setSizePolicy(QSizePolicy::Maximum, QSizePolicy::Preferred); + m_Controls.gridLayout->addWidget(label, row, 0); + } + + QmitkDataStorageComboBox* comboBox = new QmitkDataStorageComboBox(this->GetDataStorage(), predicate, m_Controls.dataSelectionWidget); + connect(comboBox, SIGNAL(OnSelectionChanged(const mitk::DataNode *)), this, SLOT(OnSelectionChanged(const mitk::DataNode *))); + comboBox->SetAutoSelectNewItems(true); + m_Controls.gridLayout->addWidget(comboBox, row, 1); + + m_DataStorageComboBoxes.push_back(comboBox); + return static_cast(m_DataStorageComboBoxes.size() - 1); +} + +mitk::DataStorage::Pointer QmitkDataSelectionWidget::GetDataStorage() const +{ + ctkServiceReference ref = mitk::PluginActivator::getContext()->getServiceReference(); + assert(ref == true); + + mitk::IDataStorageService* service = mitk::PluginActivator::getContext()->getService(ref); + + assert(service); + + return service->GetDefaultDataStorage()->GetDataStorage(); +} + +mitk::DataNode::Pointer QmitkDataSelectionWidget::GetSelection(unsigned int index) +{ + assert(index < m_DataStorageComboBoxes.size()); + return m_DataStorageComboBoxes[index]->GetSelectedNode(); 
+} + +void QmitkDataSelectionWidget::SetPredicate(unsigned int index, PredicateType predicate) +{ + this->SetPredicate(index, CreatePredicate(predicate)); +} + +void QmitkDataSelectionWidget::SetPredicate(unsigned int index, mitk::NodePredicateBase* predicate) +{ + assert(index < m_DataStorageComboBoxes.size()); + m_DataStorageComboBoxes[index]->SetPredicate(predicate); +} + +void QmitkDataSelectionWidget::SetHelpText(const QString& text) +{ + if (!text.isEmpty()) + { + m_Controls.helpLabel->setText(text); + + if (!m_Controls.helpLabel->isVisible()) + m_Controls.helpLabel->show(); + } + else + { + m_Controls.helpLabel->hide(); + } +} + +void QmitkDataSelectionWidget::OnSelectionChanged(const mitk::DataNode* selection) +{ + std::vector::iterator it = std::find(m_DataStorageComboBoxes.begin(), m_DataStorageComboBoxes.end(), sender()); + assert(it != m_DataStorageComboBoxes.end()); + + emit SelectionChanged(std::distance(m_DataStorageComboBoxes.begin(), it), selection); +} diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/Common/QmitkDataSelectionWidget.h b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/Common/QmitkDataSelectionWidget.h new file mode 100644 index 0000000..94d7b8d --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/Common/QmitkDataSelectionWidget.h @@ -0,0 +1,70 @@ +/*=================================================================== + +The Medical Imaging Interaction Toolkit (MITK) + +Copyright (c) German Cancer Research Center, +Division of Medical and Biological Informatics. +All rights reserved. + +This software is distributed WITHOUT ANY WARRANTY; without +even the implied warranty of MERCHANTABILITY or FITNESS FOR +A PARTICULAR PURPOSE. + +See LICENSE.txt or http://www.mitk.org for details. 
+ +===================================================================*/ + +#ifndef QmitkDataSelectionWidget_h +#define QmitkDataSelectionWidget_h + +#include +#include +#include +#include + +namespace mitk +{ + class NodePredicateBase; +} + +class QmitkDataStorageComboBox; + +class QmitkDataSelectionWidget : public QWidget +{ + Q_OBJECT + +public: + enum PredicateType + { + ImagePredicate, + MaskPredicate, + SegmentationPredicate, + SurfacePredicate + }; + + explicit QmitkDataSelectionWidget(QWidget* parent = nullptr); + ~QmitkDataSelectionWidget(); + + unsigned int AddDataStorageComboBox(PredicateType predicate); + unsigned int AddDataStorageComboBox(mitk::NodePredicateBase* predicate = nullptr); + unsigned int AddDataStorageComboBox(const QString &labelText, PredicateType predicate); + unsigned int AddDataStorageComboBox(const QString &labelText, mitk::NodePredicateBase* predicate = nullptr); + + mitk::DataStorage::Pointer GetDataStorage() const; + mitk::DataNode::Pointer GetSelection(unsigned int index); + void SetPredicate(unsigned int index, PredicateType predicate); + void SetPredicate(unsigned int index, mitk::NodePredicateBase* predicate); + void SetHelpText(const QString& text); + +signals: + void SelectionChanged(unsigned int index, const mitk::DataNode* selection); + +private slots: + void OnSelectionChanged(const mitk::DataNode* selection); + +private: + Ui::QmitkDataSelectionWidgetControls m_Controls; + std::vector m_DataStorageComboBoxes; +}; + +#endif diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/Common/QmitkDataSelectionWidgetControls.ui b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/Common/QmitkDataSelectionWidgetControls.ui new file mode 100644 index 0000000..1849a49 --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/Common/QmitkDataSelectionWidgetControls.ui @@ -0,0 +1,64 @@ + + + QmitkDataSelectionWidgetControls + + + + 0 + 0 + 333 + 191 + + + + + + + + 0 + + + + + + + + Data Selection + + + + + + + 0 + + + + + + + + + 0 + 0 + + + + color: red + + + + + + true + + + + + + + + + + + diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/QmitkCaPTkInteractiveSegmentationControls.ui b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/QmitkCaPTkInteractiveSegmentationControls.ui new file mode 100644 index 0000000..a02a8d8 --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/QmitkCaPTkInteractiveSegmentationControls.ui @@ -0,0 +1,231 @@ + + + QmitkCaPTkInteractiveSegmentationControls + + + + 0 + 0 + 459 + 857 + + + + + 0 + 0 + + + + + 0 + 0 + + + + + MS Shell Dlg 2 + 8 + 50 + false + false + false + false + + + + QmitkCaPTkInteractiveSegmentation + + + + + + + 0 + 0 + + + + Qt::LeftToRight + + + Instructions + + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter + + + + + + + 0 + 0 + + + + • Load at least one image. +• All the loaded images should be co-registered. +• Create the seeds image. +• Draw at least 2 seed labels, + using "MultiLabel Segmentation". +• Run. +• To correct mistakes, draw more seeds and rerun. +• To inflate/deflate or fill holes, + use "Segmentation Utilities". +• To visualize, right click the output segmentation + and select 'Create smoothed polygon model'. + To change the surface representation, + right click the objects. + + + true + + + + + + + + + + + 0 + 0 + + + + Seeds selection + + + + + + Reference Image + + + + + + + + 0 + 0 + + + + + + + + Create a new segmentation session + + + ... 
+ + + + :/captkinteractivesegmentation/NewSegmentationSession_48x48.png:/captkinteractivesegmentation/NewSegmentationSession_48x48.png + + + + 28 + 28 + + + + N + + + true + + + + + + + Seeds Image + + + + + + + + 0 + 0 + + + + + + + + + + + Generate Segmentation + + + Qt::AlignHCenter|Qt::AlignTop + + + true + + + + + + 0 + + + Qt::AlignCenter + + + + + + + Run + + + + + + + + + + Qt::Vertical + + + + 20 + 40 + + + + + + groupBox_DataSelection + groupBox_Run + groupBoxInstructions + + + + + QmitkDataStorageComboBox + QComboBox +
+   <header location="global">QmitkDataStorageComboBox.h</header>
+  </customwidget>
+ </customwidgets>
+ <includes>
+  <include location="global">QmitkToolReferenceDataSelectionBox.h</include>
+  <include location="global">QmitkToolGUIArea.h</include>
+  <include location="global">QmitkToolSelectionBox.h</include>
+ </includes>
+ <resources/>
+ <connections/>
+</ui>
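The data-selection predicates in this plugin (in QmitkDataSelectionWidget above and in the view below) skip every node that carries the boolean property "captk.interactive.segmentation.output". A minimal sketch of how an output segmentation node could be tagged so those predicates ignore it, assuming the standard mitk::DataNode / mitk::DataStorage API; the helper function below is illustrative and not part of this changeset (the actual tagging is done inside the CaPTkInteractiveSegmentation module):

#include <mitkDataNode.h>
#include <mitkDataStorage.h>
#include <mitkLabelSetImage.h>

// Illustrative only: attach an output segmentation under its reference image
// and mark it so the seeds/image selectors in this plugin do not offer it
// as an input again.
void AddOutputSegmentationNode(mitk::DataStorage::Pointer storage,
                               mitk::DataNode::Pointer referenceNode,
                               mitk::LabelSetImage::Pointer segmentation)
{
  mitk::DataNode::Pointer node = mitk::DataNode::New();
  node->SetData(segmentation);
  node->SetName("Segmentation");
  node->SetBoolProperty("captk.interactive.segmentation.output", true);
  storage->Add(node, referenceNode); // child of the reference image node
}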
diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/QmitkCaPTkInteractiveSegmentationView.cpp b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/QmitkCaPTkInteractiveSegmentationView.cpp new file mode 100644 index 0000000..4f14e6a --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/QmitkCaPTkInteractiveSegmentationView.cpp @@ -0,0 +1,738 @@ +#include "QmitkCaPTkInteractiveSegmentationView.h" + +// blueberry +#include +#include + +// mitk +#include "mitkApplicationCursor.h" +#include "mitkLabelSetImage.h" +#include "mitkStatusBar.h" +#include "mitkPlanePositionManager.h" +#include "mitkPluginActivator.h" +#include "mitkSegTool2D.h" + +// Qmitk +#include "QmitkNewSegmentationDialog.h" +#include "QmitkRenderWindow.h" + +// us +#include +#include +#include +#include +#include + +// Qt +#include +#include +#include +#include + +// CaPTk +#include "CaPTkInteractiveSegmentation.h" + +#include "tinyxml.h" + +#include + +#include + +const std::string QmitkCaPTkInteractiveSegmentationView::VIEW_ID = + "upenn.cbica.captk.views.interactivesegmentation"; + +QmitkCaPTkInteractiveSegmentationView::QmitkCaPTkInteractiveSegmentationView() + : m_Parent(nullptr), + m_ReferenceNode(nullptr), + m_WorkingNode(nullptr), + m_AutoSelectionEnabled(false), + m_MouseCursorSet(false) +{ + m_CaPTkInteractiveSegmentation = + new CaPTkInteractiveSegmentation(GetDataStorage(), this); + + m_SegmentationPredicate = mitk::NodePredicateAnd::New(); + m_SegmentationPredicate->AddPredicate( + mitk::TNodePredicateDataType::New() + ); + m_SegmentationPredicate->AddPredicate + (mitk::NodePredicateNot::New(mitk::NodePredicateProperty::New("helper object")) + ); + m_SegmentationPredicate->AddPredicate + (mitk::NodePredicateNot::New(mitk::NodePredicateProperty::New("captk.interactive.segmentation.output")) + ); + + mitk::TNodePredicateDataType::Pointer isImage = mitk::TNodePredicateDataType::New(); + mitk::NodePredicateProperty::Pointer isBinary = + mitk::NodePredicateProperty::New("binary", mitk::BoolProperty::New(true)); + mitk::NodePredicateAnd::Pointer isMask = mitk::NodePredicateAnd::New(isBinary, isImage); + + mitk::NodePredicateDataType::Pointer isDwi = mitk::NodePredicateDataType::New("DiffusionImage"); + mitk::NodePredicateDataType::Pointer isDti = mitk::NodePredicateDataType::New("TensorImage"); + mitk::NodePredicateDataType::Pointer isOdf = mitk::NodePredicateDataType::New("OdfImage"); + auto isSegment = mitk::NodePredicateDataType::New("Segment"); + + mitk::NodePredicateOr::Pointer validImages = mitk::NodePredicateOr::New(); + validImages->AddPredicate(mitk::NodePredicateAnd::New(isImage, mitk::NodePredicateNot::New(isSegment))); + validImages->AddPredicate(isDwi); + validImages->AddPredicate(isDti); + validImages->AddPredicate(isOdf); + + m_ReferencePredicate = mitk::NodePredicateAnd::New(); + m_ReferencePredicate->AddPredicate(validImages); + m_ReferencePredicate->AddPredicate(mitk::NodePredicateNot::New(m_SegmentationPredicate)); + m_ReferencePredicate->AddPredicate(mitk::NodePredicateNot::New(isMask)); + m_ReferencePredicate->AddPredicate(mitk::NodePredicateNot::New(mitk::NodePredicateProperty::New("helper object"))); + m_ReferencePredicate->AddPredicate + (mitk::NodePredicateNot::New(mitk::NodePredicateProperty::New("captk.interactive.segmentation.output")) + ); +} + +QmitkCaPTkInteractiveSegmentationView::~QmitkCaPTkInteractiveSegmentationView() +{ + // Loose LabelSetConnections + OnLooseLabelSetConnection(); +} + +void 
QmitkCaPTkInteractiveSegmentationView::CreateQtPartControl(QWidget *parent) +{ + // setup the basic GUI of this view + m_Parent = parent; + m_Controls.setupUi(parent); + + m_CaPTkInteractiveSegmentation->SetProgressBar(m_Controls.progressBar); + + // *------------------------ + // * DATA SELECTION WIDGETS + // *------------------------ + + m_Controls.m_cbReferenceNodeSelector->SetAutoSelectNewItems(true); + m_Controls.m_cbReferenceNodeSelector->SetPredicate(m_ReferencePredicate); + m_Controls.m_cbReferenceNodeSelector->SetDataStorage(this->GetDataStorage()); + + m_Controls.m_cbWorkingNodeSelector->SetAutoSelectNewItems(true); + m_Controls.m_cbWorkingNodeSelector->SetPredicate(m_SegmentationPredicate); + m_Controls.m_cbWorkingNodeSelector->SetDataStorage(this->GetDataStorage()); + + connect(m_Controls.m_cbReferenceNodeSelector, + SIGNAL(OnSelectionChanged(const mitk::DataNode *)), + this, + SLOT(OnReferenceSelectionChanged(const mitk::DataNode *))); + + connect(m_Controls.m_cbWorkingNodeSelector, + SIGNAL(OnSelectionChanged(const mitk::DataNode *)), + this, + SLOT(OnSegmentationSelectionChanged(const mitk::DataNode *))); + + // *------------------------* + // * Connect signals/slots + // *------------------------* + + connect(m_Controls.m_pbNewSegmentationSession, SIGNAL(clicked()), this, SLOT(OnNewSegmentationSession())); + + /* CaPTk Interactive Segmentation Run Button */ + connect(m_Controls.pushButtonRun, SIGNAL(clicked()), + this, SLOT(OnRunButtonPressed()) + ); + + /* Make sure the GUI notices if appropriate data is already present on creation */ + this->OnReferenceSelectionChanged(m_Controls.m_cbReferenceNodeSelector->GetSelectedNode()); + this->OnSegmentationSelectionChanged(m_Controls.m_cbWorkingNodeSelector->GetSelectedNode()); +} + +void QmitkCaPTkInteractiveSegmentationView::Activated() +{ + // Not yet implemented +} + +void QmitkCaPTkInteractiveSegmentationView::Deactivated() +{ + // Not yet implemented +} + +void QmitkCaPTkInteractiveSegmentationView::Visible() +{ + // Not yet implemented +} + +void QmitkCaPTkInteractiveSegmentationView::Hidden() +{ + // Not yet implemented +} + +int QmitkCaPTkInteractiveSegmentationView::GetSizeFlags(bool width) +{ + if (!width) + { + return berry::Constants::MIN | berry::Constants::MAX | berry::Constants::FILL; + } + else + { + return 0; + } +} + +int QmitkCaPTkInteractiveSegmentationView::ComputePreferredSize(bool width, + int /*availableParallel*/, + int /*availablePerpendicular*/, + int preferredResult) +{ + if (width == false) + { + return 100; + } + else + { + return preferredResult; + } +} + +/************************************************************************/ +/* protected slots */ +/************************************************************************/ + +void QmitkCaPTkInteractiveSegmentationView::OnRunButtonPressed() +{ + // ---- Collect the seeds ---- + + std::string seedsNodeName = ""; + mitk::LabelSetImage::Pointer seeds = nullptr; + if (m_WorkingNode.IsNotNull()) + { + seeds = dynamic_cast( m_WorkingNode->GetData() ); + seedsNodeName = m_WorkingNode->GetName(); + } + + // ---- Collect the images ---- + + std::vector images; + + // Predicate to find if node is mitk::Image + auto predicateIsImage = + mitk::TNodePredicateDataType::New(); + + // Predicate to find if node is mitk::LabelSetImage + auto predicateIsLabelSetImage = + mitk::TNodePredicateDataType::New(); + + // Predicate property to find if node is a helper object + auto predicatePropertyIsHelper = + mitk::NodePredicateProperty::New("helper object"); + + // The 
images we want are mitk::Image, but not mitk::LabelSetImage and not helper obj + auto predicateFinal = mitk::NodePredicateAnd::New(); + predicateFinal->AddPredicate(predicateIsImage); + predicateFinal->AddPredicate(mitk::NodePredicateNot::New(predicateIsLabelSetImage)); + predicateFinal->AddPredicate(mitk::NodePredicateNot::New(predicatePropertyIsHelper)); + + // Get those images and add them to the vector + mitk::DataStorage::SetOfObjects::ConstPointer all = + GetDataStorage()->GetSubset(predicateFinal); + for (mitk::DataStorage::SetOfObjects::ConstIterator it = all->Begin(); + it != all->End(); ++it) + { + if (it->Value().IsNotNull()) + { + std::cout << "[QmitkCaPTkInteractiveSegmentationView::OnRunButtonPressed] " + << "Image node name: " + << it->Value()->GetName() + << "\n"; + images.push_back( dynamic_cast(it->Value()->GetData()) ); + } + } + + // ---- Call module ---- + m_CaPTkInteractiveSegmentation->Run(images, seeds); +} + +void QmitkCaPTkInteractiveSegmentationView::OnNewSegmentationSession() +{ + m_Controls.progressBar->setValue(0); + + mitk::DataNode *referenceNode = m_Controls.m_cbReferenceNodeSelector->GetSelectedNode(); + + if (!referenceNode) + { + QMessageBox::information( + m_Parent, "New Segmentation Session", "Please load a patient image before starting some action."); + return; + } + + // Reset progress bar + m_Controls.progressBar->setValue(0); + + mitk::Image* referenceImage = dynamic_cast(referenceNode->GetData()); + assert(referenceImage); + + QString newName = this->FindNextAvailableSeedsName().c_str(); + + this->WaitCursorOn(); + + mitk::LabelSetImage::Pointer workingImage = mitk::LabelSetImage::New(); + try + { + workingImage->Initialize(referenceImage); + } + catch (mitk::Exception& e) + { + this->WaitCursorOff(); + MITK_ERROR << "Exception caught: " << e.GetDescription(); + QMessageBox::information(m_Parent, "New Segmentation Session", "Could not create a new segmentation session.\n"); + return; + } + + // ---- Add default labels ---- + + std::vector labelNames; + std::vector colorNames; // Colors are RGB, float value for each of the 3 + + // Can't push back the color value easily for some reason, but this works + // Adds predetermined labels to the create seeds image + labelNames.push_back("Label 1"); + { + mitk::Color color; color.SetRed(1.00f); color.SetGreen(0.00f); color.SetBlue(0.00f); + colorNames.push_back(color); + } + labelNames.push_back("Label 2"); + { + mitk::Color color; color.SetRed(1.00f); color.SetGreen(1.00f); color.SetBlue(0.00f); + colorNames.push_back(color); + } + labelNames.push_back("Label 3"); + { + mitk::Color color; color.SetRed(0.00f); color.SetGreen(0.00f); color.SetBlue(1.00f); + colorNames.push_back(color); + } + labelNames.push_back("Label 4"); + { + mitk::Color color; color.SetRed(0.68f); color.SetGreen(0.50f); color.SetBlue(0.65f); + colorNames.push_back(color); + } + labelNames.push_back("Label 5"); + { + mitk::Color color; color.SetRed(0.33f); color.SetGreen(0.33f); color.SetBlue(0.33f); + colorNames.push_back(color); + } + labelNames.push_back("Background tissue"); + { + mitk::Color color; color.SetRed(0.03f); color.SetGreen(0.37f); color.SetBlue(0.00f); + colorNames.push_back(color); + } + for (size_t i=0; iGetActiveLabelSet()->AddLabel( + labelNames[i].toStdString(), colorNames[i] + ); + } + workingImage->GetActiveLabelSet()->SetAllLabelsLocked(false); + workingImage->GetActiveLabelSet()->SetActiveLabel(1); + + // Continue + this->WaitCursorOff(); + + mitk::DataNode::Pointer workingNode = mitk::DataNode::New(); + 
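// The new node wraps the freshly initialized seeds image; its name ("Seeds", "Seeds-2", ...) comes from FindNextAvailableSeedsName() and the node is picked up by m_SegmentationPredicate as the working (seeds) node.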
workingNode->SetData(workingImage); + workingNode->SetName(newName.toStdString()); + + workingImage->GetExteriorLabel()->SetProperty("name.parent", mitk::StringProperty::New(referenceNode->GetName().c_str())); + workingImage->GetExteriorLabel()->SetProperty("name.image", mitk::StringProperty::New(newName.toStdString().c_str())); + + if (!GetDataStorage()->Exists(workingNode)) + { + GetDataStorage()->Add(workingNode); + } +} + +void QmitkCaPTkInteractiveSegmentationView::OnReferenceSelectionChanged(const mitk::DataNode* node) +{ + // m_ToolManager->ActivateTool(-1); + + m_ReferenceNode = const_cast(node); + // m_ToolManager->SetReferenceData(m_ReferenceNode); + + if (m_ReferenceNode.IsNotNull()) + { + if (m_AutoSelectionEnabled) + { + // if an image is selected find a possible working / segmentation image + mitk::DataStorage::SetOfObjects::ConstPointer derivations = this->GetDataStorage()->GetDerivations(m_ReferenceNode, m_SegmentationPredicate); + if (derivations->Size() != 0) + { + // use the first segmentation child node + m_WorkingNode = derivations->ElementAt(0); + + m_Controls.m_cbWorkingNodeSelector->blockSignals(true); + m_Controls.m_cbWorkingNodeSelector->SetSelectedNode(m_WorkingNode); + m_Controls.m_cbWorkingNodeSelector->blockSignals(false); + } + else if (derivations->size() == 0) + { + m_Controls.m_cbWorkingNodeSelector->setCurrentIndex(-1); + } + + // hide all image and segmentation nodes to later show only the automatically selected ones + mitk::DataStorage::SetOfObjects::ConstPointer patientNodes = GetDataStorage()->GetSubset(m_ReferencePredicate); + for (mitk::DataStorage::SetOfObjects::const_iterator iter = patientNodes->begin(); iter != patientNodes->end(); ++iter) + { + (*iter)->SetVisibility(false); + } + + mitk::DataStorage::SetOfObjects::ConstPointer segmentationNodes = GetDataStorage()->GetSubset(m_SegmentationPredicate); + for (mitk::DataStorage::SetOfObjects::const_iterator iter = segmentationNodes->begin(); iter != segmentationNodes->end(); ++iter) + { + (*iter)->SetVisibility(false); + } + } + m_ReferenceNode->SetVisibility(true); + + // check match of segmentation and reference image geometries + if (m_WorkingNode.IsNotNull()) + { + mitk::Image* workingImage = dynamic_cast(m_WorkingNode->GetData()); + assert(workingImage); + + mitk::Image* referenceImage = dynamic_cast(node->GetData()); + assert(referenceImage); + + if (!this->CheckForSameGeometry(referenceImage, workingImage)) + { + return; + } + m_WorkingNode->SetVisibility(true); + } + } + + UpdateControls(); + if (m_WorkingNode.IsNotNull()) + { + // m_Controls.m_LabelSetWidget->ResetAllTableWidgetItems(); + mitk::RenderingManager::GetInstance()->InitializeViews(m_WorkingNode->GetData()->GetTimeGeometry(), mitk::RenderingManager::REQUEST_UPDATE_ALL, true); + } +} + +void QmitkCaPTkInteractiveSegmentationView::OnSegmentationSelectionChanged(const mitk::DataNode* node) +{ + if (node != nullptr) + { + std::cout << "[QmitkCaPTkInteractiveSegmentationView::OnSegmentationSelectionChanged] " + << "Selection changed to: " + << node->GetName() + << "\n"; + } + + if (m_WorkingNode.IsNotNull()) + { + OnLooseLabelSetConnection(); + } + + m_WorkingNode = const_cast(node); + if (m_WorkingNode.IsNotNull()) + { + // OnEstablishLabelSetConnection(); + + if (m_AutoSelectionEnabled) + { + // if a segmentation is selected find a possible reference image + mitk::DataStorage::SetOfObjects::ConstPointer sources = this->GetDataStorage()->GetSources(m_WorkingNode, m_ReferencePredicate); + if (sources->Size() != 0) + { + 
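// use the first parent (reference) image found for this segmentation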
m_ReferenceNode = sources->ElementAt(0); + // m_ToolManager->SetReferenceData(m_ReferenceNode); + + m_Controls.m_cbReferenceNodeSelector->blockSignals(true); + m_Controls.m_cbReferenceNodeSelector->SetSelectedNode(m_ReferenceNode); + m_Controls.m_cbReferenceNodeSelector->blockSignals(false); + } + else if(sources->size() == 0) + { + m_Controls.m_cbReferenceNodeSelector->setCurrentIndex(-1); + } + + // hide all image and segmentation nodes to later show only the automatically selected ones + mitk::DataStorage::SetOfObjects::ConstPointer patientNodes = GetDataStorage()->GetSubset(m_ReferencePredicate); + for (mitk::DataStorage::SetOfObjects::const_iterator iter = patientNodes->begin(); iter != patientNodes->end(); ++iter) + { + (*iter)->SetVisibility(false); + } + + mitk::DataStorage::SetOfObjects::ConstPointer segmentationNodes = GetDataStorage()->GetSubset(m_SegmentationPredicate); + for (mitk::DataStorage::SetOfObjects::const_iterator iter = segmentationNodes->begin(); iter != segmentationNodes->end(); ++iter) + { + (*iter)->SetVisibility(false); + } + } + m_WorkingNode->SetVisibility(true); + + // check match of segmentation and reference image geometries + if (m_ReferenceNode.IsNotNull()) + { + mitk::Image* referenceImage = dynamic_cast(m_ReferenceNode->GetData()); + assert(referenceImage); + + mitk::Image* workingImage = dynamic_cast(m_WorkingNode->GetData()); + assert(workingImage); + + if (!this->CheckForSameGeometry(referenceImage, workingImage)) + { + return; + } + m_ReferenceNode->SetVisibility(true); + } + } + + UpdateControls(); + if (m_WorkingNode.IsNotNull()) + { + // m_Controls.m_LabelSetWidget->ResetAllTableWidgetItems(); + // mitk::RenderingManager::GetInstance()->InitializeViews(m_WorkingNode->GetData()->GetTimeGeometry(), mitk::RenderingManager::REQUEST_UPDATE_ALL, true); + } +} + +/************************************************************************/ +/* protected */ +/************************************************************************/ +void QmitkCaPTkInteractiveSegmentationView::OnSelectionChanged(berry::IWorkbenchPart::Pointer, const QList &nodes) +{ + if (m_AutoSelectionEnabled) + { + // automatically set the reference node and the working node of the multi label plugin + if (1 == nodes.size()) + { + mitk::DataNode::Pointer selectedNode = nodes.at(0); + if (selectedNode.IsNull()) + { + return; + } + + // check selected node + mitk::LabelSetImage::Pointer labelSetImage = dynamic_cast(selectedNode->GetData()); + if (labelSetImage.IsNotNull()) + { + // reset the image / reference node selector in case the current selected segmentation has no image parent + m_Controls.m_cbReferenceNodeSelector->setCurrentIndex(-1); + // selected a label set image (a segmentation ( working node) + m_Controls.m_cbWorkingNodeSelector->SetSelectedNode(selectedNode); + return; + } + + mitk::Image::Pointer selectedImage = dynamic_cast(selectedNode->GetData()); + if (selectedImage.IsNotNull()) + { + // reset the segmentation / working node selector in case the current selected image has no segmentation child + m_Controls.m_cbWorkingNodeSelector->setCurrentIndex(-1); + // selected an image (a reference node) + m_Controls.m_cbReferenceNodeSelector->SetSelectedNode(selectedNode); + return; + } + } + } +} + +void QmitkCaPTkInteractiveSegmentationView::OnPreferencesChanged(const berry::IBerryPreferences* /*prefs*/) +{ + +} + +void QmitkCaPTkInteractiveSegmentationView::NodeAdded(const mitk::DataNode *) +{ + +} + +void QmitkCaPTkInteractiveSegmentationView::NodeRemoved(const mitk::DataNode 
*node) +{ + bool isHelperObject(false); + node->GetBoolProperty("helper object", isHelperObject); + if (isHelperObject) + { + return; + } + + if (m_ReferenceNode.IsNotNull() && dynamic_cast(node->GetData())) + { + // remove all possible contour markers of the segmentation + mitk::DataStorage::SetOfObjects::ConstPointer allContourMarkers = this->GetDataStorage()->GetDerivations( + node, mitk::NodePredicateProperty::New("isContourMarker", mitk::BoolProperty::New(true))); + + ctkPluginContext *context = mitk::PluginActivator::getContext(); + ctkServiceReference ppmRef = context->getServiceReference(); + mitk::PlanePositionManagerService *service = context->getService(ppmRef); + + for (mitk::DataStorage::SetOfObjects::ConstIterator it = allContourMarkers->Begin(); it != allContourMarkers->End(); ++it) + { + std::string nodeName = node->GetName(); + unsigned int t = nodeName.find_last_of(" "); + unsigned int id = atof(nodeName.substr(t + 1).c_str()) - 1; + + service->RemovePlanePosition(id); + + this->GetDataStorage()->Remove(it->Value()); + } + + context->ungetService(ppmRef); + service = nullptr; + } +} + +void QmitkCaPTkInteractiveSegmentationView::OnEstablishLabelSetConnection() +{ + if (m_WorkingNode.IsNull()) + { + return; + } + mitk::LabelSetImage *workingImage = dynamic_cast(m_WorkingNode->GetData()); + assert(workingImage); + + workingImage->BeforeChangeLayerEvent += mitk::MessageDelegate( + this, &QmitkCaPTkInteractiveSegmentationView::OnLooseLabelSetConnection); +} + +void QmitkCaPTkInteractiveSegmentationView::OnLooseLabelSetConnection() +{ + +} + +void QmitkCaPTkInteractiveSegmentationView::SetFocus() +{ + +} + +void QmitkCaPTkInteractiveSegmentationView::UpdateControls() +{ + // Hide views that are not useful + m_Controls.label_PatientImage->setVisible(false); + m_Controls.m_cbReferenceNodeSelector->setVisible(false); + + this->RequestRenderWindowUpdate(mitk::RenderingManager::REQUEST_UPDATE_ALL); +} + +void QmitkCaPTkInteractiveSegmentationView::ResetMouseCursor() +{ + if (m_MouseCursorSet) + { + mitk::ApplicationCursor::GetInstance()->PopCursor(); + m_MouseCursorSet = false; + } +} + +void QmitkCaPTkInteractiveSegmentationView::SetMouseCursor(const us::ModuleResource resource, int hotspotX, int hotspotY) +{ + // Remove previously set mouse cursor + if (m_MouseCursorSet) + this->ResetMouseCursor(); + + if (resource) + { + us::ModuleResourceStream cursor(resource, std::ios::binary); + mitk::ApplicationCursor::GetInstance()->PushCursor(cursor, hotspotX, hotspotY); + m_MouseCursorSet = true; + } +} + +bool QmitkCaPTkInteractiveSegmentationView::CheckForSameGeometry(const mitk::Image *image1, const mitk::Image *image2) const +{ + bool isSameGeometry(true); + + if (image1 && image2) + { + mitk::BaseGeometry::Pointer geo1 = image1->GetGeometry(); + mitk::BaseGeometry::Pointer geo2 = image2->GetGeometry(); + + isSameGeometry = isSameGeometry && mitk::Equal(geo1->GetOrigin(), geo2->GetOrigin()); + isSameGeometry = isSameGeometry && mitk::Equal(geo1->GetExtent(0), geo2->GetExtent(0)); + isSameGeometry = isSameGeometry && mitk::Equal(geo1->GetExtent(1), geo2->GetExtent(1)); + isSameGeometry = isSameGeometry && mitk::Equal(geo1->GetExtent(2), geo2->GetExtent(2)); + isSameGeometry = isSameGeometry && mitk::Equal(geo1->GetSpacing(), geo2->GetSpacing()); + isSameGeometry = isSameGeometry && mitk::MatrixEqualElementWise(geo1->GetIndexToWorldTransform()->GetMatrix(), + geo2->GetIndexToWorldTransform()->GetMatrix()); + + return isSameGeometry; + } + else + { + return false; + } +} + +QString 
QmitkCaPTkInteractiveSegmentationView::GetLastFileOpenPath() +{ + return this->GetPreferences()->Get("LastFileOpenPath", ""); +} + +void QmitkCaPTkInteractiveSegmentationView::SetLastFileOpenPath(const QString &path) +{ + this->GetPreferences()->Put("LastFileOpenPath", path); + this->GetPreferences()->Flush(); +} + +std::string QmitkCaPTkInteractiveSegmentationView::FindNextAvailableSeedsName() +{ + // Predicate to find if node is mitk::LabelSetImage + auto predicateIsLabelSetImage = + mitk::TNodePredicateDataType::New(); + + // Predicate property to find if node is a helper object + auto predicatePropertyIsHelper = + mitk::NodePredicateProperty::New("helper object"); + + // The images we want are but mitk::LabelSetImage and not helper obj + auto predicateFinal = mitk::NodePredicateAnd::New(); + predicateFinal->AddPredicate(predicateIsLabelSetImage); + predicateFinal->AddPredicate(mitk::NodePredicateNot::New(predicatePropertyIsHelper)); + + int lastFound = 0; + + // Get those images + mitk::DataStorage::SetOfObjects::ConstPointer all = + GetDataStorage()->GetSubset(predicateFinal); + for (mitk::DataStorage::SetOfObjects::ConstIterator it = all->Begin(); + it != all->End(); ++it) + { + if (it->Value().IsNotNull()) + { + std::string name = it->Value()->GetName(); + if (name.rfind("Seeds", 0) == 0) // Starts with + { + if (name.length() == std::string("Seeds").length()) + { + // Special case + if (lastFound < 1) + { + lastFound = 1; + } + } + else + { + if (name.rfind("Seeds-", 0) == 0) // Starts with + { + std::string numStr = name.erase(0, std::string("Seeds-").length()); + + if (IsNumber(numStr)) + { + int num = std::stoi(numStr); + if (lastFound < num) + { + lastFound = num; + } + } + } + } + } + } + } + + // Construct and return the correct string + if (lastFound == 0) + { + return "Seeds"; + } + else + { + return std::string("Seeds-") + std::to_string(lastFound + 1); + } +} + +bool QmitkCaPTkInteractiveSegmentationView::IsNumber(const std::string &s) +{ + return !s.empty() && std::find_if(s.begin(), + s.end(), [](char c) { return !std::isdigit(c); }) == s.end(); +} \ No newline at end of file diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/QmitkCaPTkInteractiveSegmentationView.h b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/QmitkCaPTkInteractiveSegmentationView.h new file mode 100644 index 0000000..1d749ff --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/QmitkCaPTkInteractiveSegmentationView.h @@ -0,0 +1,124 @@ +#ifndef QmitkCaPTkInteractiveSegmentationView_h +#define QmitkCaPTkInteractiveSegmentationView_h + +#include + +#include + +#include "ui_QmitkCaPTkInteractiveSegmentationControls.h" + +class CaPTkInteractiveSegmentation; + +// berry +#include + +class QmitkRenderWindow; + +class QmitkCaPTkInteractiveSegmentationView : public QmitkAbstractView, + public mitk::ILifecycleAwarePart +{ + Q_OBJECT + +public: + static const std::string VIEW_ID; + + QmitkCaPTkInteractiveSegmentationView(); + virtual ~QmitkCaPTkInteractiveSegmentationView(); + + typedef std::map NodeTagMapType; + + // GUI setup + void CreateQtPartControl(QWidget *parent); + + // ILifecycleAwarePart interface +public: + void Activated(); + void Deactivated(); + void Visible(); + void Hidden(); + + virtual int GetSizeFlags(bool width); + virtual int ComputePreferredSize(bool width, + int /*availableParallel*/, + int /*availablePerpendicular*/, + int preferredResult); + +protected slots: + + /** \brief CaPTk Interactive Segmentation Run Button 
clicked slot */ + void OnRunButtonPressed(); + + /// \brief reaction to button "New Segmentation Session" + void OnNewSegmentationSession(); + + /// \brief reaction to the selection of a new patient (reference) image in the DataStorage combobox + void OnReferenceSelectionChanged(const mitk::DataNode* node); + + /// \brief reaction to the selection of a new Segmentation (working) image in the DataStorage combobox + void OnSegmentationSelectionChanged(const mitk::DataNode* node); + +protected: + + // reimplemented from QmitkAbstractView + void OnSelectionChanged(berry::IWorkbenchPart::Pointer part, const QList &nodes) override; + + // reimplemented from QmitkAbstractView + void OnPreferencesChanged(const berry::IBerryPreferences* prefs) override; + + // reimplemented from QmitkAbstractView + void NodeAdded(const mitk::DataNode* node) override; + + // reimplemented from QmitkAbstractView + void NodeRemoved(const mitk::DataNode* node) override; + + void OnEstablishLabelSetConnection(); + + void OnLooseLabelSetConnection(); + + void SetFocus(); + + void UpdateControls(); + + void ResetMouseCursor(); + + void SetMouseCursor(const us::ModuleResource, int hotspotX, int hotspotY); + + /// \brief Checks if two images have the same size and geometry + bool CheckForSameGeometry(const mitk::Image *image1, const mitk::Image *image2) const; + + QString GetLastFileOpenPath(); + + void SetLastFileOpenPath(const QString &path); + + std::string FindNextAvailableSeedsName(); + + bool IsNumber(const std::string &s); + + /// \brief the Qt parent of our GUI (NOT of this object) + QWidget *m_Parent; + + /// \brief Qt GUI file + Ui::QmitkCaPTkInteractiveSegmentationControls m_Controls; + + mitk::DataNode::Pointer m_ReferenceNode; + mitk::DataNode::Pointer m_WorkingNode; + + mitk::NodePredicateAnd::Pointer m_ReferencePredicate; + mitk::NodePredicateAnd::Pointer m_SegmentationPredicate; + + bool m_AutoSelectionEnabled; + bool m_MouseCursorSet; + + // mitk::SegmentationInteractor::Pointer m_Interactor; + + /** + * Reference to the service registration of the observer, + * it is needed to unregister the observer on unload. 
+ */ + us::ServiceRegistration m_ServiceRegistration; + + /** The algorithm */ + CaPTkInteractiveSegmentation* m_CaPTkInteractiveSegmentation; +}; + +#endif // QmitkCaPTkInteractiveSegmentationView_h diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/mitkPluginActivator.cpp b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/mitkPluginActivator.cpp new file mode 100644 index 0000000..7318d30 --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/mitkPluginActivator.cpp @@ -0,0 +1,28 @@ +#include "mitkPluginActivator.h" + +#include "QmitkCaPTkInteractiveSegmentationView.h" +// #include "SegmentationUtilities/QmitkMultiLabelSegmentationUtilitiesView.h" + +#include + +ctkPluginContext* mitk::PluginActivator::m_Context = nullptr; + +US_INITIALIZE_MODULE //("CaPTkInteractiveSegmentation", "libupenn_cbica_captk_interactivesegmentation") + +void mitk::PluginActivator::start(ctkPluginContext *context) +{ + BERRY_REGISTER_EXTENSION_CLASS(QmitkCaPTkInteractiveSegmentationView, context) + // BERRY_REGISTER_EXTENSION_CLASS(QmitkMultiLabelSegmentationPreferencePage, context) + // BERRY_REGISTER_EXTENSION_CLASS(QmitkMultiLabelSegmentationUtilitiesView, context) + + m_Context = context; +} + +void mitk::PluginActivator::stop(ctkPluginContext*) +{ +} + +ctkPluginContext* mitk::PluginActivator::getContext() +{ + return m_Context; +} diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/mitkPluginActivator.h b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/mitkPluginActivator.h new file mode 100644 index 0000000..f4b3288 --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/src/internal/mitkPluginActivator.h @@ -0,0 +1,25 @@ +#ifndef upenn_cbica_captk_interactivesegmentation_Activator_h +#define upenn_cbica_captk_interactivesegmentation_Activator_h + +#include + +namespace mitk +{ + class PluginActivator : public QObject, public ctkPluginActivator + { + Q_OBJECT + Q_PLUGIN_METADATA(IID "upenn_cbica_captk_interactivesegmentation") + Q_INTERFACES(ctkPluginActivator) + + public: + void start(ctkPluginContext *context); + void stop(ctkPluginContext *context); + + static ctkPluginContext* getContext(); + + private: + static ctkPluginContext* m_Context; + }; +} + +#endif // ! upenn_cbica_captk_interactivesegmentation_Activator_h diff --git a/Plugins/upenn.cbica.captk.interactivesegmentation/target_libraries.cmake b/Plugins/upenn.cbica.captk.interactivesegmentation/target_libraries.cmake new file mode 100644 index 0000000..6c71bbb --- /dev/null +++ b/Plugins/upenn.cbica.captk.interactivesegmentation/target_libraries.cmake @@ -0,0 +1,3 @@ +set(target_libraries + CTKWidgets +)
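As a closing illustration, the per-pixel ensemble decision implemented in SvmSuite::Manager earlier in this changeset (every SVM votes for its predicted label with a weight equal to its configured importance, and the label with the largest accumulated importance wins) can be condensed into the following self-contained sketch; the function and variable names are illustrative and not part of the codebase:

#include <cstddef>
#include <map>
#include <vector>

// Stand-alone restatement of the voting scheme in SvmSuite::Manager:
// sum the importance of every SVM that predicted a given label and return
// the label with the largest sum (the strict '>' keeps the label reached
// first in case of a tie, mirroring the original loop).
int DecideLabel(const std::vector<int>& predictedLabels,
                const std::vector<double>& importanceValues)
{
    std::map<int, double> accumulatedImportance;
    for (std::size_t i = 0; i < predictedLabels.size(); ++i) {
        accumulatedImportance[predictedLabels[i]] += importanceValues[i];
    }

    int decision = 0;
    double bestImportance = 0.0;
    for (const auto& labelAndImportance : accumulatedImportance) {
        if (labelAndImportance.second > bestImportance) {
            decision = labelAndImportance.first;
            bestImportance = labelAndImportance.second;
        }
    }
    return decision;
}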