diff --git a/.clang-format b/.clang-format
index 3d4a7682cb..22f4c4dd1d 100644
--- a/.clang-format
+++ b/.clang-format
@@ -1,96 +1,13 @@
 ---
+BasedOnStyle: LLVM
+IndentWidth: 4
+---
 Language: Cpp
-# BasedOnStyle: LLVM
-AccessModifierOffset: -2
-AlignAfterOpenBracket: Align
-AlignConsecutiveAssignments: false
-AlignConsecutiveDeclarations: false
-AlignEscapedNewlinesLeft: false
-AlignOperands: true
-AlignTrailingComments: true
-AllowAllParametersOfDeclarationOnNextLine: true
-AllowShortBlocksOnASingleLine: false
-AllowShortCaseLabelsOnASingleLine: false
-AllowShortFunctionsOnASingleLine: All
-AllowShortIfStatementsOnASingleLine: false
-AllowShortLoopsOnASingleLine: false
-AlwaysBreakAfterDefinitionReturnType: None
-AlwaysBreakAfterReturnType: None
-AlwaysBreakBeforeMultilineStrings: false
-AlwaysBreakTemplateDeclarations: false
-BinPackArguments: true
-BinPackParameters: true
-BraceWrapping:
-  AfterClass: false
-  AfterControlStatement: false
-  AfterEnum: false
-  AfterFunction: false
-  AfterNamespace: false
-  AfterObjCDeclaration: false
-  AfterStruct: false
-  AfterUnion: false
-  BeforeCatch: false
-  BeforeElse: false
-  IndentBraces: false
-BreakBeforeBinaryOperators: None
-BreakBeforeBraces: Attach
-BreakBeforeTernaryOperators: true
-BreakConstructorInitializersBeforeComma: false
-BreakAfterJavaFieldAnnotations: false
-BreakStringLiterals: true
-ColumnLimit: 80
-CommentPragmas: '^ IWYU pragma:'
-ConstructorInitializerAllOnOneLineOrOnePerLine: false
-ConstructorInitializerIndentWidth: 4
-ContinuationIndentWidth: 4
-Cpp11BracedListStyle: true
-DerivePointerAlignment: false
-DisableFormat: false
-ExperimentalAutoDetectBinPacking: false
-ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
-IncludeCategories:
-  - Regex: '^"(llvm|llvm-c|clang|clang-c)/'
-    Priority: 2
-  - Regex: '^(<|"(gtest|isl|json)/)'
-    Priority: 3
-  - Regex: '.*'
-    Priority: 1
-IncludeIsMainRegex: '$'
-IndentCaseLabels: false
-IndentWidth: 4
-IndentWrappedFunctionNames: false
-JavaScriptQuotes: Leave
-JavaScriptWrapImports: true
-KeepEmptyLinesAtTheStartOfBlocks: true
-MacroBlockBegin: ''
-MacroBlockEnd: ''
-MaxEmptyLinesToKeep: 1
-NamespaceIndentation: None
-ObjCBlockIndentWidth: 2
-ObjCSpaceAfterProperty: false
-ObjCSpaceBeforeProtocolList: true
-PenaltyBreakBeforeFirstCallParameter: 19
-PenaltyBreakComment: 300
-PenaltyBreakFirstLessLess: 120
-PenaltyBreakString: 1000
-PenaltyExcessCharacter: 1000000
-PenaltyReturnTypeOnItsOwnLine: 60
-PointerAlignment: Right
-ReflowComments: true
-SortIncludes: true
-SpaceAfterCStyleCast: false
-SpaceAfterTemplateKeyword: true
-SpaceBeforeAssignmentOperators: true
-SpaceBeforeParens: ControlStatements
-SpaceInEmptyParentheses: false
-SpacesBeforeTrailingComments: 1
-SpacesInAngles: false
-SpacesInContainerLiterals: true
-SpacesInCStyleCastParentheses: false
-SpacesInParentheses: false
-SpacesInSquareBrackets: false
-Standard: Cpp11
-TabWidth: 8
-UseTab: Never
-...
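The old configuration above is replaced by the much shorter set of options added at the start of the next hunk (PointerAlignment/ReferenceAlignment: Left, a custom QualifierOrder, BreakBeforeBinaryOperators: All). As an illustration only, not part of the diff, a declaration is expected to come out roughly like this under the new style:

```cpp
// Illustrative sketch only: how code is expected to be formatted under the
// new style -- left-aligned pointers/references, `const` placed after the
// type (QualifierOrder), and wrapped operators moved to the start of the
// continuation line (BreakBeforeBinaryOperators: All).
#include <string>

// was: const char *describe(const std::string &name);
char const* describe(std::string const& name);

// QualifierOrder: ['inline', 'static', 'type', 'const']
static int const kDemoLevel = 1; // hypothetical constant, for illustration

inline bool isReady(int status, std::string const& message) {
    return status == 0
           && !message.empty(); // the operator leads the wrapped line
}
```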
- +AlignAfterOpenBracket: BlockIndent +BreakBeforeBinaryOperators: All +BreakConstructorInitializers: BeforeComma +ColumnLimit: 80 +PointerAlignment: Left +QualifierAlignment: Custom +QualifierOrder: ['inline', 'static', 'type', 'const'] +ReferenceAlignment: Left diff --git a/.github/workflows/deploy_protected.yml b/.github/workflows/deploy_protected.yml index efbd4f79be..15dcaeade9 100644 --- a/.github/workflows/deploy_protected.yml +++ b/.github/workflows/deploy_protected.yml @@ -4,9 +4,10 @@ on: branches: - master - develop + - release** pull_request: - branches: - - master + paths: + - docker/Dockerfile workflow_dispatch: jobs: @@ -27,6 +28,10 @@ jobs: python-version: ${{ matrix.python-version }} - uses: actions/checkout@v3 - run: git archive -o docker/amici.tar.gz --format=tar.gz HEAD + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 - name: Publish to Registry uses: elgohr/Publish-Docker-Github-Action@v4 with: @@ -36,3 +41,4 @@ jobs: workdir: docker/ dockerfile: Dockerfile tag_names: true + platforms: linux/amd64,linux/arm64 diff --git a/.github/workflows/test_benchmark_collection_models.yml b/.github/workflows/test_benchmark_collection_models.yml index 93b9c2fdb8..21583df8c5 100644 --- a/.github/workflows/test_benchmark_collection_models.yml +++ b/.github/workflows/test_benchmark_collection_models.yml @@ -44,7 +44,6 @@ jobs: && sudo apt-get install -y swig libatlas-base-dev - run: echo "${HOME}/.local/bin/" >> $GITHUB_PATH - - run: echo "${GITHUB_WORKSPACE}/tests/performance/" >> $GITHUB_PATH # install AMICI - name: Create AMICI sdist @@ -61,3 +60,11 @@ jobs: git clone --depth 1 https://github.com/benchmarking-initiative/Benchmark-Models-PEtab.git \ && export BENCHMARK_COLLECTION="$(pwd)/Benchmark-Models-PEtab/Benchmark-Models/" \ && AMICI_PARALLEL_COMPILE=2 tests/benchmark-models/test_benchmark_collection.sh + + # upload results + - uses: actions/upload-artifact@v3 + with: + name: computation times + path: | + tests/benchmark-models/computation_times.csv + tests/benchmark-models/computation_times.png diff --git a/.github/workflows/test_performance.yml b/.github/workflows/test_performance.yml index 2359f8bb80..aff347aff5 100644 --- a/.github/workflows/test_performance.yml +++ b/.github/workflows/test_performance.yml @@ -68,8 +68,8 @@ jobs: - name: "Upload artifact: CS_Signalling_ERBB_RAS_AKT_petab" uses: actions/upload-artifact@v3 with: - name: CS_Signalling_ERBB_RAS_AKT - path: CS_Signalling_ERBB_RAS_AKT/CS_Signalling_ERBB_RAS_AKT_petab + name: model_performance_test + path: model_performance_test # install model package - name: Install test model diff --git a/.github/workflows/test_python_cplusplus.yml b/.github/workflows/test_python_cplusplus.yml index cf3528e182..cbe0436db7 100644 --- a/.github/workflows/test_python_cplusplus.yml +++ b/.github/workflows/test_python_cplusplus.yml @@ -71,7 +71,7 @@ jobs: - name: Cache sonar files id: cache-sonar - uses: actions/cache@v1 + uses: actions/cache@v3 with: path: sonar_cache key: ${{ runner.os }}-sonar_cache diff --git a/CHANGELOG.md b/CHANGELOG.md index 92ab14f78c..30429dd960 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,46 @@ ## v0.X Series +### v0.15.0 (2023-01-11) + +Features +* Improved logging by @dweindl in https://github.com/AMICI-dev/AMICI/pull/1907 + + For Python: Don't print messages to stdout, but collect them in ReturnData + and forward them to python logging, making it easier to filter specific + messages or to disable output completely. 
Messages are also available via + `ReturnData.messages`. + + **breaking change for C++ interface**: + Messages aren't printed to stdout by default, but are collected in + `ReturnData`. The user has to decide what to do with them. + +* MultiArch docker build by @FFroehlich + in https://github.com/AMICI-dev/AMICI/pull/1903 +* Added cmake target for cmake-format + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/1909 +* Updated clang-format style, fixed clang-format target + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/1908 +* Subsetting `ReturnData` fields by ID via `ReturnDataView.by_id` + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/1911 https://github.com/AMICI-dev/AMICI/pull/1916 + +Fixes +* PEtab import: fixed handling of fixed parameters for rule targets + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/1915 +* Fixed compiler warnings for matlab interface + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/1919 +* Fixed pandas DeprecationWarning for Series.iteritems() + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/1921 +* Fixed circular import in amici.petab_import_pysb + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/1922 +* Fixed 'operator ==' swig warning + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/1923 +* Prevent swig4.0.1 segfault + by @dweindl in https://github.com/AMICI-dev/AMICI/pull/1924 + +**Full Changelog**: https://github.com/AMICI-dev/AMICI/compare/v0.14.0...v0.15.0 + + ### v0.14.0 (2022-11-23) #### Features: diff --git a/CMakeLists.txt b/CMakeLists.txt index 23e6e8895f..5ead4672d4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -17,10 +17,10 @@ set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake) set(CMAKE_POSITION_INDEPENDENT_CODE ON) set(CMAKE_CXX_STANDARD 14) if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") - # require at least gcc 4.9, otherwise regex wont work properly - if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) - message(FATAL_ERROR "GCC version must be at least 4.9!") - endif() + # require at least gcc 4.9, otherwise regex wont work properly + if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) + message(FATAL_ERROR "GCC version must be at least 4.9!") + endif() endif() set(CMAKE_CXX_STANDARD_REQUIRED ON) @@ -28,11 +28,11 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON) include(CheckCXXCompilerFlag) set(MY_CXX_FLAGS -Wall) foreach(FLAG ${MY_CXX_FLAGS}) - unset(CUR_FLAG_SUPPORTED CACHE) - CHECK_CXX_COMPILER_FLAG(${FLAG} CUR_FLAG_SUPPORTED) - if(${CUR_FLAG_SUPPORTED}) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLAG}") - endif() + unset(CUR_FLAG_SUPPORTED CACHE) + check_cxx_compiler_flag(${FLAG} CUR_FLAG_SUPPORTED) + if(${CUR_FLAG_SUPPORTED}) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLAG}") + endif() endforeach(FLAG) # find dependencies @@ -40,52 +40,78 @@ include(GNUInstallDirs) option(ENABLE_HDF5 "Build with HDF5 support?" 
ON) if(ENABLE_HDF5) - find_package(HDF5 COMPONENTS C HL CXX REQUIRED) - set(HDF5_LIBRARIES ${HDF5_HL_LIBRARIES} ${HDF5_C_LIBRARIES} ${HDF5_CXX_LIBRARIES}) + find_package( + HDF5 + COMPONENTS C HL CXX + REQUIRED) + set(HDF5_LIBRARIES ${HDF5_HL_LIBRARIES} ${HDF5_C_LIBRARIES} + ${HDF5_CXX_LIBRARIES}) endif() set(SUITESPARSE_DIR "${CMAKE_SOURCE_DIR}/ThirdParty/SuiteSparse/") -set(SUITESPARSE_INCLUDE_DIRS "${SUITESPARSE_DIR}/include" "${CMAKE_SOURCE_DIR}/ThirdParty/sundials/src") +set(SUITESPARSE_INCLUDE_DIRS "${SUITESPARSE_DIR}/include" + "${CMAKE_SOURCE_DIR}/ThirdParty/sundials/src") set(SUITESPARSE_LIBRARIES ${SUITESPARSE_DIR}/KLU/Lib/libklu${CMAKE_STATIC_LIBRARY_SUFFIX} ${SUITESPARSE_DIR}/COLAMD/Lib/libcolamd${CMAKE_STATIC_LIBRARY_SUFFIX} ${SUITESPARSE_DIR}/BTF/Lib/libbtf${CMAKE_STATIC_LIBRARY_SUFFIX} ${SUITESPARSE_DIR}/AMD/Lib/libamd${CMAKE_STATIC_LIBRARY_SUFFIX} ${SUITESPARSE_DIR}/SuiteSparse_config/libsuitesparseconfig${CMAKE_STATIC_LIBRARY_SUFFIX} - ) +) -find_package(SUNDIALS REQUIRED PATHS "${CMAKE_SOURCE_DIR}/ThirdParty/sundials/build/lib/cmake/sundials/") +find_package( + SUNDIALS REQUIRED PATHS + "${CMAKE_SOURCE_DIR}/ThirdParty/sundials/build/lib/cmake/sundials/") set(GSL_LITE_INCLUDE_DIR "${CMAKE_SOURCE_DIR}/ThirdParty/gsl") # AMICI requires BLAS, currently Intel MKL, CBLAS or MATLAB BLAS can be used. # The latter is not supported via CMake yet. -set(BLAS "CBLAS" CACHE STRING "BLAS library to use") +set(BLAS + "CBLAS" + CACHE STRING "BLAS library to use") set_property(CACHE BLAS PROPERTY STRINGS "CBLAS" "MKL" "ACCELERATE") if(${BLAS} STREQUAL "MKL" OR DEFINED ENV{MKLROOT}) - if(DEFINED ENV{MKLROOT}) - # This is set by Environment Modules - message(STATUS "Using MKL_INCDIR and MKL_LIB from environment module") - set(BLAS "MKL" CACHE STRING "BLAS library to use" FORCE) - set(BLAS_INCLUDE_DIRS "$ENV{MKL_INCDIR}" CACHE STRING "" FORCE) - set(BLAS_LIBRARIES "$ENV{MKL_LIB}" CACHE STRING "" FORCE) - else() - set(BLAS_INCLUDE_DIRS "" CACHE STRING "") - set(BLAS_LIBRARIES -lmkl CACHE STRING "") - endif() + if(DEFINED ENV{MKLROOT}) + # This is set by Environment Modules + message(STATUS "Using MKL_INCDIR and MKL_LIB from environment module") + set(BLAS + "MKL" + CACHE STRING "BLAS library to use" FORCE) + set(BLAS_INCLUDE_DIRS + "$ENV{MKL_INCDIR}" + CACHE STRING "" FORCE) + set(BLAS_LIBRARIES + "$ENV{MKL_LIB}" + CACHE STRING "" FORCE) + else() + set(BLAS_INCLUDE_DIRS + "" + CACHE STRING "") + set(BLAS_LIBRARIES + -lmkl + CACHE STRING "") + endif() else() - set(BLAS_INCLUDE_DIRS "" CACHE STRING "") - set(BLAS_LIBRARIES -lcblas CACHE STRING "") + set(BLAS_INCLUDE_DIRS + "" + CACHE STRING "") + set(BLAS_LIBRARIES + -lcblas + CACHE STRING "") endif() add_definitions(-DAMICI_BLAS_${BLAS}) # Add target to create version file add_custom_target( - version - ${CMAKE_COMMAND} -D SRC=${CMAKE_SOURCE_DIR}/include/amici/version.in.h - -D DST=${CMAKE_BINARY_DIR}/include/amici/version.h - -P ${CMAKE_SOURCE_DIR}/cmake/configureVersion.cmake - ) + version + ${CMAKE_COMMAND} + -D + SRC=${CMAKE_SOURCE_DIR}/include/amici/version.in.h + -D + DST=${CMAKE_BINARY_DIR}/include/amici/version.h + -P + ${CMAKE_SOURCE_DIR}/cmake/configureVersion.cmake) # Library source files set(AMICI_SRC_LIST @@ -101,6 +127,7 @@ set(AMICI_SRC_LIST ${CMAKE_SOURCE_DIR}/src/solver.cpp ${CMAKE_SOURCE_DIR}/src/solver_cvodes.cpp ${CMAKE_SOURCE_DIR}/src/solver_idas.cpp + ${CMAKE_SOURCE_DIR}/src/logging.cpp ${CMAKE_SOURCE_DIR}/src/model.cpp ${CMAKE_SOURCE_DIR}/src/model_ode.cpp ${CMAKE_SOURCE_DIR}/src/model_dae.cpp @@ -122,6 +149,7 @@ 
set(AMICI_SRC_LIST ${CMAKE_SOURCE_DIR}/include/amici/exception.h ${CMAKE_SOURCE_DIR}/include/amici/forwardproblem.h ${CMAKE_SOURCE_DIR}/include/amici/hdf5.h + ${CMAKE_SOURCE_DIR}/include/amici/logging.h ${CMAKE_SOURCE_DIR}/include/amici/misc.h ${CMAKE_SOURCE_DIR}/include/amici/model_dae.h ${CMAKE_SOURCE_DIR}/include/amici/model_dimensions.h @@ -140,178 +168,172 @@ set(AMICI_SRC_LIST ${CMAKE_SOURCE_DIR}/include/amici/sundials_linsol_wrapper.h ${CMAKE_SOURCE_DIR}/include/amici/sundials_matrix_wrapper.h ${CMAKE_SOURCE_DIR}/include/amici/symbolic_functions.h - ${CMAKE_SOURCE_DIR}/include/amici/vector.h - ) + ${CMAKE_SOURCE_DIR}/include/amici/vector.h) if(ENABLE_HDF5) - list(APPEND AMICI_SRC_LIST ${CMAKE_SOURCE_DIR}/src/hdf5.cpp) + list(APPEND AMICI_SRC_LIST ${CMAKE_SOURCE_DIR}/src/hdf5.cpp) endif() add_library(${PROJECT_NAME} ${AMICI_SRC_LIST}) -set(AMICI_CXX_OPTIONS "" CACHE STRING "C++ options for libamici (semicolon-separated)") +set(AMICI_CXX_OPTIONS + "" + CACHE STRING "C++ options for libamici (semicolon-separated)") target_compile_options(${PROJECT_NAME} PRIVATE "${AMICI_CXX_OPTIONS}") add_dependencies(${PROJECT_NAME} version) file(GLOB PUBLIC_HEADERS include/amici/*.h) -set_target_properties(${PROJECT_NAME} PROPERTIES PUBLIC_HEADER "${PUBLIC_HEADERS}") -target_include_directories(${PROJECT_NAME} - PUBLIC $ - $ - PUBLIC $ - PUBLIC swig - PUBLIC ${GSL_LITE_INCLUDE_DIR} - PUBLIC ${SUITESPARSE_INCLUDE_DIRS} - PUBLIC ${HDF5_INCLUDE_DIRS} - ) +set_target_properties(${PROJECT_NAME} PROPERTIES PUBLIC_HEADER + "${PUBLIC_HEADERS}") +target_include_directories( + ${PROJECT_NAME} + PUBLIC $ + $ + PUBLIC $ + PUBLIC swig + PUBLIC ${GSL_LITE_INCLUDE_DIR} + PUBLIC ${SUITESPARSE_INCLUDE_DIRS} + PUBLIC ${HDF5_INCLUDE_DIRS}) if(NOT "${BLAS_INCLUDE_DIRS}" STREQUAL "") - target_include_directories(${PROJECT_NAME} PUBLIC ${BLAS_INCLUDE_DIRS}) + target_include_directories(${PROJECT_NAME} PUBLIC ${BLAS_INCLUDE_DIRS}) endif() -target_link_libraries(${PROJECT_NAME} - PUBLIC SUNDIALS::generic_static - PUBLIC SUNDIALS::nvecserial_static - PUBLIC SUNDIALS::sunmatrixband_static - PUBLIC SUNDIALS::sunmatrixdense_static - PUBLIC SUNDIALS::sunmatrixsparse_static - PUBLIC SUNDIALS::sunlinsolband_static - PUBLIC SUNDIALS::sunlinsoldense_static - PUBLIC SUNDIALS::sunlinsolpcg_static - PUBLIC SUNDIALS::sunlinsolspbcgs_static - PUBLIC SUNDIALS::sunlinsolspfgmr_static - PUBLIC SUNDIALS::sunlinsolspgmr_static - PUBLIC SUNDIALS::sunlinsolsptfqmr_static - PUBLIC SUNDIALS::sunlinsolklu_static - PUBLIC SUNDIALS::sunnonlinsolnewton_static - PUBLIC SUNDIALS::sunnonlinsolfixedpoint_static - PUBLIC SUNDIALS::cvodes_static - PUBLIC SUNDIALS::idas_static - PUBLIC ${SUITESPARSE_LIBRARIES} - PUBLIC ${HDF5_LIBRARIES} - PUBLIC ${BLAS_LIBRARIES} - PUBLIC ${CMAKE_DL_LIBS} - ) +target_link_libraries( + ${PROJECT_NAME} + PUBLIC SUNDIALS::generic_static + PUBLIC SUNDIALS::nvecserial_static + PUBLIC SUNDIALS::sunmatrixband_static + PUBLIC SUNDIALS::sunmatrixdense_static + PUBLIC SUNDIALS::sunmatrixsparse_static + PUBLIC SUNDIALS::sunlinsolband_static + PUBLIC SUNDIALS::sunlinsoldense_static + PUBLIC SUNDIALS::sunlinsolpcg_static + PUBLIC SUNDIALS::sunlinsolspbcgs_static + PUBLIC SUNDIALS::sunlinsolspfgmr_static + PUBLIC SUNDIALS::sunlinsolspgmr_static + PUBLIC SUNDIALS::sunlinsolsptfqmr_static + PUBLIC SUNDIALS::sunlinsolklu_static + PUBLIC SUNDIALS::sunnonlinsolnewton_static + PUBLIC SUNDIALS::sunnonlinsolfixedpoint_static + PUBLIC SUNDIALS::cvodes_static + PUBLIC SUNDIALS::idas_static + PUBLIC ${SUITESPARSE_LIBRARIES} + PUBLIC 
${HDF5_LIBRARIES} + PUBLIC ${BLAS_LIBRARIES} + PUBLIC ${CMAKE_DL_LIBS}) option(SUNDIALS_SUPERLUMT_ENABLE "Enable sundials SuperLUMT?" OFF) if(SUNDIALS_SUPERLUMT_ENABLE) - set(SUNDIALS_LIBRARIES ${SUNDIALS_LIBRARIES} - ${SUNDIALS_LIB_DIR}/libsundials_sunlinsolsuperlumt${CMAKE_STATIC_LIBRARY_SUFFIX} - ${CMAKE_SOURCE_DIR}/ThirdParty/SuperLU_MT_3.1/lib/libsuperlu_mt_PTHREAD${CMAKE_STATIC_LIBRARY_SUFFIX} - -lblas - ) - target_include_directories(${PROJECT_NAME} - PUBLIC "${CMAKE_SOURCE_DIR}/ThirdParty/SuperLU_MT_3.1/SRC/") + set(SUNDIALS_LIBRARIES + ${SUNDIALS_LIBRARIES} + ${SUNDIALS_LIB_DIR}/libsundials_sunlinsolsuperlumt${CMAKE_STATIC_LIBRARY_SUFFIX} + ${CMAKE_SOURCE_DIR}/ThirdParty/SuperLU_MT_3.1/lib/libsuperlu_mt_PTHREAD${CMAKE_STATIC_LIBRARY_SUFFIX} + -lblas) + target_include_directories( + ${PROJECT_NAME} PUBLIC "${CMAKE_SOURCE_DIR}/ThirdParty/SuperLU_MT_3.1/SRC/") endif() # Create targets to make the sources show up in IDEs for convenience # For matlab interface set(matlab_sources - src/interface_matlab.cpp - src/returndata_matlab.cpp - include/amici/interface_matlab.h - include/amici/returndata_matlab.h -) + src/interface_matlab.cpp src/returndata_matlab.cpp + include/amici/interface_matlab.h include/amici/returndata_matlab.h) find_package(Matlab) # In case we can find Matlab, we create a respective library to compile the -# extension from cmake. Otherwise we just create a dummy target for -# the files to show up inside IDEs. -# (Set the Matlab_ROOT_DIR cmake variable if CMake cannot find your Matlab -# installation) -if (${Matlab_FOUND}) - add_library(matlabInterface - ${matlab_sources} - ) - set_target_properties(matlabInterface - PROPERTIES INCLUDE_DIRECTORIES "${Matlab_INCLUDE_DIRS}") +# extension from cmake. Otherwise we just create a dummy target for the files to +# show up inside IDEs. 
(Set the Matlab_ROOT_DIR cmake variable if CMake cannot +# find your Matlab installation) +if(${Matlab_FOUND}) + add_library(matlabInterface ${matlab_sources}) + set_target_properties(matlabInterface PROPERTIES INCLUDE_DIRECTORIES + "${Matlab_INCLUDE_DIRS}") - target_link_libraries(matlabInterface - PUBLIC amici - ) + target_link_libraries(matlabInterface PUBLIC amici) else() - add_custom_target(matlabInterface - SOURCES ${matlab_sources} - ) + add_custom_target(matlabInterface SOURCES ${matlab_sources}) endif() -set_property(TARGET matlabInterface APPEND - PROPERTY INCLUDE_DIRECTORIES "${CMAKE_SOURCE_DIR}/include/") +set_property( + TARGET matlabInterface + APPEND + PROPERTY INCLUDE_DIRECTORIES "${CMAKE_SOURCE_DIR}/include/") # For template files add_custom_target( - fileTemplates - SOURCES - src/CMakeLists.template.cmake - src/main.template.cpp - src/model_header.ODE_template.h - src/model.ODE_template.cpp - src/wrapfunctions.ODE_template.h - src/wrapfunctions.template.cpp - swig/CMakeLists_model.cmake - swig/modelname.template.i - ) -set_target_properties(fileTemplates - PROPERTIES INCLUDE_DIRECTORIES "${CMAKE_SOURCE_DIR}/include/") - + fileTemplates + SOURCES src/CMakeLists.template.cmake + src/main.template.cpp + src/model_header.ODE_template.h + src/model.ODE_template.cpp + src/wrapfunctions.ODE_template.h + src/wrapfunctions.template.cpp + swig/CMakeLists_model.cmake + swig/modelname.template.i) +set_target_properties(fileTemplates PROPERTIES INCLUDE_DIRECTORIES + "${CMAKE_SOURCE_DIR}/include/") if($ENV{ENABLE_GCOV_COVERAGE}) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0 --coverage") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0 --coverage") endif() include(clang-tools) +include(cmakelang-tools) set(AUTHORS "Fabian Froehlich, Jan Hasenauer, Daniel Weindl and Paul Stapor") set(AUTHOR_EMAIL "Fabian_Froehlich@hms.harvard.edu") # -install(TARGETS ${PROJECT_NAME} EXPORT AmiciTargets - ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} - PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/amici -) -export(EXPORT AmiciTargets FILE AmiciTargets.cmake - NAMESPACE Upstream:: - ) +install( + TARGETS ${PROJECT_NAME} + EXPORT AmiciTargets + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + INCLUDES + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/amici) +export( + EXPORT AmiciTargets + FILE AmiciTargets.cmake + NAMESPACE Upstream::) include(CMakePackageConfigHelpers) include(version) configure_package_config_file( - cmake/AmiciConfig.cmake - "${CMAKE_CURRENT_BINARY_DIR}/AmiciConfig.cmake" - INSTALL_DESTINATION "${LIB_INSTALL_DIR}/cmake/" - ) -write_basic_package_version_file(AmiciConfigVersion.cmake COMPATIBILITY ExactVersion) -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/AmiciConfig.cmake - ${CMAKE_CURRENT_BINARY_DIR}/AmiciTargets.cmake - ${CMAKE_CURRENT_BINARY_DIR}/AmiciConfigVersion.cmake - DESTINATION share/Amici/cmake ) + cmake/AmiciConfig.cmake "${CMAKE_CURRENT_BINARY_DIR}/AmiciConfig.cmake" + INSTALL_DESTINATION "${LIB_INSTALL_DIR}/cmake/") +write_basic_package_version_file(AmiciConfigVersion.cmake + COMPATIBILITY ExactVersion) +install( + FILES ${CMAKE_CURRENT_BINARY_DIR}/AmiciConfig.cmake + ${CMAKE_CURRENT_BINARY_DIR}/AmiciTargets.cmake + ${CMAKE_CURRENT_BINARY_DIR}/AmiciConfigVersion.cmake + DESTINATION 
share/Amici/cmake) # Register package option(EXPORT_PACKAGE "Export AMICI library to CMake package registry?" ON) if(EXPORT_PACKAGE) - export(PACKAGE Amici) + export(PACKAGE Amici) endif() # - # build interfaces for other languages option(ENABLE_SWIG "Build AMICI swig library?" ON) if(ENABLE_SWIG) - add_subdirectory(swig) + add_subdirectory(swig) endif() option(ENABLE_PYTHON "Create Python module?" ON) if(ENABLE_PYTHON) - add_subdirectory(python) + add_subdirectory(python) endif() option(BUILD_TESTS "Build integration tests?" ON) if(BUILD_TESTS) - if(ENABLE_HDF5) - enable_testing() + if(ENABLE_HDF5) + enable_testing() - add_subdirectory(tests/cpp) - else() - message(WARNING "Cannot build tests with ENABLE_HDF5=OFF.") - endif() + add_subdirectory(tests/cpp) + else() + message(WARNING "Cannot build tests with ENABLE_HDF5=OFF.") + endif() endif() diff --git a/cmake/clang-tools.cmake b/cmake/clang-tools.cmake index 126fcbba3b..a557deca82 100644 --- a/cmake/clang-tools.cmake +++ b/cmake/clang-tools.cmake @@ -1,34 +1,36 @@ -######### Add targets for clang-format and clang-tidy ############ +# Add targets for clang-format and clang-tidy ############ # Find all source files -execute_process(COMMAND sh -c "git ls-tree -r HEAD --name-only src/*.cpp include/*.h | tr '\n' ' '" - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - OUTPUT_VARIABLE ALL_CXX_SOURCE_FILES - ) -############ clang-tidy ############ +execute_process( + COMMAND + sh -c + "git ls-tree -r HEAD --name-only src/*.cpp include/amici/*.h | tr '\n' ' '" + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + OUTPUT_VARIABLE ALL_CXX_SOURCE_FILES) +# ########### clang-tidy ############ # Try to find clang-format and add target if successful find_program(CLANG_FORMAT "clang-format") if(CLANG_FORMAT) - add_custom_target( - clang-format - COMMAND bash -c "/usr/bin/clang-format -i ${ALL_CXX_SOURCE_FILES}" - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - ) + add_custom_target( + clang-format + COMMAND bash -c "${CLANG_FORMAT} -i ${ALL_CXX_SOURCE_FILES}" + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}) else() - message(STATUS "clang-format was not found") + message(STATUS "clang-format was not found") endif() -############ clang-tidy ############ +# ########### clang-tidy ############ # Try to find clang-tidy and add target if successful find_program(CLANG_TIDY "clang-tidy") if(CLANG_TIDY) - add_custom_target( - clang-tidy - COMMAND sh -c "/usr/bin/clang-tidy ${ALL_CXX_SOURCE_FILES} -- -std=c++11 -I${CMAKE_SOURCE_DIR}" - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - ) + add_custom_target( + clang-tidy + COMMAND + sh -c + "${CLANG_TIDY} ${ALL_CXX_SOURCE_FILES} -- -std=c++14 -I${CMAKE_SOURCE_DIR}" + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}) else() - message(STATUS "clang-tidy was not found") + message(STATUS "clang-tidy was not found") endif() diff --git a/cmake/cmakelang-tools.cmake b/cmake/cmakelang-tools.cmake new file mode 100644 index 0000000000..ad489500bc --- /dev/null +++ b/cmake/cmakelang-tools.cmake @@ -0,0 +1,41 @@ +# --- Add targets for cmake-format https://cmake-format.readthedocs.io/ --- + +# Find all CMakeFiles files +set(ALL_CMAKE_FILES + CMakeLists.txt + python/CMakeLists.txt + swig/CMakeLists.txt + tests/cpp/CMakeLists.txt + tests/cpp/unittests/CMakeLists.txt + ${CMAKE_MODULE_PATH}/cmakelang-tools.cmake + ${CMAKE_MODULE_PATH}/clang-tools.cmake + ${CMAKE_MODULE_PATH}/version.cmake) +list(JOIN ALL_CMAKE_FILES " " ALL_CMAKE_FILES) + +# --- cmake-format --- + +# Try to find cmake-format and add target if successful +find_program(CMAKE_FORMAT "cmake-format") +if(CMAKE_FORMAT) + 
add_custom_target( + cmake-format + COMMAND bash -c "${CMAKE_FORMAT} -i ${ALL_CMAKE_FILES}" + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + COMMENT "Running cmake-format") +else() + message(STATUS "cmake-format was not found") +endif() + +# --- cmake-lint --- + +# Try to find cmake-lint and add target if successful +find_program(CMAKE_LINT "cmake-lint") +if(CMAKE_LINT) + add_custom_target( + cmake-lint + COMMAND bash -c "${CMAKE_LINT} ${ALL_CMAKE_FILES}" + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + COMMENT "Running cmake-lint") +else() + message(STATUS "cmake-lint was not found") +endif() diff --git a/cmake/version.cmake b/cmake/version.cmake index 577594e8f8..6bde218b94 100644 --- a/cmake/version.cmake +++ b/cmake/version.cmake @@ -1,16 +1,19 @@ find_package(Git) if(Git_FOUND) - execute_process(COMMAND sh -c "${GIT_EXECUTABLE} describe --abbrev=4 --dirty=-dirty --always --tags | cut -c 2- | tr -d '\n' | sed s/-/./" - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - OUTPUT_VARIABLE PROJECT_VERSION_GIT - ) + execute_process( + COMMAND + sh -c + "${GIT_EXECUTABLE} describe --abbrev=4 --dirty=-dirty --always --tags | cut -c 2- | tr -d '\n' | sed s/-/./" + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + OUTPUT_VARIABLE PROJECT_VERSION_GIT) endif() # get project root directory -get_filename_component(CMAKE_PARENT_LIST_DIR ${CMAKE_PARENT_LIST_FILE} DIRECTORY) +get_filename_component(CMAKE_PARENT_LIST_DIR ${CMAKE_PARENT_LIST_FILE} + DIRECTORY) get_filename_component(CMAKE_PARENT_LIST_DIR ${CMAKE_PARENT_LIST_DIR} DIRECTORY) -execute_process(COMMAND sh -c "cat version.txt | tr -d '\n'" - WORKING_DIRECTORY "${CMAKE_PARENT_LIST_DIR}" - OUTPUT_VARIABLE PROJECT_VERSION - ) +execute_process( + COMMAND sh -c "cat version.txt | tr -d '\n'" + WORKING_DIRECTORY "${CMAKE_PARENT_LIST_DIR}" + OUTPUT_VARIABLE PROJECT_VERSION) diff --git a/docker/Dockerfile b/docker/Dockerfile index bff7d07172..b6ffd1684a 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,7 +1,8 @@ FROM ubuntu:22.04 -RUN apt update \ - && apt-get install -y \ +# install dependencies + +RUN apt-get update && apt-get install -y \ g++ \ libatlas-base-dev \ python3 \ @@ -10,13 +11,14 @@ RUN apt update \ python3-venv \ swig +# prepare python install + COPY amici.tar.gz /tmp -RUN pip3 install --upgrade pip build \ - && mkdir -p /tmp/amici/ \ - && cd /tmp/amici \ - && tar -xzf ../amici.tar.gz \ - && cd /tmp/amici/python/sdist \ - && python3 -m build --sdist \ - && pip3 install -v $(ls -t /tmp/amici/python/sdist/dist/amici-*.tar.gz | head -1)[petab] \ - && rm -rf /tmp && mkdir /tmp +RUN pip3 install --upgrade pip build && \ + mkdir -p /tmp/amici/ && \ + cd /tmp/amici && \ + tar -xzf ../amici.tar.gz && cd /tmp/amici/python/sdist && \ + python3 -m build --sdist && \ + pip3 install -v $(ls -t /tmp/amici/python/sdist/dist/amici-*.tar.gz | head -1)[petab] && \ + rm -rf /tmp/* diff --git a/documentation/conf.py b/documentation/conf.py index 92327c6043..7be93c9c75 100644 --- a/documentation/conf.py +++ b/documentation/conf.py @@ -123,7 +123,7 @@ def install_amici_deps_rtd(): def install_doxygen(): """Get a more recent doxygen""" - version = '1.9.5' + version = '1.9.6' doxygen_exe = os.path.join(amici_dir, 'ThirdParty', f'doxygen-{version}', 'bin', 'doxygen') # to create a symlink to doxygen in a location that is already on PATH diff --git a/documentation/rtd_requirements.txt b/documentation/rtd_requirements.txt index 81431c0caf..37d6f7663c 100644 --- a/documentation/rtd_requirements.txt +++ b/documentation/rtd_requirements.txt @@ -1,6 +1,6 @@ sphinx==5.1.1 
mock>=4.0.3 -setuptools==65.2.0 +setuptools==65.5.1 pysb>=1.11.0 matplotlib==3.5.3 pkgconfig>=1.5.5 diff --git a/include/amici/amici.h b/include/amici/amici.h index d919bbbc59..3f022361c3 100644 --- a/include/amici/amici.h +++ b/include/amici/amici.h @@ -9,98 +9,6 @@ namespace amici { -/** - * Type for function to process warnings or error messages. - */ -using outputFunctionType = std::function; - - -/*! - * @brief Prints a specified error message associated with the specified - * identifier - * - * @param id error identifier - * @param message error message - */ -void printErrMsgIdAndTxt(std::string const &id, std::string const &message); - -/*! - * @brief Prints a specified warning message associated with the specified - * identifier - * - * @param id warning identifier - * @param message warning message - */ -void printWarnMsgIdAndTxt(std::string const &id, std::string const &message); - -/** - * @brief Main class for making calls to AMICI. - * - * This class is used to provide separate AMICI contexts, for example, for use - * in multi-threaded applications where different threads want to use AMICI with - * different settings, such custom logging functions. - * - * NOTE: For this moment, the context object needs to be set manually to any - * Model and Solver object. If not set, they will use the default output - * channel. - */ -class AmiciApplication { - public: - AmiciApplication() = default; - - /** - * @brief Core integration routine. Initializes the solver and runs the - * forward and backward problem. - * - * @param solver Solver instance - * @param edata pointer to experimental data object - * @param model model specification object - * @param rethrow rethrow integration exceptions? - * @return rdata pointer to return data object - */ - std::unique_ptr runAmiciSimulation(Solver &solver, - const ExpData *edata, - Model &model, - bool rethrow = false); - - /** - * @brief Same as runAmiciSimulation, but for multiple ExpData instances. - * - * @param solver Solver instance - * @param edatas experimental data objects - * @param model model specification object - * @param failfast flag to allow early termination - * @param num_threads number of threads for parallel execution - * @return vector of pointers to return data objects - */ - std::vector> - runAmiciSimulations(Solver const &solver, - const std::vector &edatas, - Model const &model, bool failfast, int num_threads); - - /** Function to process warnings */ - outputFunctionType warning = printWarnMsgIdAndTxt; - - /** Function to process errors */ - outputFunctionType error = printErrMsgIdAndTxt; - - /** - * @brief printf interface to warning() - * @param identifier warning identifier - * @param format string with warning message printf-style format - * @param ... arguments to be formatted - */ - void warningF(const char *identifier, const char *format, ...) const; - - /** - * @brief printf interface to error() - * @param identifier warning identifier - * @param format string with error message printf-style format - * @param ... arguments to be formatted - */ - void errorF(const char *identifier, const char *format, ...) const; -}; /** * @brief Core integration routine. 
Initializes the solver and runs the forward diff --git a/include/amici/exception.h b/include/amici/exception.h index 9de41a31cd..5f35324d41 100644 --- a/include/amici/exception.h +++ b/include/amici/exception.h @@ -3,9 +3,9 @@ #include "amici/defines.h" // necessary for realtype -#include #include #include +#include namespace amici { @@ -15,11 +15,9 @@ namespace amici { * Has a printf style interface to allow easy generation of error messages */ class AmiException : public std::exception { -public: + public: /** - * @brief Constructor with printf style interface - * @param fmt error message with printf format - * @param ... printf formatting variables + * @brief Default ctor. */ AmiException(); @@ -34,13 +32,13 @@ class AmiException : public std::exception { * @brief Override of default error message function * @return msg error message */ - const char* what() const noexcept override; + char const* what() const noexcept override; /** * @brief Returns the stored backtrace * @return trace backtrace */ - const char *getBacktrace() const; + char const* getBacktrace() const; /** * @brief Stores the current backtrace @@ -54,42 +52,39 @@ class AmiException : public std::exception { * @param fmt error message with printf format * @param argptr pointer to variadic argument list */ - void storeMessage(const char *fmt, va_list argptr); + void storeMessage(char const* fmt, va_list argptr); -private: + private: std::array msg_; std::array trace_; }; - /** * @brief cvode exception handler class */ -class CvodeException : public AmiException { -public: +class CvodeException : public AmiException { + public: /** * @brief Constructor * @param error_code error code returned by cvode function * @param function cvode function name */ - CvodeException(int error_code, const char *function); + CvodeException(int error_code, char const* function); }; - /** * @brief ida exception handler class */ -class IDAException : public AmiException { -public: +class IDAException : public AmiException { + public: /** * @brief Constructor * @param error_code error code returned by ida function * @param function ida function name */ - IDAException(int error_code, const char *function); + IDAException(int error_code, char const* function); }; - /** * @brief Integration failure exception for the forward problem * @@ -97,7 +92,7 @@ class IDAException : public AmiException { * for this exception we can assume that we can recover from the exception * and return a solution struct to the user */ -class IntegrationFailure : public AmiException { +class IntegrationFailure : public AmiException { public: /** * @brief Constructor @@ -113,7 +108,6 @@ class IntegrationFailure : public AmiException { realtype time; }; - /** * @brief Integration failure exception for the backward problem * @@ -121,7 +115,7 @@ class IntegrationFailure : public AmiException { * for this exception we can assume that we can recover from the exception * and return a solution struct to the user */ -class IntegrationFailureB : public AmiException { +class IntegrationFailureB : public AmiException { public: /** * @brief Constructor @@ -137,7 +131,6 @@ class IntegrationFailureB : public AmiException { realtype time; }; - /** * @brief Setup failure exception * @@ -153,10 +146,8 @@ class SetupFailure : public AmiException { * @param ... 
printf formatting variables */ explicit SetupFailure(char const* fmt, ...); - }; - /** * @brief Newton failure exception * @@ -165,13 +156,13 @@ class SetupFailure : public AmiException { * recover from the exception and return a solution struct to the user */ class NewtonFailure : public AmiException { -public: + public: /** * @brief Constructor, simply calls AmiException constructor * @param function name of the function in which the error occurred * @param code error code */ - NewtonFailure(int code, const char *function); + NewtonFailure(int code, char const* function); /** error code returned by solver */ int error_code; diff --git a/include/amici/logging.h b/include/amici/logging.h new file mode 100644 index 0000000000..6447cf6054 --- /dev/null +++ b/include/amici/logging.h @@ -0,0 +1,103 @@ +#ifndef AMICI_LOGGER_H +#define AMICI_LOGGER_H + +#include +#include + +namespace amici { + +struct LogItem; + +/** + * @brief Severity levels for logging. + */ +enum class LogSeverity { + error, + warning, + debug, +}; + +/** + * @brief A logger, holding a list of error messages. + */ +class Logger +{ + public: + Logger() = default; + /** + * @brief Add a log entry + * @param severity Severity level + * @param identifier Short identifier for the logged event + * @param message A more detailed message + */ + void log( + LogSeverity severity, + std::string const& identifier, + std::string const& message + ); + +#if SWIG_VERSION >= 0x040002 + /** + * @brief Add a log entry with printf-like message formatting + * @param severity Severity level + * @param identifier Short identifier for the logged event + * @param format printf format string + * @param ... arguments to be formatted + */ +#else + // swig 4.0.1 segfaults on "@param ..." + // see https://github.com/swig/swig/issues/1643 + /** + * @brief Add a log entry with printf-like message formatting + * @param severity Severity level + * @param identifier Short identifier for the logged event + * @param format printf format string + */ +#endif + void + log(LogSeverity severity, std::string const& identifier, char const* format, + ...); + + /** The log items */ + std::vector items; +}; + + +/** + * @brief A log item. + */ +struct LogItem +{ + /** + * @brief Default ctor. 
+ */ + LogItem() = default; + + /** + * @brief Construct a LogItem + * @param severity + * @param identifier + * @param message + */ + LogItem( + LogSeverity severity, + std::string const& identifier, + std::string const& message + ): + severity(severity) + ,identifier(identifier) + ,message(message) + {}; + + /** Severity level */ + LogSeverity severity; + + /** Short identifier for the logged event */ + std::string identifier; + + /** A more detailed and readable message */ + std::string message; +}; + +} // namespace amici +#endif // AMICI_LOGGER_H diff --git a/include/amici/model.h b/include/amici/model.h index 5bf1caaa33..a7103b361c 100644 --- a/include/amici/model.h +++ b/include/amici/model.h @@ -8,6 +8,7 @@ #include "amici/simulation_parameters.h" #include "amici/model_dimensions.h" #include "amici/model_state.h" +#include "amici/logging.h" #include #include @@ -18,9 +19,6 @@ namespace amici { class ExpData; class Model; class Solver; -class AmiciApplication; - -extern AmiciApplication defaultContext; } // namespace amici @@ -1417,8 +1415,8 @@ class Model : public AbstractModel, public ModelDimensions { /** Flag array for DAE equations */ std::vector idlist; - /** AMICI application context */ - AmiciApplication *app = &defaultContext; + /** Logger */ + Logger *logger = nullptr; protected: /** diff --git a/include/amici/rdata.h b/include/amici/rdata.h index 87a1f498a5..852d5b464f 100644 --- a/include/amici/rdata.h +++ b/include/amici/rdata.h @@ -5,6 +5,7 @@ #include "amici/vector.h" #include "amici/model.h" #include "amici/misc.h" +#include "amici/logging.h" #include @@ -324,7 +325,17 @@ class ReturnData: public ModelDimensions { */ std::vector s2llh; - /** status code */ + /** + * @brief Simulation status code. + * + * One of: + * + * * AMICI_SUCCESS, indicating successful simulation + * * AMICI_MAX_TIME_EXCEEDED, indicating that the simulation did not finish + * within the allowed time (see Solver.{set,get}MaxTime) + * * AMICI_ERROR, indicating that some error occurred during simulation + * (a more detailed error message will have been printed). 
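As an aside, not part of the diff: based on the declarations in the new `include/amici/logging.h` above, the `Logger`/`LogItem` API can be used roughly as sketched below. This is an illustrative sketch; it assumes `Logger::items` holds `LogItem` entries, as the surrounding documentation suggests.

```cpp
// Illustrative sketch of the new logging API (assumptions noted above).
#include <amici/logging.h>

#include <iostream>

void demo_logging() {
    amici::Logger logger;
    logger.log(amici::LogSeverity::warning, "DEMO", "plain message");
    // printf-style overload
    logger.log(amici::LogSeverity::debug, "DEMO", "step size was %g", 1e-3);

    // inspect collected items, e.g. print everything above debug level
    for (auto const& item : logger.items) {
        if (item.severity != amici::LogSeverity::debug)
            std::cerr << "[" << item.identifier << "] " << item.message << "\n";
    }
}
```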
+ */ int status = 0; /** number of states (alias `nx_rdata`, kept for backward compatibility) */ @@ -376,6 +387,9 @@ class ReturnData: public ModelDimensions { /** boolean indicating whether residuals for standard deviations have been added */ bool sigma_res; + /** log messages */ + std::vector messages; + protected: /** offset for sigma_residuals */ diff --git a/include/amici/returndata_matlab.h b/include/amici/returndata_matlab.h index ae9b058858..7b82e8ae25 100644 --- a/include/amici/returndata_matlab.h +++ b/include/amici/returndata_matlab.h @@ -40,8 +40,9 @@ mxArray *initMatlabDiagnosisFields(ReturnData const *rdata); * @param fieldData Data which will be stored in the field */ template -void writeMatlabField0(mxArray *matlabStruct, const char *fieldName, - T fieldData); +void writeMatlabField0( + mxArray* matlabStruct, char const* fieldName, T fieldData +); /** * @brief initialize vector and attach to the field @@ -52,8 +53,10 @@ void writeMatlabField0(mxArray *matlabStruct, const char *fieldName, * @param dim0 Number of elements in the vector */ template -void writeMatlabField1(mxArray *matlabStruct, const char *fieldName, - gsl::span const &fieldData, const int dim0); +void writeMatlabField1( + mxArray* matlabStruct, char const* fieldName, + gsl::span const& fieldData, const mwSize dim0 +); /** * @brief initialize matrix, attach to the field and write data @@ -65,9 +68,11 @@ void writeMatlabField1(mxArray *matlabStruct, const char *fieldName, * @param perm reordering of dimensions (i.e., transposition) */ template -void writeMatlabField2(mxArray *matlabStruct, const char *fieldName, - std::vector const &fieldData, int dim0, int dim1, - std::vector perm); +void writeMatlabField2( + mxArray* matlabStruct, char const* fieldName, + std::vector const& fieldData, mwSize dim0, mwSize dim1, + std::vector perm +); /** * @brief initialize 3D tensor, attach to the field and write data @@ -80,9 +85,11 @@ void writeMatlabField2(mxArray *matlabStruct, const char *fieldName, * @param perm reordering of dimensions */ template -void writeMatlabField3(mxArray *matlabStruct, const char *fieldName, - std::vector const &fieldData, int dim0, int dim1, - int dim2, std::vector perm); +void writeMatlabField3( + mxArray* matlabStruct, char const* fieldName, + std::vector const& fieldData, mwSize dim0, mwSize dim1, mwSize dim2, + std::vector perm +); /** * @brief initialize 4D tensor, attach to the field and write data @@ -96,9 +103,11 @@ void writeMatlabField3(mxArray *matlabStruct, const char *fieldName, * @param perm reordering of dimensions */ template -void writeMatlabField4(mxArray *matlabStruct, const char *fieldName, - std::vector const &fieldData, int dim0, int dim1, - int dim2, int dim3, std::vector perm); +void writeMatlabField4( + mxArray* matlabStruct, char const* fieldName, + std::vector const& fieldData, mwSize dim0, mwSize dim1, mwSize dim2, + mwSize dim3, std::vector perm +); /** * @brief initializes the field fieldName in matlabStruct with dimension dim @@ -108,15 +117,16 @@ void writeMatlabField4(mxArray *matlabStruct, const char *fieldName, * * @return pointer to field data */ -double *initAndAttachArray(mxArray *matlabStruct, const char *fieldName, - std::vector dim); +double* initAndAttachArray( + mxArray* matlabStruct, char const* fieldName, std::vector dim +); /** * @brief checks whether fieldNames was properly allocated * @param fieldNames array of field names * @param fieldCount expected number of fields in fieldNames */ -void checkFieldNames(const char **fieldNames, const int 
fieldCount); +void checkFieldNames(char const** fieldNames, int const fieldCount); /** * @brief template function that reorders elements in a std::vector diff --git a/include/amici/solver.h b/include/amici/solver.h index 525a70d206..0168f67db6 100644 --- a/include/amici/solver.h +++ b/include/amici/solver.h @@ -4,6 +4,7 @@ #include "amici/defines.h" #include "amici/sundials_linsol_wrapper.h" #include "amici/vector.h" +#include "amici/logging.h" #include #include @@ -17,9 +18,7 @@ class ForwardProblem; class BackwardProblem; class Model; class Solver; -class AmiciApplication; -extern AmiciApplication defaultContext; } // namespace amici // for serialization friend in Solver @@ -54,12 +53,6 @@ class Solver { */ Solver() = default; - /** - * @brief Constructor - * @param app AMICI application context - */ - Solver(AmiciApplication *app); - /** * @brief Solver copy constructor * @param other @@ -945,8 +938,8 @@ class Solver { */ friend bool operator==(const Solver &a, const Solver &b); - /** AMICI context */ - AmiciApplication *app = &defaultContext; + /** logger */ + Logger *logger = nullptr; protected: /** diff --git a/matlab/@amimodel/compileAndLinkModel.m b/matlab/@amimodel/compileAndLinkModel.m index 8cfd8e7431..8305da7732 100644 --- a/matlab/@amimodel/compileAndLinkModel.m +++ b/matlab/@amimodel/compileAndLinkModel.m @@ -194,7 +194,7 @@ function compileAndLinkModel(modelname, modelSourceFolder, coptim, debug, funs, % generate hash for file and append debug string if we have an md5 % file, check this hash against the contained hash cppsrc = {'amici', 'symbolic_functions','spline', ... - 'edata','rdata', 'exception', ... + 'edata','rdata', 'exception', 'logging', ... 'interface_matlab', 'misc', 'simulation_parameters', ... 'solver', 'solver_cvodes', 'solver_idas', 'model_state', ... 'model', 'model_ode', 'model_dae', 'returndata_matlab', ... diff --git a/matlab/mtoc/config/Doxyfile.template b/matlab/mtoc/config/Doxyfile.template index ed2f623f17..f5eed70f73 100644 --- a/matlab/mtoc/config/Doxyfile.template +++ b/matlab/mtoc/config/Doxyfile.template @@ -1,6 +1,4 @@ -############################################################################ - -# Doxyfile 1.9.0 +# Doxyfile 1.9.7 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. @@ -14,6 +12,16 @@ # For lists, items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (\" \"). +# +# Note: +# +# Use doxygen to compare the used configuration file with the template +# configuration file: +# doxygen -x [configFile] +# Use doxygen to compare the used configuration file with the template +# configuration file without replacing the environment variables or CMake type +# replacement variables: +# doxygen -x_noenv [configFile] #--------------------------------------------------------------------------- # Project related configuration options @@ -26,6 +34,7 @@ # https://www.gnu.org/software/libiconv/ for the list of possible encodings. # The default value is: UTF-8. 
+############################################################################ ############################################################################ ############################################################################ ################# mtoc++ related information ############################### @@ -47,7 +56,7 @@ DOXYFILE_ENCODING = UTF-8 ############### MTOC++ RELATED CONFIGURATION ################################### ################################################################################ -PROJECT_NAME = "_ProjectName_" +PROJECT_NAME = _ProjectName_ # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version @@ -59,32 +68,44 @@ PROJECT_NUMBER = _ProjectVersion_ # for a project that appears at the top of each page and should give viewer a # quick idea about the purpose of the project. Keep the description short. -PROJECT_BRIEF = "_ProjectDescription_" +PROJECT_BRIEF = _ProjectDescription_ # With the PROJECT_LOGO tag one can specify a logo or an icon that is included # in the documentation. The maximum height of the logo should not exceed 55 # pixels and the maximum width should not exceed 200 pixels. Doxygen will copy # the logo to the output directory. -PROJECT_LOGO = "_ProjectLogo_" +PROJECT_LOGO = _ProjectLogo_ # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path # into which the generated documentation will be written. If a relative path is # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. -OUTPUT_DIRECTORY = "_OutputDir_" +OUTPUT_DIRECTORY = _OutputDir_ -# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- -# directories (in 2 levels) under the output directory of each output format and -# will distribute the generated files over these directories. Enabling this +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create up to 4096 +# sub-directories (in 2 levels) under the output directory of each output format +# and will distribute the generated files over these directories. Enabling this # option can be useful when feeding doxygen a huge amount of source files, where # putting all generated files in the same directory would otherwise causes -# performance problems for the file system. +# performance problems for the file system. Adapt CREATE_SUBDIRS_LEVEL to +# control the number of sub-directories. # The default value is: NO. CREATE_SUBDIRS = NO +# Controls the number of sub-directories that will be created when +# CREATE_SUBDIRS tag is set to YES. Level 0 represents 16 directories, and every +# level increment doubles the number of directories, resulting in 4096 +# directories at level 8 which is the default and also the maximum value. The +# sub-directories are organized in 2 levels, the first level always has a fixed +# number of 16 directories. +# Minimum value: 0, maximum value: 8, default value: 8. +# This tag requires that the tag CREATE_SUBDIRS is set to YES. + +CREATE_SUBDIRS_LEVEL = 8 + # If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII # characters to appear in the names of generated files. If set to NO, non-ASCII # characters will be escaped, for example _xE3_x81_x84 will be used for Unicode @@ -96,26 +117,18 @@ ALLOW_UNICODE_NAMES = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. 
Doxygen will use this # information to generate all constant output in the proper language. -# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, -# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), -# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, -# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, -# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, -# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, -# Ukrainian and Vietnamese. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Bulgarian, +# Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, English +# (United States), Esperanto, Farsi (Persian), Finnish, French, German, Greek, +# Hindi, Hungarian, Indonesian, Italian, Japanese, Japanese-en (Japanese with +# English messages), Korean, Korean-en (Korean with English messages), Latvian, +# Lithuanian, Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, +# Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, +# Swedish, Turkish, Ukrainian and Vietnamese. # The default value is: English. OUTPUT_LANGUAGE = English -# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all generated output in the proper direction. -# Possible values are: None, LTR, RTL and Context. -# The default value is: None. - -OUTPUT_TEXT_DIRECTION = None - # If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member # descriptions after the members that are listed in the file and class # documentation (similar to Javadoc). Set to NO to disable this. @@ -273,28 +286,28 @@ TAB_SIZE = 4 # the documentation. An alias has the form: # name=value # For example adding -# "sideeffect=@par Side Effects:\n" +# "sideeffect=@par Side Effects:^^" # will allow you to put the command \sideeffect (or @sideeffect) in the # documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines (in the resulting output). You can put ^^ in the value part of an -# alias to insert a newline as if a physical newline was in the original file. -# When you need a literal { or } or , in the value part of an alias you have to -# escape them by means of a backslash (\), this can lead to conflicts with the -# commands \{ and \} for these it is advised to use the version @{ and @} or use -# a double escape (\\{ and \\}) +# "Side Effects:". Note that you cannot put \n's in the value part of an alias +# to insert newlines (in the resulting output). You can put ^^ in the value part +# of an alias to insert a newline as if a physical newline was in the original +# file. When you need a literal { or } or , in the value part of an alias you +# have to escape them by means of a backslash (\), this can lead to conflicts +# with the commands \{ and \} for these it is advised to use the version @{ and +# @} or use a double escape (\\{ and \\}) ALIASES = "synupdate=\xrefitem synupdate \"Syntax Update\" \"Syntax needs to be updated\"" \ "docupdate=\xrefitem docupdate \"Documentation Update\" \"Documentation needs to be updated\"" \ "event=\xrefitem event \"Events\" \"List of all Events\"" \ "default=\par Default:\n" \ - "type=
Type: " \ + "type=
Type:" \ "changexref{2}=\xrefitem changelog\1_\2 \"Change in \1.\2\" \"Changes in _ProjectName_ Version \1.\2\"" \ - "change{4} = \changexref{\1,\2} (\ref \3, \4) " \ - "change{3} = \changexref{\1,\2} (\ref \3, undated) " \ + "change{4} = \changexref{\1,\2} (\ref \3, \4)" \ + "change{3} = \changexref{\1,\2} (\ref \3, undated)" \ "newxref{2}=\xrefitem newfeat\1_\2 \"New in \1.\2\" \"New features in _ProjectName_ Version \1.\2\"" \ - "new{4} = \newxref{\1,\2} (\ref \3, \4) " \ - "new{3} = \newxref{\1,\2} (\ref \3, undated) " \ + "new{4} = \newxref{\1,\2} (\ref \3, \4)" \ + "new{3} = \newxref{\1,\2} (\ref \3, undated)" \ "propclass{1}=\xrefitem propclass_\1 \"Property class \1\" \"Properties with level \1\"" # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources @@ -338,8 +351,8 @@ OPTIMIZE_OUTPUT_SLICE = NO # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and # language is one of the parsers supported by doxygen: IDL, Java, JavaScript, -# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL, -# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: +# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice, +# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: # FortranFree, unknown formatted Fortran: Fortran. In the later case the parser # tries to guess whether the code is fixed or free formatted code, this is the # default for Fortran type files). For instance to make doxygen treat .inc files @@ -349,7 +362,10 @@ OPTIMIZE_OUTPUT_SLICE = NO # Note: For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. +# the files are not read by doxygen. When specifying no_extension you should add +# * to the FILE_PATTERNS. +# +# Note see also the list of default file extension mappings. ############### CRITICAL MTOC++ RELATED CONFIGURATION ########################## ################################################################################ @@ -486,13 +502,13 @@ TYPEDEF_HIDES_STRUCT = YES LOOKUP_CACHE_SIZE = 0 -# The NUM_PROC_THREADS specifies the number threads doxygen is allowed to use +# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use # during processing. When set to 0 doxygen will based this on the number of # cores available in the system. You can set it explicitly to a value larger # than 0 to get more control over the balance between CPU load and processing # speed. At this moment only the input processing can be done using multiple # threads. Since this is still an experimental feature the default is set to 1, -# which efficively disables parallel processing. Please report any issues you +# which effectively disables parallel processing. Please report any issues you # encounter. Generating dot graphs in parallel is controlled by the # DOT_NUM_THREADS setting. # Minimum value: 0, maximum value: 32, default value: 1. @@ -562,6 +578,13 @@ EXTRACT_LOCAL_METHODS = YES EXTRACT_ANON_NSPACES = NO +# If this flag is set to YES, the name of an unnamed parameter in a declaration +# will be determined by the corresponding definition. By default unnamed +# parameters remain unnamed in the output. +# The default value is: YES. 
+ +RESOLVE_UNNAMED_PARAMS = YES + # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all # undocumented members inside documented classes or files. If set to NO these # members will be included in the various overviews, but no documentation @@ -573,7 +596,8 @@ HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. If set # to NO, these classes will be included in the various overviews. This option -# has no effect if EXTRACT_ALL is enabled. +# will also hide undocumented C++ concepts if enabled. This option has no effect +# if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_CLASSES = NO @@ -599,12 +623,20 @@ HIDE_IN_BODY_DOCS = NO INTERNAL_DOCS = NO -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES, upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# (including Cygwin) and Mac users are advised to set this option to NO. -# The default value is: system dependent. +# With the correct setting of option CASE_SENSE_NAMES doxygen will better be +# able to match the capabilities of the underlying filesystem. In case the +# filesystem is case sensitive (i.e. it supports files in the same directory +# whose names only differ in casing), the option must be set to YES to properly +# deal with such files in case they appear in the input. For filesystems that +# are not case sensitive the option should be set to NO to properly deal with +# output files written for symbols that only differ in casing, such as for two +# classes, one named CLASS and the other named Class, and to also support +# references to files without having to specify the exact matching casing. On +# Windows (including Cygwin) and MacOS, users should typically set this option +# to NO, whereas on Linux or other Unix flavors it should typically be set to +# YES. +# Possible values are: SYSTEM, NO and YES. +# The default value is: SYSTEM. CASE_SENSE_NAMES = NO @@ -622,6 +654,12 @@ HIDE_SCOPE_NAMES = YES HIDE_COMPOUND_REFERENCE= NO +# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class +# will show which file needs to be included to use the class. +# The default value is: YES. + +SHOW_HEADERFILE = YES + # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. @@ -779,7 +817,8 @@ FILE_VERSION_FILTER = # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. You can # optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. +# will be used as the name of the layout file. See also section "Changing the +# layout of pages" for information. 
# # Note that if you run doxygen from a directory containing a file called # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE @@ -825,24 +864,43 @@ WARNINGS = YES WARN_IF_UNDOCUMENTED = YES # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. +# potential errors in the documentation, such as documenting some parameters in +# a documented function twice, or documenting parameters that don't exist or +# using markup commands wrongly. # The default value is: YES. WARN_IF_DOC_ERROR = YES +# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete +# function parameter documentation. If set to NO, doxygen will accept that some +# parameters have no documentation without warning. +# The default value is: YES. + +WARN_IF_INCOMPLETE_DOC = YES + # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return -# value. If set to NO, doxygen will only warn about wrong or incomplete -# parameter documentation, but not about the absence of documentation. If -# EXTRACT_ALL is set to YES then this flag will automatically be disabled. +# value. If set to NO, doxygen will only warn about wrong parameter +# documentation, but not about the absence of documentation. If EXTRACT_ALL is +# set to YES then this flag will automatically be disabled. See also +# WARN_IF_INCOMPLETE_DOC # The default value is: NO. WARN_NO_PARAMDOC = YES +# If WARN_IF_UNDOC_ENUM_VAL option is set to YES, doxygen will warn about +# undocumented enumeration values. If set to NO, doxygen will accept +# undocumented enumeration values. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: NO. + +WARN_IF_UNDOC_ENUM_VAL = NO + # If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when -# a warning is encountered. +# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS +# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but +# at the end of the doxygen process doxygen will return with a non-zero status. +# Possible values are: NO, YES and FAIL_ON_WARNINGS. # The default value is: NO. WARN_AS_ERROR = NO @@ -853,13 +911,27 @@ WARN_AS_ERROR = NO # and the warning text. Optionally the format may contain $version, which will # be replaced by the version of the file (if it could be obtained via # FILE_VERSION_FILTER) +# See also: WARN_LINE_FORMAT # The default value is: $file:$line: $text. WARN_FORMAT = "$file:$line: $text" +# In the $text part of the WARN_FORMAT command it is possible that a reference +# to a more specific place is given. To make it easier to jump to this place +# (outside of doxygen) the user can define a custom "cut" / "paste" string. +# Example: +# WARN_LINE_FORMAT = "'vi $file +$line'" +# See also: WARN_FORMAT +# The default value is: at line $line of file $file. + +WARN_LINE_FORMAT = "at line $line of file $file" + # The WARN_LOGFILE tag can be used to specify a file to which warning and error # messages should be written. If left blank the output is written to standard -# error (stderr). +# error (stderr). In case the file specified cannot be opened for writing the +# warning and error messages are written to standard error. 
When as file - is +# specified the warning and error messages are written to standard output +# (stdout). WARN_LOGFILE = @@ -880,16 +952,26 @@ INPUT = "_SourceDir_/README.md" \ "_SourceDir_/include" \ "_SourceDir_/matlab" - # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: https://www.gnu.org/software/libiconv/) for the list of -# possible encodings. +# documentation (see: +# https://www.gnu.org/software/libiconv/) for the list of possible encodings. +# See also: INPUT_FILE_ENCODING # The default value is: UTF-8. INPUT_ENCODING = UTF-8 +# This tag can be used to specify the character encoding of the source files +# that doxygen parses The INPUT_FILE_ENCODING tag can be used to specify +# character encoding on a per file pattern basis. Doxygen will compare the file +# name with each pattern and apply the encoding instead of the default +# INPUT_ENCODING) if there is a match. The character encodings are a list of the +# form: pattern=encoding (like *.php=ISO-8859-1). See cfg_input_encoding +# "INPUT_ENCODING" for further information on supported encodings. + +INPUT_FILE_ENCODING = + # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and # *.h) to filter out the source-files in the directories. @@ -898,12 +980,14 @@ INPUT_ENCODING = UTF-8 # need to set EXTENSION_MAPPING for the extension otherwise the files are not # read by doxygen. # +# Note the list of default checked file patterns might differ from the list of +# default file extension mappings. +# # If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, # *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, -# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, -# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment), -# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen -# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, +# *.hh, *.hxx, *.hpp, *.h++, *.l, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, +# *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C +# comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, # *.vhdl, *.ucf, *.qsf and *.ice. FILE_PATTERNS = *.m \ @@ -973,7 +1057,7 @@ EXCLUDE_PATTERNS = "_SourceDir_/models/*" \ # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test +# ANamespace::AClass, ANamespace::*Test # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories use the pattern */test/* @@ -1021,6 +1105,11 @@ IMAGE_PATH = # code is scanned, but not when the output code is generated. If lines are added # or removed, the anchors will not be placed correctly. # +# Note that doxygen will use the data processed and written to standard output +# for further processing, therefore nothing else, like debug statements or used +# commands (so in case of a Windows batch file always use @echo OFF), should be +# written to standard output. 
+# # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # properly processed by doxygen. @@ -1060,7 +1149,16 @@ FILTER_SOURCE_PATTERNS = # (index.html). This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. -USE_MDFILE_AS_MAINPAGE = "_SourceDir_/README.md" +USE_MDFILE_AS_MAINPAGE = _SourceDir_/README.md + +# The Fortran standard specifies that for fixed formatted Fortran code all +# characters from position 72 are to be considered as comment. A common +# extension is to allow longer lines before the automatic comment starts. The +# setting FORTRAN_COMMENT_AFTER will also make it possible that longer lines can +# be processed before the automatic comment starts. +# Minimum value: 7, maximum value: 10000, default value: 72. + +FORTRAN_COMMENT_AFTER = 72 #--------------------------------------------------------------------------- # Configuration options related to source browsing @@ -1159,17 +1257,11 @@ VERBATIM_HEADERS = NO ALPHABETICAL_INDEX = NO -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all classes will -# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag -# can be used to specify a prefix (or a list of prefixes) that should be ignored -# while generating the index headers. +# The IGNORE_PREFIX tag can be used to specify a prefix (or a list of prefixes) +# that should be ignored while generating the index headers. The IGNORE_PREFIX +# tag works for classes, function and member names. The entity will be placed in +# the alphabetical list under the first letter of the entity name that remains +# after removing the prefix. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. IGNORE_PREFIX = @@ -1189,7 +1281,7 @@ GENERATE_HTML = YES # The default directory is: html. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_OUTPUT = "_OutputDir_" +HTML_OUTPUT = _OutputDir_ # The HTML_FILE_EXTENSION tag can be used to specify the file extension for each # generated HTML page (for example: .htm, .php, .asp). @@ -1238,7 +1330,7 @@ HTML_FOOTER = # obsolete. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_STYLESHEET = "_ConfDir_/customdoxygen.css" +HTML_STYLESHEET = _ConfDir_/customdoxygen.css # The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined # cascading style sheets that are included after the standard style sheets @@ -1248,7 +1340,12 @@ HTML_STYLESHEET = "_ConfDir_/customdoxygen.css" # Doxygen will copy the style sheet files to the output directory. # Note: The order of the extra style sheet files is of importance (e.g. the last # style sheet in the list overrules the setting of the previous ones in the -# list). For an example see the documentation. +# list). +# Note: Since the styling of scrollbars can currently not be overruled in +# Webkit/Chromium, the styling will be left out of the default doxygen.css if +# one or more extra stylesheets have been specified. So if scrollbar +# customization is desired it has to be added explicitly. For an example see the +# documentation. 
# This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_STYLESHEET = @@ -1263,9 +1360,22 @@ HTML_EXTRA_STYLESHEET = HTML_EXTRA_FILES = +# The HTML_COLORSTYLE tag can be used to specify if the generated HTML output +# should be rendered with a dark or light theme. +# Possible values are: LIGHT always generate light mode output, DARK always +# generate dark mode output, AUTO_LIGHT automatically set the mode according to +# the user preference, use light mode if no preference is set (the default), +# AUTO_DARK automatically set the mode according to the user preference, use +# dark mode if no preference is set and TOGGLE allow to user to switch between +# light and dark mode via a button. +# The default value is: AUTO_LIGHT. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE = AUTO_LIGHT + # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to -# this color. Hue is specified as an angle on a colorwheel, see +# this color. Hue is specified as an angle on a color-wheel, see # https://en.wikipedia.org/wiki/Hue for more information. For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 # purple, and 360 is red again. @@ -1275,7 +1385,7 @@ HTML_EXTRA_FILES = HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A +# in the HTML output. For a value of 0 the output will use gray-scales only. A # value of 255 will produce the most vivid colors. # Minimum value: 0, maximum value: 255, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. @@ -1336,10 +1446,11 @@ HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: https://developer.apple.com/xcode/), introduced with OSX -# 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. Running make will produce the docset in -# that directory and running make install will install the docset in +# environment (see: +# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To +# create a documentation set, doxygen will generate a Makefile in the HTML +# output directory. Running make will produce the docset in that directory and +# running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at # startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy # genXcode/_index.html for more information. @@ -1356,6 +1467,13 @@ GENERATE_DOCSET = NO DOCSET_FEEDNAME = "_ProjectName_ documentation" +# This tag determines the URL of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDURL = + # This tag specifies a string that should uniquely identify the documentation # set bundle. This should be a reverse domain-name style string, e.g. # com.mycompany.MyDocSet. Doxygen will append .docset to the name. 
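The HTML_COLORSTYLE tag introduced above in this Doxyfile diff is new relative to the previously pinned Doxygen release and is kept at its AUTO_LIGHT default here. Purely as an illustration of the possible values listed in its comment (TOGGLE is an assumption, not what this configuration sets), a theme-switchable setup reusing the hue already configured in this file could look like:

# Hypothetical alternative, not used in this configuration: offer a
# light/dark toggle button and keep the hue set earlier in this file.
HTML_COLORSTYLE     = TOGGLE
HTML_COLORSTYLE_HUE = 220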
@@ -1381,8 +1499,12 @@ DOCSET_PUBLISHER_NAME = _ProjectName_ # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. +# on Windows. In the beginning of 2021 Microsoft took the original page, with +# a.o. the download links, offline the HTML help workshop was already many years +# in maintenance mode). You can download the HTML help workshop from the web +# archives at Installation executable (see: +# http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo +# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe). # # The HTML Help Workshop contains a compiler that can convert all HTML output # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML @@ -1457,7 +1579,8 @@ QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. For more information please see Qt Help Project / Namespace -# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). +# (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1465,8 +1588,8 @@ QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual- -# folders). +# Folders (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1474,16 +1597,16 @@ QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom -# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- -# filters). +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- -# filters). +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = @@ -1495,9 +1618,9 @@ QHP_CUST_FILTER_ATTRS = QHP_SECT_FILTER_ATTRS = -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. +# The QHG_LOCATION tag can be used to specify the location (absolute path +# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to +# run qhelpgenerator on the generated .qhp file. # This tag requires that the tag GENERATE_QHP is set to YES. QHG_LOCATION = @@ -1540,16 +1663,28 @@ DISABLE_INDEX = NO # to work a browser that supports JavaScript, DHTML, CSS and frames is required # (i.e. any modern browser). Windows users are probably better off using the # HTML help feature. 
Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can -# further fine-tune the look of the index. As an example, the default style -# sheet generated by doxygen has an example that shows how to put an image at -# the root of the tree instead of the PROJECT_NAME. Since the tree basically has -# the same information as the tab index, you could consider setting -# DISABLE_INDEX to YES when enabling this option. +# further fine tune the look of the index (see "Fine-tuning the output"). As an +# example, the default style sheet generated by doxygen has an example that +# shows how to put an image at the root of the tree instead of the PROJECT_NAME. +# Since the tree basically has the same information as the tab index, you could +# consider setting DISABLE_INDEX to YES when enabling this option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_TREEVIEW = ALL +# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the +# FULL_SIDEBAR option determines if the side bar is limited to only the treeview +# area (value NO) or if it should extend to the full height of the window (value +# YES). Setting this to YES gives a layout similar to +# https://docs.readthedocs.io with more room for contents, but less room for the +# project logo, title, and description. If either GENERATE_TREEVIEW or +# DISABLE_INDEX is set to NO, this option has no effect. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FULL_SIDEBAR = NO + # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that # doxygen will group on one line in the generated HTML documentation. # @@ -1574,6 +1709,13 @@ TREEVIEW_WIDTH = 250 EXT_LINKS_IN_WINDOW = NO +# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email +# addresses. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +OBFUSCATE_EMAILS = YES + # If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg # tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see # https://inkscape.org) to generate formulas as SVG images instead of PNGs for @@ -1594,17 +1736,6 @@ HTML_FORMULA_FORMAT = png FORMULA_FONTSIZE = 10 -# Use the FORMULA_TRANSPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are not -# supported properly for IE 6.0, but are supported on all modern browsers. -# -# Note that when changing this option you need to delete any form_*.png files in -# the HTML output directory before the changes have effect. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_TRANSPARENT = YES - # The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands # to create new LaTeX commands to be used in formulas as building blocks. See # the section "Including formulas" for details. @@ -1622,11 +1753,29 @@ FORMULA_MACROFILE = USE_MATHJAX = NO +# With MATHJAX_VERSION it is possible to specify the MathJax version to be used. +# Note that the different versions of MathJax have different requirements with +# regards to the different settings, so it is possible that also other MathJax +# settings have to be changed when switching between the different MathJax +# versions. +# Possible values are: MathJax_2 and MathJax_3. +# The default value is: MathJax_2. +# This tag requires that the tag USE_MATHJAX is set to YES. 
+ +MATHJAX_VERSION = MathJax_2 + # When MathJax is enabled you can set the default output format to be used for -# the MathJax output. See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. +# the MathJax output. For more details about the output format see MathJax +# version 2 (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3 +# (see: +# http://docs.mathjax.org/en/latest/web/components/output.html). # Possible values are: HTML-CSS (which is slower, but has the best -# compatibility), NativeMML (i.e. MathML) and SVG. +# compatibility. This is the name for Mathjax version 2, for MathJax version 3 +# this will be translated into chtml), NativeMML (i.e. MathML. Only supported +# for NathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This +# is the name for Mathjax version 3, for MathJax version 2 this will be +# translated into HTML-CSS) and SVG. # The default value is: HTML-CSS. # This tag requires that the tag USE_MATHJAX is set to YES. @@ -1639,22 +1788,29 @@ MATHJAX_FORMAT = HTML-CSS # MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax # Content Delivery Network so you can quickly see the result without installing # MathJax. However, it is strongly recommended to install a local copy of -# MathJax from https://www.mathjax.org before deployment. -# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2. +# MathJax from https://www.mathjax.org before deployment. The default value is: +# - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2 +# - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3 # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_RELPATH = http://www.mathjax.org/mathjax # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax # extension names that should be enabled during MathJax rendering. For example +# for MathJax version 2 (see +# https://docs.mathjax.org/en/v2.7-latest/tex.html#tex-and-latex-extensions): # MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# For example for MathJax version 3 (see +# http://docs.mathjax.org/en/latest/input/tex/extensions/index.html): +# MATHJAX_EXTENSIONS = ams # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_EXTENSIONS = # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces # of code that will be used on startup of the MathJax code. See the MathJax site -# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an # example see the documentation. # This tag requires that the tag USE_MATHJAX is set to YES. @@ -1701,7 +1857,8 @@ SERVER_BASED_SEARCH = NO # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: https://xapian.org/). +# Xapian (see: +# https://xapian.org/). # # See the section "External Indexing and Searching" for details. # The default value is: NO. @@ -1714,8 +1871,9 @@ EXTERNAL_SEARCH = NO # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: https://xapian.org/). See the section "External Indexing and -# Searching" for details. +# Xapian (see: +# https://xapian.org/). See the section "External Indexing and Searching" for +# details. 
# This tag requires that the tag SEARCHENGINE is set to YES. SEARCHENGINE_URL = @@ -1824,29 +1982,31 @@ PAPER_TYPE = a4 EXTRA_PACKAGES = "_LatexExtras_" -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the -# generated LaTeX document. The header should contain everything until the first -# chapter. If it is left blank doxygen will generate a standard header. See -# section "Doxygen usage" for information on how to let doxygen write the -# default header to a separate file. +# The LATEX_HEADER tag can be used to specify a user-defined LaTeX header for +# the generated LaTeX document. The header should contain everything until the +# first chapter. If it is left blank doxygen will generate a standard header. It +# is highly recommended to start with a default header using +# doxygen -w latex new_header.tex new_footer.tex new_stylesheet.sty +# and then modify the file new_header.tex. See also section "Doxygen usage" for +# information on how to generate the default header that doxygen normally uses. # -# Note: Only use a user-defined header if you know what you are doing! The -# following commands have a special meaning inside the header: $title, -# $datetime, $date, $doxygenversion, $projectname, $projectnumber, -# $projectbrief, $projectlogo. Doxygen will replace $title with the empty -# string, for the replacement values of the other commands the user is referred -# to HTML_HEADER. +# Note: Only use a user-defined header if you know what you are doing! +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. The following +# commands have a special meaning inside the header (and footer): For a +# description of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_HEADER = -# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the -# generated LaTeX document. The footer should contain everything after the last -# chapter. If it is left blank doxygen will generate a standard footer. See +# The LATEX_FOOTER tag can be used to specify a user-defined LaTeX footer for +# the generated LaTeX document. The footer should contain everything after the +# last chapter. If it is left blank doxygen will generate a standard footer. See # LATEX_HEADER for more information on how to generate a default footer and what -# special commands can be used inside the footer. -# -# Note: Only use a user-defined footer if you know what you are doing! +# special commands can be used inside the footer. See also section "Doxygen +# usage" for information on how to generate the default footer that doxygen +# normally uses. Note: Only use a user-defined footer if you know what you are +# doing! # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_FOOTER = @@ -1891,8 +2051,7 @@ USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode # command to the generated LaTeX files. This will instruct LaTeX to keep running -# if errors occur, instead of asking the user for help. This option is also used -# when generating formulas in HTML. +# if errors occur, instead of asking the user for help. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. 
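The rewritten LATEX_HEADER/LATEX_FOOTER comments above now recommend regenerating the default templates instead of writing a header from scratch. A minimal sketch of that workflow, using the file names mentioned in the comment (this project leaves both tags blank, so the values below are illustrative only):

# Sketch only -- not this project's settings; LATEX_HEADER and LATEX_FOOTER stay empty here.
# First let doxygen emit its current default templates:
#   doxygen -w latex new_header.tex new_footer.tex new_stylesheet.sty
# then edit new_header.tex / new_footer.tex and point the configuration at them:
LATEX_HEADER = new_header.tex
LATEX_FOOTER = new_footer.tex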
@@ -1905,16 +2064,6 @@ LATEX_BATCHMODE = YES LATEX_HIDE_INDICES = YES -# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source -# code with syntax highlighting in the LaTeX output. -# -# Note that which sources are shown also depends on other settings such as -# SOURCE_BROWSER. -# The default value is: NO. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -LATEX_SOURCE_CODE = NO - # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. See # https://en.wikipedia.org/wiki/BibTeX and \cite for more info. @@ -1995,16 +2144,6 @@ RTF_STYLESHEET_FILE = RTF_EXTENSIONS_FILE = -# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code -# with syntax highlighting in the RTF output. -# -# Note that which sources are shown also depends on other settings such as -# SOURCE_BROWSER. -# The default value is: NO. -# This tag requires that the tag GENERATE_RTF is set to YES. - -RTF_SOURCE_CODE = NO - #--------------------------------------------------------------------------- # Configuration options related to the man page output #--------------------------------------------------------------------------- @@ -2101,15 +2240,6 @@ GENERATE_DOCBOOK = NO DOCBOOK_OUTPUT = docbook -# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the -# program listings (including syntax highlighting and cross-referencing -# information) to the DOCBOOK output. Note that enabling this will significantly -# increase the size of the DOCBOOK output. -# The default value is: NO. -# This tag requires that the tag GENERATE_DOCBOOK is set to YES. - -DOCBOOK_PROGRAMLISTING = NO - #--------------------------------------------------------------------------- # Configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- @@ -2196,7 +2326,8 @@ SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by the -# preprocessor. +# preprocessor. Note that the INCLUDE_PATH is not recursive, so the setting of +# RECURSIVE has no effect here. # This tag requires that the tag SEARCH_INCLUDES is set to YES. INCLUDE_PATH = @@ -2288,15 +2419,6 @@ EXTERNAL_PAGES = YES # Configuration options related to the dot tool #--------------------------------------------------------------------------- -# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram -# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to -# NO turns the diagrams off. Note that this option also works with HAVE_DOT -# disabled, but it is recommended to install and use dot, since it yields more -# powerful graphs. -# The default value is: YES. - -CLASS_DIAGRAMS = YES - # You can include diagrams made with dia in doxygen documentation. Doxygen will # then run dia to produce the diagram and insert it in the documentation. The # DIA_PATH tag allows you to specify the directory where the dia binary resides. @@ -2329,35 +2451,50 @@ HAVE_DOT = _HaveDot_ DOT_NUM_THREADS = 0 -# When you want a differently looking font in the dot files that doxygen -# generates you can specify the font name using DOT_FONTNAME. 
You need to make -# sure dot is able to find the font, which can be done by putting it in a -# standard location or by setting the DOTFONTPATH environment variable or by -# setting DOT_FONTPATH to the directory containing the font. -# The default value is: Helvetica. +# DOT_COMMON_ATTR is common attributes for nodes, edges and labels of +# subgraphs. When you want a differently looking font in the dot files that +# doxygen generates you can specify fontname, fontcolor and fontsize attributes. +# For details please see Node, +# Edge and Graph Attributes specification You need to make sure dot is able +# to find the font, which can be done by putting it in a standard location or by +# setting the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the +# directory containing the font. Default graphviz fontsize is 14. +# The default value is: fontname=Helvetica,fontsize=10. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_COMMON_ATTR = "fontname=Arial,fontsize=10" + +# DOT_EDGE_ATTR is concatenated with DOT_COMMON_ATTR. For elegant style you can +# add 'arrowhead=open, arrowtail=open, arrowsize=0.5'. Complete documentation about +# arrows shapes. +# The default value is: labelfontname=Helvetica,labelfontsize=10. # This tag requires that the tag HAVE_DOT is set to YES. -DOT_FONTNAME = Arial +DOT_EDGE_ATTR = "labelfontname=Arial,labelfontsize=10" -# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of -# dot graphs. -# Minimum value: 4, maximum value: 24, default value: 10. +# DOT_NODE_ATTR is concatenated with DOT_COMMON_ATTR. For view without boxes +# around nodes set 'shape=plain' or 'shape=plaintext' Shapes specification +# The default value is: shape=box,height=0.2,width=0.4. # This tag requires that the tag HAVE_DOT is set to YES. -DOT_FONTSIZE = 10 +DOT_NODE_ATTR = "shape=box,height=0.2,width=0.4" -# By default doxygen will tell dot to use the default font as specified with -# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set -# the path where dot can find it using this tag. +# You can set the path where dot can find font specified with fontname in +# DOT_COMMON_ATTR and others dot attributes. # This tag requires that the tag HAVE_DOT is set to YES. DOT_FONTPATH = -# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for -# each documented class showing the direct and indirect inheritance relations. -# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO. +# If the CLASS_GRAPH tag is set to YES (or GRAPH) then doxygen will generate a +# graph for each documented class showing the direct and indirect inheritance +# relations. In case HAVE_DOT is set as well dot will be used to draw the graph, +# otherwise the built-in generator will be used. If the CLASS_GRAPH tag is set +# to TEXT the direct and indirect inheritance relations will be shown as texts / +# links. +# Possible values are: NO, YES, TEXT and GRAPH. # The default value is: YES. -# This tag requires that the tag HAVE_DOT is set to YES. CLASS_GRAPH = YES @@ -2371,7 +2508,8 @@ CLASS_GRAPH = YES COLLABORATION_GRAPH = NO # If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for -# groups, showing the direct groups dependencies. +# groups, showing the direct groups dependencies. See also the chapter Grouping +# in the manual. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. 
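Earlier in this file's diff, the removed DOT_FONTNAME/DOT_FONTSIZE tags are superseded by the attribute-based DOT_*_ATTR tags. The mapping below only restates how the previous Arial/10pt setup carries over to the values this diff sets (the edge and node attributes shown are the ones added above):

# Old tags (removed):           New attribute-based tags (added):
# DOT_FONTNAME = Arial     ->   DOT_COMMON_ATTR = "fontname=Arial,fontsize=10"
# DOT_FONTSIZE = 10        ->   DOT_EDGE_ATTR   = "labelfontname=Arial,labelfontsize=10"
#                               DOT_NODE_ATTR   = "shape=box,height=0.2,width=0.4"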
@@ -2394,10 +2532,32 @@ UML_LOOK = NO # but if the number exceeds 15, the total amount of fields shown is limited to # 10. # Minimum value: 0, maximum value: 100, default value: 10. -# This tag requires that the tag HAVE_DOT is set to YES. +# This tag requires that the tag UML_LOOK is set to YES. UML_LIMIT_NUM_FIELDS = 10 +# If the DOT_UML_DETAILS tag is set to NO, doxygen will show attributes and +# methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS +# tag is set to YES, doxygen will add type and arguments for attributes and +# methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, doxygen +# will not generate fields with class member information in the UML graphs. The +# class diagrams will look similar to the default class diagrams but using UML +# notation for the relationships. +# Possible values are: NO, YES and NONE. +# The default value is: NO. +# This tag requires that the tag UML_LOOK is set to YES. + +DOT_UML_DETAILS = NO + +# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters +# to display on a single line. If the actual line length exceeds this threshold +# significantly it will wrapped across multiple lines. Some heuristics are apply +# to avoid ugly line breaks. +# Minimum value: 0, maximum value: 1000, default value: 17. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_WRAP_THRESHOLD = 17 + # If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and # collaboration graphs will show the relations between templates and their # instances. @@ -2464,6 +2624,13 @@ GRAPHICAL_HIERARCHY = YES DIRECTORY_GRAPH = YES +# The DIR_GRAPH_MAX_DEPTH tag can be used to limit the maximum number of levels +# of child directories generated in directory dependency graphs by dot. +# Minimum value: 1, maximum value: 25, default value: 1. +# This tag requires that the tag DIRECTORY_GRAPH is set to YES. + +DIR_GRAPH_MAX_DEPTH = 1 + # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. For an explanation of the image formats see the section # output formats in the documentation of the dot tool (Graphviz (see: @@ -2517,10 +2684,10 @@ MSCFILE_DIRS = DIAFILE_DIRS = # When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the -# path where java can find the plantuml.jar file. If left blank, it is assumed -# PlantUML is not used or called during a preprocessing step. Doxygen will -# generate a warning when it encounters a \startuml command in this case and -# will not generate output for the diagram. +# path where java can find the plantuml.jar file or to the filename of jar file +# to be used. If left blank, it is assumed PlantUML is not used or called during +# a preprocessing step. Doxygen will generate a warning when it encounters a +# \startuml command in this case and will not generate output for the diagram. PLANTUML_JAR_PATH = @@ -2558,18 +2725,6 @@ DOT_GRAPH_MAX_NODES = 100 MAX_DOT_GRAPH_DEPTH = 3 -# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent -# background. This is disabled by default, because dot on Windows does not seem -# to support this out of the box. -# -# Warning: Depending on the platform used, enabling this option may lead to -# badly anti-aliased labels on the edges of a graph (i.e. they become hard to -# read). -# The default value is: NO. -# This tag requires that the tag HAVE_DOT is set to YES. 
- -DOT_TRANSPARENT = YES - # Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) support @@ -2582,14 +2737,18 @@ DOT_MULTI_TARGETS = YES # If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page # explaining the meaning of the various boxes and arrows in the dot generated # graphs. +# Note: This tag requires that UML_LOOK isn't set, i.e. the doxygen internal +# graphical representation for inheritance and collaboration diagrams is used. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. GENERATE_LEGEND = YES -# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot +# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate # files that are used to generate the various graphs. +# +# Note: This setting is not only used for dot files but also for msc temporary +# files. # The default value is: YES. -# This tag requires that the tag HAVE_DOT is set to YES. DOT_CLEANUP = YES diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 95ceacd2ab..4657580721 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -1,40 +1,35 @@ if(DEFINED ENV{PYTHON_EXECUTABLE}) - set(Python3_EXECUTABLE $ENV{PYTHON_EXECUTABLE}) + set(Python3_EXECUTABLE $ENV{PYTHON_EXECUTABLE}) endif() if(${CMAKE_VERSION} VERSION_LESS "3.12.0") - find_package(PythonInterp 3.8 REQUIRED) + find_package(PythonInterp 3.8 REQUIRED) else() - find_package(Python3 COMPONENTS Interpreter) + find_package(Python3 COMPONENTS Interpreter) endif() - -add_custom_target(install-python - COMMENT "Installing AMICI base python package" - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/sdist - COMMAND ${Python3_EXECUTABLE} setup.py install --prefix= --user) - - -# Create python wheel -# Note that we have to run build_ext explicitely before bdist_wheel, otherwise the swig-generated -# amici.py will not be added to the module build folder, because it does not yet exist at the -# build_py stage -add_custom_target(python-wheel - COMMENT "Creating wheel for AMICI base python package" - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/sdist - COMMAND ${Python3_EXECUTABLE} setup.py build_ext - COMMAND ${Python3_EXECUTABLE} setup.py bdist_wheel --dist-dir=${CMAKE_CURRENT_BINARY_DIR} -) - - -add_custom_target(python-sdist - COMMENT "Creating sdist for AMICI base python package" - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/sdist - COMMAND ${Python3_EXECUTABLE} -m pip install build - COMMAND ${Python3_EXECUTABLE} -m build --sdist --outdir=${CMAKE_CURRENT_BINARY_DIR} -) - - -add_custom_command( - OUTPUT always_rebuild - COMMAND cmake -E echo - ) +add_custom_target( + install-python + COMMENT "Installing AMICI base python package" + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/sdist + COMMAND ${Python3_EXECUTABLE} setup.py install --prefix= --user) + +# Create python wheel Note that we have to run build_ext explicitely before +# bdist_wheel, otherwise the swig-generated amici.py will not be added to the +# module build folder, because it does not yet exist at the build_py stage +add_custom_target( + python-wheel + COMMENT "Creating wheel for AMICI base python package" + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/sdist + COMMAND ${Python3_EXECUTABLE} setup.py build_ext + COMMAND ${Python3_EXECUTABLE} setup.py bdist_wheel + --dist-dir=${CMAKE_CURRENT_BINARY_DIR}) + 
+add_custom_target( + python-sdist + COMMENT "Creating sdist for AMICI base python package" + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/sdist + COMMAND ${Python3_EXECUTABLE} -m pip install build + COMMAND ${Python3_EXECUTABLE} -m build --sdist + --outdir=${CMAKE_CURRENT_BINARY_DIR}) + +add_custom_command(OUTPUT always_rebuild COMMAND cmake -E echo) diff --git a/python/sdist/amici/numpy.py b/python/sdist/amici/numpy.py index 52ea05e6fe..65df16a557 100644 --- a/python/sdist/amici/numpy.py +++ b/python/sdist/amici/numpy.py @@ -8,8 +8,9 @@ import copy import collections -from . import ExpDataPtr, ReturnDataPtr, ExpData, ReturnData -from typing import Union, List, Dict, Iterator +import amici +from . import ExpDataPtr, ReturnDataPtr, ExpData, ReturnData, Model +from typing import Union, List, Dict, Iterator, Literal class SwigPtrView(collections.abc.Mapping): @@ -238,6 +239,44 @@ def __getitem__(self, item: str) -> Union[np.ndarray, ReturnDataPtr, item = 'ts' return super(ReturnDataView, self).__getitem__(item) + def by_id( + self, + entity_id: str, + field: str = None, + model: Model = None + ) -> np.array: + """ + Get the value of a given field for a named entity. + + :param entity_id: The ID of the model entity that is to be extracted + from ``field`` (e.g. a state ID). + :param field: The requested field, e.g. 'x' for model states. This is + optional if field would be one of ``{'x', 'y', 'w'}`` + :param model: The model from which this ReturnDataView was generated. + This is optional if this ReturnData was generated with + ``solver.getReturnDataReportingMode() == amici.RDataReporting.full``. + """ + if field is None: + field = _entity_type_from_id(entity_id, self, model) + + if field in {'x', 'x0', 'x_ss', 'sx', 'sx0', 'sx_ss'}: + ids = (model and model.getStateIds()) or self._swigptr.state_ids + elif field in {'w'}: + ids = (model and model.getExpressionIds()) \ + or self._swigptr.expression_ids + elif field in {'y', 'sy', 'sigmay'}: + ids = (model and model.getObservableIds()) \ + or self._swigptr.observable_ids + elif field in {'sllh'}: + ids = (model and model.getParameterIds()) \ + or self._swigptr.parameter_ids + else: + raise NotImplementedError( + f"Subsetting {field} by ID is not implemented or not possible." 
+ ) + col_index = ids.index(entity_id) + return getattr(self, field)[:, ..., col_index] + class ExpDataView(SwigPtrView): """ @@ -307,3 +346,31 @@ def field_as_numpy(field_dimensions: Dict[str, List[int]], return np.array(attr).reshape(field_dimensions[field]) else: return float(attr) + + +def _entity_type_from_id( + entity_id: str, + rdata: Union[amici.ReturnData, 'amici.ReturnDataView'] = None, + model: amici.Model = None, +) -> Literal['x', 'y', 'w', 'p', 'k']: + """Guess the type of some entity by its ID.""" + for entity_type, symbol in ( + ('State', 'x'), + ('Observable', 'y'), + ('Expression', 'w'), + ('Parameter', 'p'), + ('FixedParameter', 'k') + ): + if model: + if entity_id in getattr(model, f'get{entity_type}Ids')(): + return symbol + else: + if entity_id in getattr( + rdata if isinstance(rdata, amici.ReturnData) + else rdata._swigptr, + f'{entity_type.lower()}_ids'): + return symbol + + raise KeyError(f"Unknown symbol {entity_id}.") + + diff --git a/python/sdist/amici/pandas.py b/python/sdist/amici/pandas.py index ad11278630..4842bbec47 100644 --- a/python/sdist/amici/pandas.py +++ b/python/sdist/amici/pandas.py @@ -727,7 +727,7 @@ def getEdataFromDataFrame( for ir, row in conditions.iterrows(): # subselect rows that match condition selected = np.ones((len(df),), dtype=bool) - for par_label, par in row.iteritems(): + for par_label, par in row.items(): if math.isnan(par): selected = selected & np.isnan( df[par_label].astype(float).values diff --git a/python/sdist/amici/petab_import.py b/python/sdist/amici/petab_import.py index 7dc7785149..cf248c2372 100644 --- a/python/sdist/amici/petab_import.py +++ b/python/sdist/amici/petab_import.py @@ -176,6 +176,13 @@ def get_fixed_parameters( " model. Ignoring.") fixed_parameters.remove(fixed_parameter) + # exclude targets of rules or initial assignments + for fixed_parameter in fixed_parameters.copy(): + # check global parameters + if sbml_model.getInitialAssignmentBySymbol(fixed_parameter)\ + or sbml_model.getRuleByVariable(fixed_parameter): + fixed_parameters.remove(fixed_parameter) + return list(sorted(fixed_parameters)) diff --git a/python/sdist/amici/petab_import_pysb.py b/python/sdist/amici/petab_import_pysb.py index 02e5aca038..796aa3bee0 100644 --- a/python/sdist/amici/petab_import_pysb.py +++ b/python/sdist/amici/petab_import_pysb.py @@ -20,7 +20,6 @@ OBSERVABLE_FORMULA, PARAMETER_FILE, SBML_FILES, VISUALIZATION_FILES) -from . 
import petab_import from .logging import get_logger, log_execution_time, set_log_level logger = get_logger(__name__, logging.WARNING) @@ -360,8 +359,10 @@ def import_model_pysb( f"Offending column: {x}" ) - constant_parameters = petab_import.get_fixed_parameters( - petab_problem) + from .petab_import import ( + get_fixed_parameters, petab_noise_distributions_to_amici + ) + constant_parameters = get_fixed_parameters(petab_problem) if observable_table is None: observables = None @@ -373,8 +374,7 @@ def import_model_pysb( sigmas = {obs_id: f"{obs_id}_sigma" for obs_id in observables} - noise_distrs = petab_import.petab_noise_distributions_to_amici( - observable_table) + noise_distrs = petab_noise_distributions_to_amici(observable_table) from amici.pysb_import import pysb2amici pysb2amici(model=pysb_model, diff --git a/python/sdist/amici/swig_wrappers.py b/python/sdist/amici/swig_wrappers.py index 394f91aea8..4b303536ae 100644 --- a/python/sdist/amici/swig_wrappers.py +++ b/python/sdist/amici/swig_wrappers.py @@ -1,9 +1,16 @@ """Convenience wrappers for the swig interface""" +import logging import sys from contextlib import contextmanager, suppress from typing import List, Optional, Union, Sequence, Dict, Any + +import amici import amici.amici as amici_swig from . import numpy +from .logging import get_logger + +logger = get_logger(__name__, log_level=logging.DEBUG) + __all__ = [ 'runAmiciSimulation', 'runAmiciSimulations', 'ExpData', @@ -81,6 +88,9 @@ def runAmiciSimulation( with _capture_cstdout(): rdata = amici_swig.runAmiciSimulation( _get_ptr(solver), _get_ptr(edata), _get_ptr(model)) + _log_simulation(rdata) + if solver.getReturnDataReportingMode() == amici.RDataReporting.full: + _ids_and_names_to_rdata(rdata, model) return numpy.ReturnDataView(rdata) @@ -133,6 +143,11 @@ def runAmiciSimulations( failfast, num_threads ) + for rdata in rdata_ptr_list: + _log_simulation(rdata) + if solver.getReturnDataReportingMode() == amici.RDataReporting.full: + _ids_and_names_to_rdata(rdata, model) + return [numpy.ReturnDataView(r) for r in rdata_ptr_list] @@ -235,3 +250,34 @@ def set_model_settings( for setting, value in settings.items(): setter = setting[1] if isinstance(setting, tuple) else f'set{setting}' getattr(model, setter)(value) + + +def _log_simulation(rdata: amici_swig.ReturnData): + """Extension warnings to Python logging.""" + amici_severity_to_logging = { + amici_swig.LogSeverity_debug: logging.DEBUG, + amici_swig.LogSeverity_warning: logging.WARNING, + amici_swig.LogSeverity_error: logging.ERROR, + } + for msg in rdata.messages: + condition = f"[{rdata.id}]" if rdata.id else "" + logger.log( + amici_severity_to_logging[msg.severity], + f"{condition}[{msg.identifier}] {msg.message}" + ) + + +def _ids_and_names_to_rdata( + rdata: amici_swig.ReturnData, + model: amici_swig.Model +): + """Copy entity IDs and names from a Model to ReturnData.""" + for entity_type in ('State', 'Observable', 'Expression', + 'Parameter', 'FixedParameter'): + for name_or_id in ('Ids', 'Names'): + names_or_ids = getattr(model, f'get{entity_type}{name_or_id}')() + setattr( + rdata, + f"{entity_type.lower()}_{name_or_id.lower()}", + names_or_ids + ) diff --git a/python/tests/test_rdata.py b/python/tests/test_rdata.py new file mode 100644 index 0000000000..0e6847e689 --- /dev/null +++ b/python/tests/test_rdata.py @@ -0,0 +1,55 @@ +"""Test amici.ReturnData(View)-related functionality""" +import numpy as np +import pytest + +import amici +from numpy.testing import assert_array_equal + +@pytest.fixture(scope='session') 
+def rdata_by_id_fixture(sbml_example_presimulation_module): + model_module = sbml_example_presimulation_module + model = model_module.getModel() + model.setTimepoints(np.linspace(0, 60, 61)) + solver = model.getSolver() + solver.setSensitivityMethod(amici.SensitivityMethod.forward) + solver.setSensitivityOrder(amici.SensitivityOrder.first) + rdata = amici.runAmiciSimulation(model, solver) + assert rdata.status == amici.AMICI_SUCCESS + return model, rdata + + +def test_rdata_by_id(rdata_by_id_fixture): + model, rdata = rdata_by_id_fixture + + assert_array_equal( + rdata.by_id(model.getStateIds()[1]), + rdata.x[:, 1] + ) + assert_array_equal( + rdata.by_id(model.getStateIds()[1], 'x'), + rdata.x[:, 1] + ) + assert_array_equal( + rdata.by_id(model.getStateIds()[1], 'x', model), + rdata.x[:, 1] + ) + + + assert_array_equal( + rdata.by_id(model.getObservableIds()[0], 'y', model), + rdata.y[:, 0] + ) + + assert_array_equal( + rdata.by_id(model.getExpressionIds()[1]), + rdata.w[:, 1] + ) + assert_array_equal( + rdata.by_id(model.getExpressionIds()[1], 'w', model), + rdata.w[:, 1] + ) + + assert_array_equal( + rdata.by_id(model.getStateIds()[1], 'sx', model), + rdata.sx[:, :, 1] + ) diff --git a/python/tests/test_sbml_import.py b/python/tests/test_sbml_import.py index 7090075314..8c632c23b5 100644 --- a/python/tests/test_sbml_import.py +++ b/python/tests/test_sbml_import.py @@ -165,6 +165,22 @@ def test_sbml2amici_observable_dependent_error(observable_dependent_error_model) check_derivatives(model, solver, edata) +@skip_on_valgrind +def test_logging_works(observable_dependent_error_model, caplog): + """Check that warnings are forwarded to Python logging""" + model_module = observable_dependent_error_model + model = model_module.getModel() + model.setTimepoints(np.linspace(0, 60, 61)) + solver = model.getSolver() + + # this will prematurely stop the simulation + solver.setMaxSteps(1) + + rdata = amici.runAmiciSimulation(model, solver) + assert rdata.status != amici.AMICI_SUCCESS + assert "mxstep steps taken" in caplog.text + + @pytest.fixture(scope='session') def model_steadystate_module(): sbml_file = STEADYSTATE_MODEL_FILE @@ -482,8 +498,9 @@ def test_sympy_exp_monkeypatch(): """ url = 'https://www.ebi.ac.uk/biomodels/model/download/BIOMD0000000529.2?' \ 'filename=BIOMD0000000529_url.xml' - importer = amici.SbmlImporter(urlopen(url).read().decode('utf-8'), - from_file=False) + importer = amici.SbmlImporter( + urlopen(url, timeout=20).read().decode('utf-8'), from_file=False + ) module_name = 'BIOMD0000000529' with TemporaryDirectory() as outdir: diff --git a/scripts/downloadAndBuildDoxygen.sh b/scripts/downloadAndBuildDoxygen.sh index 8cae173e69..503934cb31 100755 --- a/scripts/downloadAndBuildDoxygen.sh +++ b/scripts/downloadAndBuildDoxygen.sh @@ -9,7 +9,7 @@ DOXYGEN_DIR="${AMICI_PATH}"/ThirdParty/doxygen cd "${AMICI_PATH}"/ThirdParty if [[ ! -d ${DOXYGEN_DIR} ]]; then # git clone --depth 1 https://github.com/doxygen/doxygen.git "${DOXYGEN_DIR}" - git clone --single-branch --branch Release_1_9_1 --depth 1 https://github.com/doxygen/doxygen.git "${DOXYGEN_DIR}" + git clone --single-branch --branch Release_1_9_6 --depth 1 https://github.com/doxygen/doxygen.git "${DOXYGEN_DIR}" fi cd "${DOXYGEN_DIR}" diff --git a/scripts/run-doxygen.sh b/scripts/run-doxygen.sh index 030c8fef73..c766d93eed 100755 --- a/scripts/run-doxygen.sh +++ b/scripts/run-doxygen.sh @@ -60,6 +60,11 @@ mv "${DOXY_WARNING_FILE}_tmp" "${DOXY_WARNING_FILE}" grep -v "error: Problem.* running g.*. Check your installation!" 
"${DOXY_WARNING_FILE}" > "${DOXY_WARNING_FILE}_tmp" || [[ $? == 1 ]] mv "${DOXY_WARNING_FILE}_tmp" "${DOXY_WARNING_FILE}" +# unclear error parsing ghostscript-generated eps files +# result seems fine despite error +grep -v "Couldn't extract bounding box from" "${DOXY_WARNING_FILE}" > "${DOXY_WARNING_FILE}_tmp" || [[ $? == 1 ]] +mv "${DOXY_WARNING_FILE}_tmp" "${DOXY_WARNING_FILE}" + # check if warnings log was created if [ -f "${DOXY_WARNING_FILE}" ]; then # check if warnings log is empty diff --git a/src/amici.cpp b/src/amici.cpp index 934e91dfc0..338937bca1 100644 --- a/src/amici.cpp +++ b/src/amici.cpp @@ -8,7 +8,7 @@ #include "amici/steadystateproblem.h" #include "amici/backwardproblem.h" #include "amici/forwardproblem.h" -#include "amici/misc.h" +#include "amici/logging.h" #include //return codes #include //realtype @@ -62,67 +62,18 @@ std::map simulation_status_to_str_map = { {AMICI_SUCCESS, "AMICI_SUCCESS"}, }; -/** AMICI default application context, kept around for convenience for using - * amici::runAmiciSimulation or instantiating Solver and Model without special - * needs. - */ -AmiciApplication defaultContext = AmiciApplication(); - -std::unique_ptr -runAmiciSimulation(Solver& solver, - const ExpData* edata, - Model& model, - bool rethrow) -{ - return defaultContext.runAmiciSimulation(solver, edata, model, rethrow); -} - -void -printErrMsgIdAndTxt(std::string const& id, std::string const& message) -{ - std::cerr << "[Error] "; - if (!id.empty()) { - std::cerr << id << ": "; - } - std::cerr << message << std::endl; -} - -void -printWarnMsgIdAndTxt(std::string const& id, std::string const& message) -{ - std::cerr << "[Warning] "; - if (!id.empty()) { - std::cerr << id << ": "; - } - std::cerr << message << std::endl; -} +std::unique_ptr runAmiciSimulation( + Solver& solver, ExpData const* edata, Model& model, bool rethrow +) { + // create a temporary logger instance for Solver and Model to capture + // messages from only this simulation + Logger logger; + solver.logger = &logger; + model.logger = &logger; + // prevent dangling pointer + auto _ = gsl::finally([&solver, &model] + { solver.logger = model.logger = nullptr; }); -std::vector> -runAmiciSimulations(const Solver& solver, - const std::vector& edatas, - const Model& model, - const bool failfast, -#if defined(_OPENMP) - int num_threads -#else - int /* num_threads */ -#endif -) -{ -#if defined(_OPENMP) - return defaultContext.runAmiciSimulations( - solver, edatas, model, failfast, num_threads); -#else - return defaultContext.runAmiciSimulations(solver, edatas, model, failfast, 1); -#endif -} - -std::unique_ptr -AmiciApplication::runAmiciSimulation(Solver& solver, - const ExpData* edata, - Model& model, - bool rethrow) -{ auto start_time_total = clock(); solver.startTimer(); @@ -196,57 +147,63 @@ AmiciApplication::runAmiciSimulation(Solver& solver, rdata->status = AMICI_MAX_TIME_EXCEEDED; if(rethrow) throw; - warningF("AMICI:simulation", - "AMICI forward simulation failed at t = %f: " - "Maximum time exceeded.\n", - ex.time); + logger.log( + LogSeverity::error, "MAXTIME_EXCEEDED", + "AMICI forward simulation failed at t = %g: " + "Maximum time exceeded in forward solve.", + ex.time + ); } else { rdata->status = ex.error_code; if (rethrow) throw; - warningF("AMICI:simulation", - "AMICI forward simulation failed at t = %f:\n%s\n", - ex.time, - ex.what()); - + logger.log( + LogSeverity::error, "FORWARD_FAILURE", + "AMICI forward simulation failed at t = %g: %s", ex.time, + ex.what() + ); } } catch (amici::IntegrationFailureB 
const& ex) { if(ex.error_code == AMICI_RHSFUNC_FAIL && solver.timeExceeded()) { rdata->status = AMICI_MAX_TIME_EXCEEDED; if (rethrow) throw; - warningF( - "AMICI:simulation", + logger.log( + LogSeverity::error, "MAXTIME_EXCEEDED", "AMICI backward simulation failed when trying to solve until " - "t = %f: Maximum time exceeded.\n", - ex.time); + "t = %g: Maximum time exceeded in backward solve.", + ex.time + ); } else { rdata->status = ex.error_code; if (rethrow) throw; - warningF( - "AMICI:simulation", - "AMICI backward simulation failed when trying to solve until t = %f" - " (see message above):\n%s\n", - ex.time, - ex.what()); + logger.log( + LogSeverity::error, "BACKWARD_FAILURE", + "AMICI backward simulation failed when trying to solve until t " + "= %g" + " (check debug logs for details): %s", + ex.time, ex.what() + ); } } catch (amici::AmiException const& ex) { rdata->status = AMICI_ERROR; if (rethrow) throw; - warningF("AMICI:simulation", - "AMICI simulation failed:\n%s\nError occurred in:\n%s", - ex.what(), - ex.getBacktrace()); + logger.log( + LogSeverity::error, "OTHER", + "AMICI simulation failed: %s\nError occurred in:\n%s", ex.what(), + ex.getBacktrace() + ); } catch (std::exception const& ex) { rdata->status = AMICI_ERROR; if (rethrow) throw; - warningF("AMICI:simulation", - "AMICI simulation failed:\n%s\n", - ex.what()); + logger.log( + LogSeverity::error, "OTHER", "AMICI simulation failed: %s", + ex.what() + ); } rdata->processSimulationObjects( @@ -274,23 +231,20 @@ AmiciApplication::runAmiciSimulation(Solver& solver, std::is_sorted(rdata->numstepsB.begin(), rdata->numstepsB.end()) || rdata->status != AMICI_SUCCESS ); - + rdata->messages = logger.items; return rdata; } -std::vector> -AmiciApplication::runAmiciSimulations(const Solver& solver, - const std::vector& edatas, - const Model& model, - bool failfast, +std::vector> runAmiciSimulations( + Solver const& solver, std::vector const& edatas, + Model const& model, bool failfast, #if defined(_OPENMP) - int num_threads + int num_threads #else - int /* num_threads */ + int /* num_threads */ #endif -) -{ +) { std::vector> results(edatas.size()); // is set to true if one simulation fails and we should skip the rest. // shared across threads. @@ -319,26 +273,6 @@ AmiciApplication::runAmiciSimulations(const Solver& solver, return results; } -void -AmiciApplication::warningF(const char* identifier, const char* format, ...) const -{ - va_list argptr; - va_start(argptr, format); - auto str = printfToString(format, argptr); - va_end(argptr); - warning(identifier, str); -} - -void -AmiciApplication::errorF(const char* identifier, const char* format, ...) const -{ - va_list argptr; - va_start(argptr, format); - auto str = printfToString(format, argptr); - va_end(argptr); - error(identifier, str); -} - std::string simulation_status_to_str(int status) { try { diff --git a/src/forwardproblem.cpp b/src/forwardproblem.cpp index 4e6a0ba5fd..eae3f87c1b 100644 --- a/src/forwardproblem.cpp +++ b/src/forwardproblem.cpp @@ -247,11 +247,14 @@ void ForwardProblem::handleEvent(realtype *tlastroot, const bool seflag, if (secondevent > 0) { /* Secondary events may result in wrong forward sensitivities, * if the secondary event has a bolus... */ - if (solver->computingFSA()) - solver->app->warning("AMICI:simulation", - "Secondary event was triggered. 
Depending on " - "the bolus of the secondary event, forward " - "sensitivities can be incorrect."); + if (solver->computingFSA() && solver->logger) + solver->logger->log( + LogSeverity::warning, + "SECONDARY_EVENT", + "Secondary event was triggered. Depending on " + "the bolus of the secondary event, forward " + "sensitivities can be incorrect." + ); handleEvent(tlastroot, true, false); } diff --git a/src/interface_matlab.cpp b/src/interface_matlab.cpp index c48cd384c5..4204e5b503 100644 --- a/src/interface_matlab.cpp +++ b/src/interface_matlab.cpp @@ -22,7 +22,7 @@ namespace amici { - int dbl2int(const double x); +int dbl2int(double const x); /** * @brief The mexRhsArguments enum takes care of the ordering of mex file @@ -62,11 +62,12 @@ char amici_blasCBlasTransToBlasTrans(BLASTranspose trans) { throw std::invalid_argument("Invalid argument to amici_blasCBlasTransToBlasTrans"); } -void amici_dgemm(BLASLayout layout, BLASTranspose TransA, - BLASTranspose TransB, const int M, const int N, - const int K, const double alpha, const double *A, - const int lda, const double *B, const int ldb, - const double beta, double *C, const int ldc) { +void amici_dgemm( + BLASLayout layout, BLASTranspose TransA, BLASTranspose TransB, int const M, + int const N, int const K, double const alpha, double const* A, + int const lda, double const* B, int const ldb, double const beta, double* C, + int const ldc +) { assert(layout == BLASLayout::colMajor); const ptrdiff_t M_ = M; @@ -75,18 +76,19 @@ void amici_dgemm(BLASLayout layout, BLASTranspose TransA, const ptrdiff_t lda_ = lda; const ptrdiff_t ldb_ = ldb; const ptrdiff_t ldc_ = ldc; - const char transA = amici_blasCBlasTransToBlasTrans(TransA); - const char transB = amici_blasCBlasTransToBlasTrans(TransB); + char const transA = amici_blasCBlasTransToBlasTrans(TransA); + char const transB = amici_blasCBlasTransToBlasTrans(TransB); FORTRAN_WRAPPER(dgemm)(&transA, &transB, &M_, &N_, &K_, &alpha, A, &lda_, B, &ldb_, &beta, C, &ldc_); } -void amici_dgemv(BLASLayout layout, BLASTranspose TransA, - const int M, const int N, const double alpha, const double *A, - const int lda, const double *X, const int incX, - const double beta, double *Y, const int incY) { +void amici_dgemv( + BLASLayout layout, BLASTranspose TransA, int const M, int const N, + double const alpha, double const* A, int const lda, double const* X, + int const incX, double const beta, double* Y, int const incY +) { assert(layout == BLASLayout::colMajor); const ptrdiff_t M_ = M; @@ -94,12 +96,14 @@ void amici_dgemv(BLASLayout layout, BLASTranspose TransA, const ptrdiff_t lda_ = lda; const ptrdiff_t incX_ = incX; const ptrdiff_t incY_ = incY; - const char transA = amici_blasCBlasTransToBlasTrans(TransA); + char const transA = amici_blasCBlasTransToBlasTrans(TransA); FORTRAN_WRAPPER(dgemv)(&transA, &M_, &N_, &alpha, A, &lda_, X, &incX_, &beta, Y, &incY_); } -void amici_daxpy(int n, double alpha, const double *x, const int incx, double *y, int incy) { +void amici_daxpy( + int n, double alpha, double const* x, int const incx, double* y, int incy +) { const ptrdiff_t n_ = n; const ptrdiff_t incx_ = incx; @@ -113,12 +117,12 @@ void amici_daxpy(int n, double alpha, const double *x, const int incx, double *y * @param length Number of elements in array * @return std::vector with data from array */ -std::vector mxArrayToVector(const mxArray *array, int length) { +std::vector mxArrayToVector(mxArray const* array, int length) { return {mxGetPr(array), mxGetPr(array) + length}; } -std::unique_ptr 
expDataFromMatlabCall(const mxArray *prhs[], - Model const &model) { +std::unique_ptr +expDataFromMatlabCall(mxArray const* prhs[], Model const& model) { if (!mxGetPr(prhs[RHS_DATA])) return nullptr; @@ -241,14 +245,13 @@ std::unique_ptr expDataFromMatlabCall(const mxArray *prhs[], * @param x input * @return int_x casted value */ -int dbl2int(const double x){ +int dbl2int(double const x) { if((std::round(x)-x) != 0.0) throw AmiException("Invalid non-integer value for integer option"); return(static_cast(x)); } -void setSolverOptions(const mxArray *prhs[], int nrhs, Solver &solver) -{ +void setSolverOptions(mxArray const* prhs[], int nrhs, Solver& solver) { if (mxGetPr(prhs[RHS_OPTIONS])) { if (mxGetProperty(prhs[RHS_OPTIONS], 0, "atol")) { solver.setAbsoluteTolerance(mxGetScalar(mxGetProperty(prhs[RHS_OPTIONS], 0, "atol"))); @@ -329,8 +332,7 @@ void setSolverOptions(const mxArray *prhs[], int nrhs, Solver &solver) } } -void setModelData(const mxArray *prhs[], int nrhs, Model &model) -{ +void setModelData(mxArray const* prhs[], int nrhs, Model& model) { if (mxGetPr(prhs[RHS_OPTIONS])) { if (mxGetProperty(prhs[RHS_OPTIONS], 0, "nmaxevent")) { model.setNMaxEvent(dbl2int(mxGetScalar(mxGetProperty(prhs[RHS_OPTIONS], 0, "nmaxevent")))); @@ -343,8 +345,8 @@ void setModelData(const mxArray *prhs[], int nrhs, Model &model) if (mxArray *a = mxGetProperty(prhs[RHS_OPTIONS], 0, "pscale")) { if(mxGetM(a) == 1 && mxGetN(a) == 1) { model.setParameterScale(static_cast(dbl2int(mxGetScalar(a)))); - } else if((mxGetM(a) == 1 && mxGetN(a) == model.np()) - || (mxGetN(a) == 1 && mxGetM(a) == model.np())) { + } else if((mxGetM(a) == 1 && gsl::narrow(mxGetN(a)) == model.np()) + || (mxGetN(a) == 1 && gsl::narrow(mxGetM(a)) == model.np())) { auto pscaleArray = static_cast(mxGetData(a)); std::vector pscale(model.np()); for(int ip = 0; ip < model.np(); ++ip) { @@ -368,8 +370,10 @@ void setModelData(const mxArray *prhs[], int nrhs, Model &model) if (model.np() > 0) { if (mxGetPr(prhs[RHS_PARAMETERS])) { - if (mxGetM(prhs[RHS_PARAMETERS]) * mxGetN(prhs[RHS_PARAMETERS]) == - model.np()) { + if (gsl::narrow( + mxGetM(prhs[RHS_PARAMETERS]) * mxGetN(prhs[RHS_PARAMETERS]) + ) + == model.np()) { model.setParameters(std::vector(mxGetPr(prhs[RHS_PARAMETERS]), mxGetPr(prhs[RHS_PARAMETERS]) + mxGetM(prhs[RHS_PARAMETERS]) * mxGetN(prhs[RHS_PARAMETERS]))); @@ -379,8 +383,10 @@ void setModelData(const mxArray *prhs[], int nrhs, Model &model) if (model.nk() > 0) { if (mxGetPr(prhs[RHS_CONSTANTS])) { - if (mxGetM(prhs[RHS_CONSTANTS]) * mxGetN(prhs[RHS_CONSTANTS]) == - model.nk()) { + if (gsl::narrow( + mxGetM(prhs[RHS_CONSTANTS]) * mxGetN(prhs[RHS_CONSTANTS]) + ) + == model.nk()) { model.setFixedParameters(std::vector(mxGetPr(prhs[RHS_CONSTANTS]), mxGetPr(prhs[RHS_CONSTANTS]) + mxGetM(prhs[RHS_CONSTANTS]) * mxGetN(prhs[RHS_CONSTANTS]))); @@ -404,7 +410,7 @@ void setModelData(const mxArray *prhs[], int nrhs, Model &model) if (mxGetN(x0) != 1) { throw AmiException("Number of rows in x0 field must be equal to 1!"); } - if (mxGetM(x0) != model.nx_rdata) { + if (gsl::narrow(mxGetM(x0)) != model.nx_rdata) { throw AmiException("Number of columns in x0 field " "does not agree with number of " "model states!"); @@ -421,7 +427,7 @@ void setModelData(const mxArray *prhs[], int nrhs, Model &model) if (mxGetN(x0) != 1) { throw AmiException("Number of rows in x0 field must be equal to 1!"); } - if (mxGetM(x0) != model.nx_rdata) { + if (gsl::narrow(mxGetM(x0)) != model.nx_rdata) { throw AmiException("Number of columns in x0 field " "does not agree 
with number of " "model states!"); @@ -434,12 +440,12 @@ void setModelData(const mxArray *prhs[], int nrhs, Model &model) mxArray *sx0 = mxGetField(prhs[RHS_INITIALIZATION], 0, "sx0"); if (sx0 && (mxGetM(sx0) * mxGetN(sx0)) > 0) { /* check dimensions */ - if (mxGetN(sx0) != model.nplist()) { + if (gsl::narrow(mxGetN(sx0)) != model.nplist()) { throw AmiException("Number of rows in sx0 field " "does not agree with number of " "model parameters!"); } - if (mxGetM(sx0) != model.nx_rdata) { + if (gsl::narrow(mxGetM(sx0)) != model.nx_rdata) { throw AmiException("Number of columns in sx0 " "field does not agree with " "number of model states!"); @@ -456,7 +462,6 @@ void setModelData(const mxArray *prhs[], int nrhs, Model &model) } // namespace amici - /*! * mexFunction is the main interface function for the MATLAB interface. It reads * in input data (udata and edata) and @@ -468,33 +473,17 @@ void setModelData(const mxArray *prhs[], int nrhs, Model &model) * @param nrhs number of input arguments of the matlab call * @param prhs pointer to the array of input arguments */ -void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { - // use matlab error reporting - amici::AmiciApplication amiciApp; - amiciApp.warning = []( - std::string const& identifier, - std::string const& message){ - mexWarnMsgIdAndTxt(identifier.c_str(), message.c_str()); - }; - amiciApp.error = []( - std::string const& identifier, - std::string const& message){ - mexErrMsgIdAndTxt(identifier.c_str(), message.c_str()); - }; - +void mexFunction(int nlhs, mxArray* plhs[], int nrhs, mxArray const* prhs[]) { if (nlhs != 1) { - amiciApp.errorF("AMICI:mex:setup", + mexErrMsgIdAndTxt("AMICI:mex:setup", "Incorrect number of output arguments (must be 1)!"); } else if(nrhs < amici::RHS_NUMARGS_REQUIRED) { - amiciApp.errorF("AMICI:mex:setup", + mexErrMsgIdAndTxt("AMICI:mex:setup", "Incorrect number of input arguments (must be at least 7)!"); }; auto model = amici::generic_model::getModel(); - model->app = &amiciApp; - auto solver = model->getSolver(); - solver->app = &amiciApp; setModelData(prhs, nrhs, *model); setSolverOptions(prhs, nrhs, *solver); @@ -503,15 +492,19 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { try { edata = amici::expDataFromMatlabCall(prhs, *model); } catch (amici::AmiException const& ex) { - amiciApp.errorF("AMICI:mex:setup","Failed to read experimental data:\n%s",ex.what()); + mexErrMsgIdAndTxt("AMICI:mex:setup","Failed to read experimental data:\n%s",ex.what()); } } else if (solver->getSensitivityOrder() >= amici::SensitivityOrder::first && solver->getSensitivityMethod() == amici::SensitivityMethod::adjoint) { - amiciApp.errorF("AMICI:mex:setup","No data provided!"); + mexErrMsgIdAndTxt("AMICI:mex:setup","No data provided!"); } /* ensures that plhs[0] is available */ auto rdata = amici::runAmiciSimulation(*solver, edata.get(), *model); plhs[0] = getReturnDataMatlabFromAmiciCall(rdata.get()); + for(auto const& msg: rdata->messages) { + auto identifier = "AMICI:simulation:" + msg.identifier; + mexWarnMsgIdAndTxt(identifier.c_str(), msg.message.c_str()); + } } diff --git a/src/logging.cpp b/src/logging.cpp new file mode 100644 index 0000000000..7e5d861486 --- /dev/null +++ b/src/logging.cpp @@ -0,0 +1,28 @@ +#include "amici/logging.h" +#include "amici/misc.h" + +#include + +namespace amici { + +void Logger::log(LogSeverity severity, + std::string const& identifier, + std::string const& message) +{ + items.emplace_back(severity, identifier, message); +} + +void 
Logger::log(LogSeverity severity, + std::string const& identifier, + const char *format, ...) +{ + va_list argptr; + va_start(argptr, format); + auto message = printfToString(format, argptr); + va_end(argptr); + + log(severity, identifier, message); +} + + +} // namespace amici diff --git a/src/model.cpp b/src/model.cpp index 9fea9942c5..df71ae3b8f 100644 --- a/src/model.cpp +++ b/src/model.cpp @@ -1414,13 +1414,16 @@ int Model::checkFinite(gsl::span array, gsl_ExpectsDebug(false); model_quantity_str = std::to_string(static_cast(model_quantity)); } - app->warningF(msg_id.c_str(), - "AMICI encountered a %s value for %s[%i] (%s)", - non_finite_type.c_str(), - model_quantity_str.c_str(), - gsl::narrow(flat_index), - element_id.c_str() - ); + if(logger) + logger->log( + LogSeverity::warning, + msg_id, + "AMICI encountered a %s value for %s[%i] (%s)", + non_finite_type.c_str(), + model_quantity_str.c_str(), + gsl::narrow(flat_index), + element_id.c_str() + ); // check upstream, without infinite recursion if(model_quantity != ModelQuantity::k @@ -1528,14 +1531,17 @@ int Model::checkFinite(gsl::span array, model_quantity_str = std::to_string(static_cast(model_quantity)); } - app->warningF(msg_id.c_str(), - "AMICI encountered a %s value for %s[%i] (%s, %s)", - non_finite_type.c_str(), - model_quantity_str.c_str(), - gsl::narrow(flat_index), - row_id.c_str(), - col_id.c_str() - ); + if(logger) + logger->log( + LogSeverity::warning, + msg_id, + "AMICI encountered a %s value for %s[%i] (%s, %s)", + non_finite_type.c_str(), + model_quantity_str.c_str(), + gsl::narrow(flat_index), + row_id.c_str(), + col_id.c_str() + ); // check upstream checkFinite(state_.fixedParameters, ModelQuantity::k); @@ -1617,15 +1623,19 @@ int Model::checkFinite(SUNMatrix m, ModelQuantity model_quantity, realtype t) co gsl_ExpectsDebug(false); model_quantity_str = std::to_string(static_cast(model_quantity)); } - app->warningF(msg_id.c_str(), - "AMICI encountered a %s value for %s[%i] (%s, %s) at t=%g", - non_finite_type.c_str(), - model_quantity_str.c_str(), - gsl::narrow(flat_index), - row_id.c_str(), - col_id.c_str(), - t - ); + + if(logger) + logger->log( + LogSeverity::warning, + msg_id, + "AMICI encountered a %s value for %s[%i] (%s, %s) at t=%g", + non_finite_type.c_str(), + model_quantity_str.c_str(), + gsl::narrow(flat_index), + row_id.c_str(), + col_id.c_str(), + t + ); // check upstream checkFinite(state_.fixedParameters, ModelQuantity::k); diff --git a/src/newton_solver.cpp b/src/newton_solver.cpp index 10ed11da39..1d903b7456 100644 --- a/src/newton_solver.cpp +++ b/src/newton_solver.cpp @@ -77,10 +77,14 @@ void NewtonSolver::computeNewtonSensis(AmiVectorArray &sx, Model &model, prepareLinearSystem(model, state); model.fdxdotdp(state.t, state.x, state.dx); - if (is_singular(model, state)) - model.app->warningF("AMICI:newton", - "Jacobian is singular at steadystate, " - "sensitivities may be inaccurate"); + if (model.logger && is_singular(model, state)) { + model.logger->log( + LogSeverity::warning, + "NEWTON_JAC_SINGULAR", + "Jacobian is singular at steadystate, " + "sensitivities may be inaccurate." 
+ ); + } if (model.pythonGenerated) { for (int ip = 0; ip < model.nplist(); ip++) { diff --git a/src/returndata_matlab.cpp b/src/returndata_matlab.cpp index 528a7e1d8a..297c6d68cf 100644 --- a/src/returndata_matlab.cpp +++ b/src/returndata_matlab.cpp @@ -10,29 +10,12 @@ mxArray *getReturnDataMatlabFromAmiciCall(ReturnData const *rdata) { } mxArray *initMatlabReturnFields(ReturnData const *rdata) { - const int numFields = 22; - const char *field_names_sol[numFields] = {"status", - "llh", - "sllh", - "s2llh", - "chi2", - "t", - "x", - "sx", - "y", - "sy", - "sigmay", - "ssigmay", - "z", - "sz", - "sigmaz", - "ssigmaz", - "rz", - "srz", - "s2rz", - "x0", - "sx0", - "diagnosis"}; + int const numFields = 22; + char const* field_names_sol[numFields] + = {"status", "llh", "sllh", "s2llh", "chi2", "t", + "x", "sx", "y", "sy", "sigmay", "ssigmay", + "z", "sz", "sigmaz", "ssigmaz", "rz", "srz", + "s2rz", "x0", "sx0", "diagnosis"}; checkFieldNames(field_names_sol,numFields); @@ -101,32 +84,33 @@ mxArray *initMatlabReturnFields(ReturnData const *rdata) { } mxArray *initMatlabDiagnosisFields(ReturnData const *rdata) { - const int numFields = 25; - const char *field_names_sol[numFields] = {"xdot", - "J", - "numsteps", - "numrhsevals", - "numerrtestfails", - "numnonlinsolvconvfails", - "order", - "numstepsB", - "numrhsevalsB", - "numerrtestfailsB", - "numnonlinsolvconvfailsB", - "preeq_status", - "preeq_numsteps", - "preeq_numstepsB", - "preeq_cpu_time", - "preeq_cpu_timeB", - "preeq_t", - "preeq_wrms", - "posteq_status", - "posteq_numsteps", - "posteq_numstepsB", - "posteq_cpu_time", - "posteq_cpu_timeB", - "posteq_t", - "posteq_wrms"}; + int const numFields = 25; + char const* field_names_sol[numFields] + = {"xdot", + "J", + "numsteps", + "numrhsevals", + "numerrtestfails", + "numnonlinsolvconvfails", + "order", + "numstepsB", + "numrhsevalsB", + "numerrtestfailsB", + "numnonlinsolvconvfailsB", + "preeq_status", + "preeq_numsteps", + "preeq_numstepsB", + "preeq_cpu_time", + "preeq_cpu_timeB", + "preeq_t", + "preeq_wrms", + "posteq_status", + "posteq_numsteps", + "posteq_numstepsB", + "posteq_cpu_time", + "posteq_cpu_timeB", + "posteq_t", + "posteq_wrms"}; checkFieldNames(field_names_sol,numFields); @@ -205,10 +189,10 @@ mxArray *initMatlabDiagnosisFields(ReturnData const *rdata) { return(matlabDiagnosisStruct); } - -template -void writeMatlabField0(mxArray *matlabStruct, const char *fieldName, - T fieldData) { +template +void writeMatlabField0( + mxArray* matlabStruct, char const* fieldName, T fieldData +) { std::vector dim = {(mwSize)(1), (mwSize)(1)}; @@ -217,9 +201,11 @@ void writeMatlabField0(mxArray *matlabStruct, const char *fieldName, array[0] = static_cast(fieldData); } -template -void writeMatlabField1(mxArray *matlabStruct, const char *fieldName, - gsl::span const& fieldData, int dim0) { +template +void writeMatlabField1( + mxArray* matlabStruct, char const* fieldName, + gsl::span const& fieldData, mwSize dim0 +) { if(fieldData.size() != dim0) throw AmiException("Dimension mismatch when writing rdata->%s to " "matlab results (expected %d, got %d)", @@ -234,10 +220,12 @@ void writeMatlabField1(mxArray *matlabStruct, const char *fieldName, array[i] = static_cast(data_ptr[i]); } -template -void writeMatlabField2(mxArray *matlabStruct, const char *fieldName, - std::vector const& fieldData, int dim0, int dim1, - std::vector perm) { +template +void writeMatlabField2( + mxArray* matlabStruct, char const* fieldName, + std::vector const& fieldData, mwSize dim0, mwSize dim1, + std::vector perm +) { 
if(fieldData.size() != dim0*dim1) throw AmiException("Dimension mismatch when writing rdata->%s to " "matlab results (expected: %d, actual: %d)", @@ -247,11 +235,11 @@ void writeMatlabField2(mxArray *matlabStruct, const char *fieldName, if(perm.size() != 2) throw AmiException("Dimension mismatch when applying permutation!"); - std::vector dim = {(mwSize)(dim0), (mwSize)(dim1)}; + std::vector dim = {dim0, dim1}; double *array = initAndAttachArray(matlabStruct, fieldName, reorder(dim,perm)); - std::vector index = {0,0}; + std::vector index = {0, 0}; /* transform rowmajor (c++) to colmajor (matlab) and apply permutation */ for (index[0] = 0; index[0] < dim[0]; index[0]++) { for (index[1] = 0; index[1] < dim[1]; index[1]++) { @@ -261,10 +249,12 @@ void writeMatlabField2(mxArray *matlabStruct, const char *fieldName, } } -template -void writeMatlabField3(mxArray *matlabStruct, const char *fieldName, - std::vector const& fieldData, int dim0, int dim1, - int dim2, std::vector perm) { +template +void writeMatlabField3( + mxArray* matlabStruct, char const* fieldName, + std::vector const& fieldData, mwSize dim0, mwSize dim1, mwSize dim2, + std::vector perm +) { if(fieldData.size() != dim0*dim1*dim2) throw AmiException("Dimension mismatch when writing rdata->%s to matlab results",fieldName); @@ -275,7 +265,7 @@ void writeMatlabField3(mxArray *matlabStruct, const char *fieldName, double *array = initAndAttachArray(matlabStruct, fieldName, reorder(dim,perm)); - std::vector index = {0,0,0}; + std::vector index = {0, 0, 0}; /* transform rowmajor (c++) to colmajor (matlab) and apply permutation */ for (index[0] = 0; index[0] < dim[0]; index[0]++) { for (index[1] = 0; index[1] < dim[1]; index[1]++) { @@ -287,10 +277,12 @@ void writeMatlabField3(mxArray *matlabStruct, const char *fieldName, } } -template -void writeMatlabField4(mxArray *matlabStruct, const char *fieldName, - std::vector const& fieldData, int dim0, int dim1, - int dim2, int dim3, std::vector perm) { +template +void writeMatlabField4( + mxArray* matlabStruct, char const* fieldName, + std::vector const& fieldData, mwSize dim0, mwSize dim1, mwSize dim2, + mwSize dim3, std::vector perm +) { if(fieldData.size() != dim0*dim1*dim2*dim3) throw AmiException("Dimension mismatch when writing rdata->%s to matlab results!",fieldName); @@ -301,7 +293,7 @@ void writeMatlabField4(mxArray *matlabStruct, const char *fieldName, double *array = initAndAttachArray(matlabStruct, fieldName, reorder(dim,perm)); - std::vector index = {0,0,0,0}; + std::vector index = {0, 0, 0, 0}; /* transform rowmajor (c++) to colmajor (matlab) and apply permutation */ for (index[0] = 0; index[0] < dim[0]; index[0]++) { for (index[1] = 0; index[1] < dim[1]; index[1]++) { @@ -315,7 +307,9 @@ void writeMatlabField4(mxArray *matlabStruct, const char *fieldName, } } -double *initAndAttachArray(mxArray *matlabStruct, const char *fieldName, std::vector dim) { +double* initAndAttachArray( + mxArray* matlabStruct, char const* fieldName, std::vector dim +) { if(!mxIsStruct(matlabStruct)) throw AmiException("Passing non-struct mxArray to initAndAttachArray!",fieldName); @@ -328,7 +322,7 @@ double *initAndAttachArray(mxArray *matlabStruct, const char *fieldName, std::ve return(mxGetPr(array)); } -void checkFieldNames(const char **fieldNames,const int fieldCount) { +void checkFieldNames(char const** fieldNames, int const fieldCount) { for (int ifield = 0; ifield reorder(std::vector const& input, throw AmiException("Input dimension mismatch!"); std::vector reordered; 
reordered.resize(input.size()); - for(int i = 0; i < input.size(); i++) + for (std::vector::size_type i = 0; i < input.size(); i++) reordered[i] = input[order[i]]; - return(reordered); + return reordered; } diff --git a/src/solver.cpp b/src/solver.cpp index 7f27a15673..6bb20abe74 100644 --- a/src/solver.cpp +++ b/src/solver.cpp @@ -1,7 +1,6 @@ #include "amici/solver.h" #include "amici/exception.h" -#include "amici/amici.h" #include "amici/model.h" #include "amici/symbolic_functions.h" @@ -13,11 +12,6 @@ namespace amici { -Solver::Solver(AmiciApplication *app) : app(app) -{ - -} - Solver::Solver(const Solver &other) : ism_(other.ism_), lmm_(other.lmm_), iter_(other.iter_), interp_type_(other.interp_type_), maxsteps_(other.maxsteps_), @@ -1229,27 +1223,27 @@ void wrapErrHandlerFn(int error_code, const char *module, function, msg); switch (error_code) { case 99: - snprintf(buffid, BUF_SIZE, "AMICI:%s:%s:WARNING", module, function); + snprintf(buffid, BUF_SIZE, "%s:%s:WARNING", module, function); break; case -1: - snprintf(buffid, BUF_SIZE, "AMICI:%s:%s:TOO_MUCH_WORK", module, function); + snprintf(buffid, BUF_SIZE, "%s:%s:TOO_MUCH_WORK", module, function); break; case -2: - snprintf(buffid, BUF_SIZE, "AMICI:%s:%s:TOO_MUCH_ACC", module, function); + snprintf(buffid, BUF_SIZE, "%s:%s:TOO_MUCH_ACC", module, function); break; case -3: - snprintf(buffid, BUF_SIZE, "AMICI:%s:%s:ERR_FAILURE", module, function); + snprintf(buffid, BUF_SIZE, "%s:%s:ERR_FAILURE", module, function); break; case -4: - snprintf(buffid, BUF_SIZE, "AMICI:%s:%s:CONV_FAILURE", module, function); + snprintf(buffid, BUF_SIZE, "%s:%s:CONV_FAILURE", module, function); break; default: - snprintf(buffid, BUF_SIZE, "AMICI:%s:%s:OTHER", module, function); + snprintf(buffid, BUF_SIZE, "%s:%s:OTHER", module, function); break; } @@ -1258,7 +1252,8 @@ void wrapErrHandlerFn(int error_code, const char *module, throw std::runtime_error("eh_data unset"); } auto solver = static_cast(eh_data); - solver->app->warning(buffid, buffer); + if(solver->logger) + solver->logger->log(LogSeverity::debug, buffid, buffer); } } // namespace amici diff --git a/src/steadystateproblem.cpp b/src/steadystateproblem.cpp index e3bca92384..0f7f5361ac 100644 --- a/src/steadystateproblem.cpp +++ b/src/steadystateproblem.cpp @@ -6,7 +6,6 @@ #include "amici/model.h" #include "amici/newton_solver.h" #include "amici/solver.h" -#include "amici/amici.h" #include #include @@ -19,30 +18,39 @@ constexpr realtype conv_thresh = 1.0; namespace amici { -SteadystateProblem::SteadystateProblem(const Solver &solver, const Model &model) - : delta_(model.nx_solver), delta_old_(model.nx_solver), - ewt_(model.nx_solver), ewtQB_(model.nplist()), - x_old_(model.nx_solver), xdot_(model.nx_solver), - sdx_(model.nx_solver, model.nplist()), xB_(model.nJ * model.nx_solver), - xQ_(model.nJ * model.nx_solver), xQB_(model.nplist()), - xQBdot_(model.nplist()), max_steps_(solver.getNewtonMaxSteps()), - dJydx_(model.nJ * model.nx_solver * model.nt(), 0.0), - state_({INFINITY, // t - AmiVector(model.nx_solver), // x - AmiVector(model.nx_solver), // dx - AmiVectorArray(model.nx_solver, model.nplist()), // sx - model.getModelState()}), // state - atol_(solver.getAbsoluteToleranceSteadyState()), - rtol_(solver.getRelativeToleranceSteadyState()), - atol_sensi_(solver.getAbsoluteToleranceSteadyStateSensi()), - rtol_sensi_(solver.getRelativeToleranceSteadyStateSensi()), - atol_quad_(solver.getAbsoluteToleranceQuadratures()), - rtol_quad_(solver.getRelativeToleranceQuadratures()), - 
newton_solver_(NewtonSolver::getSolver(solver, model)), - damping_factor_mode_(solver.getNewtonDampingFactorMode()), - damping_factor_lower_bound_(solver.getNewtonDampingFactorLowerBound()), - newton_step_conv_(solver.getNewtonStepSteadyStateCheck()), - check_sensi_conv_(solver.getSensiSteadyStateCheck()) { +SteadystateProblem::SteadystateProblem(Solver const& solver, Model const& model) + : delta_(model.nx_solver) + , delta_old_(model.nx_solver) + , ewt_(model.nx_solver) + , ewtQB_(model.nplist()) + , x_old_(model.nx_solver) + , xdot_(model.nx_solver) + , sdx_(model.nx_solver, model.nplist()) + , xB_(model.nJ * model.nx_solver) + , xQ_(model.nJ * model.nx_solver) + , xQB_(model.nplist()) + , xQBdot_(model.nplist()) + , max_steps_(solver.getNewtonMaxSteps()) + , dJydx_(model.nJ * model.nx_solver * model.nt(), 0.0) + , state_( + {INFINITY, // t + AmiVector(model.nx_solver), // x + AmiVector(model.nx_solver), // dx + AmiVectorArray(model.nx_solver, model.nplist()), // sx + model.getModelState()} + ) + , // state + atol_(solver.getAbsoluteToleranceSteadyState()) + , rtol_(solver.getRelativeToleranceSteadyState()) + , atol_sensi_(solver.getAbsoluteToleranceSteadyStateSensi()) + , rtol_sensi_(solver.getRelativeToleranceSteadyStateSensi()) + , atol_quad_(solver.getAbsoluteToleranceQuadratures()) + , rtol_quad_(solver.getRelativeToleranceQuadratures()) + , newton_solver_(NewtonSolver::getSolver(solver, model)) + , damping_factor_mode_(solver.getNewtonDampingFactorMode()) + , damping_factor_lower_bound_(solver.getNewtonDampingFactorLowerBound()) + , newton_step_conv_(solver.getNewtonStepSteadyStateCheck()) + , check_sensi_conv_(solver.getSensiSteadyStateCheck()) { /* Check for compatibility of options */ if (solver.getSensitivityMethod() == SensitivityMethod::forward && solver.getSensitivityMethodPreequilibration() == @@ -53,9 +61,9 @@ SteadystateProblem::SteadystateProblem(const Solver &solver, const Model &model) "sensitivities during simulation"); } -void SteadystateProblem::workSteadyStateProblem(const Solver &solver, - Model &model, - int it) { +void SteadystateProblem::workSteadyStateProblem( + Solver const& solver, Model& model, int it +) { initializeForwardProblem(it, solver, model); /* Compute steady state, track computation time */ @@ -79,7 +87,8 @@ void SteadystateProblem::workSteadyStateProblem(const Solver &solver, } void SteadystateProblem::workSteadyStateBackwardProblem( - const Solver &solver, Model &model, const BackwardProblem *bwd) { + Solver const& solver, Model& model, BackwardProblem const* bwd +) { if (!initializeBackwardProblem(solver, model, bwd)) return; @@ -90,8 +99,9 @@ void SteadystateProblem::workSteadyStateBackwardProblem( cpu_timeB_ = (double)((clock() - starttime) * 1000) / CLOCKS_PER_SEC; } -void SteadystateProblem::findSteadyState(const Solver &solver, Model &model, - int it) { +void SteadystateProblem::findSteadyState( + Solver const& solver, Model& model, int it +) { steady_state_status_.resize(3, SteadyStateStatus::not_run); bool turnOffNewton = model.getSteadyStateSensitivityMode() == SteadyStateSensitivityMode::integrationOnly && @@ -145,8 +155,9 @@ void SteadystateProblem::findSteadyStateByNewtonsMethod(Model &model, } } -void SteadystateProblem::findSteadyStateBySimulation(const Solver &solver, - Model &model, int it) { +void SteadystateProblem::findSteadyStateBySimulation( + Solver const& solver, Model& model, int it +) { try { if (it < 0) { /* Preequilibration? 
-> Create a new solver instance for sim */ @@ -170,19 +181,26 @@ void SteadystateProblem::findSteadyStateBySimulation(const Solver &solver, SteadyStateStatus::failed_too_long_simulation; break; default: - model.app->warningF("AMICI:newton", - "AMICI newton method failed: %s\n", ex.what()); + if(model.logger) + model.logger->log( + LogSeverity::error, "NEWTON_FAILURE", + "AMICI newton method failed: %s", ex.what() + ); steady_state_status_[1] = SteadyStateStatus::failed; } } catch (AmiException const &ex) { - model.app->warningF("AMICI:equilibration", - "AMICI equilibration failed: %s\n", ex.what()); + if(model.logger) + model.logger->log( + LogSeverity::error, "EQUILIBRATION_FAILURE", + "AMICI equilibration failed: %s", ex.what() + ); steady_state_status_[1] = SteadyStateStatus::failed; } } -void SteadystateProblem::initializeForwardProblem(int it, const Solver &solver, - Model &model) { +void SteadystateProblem::initializeForwardProblem( + int it, Solver const& solver, Model& model +) { newton_solver_->reinitialize(); /* process solver handling for pre- or postequilibration */ if (it == -1) { @@ -209,9 +227,9 @@ void SteadystateProblem::initializeForwardProblem(int it, const Solver &solver, flagUpdatedState(); } -bool SteadystateProblem::initializeBackwardProblem(const Solver &solver, - Model &model, - const BackwardProblem *bwd) { +bool SteadystateProblem::initializeBackwardProblem( + Solver const& solver, Model& model, BackwardProblem const* bwd +) { newton_solver_->reinitialize(); /* note that state_ is still set from forward run */ if (bwd) { @@ -241,8 +259,9 @@ bool SteadystateProblem::initializeBackwardProblem(const Solver &solver, return true; } -void SteadystateProblem::computeSteadyStateQuadrature(const Solver &solver, - Model &model) { +void SteadystateProblem::computeSteadyStateQuadrature( + Solver const& solver, Model& model +) { /* This routine computes the quadratures: xQB = Integral[ xB(x(t), t, p) * dxdot/dp(x(t), t, p) | dt ] As we're in steady state, we have x(t) = x_ss (x_steadystate), hence @@ -301,8 +320,9 @@ void SteadystateProblem::getQuadratureByLinSolve(Model &model) { } } -void SteadystateProblem::getQuadratureBySimulation(const Solver &solver, - Model &model) { +void SteadystateProblem::getQuadratureBySimulation( + Solver const& solver, Model& model +) { /* If the Jacobian is singular, the integral over xB must be computed by usual integration over time, but simplifications can be applied: x is not time dependent, no forward trajectory is needed. */ @@ -363,9 +383,9 @@ void SteadystateProblem::writeErrorString(std::string *errorString, } } -bool SteadystateProblem::getSensitivityFlag(const Model &model, - const Solver &solver, int it, - SteadyStateContext context) { +bool SteadystateProblem::getSensitivityFlag( + Model const& model, Solver const& solver, int it, SteadyStateContext context +) { /* We need to check whether we need to compute forward sensitivities. Depending on the situation (pre-/postequilibration) and the solver settings, the logic may be involved and is handled here. 
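
The message identifiers introduced in these hunks ("NEWTON_FAILURE", "EQUILIBRATION_FAILURE", ...) make failed (pre-)equilibrations filterable on the Python side. A rough sketch under the assumption that `model`, `solver`, and `edata` are already set up and that the collected messages are exposed as `rdata.messages`; the retry shown is only one possible follow-up, not a prescribed fix.

```python
import amici

rdata = amici.runAmiciSimulation(model, solver, edata)

if rdata.status != amici.AMICI_SUCCESS:
    # inspect what went wrong, using the identifiers from the C++ changes above
    for msg in rdata.messages:
        print(f"[{msg.identifier}] {msg.message}")

    if any(msg.identifier == "EQUILIBRATION_FAILURE" for msg in rdata.messages):
        # one possible follow-up: allow more integration steps and retry
        solver.setMaxSteps(int(1e5))
        rdata = amici.runAmiciSimulation(model, solver, edata)
```
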
@@ -433,9 +453,10 @@ bool SteadystateProblem::getSensitivityFlag(const Model &model, } } -realtype SteadystateProblem::getWrmsNorm(const AmiVector &x, - const AmiVector &xdot, realtype atol, - realtype rtol, AmiVector &ewt) const { +realtype SteadystateProblem::getWrmsNorm( + AmiVector const& x, AmiVector const& xdot, realtype atol, realtype rtol, + AmiVector& ewt +) const { /* Depending on what convergence we want to check (xdot, sxdot, xQBdot) we need to pass ewt[QB], as xdot and xQBdot have different sizes */ /* ewt = x */ @@ -602,8 +623,9 @@ bool SteadystateProblem::updateDampingFactor(bool step_successful) { return step_successful; } -void SteadystateProblem::runSteadystateSimulation(const Solver &solver, - Model &model, bool backward) { +void SteadystateProblem::runSteadystateSimulation( + Solver const& solver, Model& model, bool backward +) { if (model.nx_solver == 0) return; /* Loop over steps and check for convergence @@ -689,10 +711,9 @@ void SteadystateProblem::runSteadystateSimulation(const Solver &solver, updateSensiSimulation(solver); } -std::unique_ptr -SteadystateProblem::createSteadystateSimSolver(const Solver &solver, - Model &model, bool forwardSensis, - bool backward) const { +std::unique_ptr SteadystateProblem::createSteadystateSimSolver( + Solver const& solver, Model& model, bool forwardSensis, bool backward +) const { /* Create new CVode solver object */ auto sim_solver = std::unique_ptr(solver.clone()); @@ -728,8 +749,9 @@ SteadystateProblem::createSteadystateSimSolver(const Solver &solver, return sim_solver; } -void SteadystateProblem::computeQBfromQ(Model &model, const AmiVector &yQ, - AmiVector &yQB) const { +void SteadystateProblem::computeQBfromQ( + Model& model, AmiVector const& yQ, AmiVector& yQB +) const { /* Compute the quadrature as the inner product: yQB = dxdotdp * yQ */ /* set to zero first, as multiplication adds to existing value */ @@ -737,7 +759,7 @@ void SteadystateProblem::computeQBfromQ(Model &model, const AmiVector &yQ, /* multiply */ if (model.pythonGenerated) { /* fill dxdotdp with current values */ - const auto &plist = model.getParameterList(); + auto const& plist = model.getParameterList(); model.fdxdotdp(state_.t, state_.x, state_.dx); model.get_dxdotdp_full().multiply(yQB.getNVector(), yQ.getNVector(), plist, true); @@ -747,7 +769,7 @@ void SteadystateProblem::computeQBfromQ(Model &model, const AmiVector &yQ, } } -void SteadystateProblem::getAdjointUpdates(Model &model, const ExpData &edata) { +void SteadystateProblem::getAdjointUpdates(Model& model, ExpData const& edata) { xB_.zero(); for (int it = 0; it < model.nt(); it++) { if (std::isinf(model.getTimepoint(it))) { @@ -766,7 +788,7 @@ void SteadystateProblem::flagUpdatedState() { sensis_updated_ = false; } -void SteadystateProblem::updateSensiSimulation(const Solver &solver) { +void SteadystateProblem::updateSensiSimulation(Solver const& solver) { if (sensis_updated_) return; state_.sx = solver.getStateSensitivity(state_.t); diff --git a/swig/CMakeLists.txt b/swig/CMakeLists.txt index 3705441e4f..835e24dad4 100644 --- a/swig/CMakeLists.txt +++ b/swig/CMakeLists.txt @@ -3,13 +3,13 @@ # # Use most recent SWIG version available -SET(CMAKE_FIND_PACKAGE_SORT_DIRECTION DEC) +set(CMAKE_FIND_PACKAGE_SORT_DIRECTION DEC) find_package(SWIG REQUIRED) set(SWIG_VERSION_MIN "3.0") -if (${SWIG_VERSION} VERSION_LESS ${SWIG_VERSION_MIN}) - message(FATAL_ERROR "Requiring SWIG>=${SWIG_VERSION_MIN} " - "but found only ${SWIG_VERSION}.") +if(${SWIG_VERSION} VERSION_LESS ${SWIG_VERSION_MIN}) + 
message(FATAL_ERROR "Requiring SWIG>=${SWIG_VERSION_MIN} " + "but found only ${SWIG_VERSION}.") endif() include(${SWIG_USE_FILE}) @@ -28,8 +28,7 @@ set(AMICI_INTERFACE_LIST ${CMAKE_CURRENT_SOURCE_DIR}/std_unique_ptr.i ${CMAKE_CURRENT_SOURCE_DIR}/hdf5.i ${CMAKE_CURRENT_SOURCE_DIR}/abstract_model.i - ${CMAKE_CURRENT_SOURCE_DIR}/stdvec2numpy.h -) + ${CMAKE_CURRENT_SOURCE_DIR}/stdvec2numpy.h) # Add target to show files in IDE add_custom_target(swigInterface SOURCES ${AMICI_INTERFACE_LIST}) diff --git a/swig/amici.i b/swig/amici.i index e137b54480..2091ae558e 100644 --- a/swig/amici.i +++ b/swig/amici.i @@ -63,6 +63,9 @@ nonstandard type conversions. } } +// Warning 503: Can't wrap 'operator ==' unless renamed to a valid identifier. +%rename("__eq__") operator ==; + %{ #define SWIG_FILE_WITH_INIT #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION @@ -138,6 +141,7 @@ wrap_unique_ptr(ExpDataPtr, amici::ExpData) %include model.i %include model_ode.i %include model_dae.i +%include logging.i %include rdata.i #ifndef AMICI_SWIG_WITHOUT_HDF5 @@ -154,9 +158,6 @@ wrap_unique_ptr(ExpDataPtr, amici::ExpData) %} // Add necessary symbols to generated header -// Ignore due to https://github.com/swig/swig/issues/1643 -%ignore amici::AmiciApplication::warningF; -%ignore amici::AmiciApplication::errorF; %{ #include "amici/amici.h" using namespace amici; @@ -171,6 +172,7 @@ using namespace amici; // Expose vectors %template(ExpDataPtrVector) std::vector; +%template(LogItemVector) std::vector; // Convert integer values to enum class diff --git a/swig/logging.i b/swig/logging.i new file mode 100644 index 0000000000..2c96882f96 --- /dev/null +++ b/swig/logging.i @@ -0,0 +1,11 @@ +%module logging + +%nodefaultctor amici::LogItem; + +// Add necessary symbols to generated header +%{ +#include "amici/logging.h" +%} + +// Process symbols in header +%include "amici/logging.h" diff --git a/tests/benchmark-models/benchmark_models.yaml b/tests/benchmark-models/benchmark_models.yaml index 49e509f421..d9ce6d0dd9 100644 --- a/tests/benchmark-models/benchmark_models.yaml +++ b/tests/benchmark-models/benchmark_models.yaml @@ -1,27 +1,42 @@ Bachmann_MSB2011: llh: 418.40573341425295 + t_sim: 0.05 + t_fwd: 3.0 + t_adj: 4.5 note: benchmark collection reference value matches up to sign when applying log10-correction +sum(log(meas*log(10)) / 2 -Becker_Science2010: - llh: -364.118614198023 - Beer_MolBioSystems2014: llh: 58622.9145631413 + t_sim: 0.05 + t_fwd: 0.5 + t_adj: 8.0 note: benchmark collection reference parameters do not match petab, but reference llh has been confirmed for parameters reported there up to sign Boehm_JProteomeRes2014: llh: -138.22199693517703 + t_sim: 0.005 + t_fwd: 0.05 + t_adj: 0.05 note: benchmark collection reference ignores factor 1/2 Borghans_BiophysChem1997: llh: 83.3237191357272 + t_sim: 0.005 + t_fwd: 0.5 + t_adj: 0.1 note: benchmark collection reference value matches up to sign when applying log10-correction +sum(log(meas*log(10)) / 2 Brannmark_JBC2010: llh: -141.889113770537 + t_sim: 0.005 + t_fwd: 0.5 + t_adj: 0.5 Bruno_JExpBot2016: llh: 46.688176988431806 + t_sim: 0.005 + t_fwd: 0.05 + t_adj: 0.1 note: benchmark collection only reports chi2 value, but llh value can be derived Chen_MSB2009: @@ -30,56 +45,68 @@ Chen_MSB2009: Crauste_CellSystems2017: llh: -190.96521897435176 + t_sim: 0.005 + t_fwd: 0.05 + t_adj: 0.05 note: benchmark collection only reports chi2 value, but llh value can be derived Elowitz_Nature2000: llh: 63.20279991419332 + t_sim: 0.005 + t_fwd: 0.1 + t_adj: 0.1 note: benchmark 
collection reference value matches up to sign when applying log10-correction +sum(log(meas*log(10))) / 2 Fiedler_BMC2016: llh: 58.58390161681 + t_sim: 0.005 + t_fwd: 0.05 + t_adj: 0.1 Fujita_SciSignal2010: llh: 53.08377736998929 - -# Hass_PONE2017 None + t_sim: 0.01 + t_fwd: 0.5 + t_adj: 1.0 Isensee_JCB2018: llh: -3949.375966548649 + t_sim: 0.1 + t_fwd: 15 + t_adj: 20 note: benchmark collection reference also includes prior valuation, tested based on agreement of chi2 values Lucarelli_CellSystems2018: llh: -1681.6059879426584 - -Merkle_PCB2016: - llh: -1388.59682706751 - note: unchecked - -Raia_CancerResearch2011: - llh: 690.619495552297 - note: unchecked + t_sim: 0.05 + t_fwd: 2.5 + t_adj: 2.0 Schwen_PONE2014: llh: -943.9992988598723 + t_sim: 0.01 + t_fwd: 0.5 + t_adj: 1.5 note: benchmark collection reference value does not match, but model outputs do. maybe due to priors Sneyd_PNAS2002: llh: 319.79177818768756 + t_sim: 0.005 + t_fwd: 0.1 + t_adj: 0.5 note: benchmark collection reference ignores factor 1/2 -Sobotta_Frontiers2017: - llh: -1346.75391686389 - note: unchecked - -Swameye_PNAS2003: - llh: -142.118024712038 - note: unchecked - Weber_BMC2015: llh: -296.2017922646865 + t_sim: 0.005 + t_fwd: 0.1 + t_adj: 0.1 note: benchmark collection reference ignores factor -1/2 Zheng_PNAS2012: llh: 278.33353271001477 + t_sim: 0.005 + t_fwd: 0.05 + t_adj: 0.05 note: benchmark collection reference ignores factor 1/2 diff --git a/tests/benchmark-models/evaluate_benchmark.py b/tests/benchmark-models/evaluate_benchmark.py new file mode 100644 index 0000000000..f1d88197db --- /dev/null +++ b/tests/benchmark-models/evaluate_benchmark.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 + +""" +Aggregate computation times from different benchmarks and plot +""" +import os +import pandas as pd +import seaborn as sns +import matplotlib.pyplot as plt + +# read benchmark results for different models + +outfile = 'computation_times.csv' +df = pd.concat([ + pd.read_csv(f, header=[0], index_col=[0]).rename(columns={'0': '_'.join(f.split('_')[:2])}).T + for f in os.listdir() if f.endswith('.csv') if f != outfile +]) +df.sort_values('np', inplace=True) + +df.to_csv(outfile) + + +ratios = pd.concat( + [df[sensi]/df['t_sim'].values for sensi in ['t_fwd', 't_adj']] + [df.np], axis=1, +).reset_index().melt(id_vars=['index', 'np']).rename( + columns={'index': 'model', 'variable': 'sensitivity', 'value': 'ratio'} +) +ratios['sensitivity'] = ratios['sensitivity'].replace( + {'t_fwd': 'forward', 't_adj': 'adjoint'} +) + + +plt.figure(figsize=(10, 5)) +g = sns.barplot( + data=ratios, + order=list(df.index), + x='model', + y='ratio', + hue='sensitivity' +) +for ir, row in ratios.iterrows(): + if row.sensitivity == 'adjoint': + continue + g.text(ir, row['np'], int(row['np']), color='black', ha="center", weight='bold') + +plt.xticks(rotation=30, horizontalalignment='right') +plt.tight_layout() +plt.savefig('computation_times.png') + diff --git a/tests/benchmark-models/test_benchmark_collection.sh b/tests/benchmark-models/test_benchmark_collection.sh index d8d9e5f2f5..0494f53f41 100755 --- a/tests/benchmark-models/test_benchmark_collection.sh +++ b/tests/benchmark-models/test_benchmark_collection.sh @@ -25,35 +25,33 @@ Weber_BMC2015 Zheng_PNAS2012" # -# -# PEtab needs fixing: Bachmann_MSB2011 -# -# Unsupported: -# -# Becker_Science2010: multiple models +# not merged: +# Becker_Science2010 (multiple models) https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab/tree/model_Becker_Science2010 +# Hass_PONE2017 (???) 
https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab/tree/model_Hass_PONE2017 +# Korkut_eLIFE2015 (???) https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab/tree/model_Korkut_eLIFE2015 +# Casaletto_PNAS2019 (yaml missing) https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab/tree/model_Casaletto_PNAS2019 +# Merkle_PCB2016 (model missing) https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab/tree/model_Merkle_PCB2016 +# Parmar_PCB2019 (SBML extensions) https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab/tree/model_Parmar_PCB2019 +# Swameye_PNAS2003 (splines) https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab/tree/model_Swameye_PNAS2003 +# Sobotta_Frontiers2017 (???) https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab/tree/model_Sobotta_Frontiers2017 +# Raia_CancerResearch2011 (state dependent sigmas) https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab/tree/model_Raia_CancerResearch2011 # # no reference value: -# Alkan_SciSignal2018 -# Blasi_CellSystems2016 -# Hass_PONE2017 -# Korkut_eLIFE2015 -# Perelson_Science1996 -# -# -# yaml missing: -# Casaletto_PNAS2019 +# Alkan_SciSignal2018 (d2d: Alkan_DDRP_SciSignal2018) +# Bertozzi_PNAS2020 (gh: vanako, doi: https://doi.org/10.1073/pnas.2006520117, code/data: https://github.com/gomohler/pnas2020 (R)) +# Blasi_CellSystems2016 (gh: Leonard Schmiester, doi: https://doi.org/10.1016/j.cels.2016.01.002, code/data: not available) +# Giordano_Nature2020 (gh: Paul Jonas Jost, doi: https://doi.org/10.1038/s41591-020-0883-7, code/data: http://users.dimi.uniud.it/~giulia.giordano/docs/papers/SIDARTHEcode.zip (MATLAB)) +# Laske_PLOSComputBiol2019 (gh: Clemens Peiter, doi: https://doi.org/10.1128/JVI.00080-12 (?), code/data: ???) +# Okuonghae_ChaosSolitonsFractals2020 (gh: Paul Jonas Jost, doi: https://doi.org/10.1016/j.chaos.2020.110032, code/data: ???) +# Oliveira_NatCommun2021 (gh: lorenapohl, doi: https://doi.org/10.1038/s41467-020-19798-3, code: https://github.com/cidacslab/Mathematical-and-Statistical-Modeling-of-COVID19-in-Brazil (python) data: https://infovis.sei.ba.gov.br/covid19/ ) +# Perelson_Science1996 (gh: Philipp Staedter, doi: https://doi.org/10.1126/science.271.5255.1582, code/data: ???) +# Rahman_MBS2016 (gh: Yannik Schaelte, doi: https://doi.org/10.1016/j.mbs.2016.07.009, code: not available, data: table in paper ...) 
+# Raimundez_PCB2020 (gh: Elba Raimundez, doi: https://doi.org/10.1371/journal.pcbi.1007147, code/data: https://zenodo.org/record/2908234#.Y5hUUS8w3yw (d2d)) +# SalazarCavazos_MBoC2020 (gh: Dilan Pathirana, doi: https://doi.org/10.1091/mbc.E19-09-0548, code/data: supplement (BNGL)) +# Zhao_QuantBiol2020 (gh: Iva Ewert, doi: https://doi.org/10.1007/s40484-020-0199-0, code: not available, data: table in supp) # -# Model missing: -# Merkle_PCB2016 -# -# SBML extensions: -# Parmar_PCB2019 -# -# Events: -# Swameye_PNAS2003 -# -# state-dependent sigmas: -# Raia_CancerResearch2011 +# covered by performance test: +# Froehlich_CellSystems2018 # # Unknown reasons: # Chen_MSB2009 @@ -90,8 +88,8 @@ for model in $models; do yaml="${model_dir}"/"${model}"/"${model}".yaml amici_model_dir=test_bmc/"${model}" mkdir -p "$amici_model_dir" - cmd_import="amici_import_petab --verbose -y ${yaml} -o ${amici_model_dir} -n ${model} --flatten" - cmd_run="$script_path/test_petab_model.py --verbose -y ${yaml} -d ${amici_model_dir} -m ${model} -c" + cmd_import="amici_import_petab -y ${yaml} -o ${amici_model_dir} -n ${model} --flatten" + cmd_run="$script_path/test_petab_model.py -y ${yaml} -d ${amici_model_dir} -m ${model} -c" printf '=%.0s' {1..40} printf " %s " "${model}" @@ -110,3 +108,5 @@ for model in $models; do echo echo done + +cd "$script_path" && python evaluate_benchmark.py diff --git a/tests/benchmark-models/test_petab_model.py b/tests/benchmark-models/test_petab_model.py index f5e58e7535..1f34e8cc0e 100755 --- a/tests/benchmark-models/test_petab_model.py +++ b/tests/benchmark-models/test_petab_model.py @@ -9,6 +9,8 @@ import logging import os import sys +import pandas as pd +import numpy as np import petab import yaml @@ -66,8 +68,8 @@ def main(): args = parse_cli_args() - if args.verbose: - logger.setLevel(logging.DEBUG) + loglevel = logging.DEBUG if args.verbose else logging.INFO + logger.setLevel(loglevel) logger.info(f"Simulating '{args.model_name}' " f"({args.model_directory}) using PEtab data from " @@ -84,15 +86,49 @@ def main(): amici_model = model_module.getModel() amici_solver = amici_model.getSolver() - if args.model_name == "Isensee_JCB2018": - amici_solver.setAbsoluteTolerance(1e-12) - amici_solver.setRelativeTolerance(1e-12) - - res = simulate_petab( - petab_problem=problem, amici_model=amici_model, - solver=amici_solver, log_level=logging.DEBUG) - rdatas = res[RDATAS] - llh = res[LLH] + amici_solver.setAbsoluteTolerance(1e-8) + amici_solver.setRelativeTolerance(1e-8) + amici_solver.setMaxSteps(int(1e4)) + if args.model_name in ('Brannmark_JBC2010', 'Isensee_JCB2018'): + amici_model.setSteadyStateSensitivityMode( + amici.SteadyStateSensitivityMode.integrationOnly + ) + + times = dict() + + for label, sensi_mode in { + 't_sim': amici.SensitivityMethod.none, + 't_fwd': amici.SensitivityMethod.forward, + 't_adj': amici.SensitivityOrder.second + }.items(): + amici_solver.setSensitivityMethod(sensi_mode) + if sensi_mode == amici.SensitivityMethod.none: + amici_solver.setSensitivityOrder(amici.SensitivityOrder.none) + else: + amici_solver.setSensitivityOrder(amici.SensitivityOrder.first) + + res_repeats = [ + simulate_petab(petab_problem=problem, amici_model=amici_model, + solver=amici_solver, log_level=loglevel) + for _ in range(3) # repeat to get more stable timings + ] + res = res_repeats[0] + + times[label] = np.mean([ + sum(r.cpu_time + r.cpu_timeB for r in res[RDATAS]) / 1000 + # only forwards/backwards simulation + for res in res_repeats + ]) + + if sensi_mode == amici.SensitivityMethod.none: 
+ rdatas = res[RDATAS] + llh = res[LLH] + + times['np'] = sum(problem.parameter_df[petab.ESTIMATE]) + + pd.Series(times).to_csv( + f'./tests/benchmark-models/{args.model_name}_benchmark.csv' + ) for rdata in rdatas: assert rdata.status == amici.AMICI_SUCCESS, \ @@ -126,22 +162,52 @@ def main(): try: ref_llh = refs[args.model_name]["llh"] - logger.info(f"Reference llh: {ref_llh}") - if abs(ref_llh - llh) < 1e-3: - logger.info(f"Computed llh {llh} matches reference " - f"{ref_llh}. Absolute difference is " - f"{ref_llh - llh}.") + rdiff = np.abs((llh - ref_llh) / ref_llh) + rtol = 1e-3 + adiff = np.abs(llh - ref_llh) + atol = 1e-3 + tolstr = f' Absolute difference is {adiff:.2e} ' \ + f'(tol {atol:.2e}) and relative difference is ' \ + f'{rdiff:.2e} (tol {rtol:.2e}).' + + if np.isclose(llh, ref_llh, rtol=rtol, atol=atol): + logger.info( + f"Computed llh {llh:.4e} matches reference {ref_llh:.4e}." + + tolstr + ) else: - logger.error(f"Computed llh {llh} does not match reference " - f"{ref_llh}. Absolute difference is " - f"{ref_llh - llh}." - f" Relative difference is {llh / ref_llh}") + logger.error( + f"Computed llh {llh:.4e} does not match reference " + f"{ref_llh:.4e}." + tolstr + ) sys.exit(1) except KeyError: logger.error("No reference likelihood found for " f"{args.model_name} in {references_yaml}") + for label, key in { + 'simulation': 't_sim', + 'adjoint sensitivity': 't_adj', + 'forward sensitivity': 't_fwd', + }.items(): + try: + ref = refs[args.model_name][key] + if times[key] > ref: + logger.error( + f"Computation time for {label} ({times[key]:.2e}) " + f"exceeds reference ({ref:.2e})." + ) + sys.exit(1) + else: + logger.info( + f"Computation time for {label} ({times[key]:.2e}) " + f"within reference ({ref:.2e})." + ) + except KeyError: + logger.error(f"No reference time for {label} found for " + f"{args.model_name} in {references_yaml}") + if __name__ == "__main__": main() diff --git a/tests/cpp/CMakeLists.txt b/tests/cpp/CMakeLists.txt index 51bed78f09..cf8ddece71 100644 --- a/tests/cpp/CMakeLists.txt +++ b/tests/cpp/CMakeLists.txt @@ -5,18 +5,18 @@ # Download and unpack googletest at configure time configure_file(CMakeLists.txt.in googletest-download/CMakeLists.txt) execute_process( - COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" . - RESULT_VARIABLE result - WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download) + COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" . + RESULT_VARIABLE result + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download) if(result) - message(FATAL_ERROR "CMake step for googletest failed: ${result}") + message(FATAL_ERROR "CMake step for googletest failed: ${result}") endif() execute_process( - COMMAND ${CMAKE_COMMAND} --build . - RESULT_VARIABLE result - WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download) + COMMAND ${CMAKE_COMMAND} --build . 
+ RESULT_VARIABLE result + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/googletest-download) if(result) - message(FATAL_ERROR "Build step for googletest failed: ${result}") + message(FATAL_ERROR "Build step for googletest failed: ${result}") endif() # Prevent overriding the parent project's compiler/linker settings on Windows @@ -40,18 +40,16 @@ add_library(Upstream::amici ALIAS amici) # Amici testing library add_library(amici-testing testfunctions.cpp) -target_compile_definitions(amici-testing - PUBLIC NEW_OPTION_FILE="${CMAKE_CURRENT_SOURCE_DIR}/testOptions.h5" - PUBLIC HDFFILE="${CMAKE_CURRENT_SOURCE_DIR}/expectedResults.h5" - PUBLIC HDFFILEWRITE="${CMAKE_CURRENT_SOURCE_DIR}/writeResults.h5" - ) -target_include_directories(amici-testing - PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} - ) -target_link_libraries(amici-testing - PUBLIC Upstream::amici - PUBLIC gtest_main - ) +target_compile_definitions( + amici-testing + PUBLIC NEW_OPTION_FILE="${CMAKE_CURRENT_SOURCE_DIR}/testOptions.h5" + PUBLIC HDFFILE="${CMAKE_CURRENT_SOURCE_DIR}/expectedResults.h5" + PUBLIC HDFFILEWRITE="${CMAKE_CURRENT_SOURCE_DIR}/writeResults.h5") +target_include_directories(amici-testing PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) +target_link_libraries( + amici-testing + PUBLIC Upstream::amici + PUBLIC gtest_main) # Names of models for which tests are to be run set(TEST_MODELS @@ -59,18 +57,18 @@ set(TEST_MODELS steadystate jakstat_adjoint jakstat_adjoint_o2 - neuron neuron_o2 + neuron + neuron_o2 events nested_events robertson - calvetti - ) + calvetti) if(ENABLE_SWIG AND ENABLE_PYTHON) - add_custom_target(python-tests - COMMAND ${CMAKE_SOURCE_DIR}/scripts/run-python-tests.sh - DEPENDS - ) + add_custom_target( + python-tests + COMMAND ${CMAKE_SOURCE_DIR}/scripts/run-python-tests.sh + DEPENDS) endif() add_subdirectory(unittests) @@ -78,38 +76,38 @@ add_subdirectory(unittests) include(ExternalProject) foreach(MODEL IN ITEMS ${TEST_MODELS}) - # Build model - string(CONCAT MODEL_LIBRARY_DIR - "${CMAKE_CURRENT_BINARY_DIR}/external_model_${MODEL}-prefix/src/" - "external_model_${MODEL}-build/") - string(CONCAT MODEL_LIBRARY - "${MODEL_LIBRARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}model_${MODEL}" - "${CMAKE_STATIC_LIBRARY_SUFFIX}") - ExternalProject_Add(external_model_${MODEL} - PREFIX "" - SOURCE_DIR "${CMAKE_SOURCE_DIR}/models/model_${MODEL}/" - INSTALL_COMMAND "" - TEST_COMMAND "" - BUILD_ALWAYS 1 - DEPENDS amici - BUILD_BYPRODUCTS "${MODEL_LIBRARY}" - ) - # Rebuild if amici files are updated - ExternalProject_Add_StepDependencies(external_model_${MODEL} build amici) + # Build model + string( + CONCAT MODEL_LIBRARY_DIR + "${CMAKE_CURRENT_BINARY_DIR}/external_model_${MODEL}-prefix/src/" + "external_model_${MODEL}-build/") + string( + CONCAT MODEL_LIBRARY + "${MODEL_LIBRARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}model_${MODEL}" + "${CMAKE_STATIC_LIBRARY_SUFFIX}") + ExternalProject_Add( + external_model_${MODEL} + PREFIX "" + SOURCE_DIR "${CMAKE_SOURCE_DIR}/models/model_${MODEL}/" + INSTALL_COMMAND "" + TEST_COMMAND "" + BUILD_ALWAYS 1 + DEPENDS amici + BUILD_BYPRODUCTS "${MODEL_LIBRARY}") + # Rebuild if amici files are updated + ExternalProject_Add_StepDependencies(external_model_${MODEL} build amici) - add_library(model_${MODEL} STATIC IMPORTED) - add_dependencies(model_${MODEL} external_model_${MODEL}) + add_library(model_${MODEL} STATIC IMPORTED) + add_dependencies(model_${MODEL} external_model_${MODEL}) - set_target_properties(model_${MODEL} - PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES - "${CMAKE_SOURCE_DIR}/models/model_${MODEL}/" - 
IMPORTED_LOCATION "${MODEL_LIBRARY}" - ) - # Build tests for this model - add_subdirectory(${MODEL}) - if(TARGET python-tests) - add_dependencies(python-tests external_model_${MODEL}) - endif() + set_target_properties( + model_${MODEL} + PROPERTIES INTERFACE_INCLUDE_DIRECTORIES + "${CMAKE_SOURCE_DIR}/models/model_${MODEL}/" IMPORTED_LOCATION + "${MODEL_LIBRARY}") + # Build tests for this model + add_subdirectory(${MODEL}) + if(TARGET python-tests) + add_dependencies(python-tests external_model_${MODEL}) + endif() endforeach() - diff --git a/tests/cpp/unittests/CMakeLists.txt b/tests/cpp/unittests/CMakeLists.txt index 5e1f7e68ac..51bef9e9d9 100644 --- a/tests/cpp/unittests/CMakeLists.txt +++ b/tests/cpp/unittests/CMakeLists.txt @@ -2,25 +2,18 @@ project(unittests) find_package(Boost COMPONENTS serialization) -set(SRC_LIST - testMisc.cpp - testExpData.cpp -) +set(SRC_LIST testMisc.cpp testExpData.cpp) add_executable(${PROJECT_NAME} ${SRC_LIST}) target_include_directories(${PROJECT_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) if(Boost_FOUND) - target_sources(${PROJECT_NAME} PRIVATE testSerialization.cpp) - target_include_directories(${PROJECT_NAME} PRIVATE "${Boost_INCLUDE_DIR}") + target_sources(${PROJECT_NAME} PRIVATE testSerialization.cpp) + target_include_directories(${PROJECT_NAME} PRIVATE "${Boost_INCLUDE_DIR}") endif() -target_link_libraries(${PROJECT_NAME} - amici-testing - Upstream::amici - ${Boost_LIBRARIES} - gtest_main - ) +target_link_libraries(${PROJECT_NAME} amici-testing Upstream::amici + ${Boost_LIBRARIES} gtest_main) include(GoogleTest) diff --git a/tests/performance/check_time.sh b/tests/performance/check_time.sh index d8f440e1f2..70030326c3 100755 --- a/tests/performance/check_time.sh +++ b/tests/performance/check_time.sh @@ -13,7 +13,7 @@ REF="$1" # Command to time CMD="${@:2}" # Logfile -LOG=$(tempfile) +LOG=$(mktemp) # Run and time /usr/bin/time -f %e ${CMD} 2>&1 | tee "$LOG" diff --git a/version.txt b/version.txt index a803cc227f..a551051694 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.14.0 +0.15.0
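
The timing thresholds added to benchmark_models.yaml (t_sim, t_fwd, t_adj, in seconds) are consumed by the checks in test_petab_model.py shown above. Condensed into a standalone sketch for illustration; the file path matches the repository layout, while the measured values are placeholders (in practice derived from ReturnData.cpu_time / cpu_timeB divided by 1000).

```python
import sys
import yaml

with open("tests/benchmark-models/benchmark_models.yaml") as f:
    refs = yaml.safe_load(f)

# placeholder measured times in seconds for one model
times = {"t_sim": 0.004, "t_fwd": 0.04, "t_adj": 0.04}
ref = refs["Boehm_JProteomeRes2014"]

failed = False
for key in ("t_sim", "t_fwd", "t_adj"):
    if key not in ref:
        continue  # no reference time recorded for this model
    ok = times[key] <= ref[key]
    print(f"{key}: {times[key]:.2e} {'within' if ok else 'exceeds'} "
          f"reference {ref[key]:.2e}")
    failed = failed or not ok

if failed:
    sys.exit(1)
```
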