From d87b87bb880c3e33308c2c757927ffb6c243d717 Mon Sep 17 00:00:00 2001 From: Radkesvat <134321679+radkesvat@users.noreply.github.com> Date: Fri, 12 Jul 2024 15:28:20 +0000 Subject: [PATCH] add mimalloc internally to fix libatomic being linked dynamically --- .gitattributes | 3 +- ww/CMakeLists.txt | 15 +- ww/managers/mimalloc/.gitattributes | 12 + ww/managers/mimalloc/.gitignore | 11 + ww/managers/mimalloc/CMakeLists.txt | 596 ++++ ww/managers/mimalloc/LICENSE | 21 + ww/managers/mimalloc/SECURITY.md | 41 + ww/managers/mimalloc/azure-pipelines.yml | 197 ++ .../mimalloc/bin/mimalloc-redirect.dll | Bin 0 -> 68096 bytes .../mimalloc/bin/mimalloc-redirect.lib | Bin 0 -> 2874 bytes .../mimalloc/bin/mimalloc-redirect32.dll | Bin 0 -> 41984 bytes .../mimalloc/bin/mimalloc-redirect32.lib | Bin 0 -> 2928 bytes ww/managers/mimalloc/bin/readme.md | 71 + ww/managers/mimalloc/cmake/JoinPaths.cmake | 23 + .../cmake/mimalloc-config-version.cmake | 19 + .../mimalloc/cmake/mimalloc-config.cmake | 14 + .../bench-c5-18xlarge-2020-01-20-a.svg | 887 ++++++ .../bench-c5-18xlarge-2020-01-20-b.svg | 1185 ++++++++ .../bench-c5-18xlarge-2020-01-20-rss-a.svg | 757 +++++ .../bench-c5-18xlarge-2020-01-20-rss-b.svg | 1028 +++++++ .../mimalloc/doc/bench-2020/bench-r5a-1.svg | 769 +++++ .../bench-r5a-12xlarge-2020-01-16-a.svg | 868 ++++++ .../bench-r5a-12xlarge-2020-01-16-b.svg | 1157 +++++++ .../mimalloc/doc/bench-2020/bench-r5a-2.svg | 983 ++++++ .../doc/bench-2020/bench-r5a-rss-1.svg | 683 +++++ .../doc/bench-2020/bench-r5a-rss-2.svg | 854 ++++++ .../doc/bench-2020/bench-spec-rss.svg | 713 +++++ .../mimalloc/doc/bench-2020/bench-spec.svg | 713 +++++ .../mimalloc/doc/bench-2020/bench-z4-1.svg | 890 ++++++ .../mimalloc/doc/bench-2020/bench-z4-2.svg | 1146 +++++++ .../doc/bench-2020/bench-z4-rss-1.svg | 796 +++++ .../doc/bench-2020/bench-z4-rss-2.svg | 974 ++++++ .../bench-amd5950x-2021-01-30-a.svg | 952 ++++++ .../bench-amd5950x-2021-01-30-b.svg | 1255 ++++++++ .../bench-c5-18xlarge-2021-01-30-a.svg | 955 ++++++ .../bench-c5-18xlarge-2021-01-30-b.svg | 1269 ++++++++ .../bench-c5-18xlarge-2021-01-30-rss-a.svg | 836 ++++++ .../bench-c5-18xlarge-2021-01-30-rss-b.svg | 1131 +++++++ .../bench-2021/bench-macmini-2021-01-30.svg | 766 +++++ ww/managers/mimalloc/doc/doxyfile | 2659 +++++++++++++++++ ww/managers/mimalloc/doc/ds-logo.jpg | Bin 0 -> 181497 bytes ww/managers/mimalloc/doc/ds-logo.png | Bin 0 -> 121150 bytes ww/managers/mimalloc/doc/mimalloc-doc.h | 1281 ++++++++ ww/managers/mimalloc/doc/mimalloc-doxygen.css | 49 + .../mimalloc/doc/mimalloc-logo-100.png | Bin 0 -> 3532 bytes ww/managers/mimalloc/doc/mimalloc-logo.png | Bin 0 -> 73097 bytes ww/managers/mimalloc/doc/mimalloc-logo.svg | 161 + ww/managers/mimalloc/doc/spades-logo.png | Bin 0 -> 34583 bytes ww/managers/mimalloc/doc/unreal-logo.svg | 43 + .../mimalloc/docker/alpine-arm32v7/Dockerfile | 28 + ww/managers/mimalloc/docker/alpine/Dockerfile | 23 + .../mimalloc/docker/manylinux-x64/Dockerfile | 23 + ww/managers/mimalloc/docker/readme.md | 10 + .../ide/vs2017/mimalloc-override-test.vcxproj | 190 ++ .../ide/vs2017/mimalloc-override.vcxproj | 260 ++ .../ide/vs2017/mimalloc-test-stress.vcxproj | 159 + .../mimalloc/ide/vs2017/mimalloc-test.vcxproj | 158 + ww/managers/mimalloc/ide/vs2017/mimalloc.sln | 71 + .../mimalloc/ide/vs2017/mimalloc.vcxproj | 260 ++ .../ide/vs2019/mimalloc-override-test.vcxproj | 190 ++ .../ide/vs2019/mimalloc-override.vcxproj | 260 ++ .../ide/vs2019/mimalloc-test-api.vcxproj | 155 + .../ide/vs2019/mimalloc-test-stress.vcxproj | 159 + 
.../mimalloc/ide/vs2019/mimalloc-test.vcxproj | 158 + ww/managers/mimalloc/ide/vs2019/mimalloc.sln | 81 + .../mimalloc/ide/vs2019/mimalloc.vcxproj | 258 ++ .../ide/vs2022/mimalloc-override-test.vcxproj | 190 ++ .../ide/vs2022/mimalloc-override.vcxproj | 271 ++ .../ide/vs2022/mimalloc-test-api.vcxproj | 162 + .../ide/vs2022/mimalloc-test-stress.vcxproj | 159 + .../mimalloc/ide/vs2022/mimalloc-test.vcxproj | 158 + ww/managers/mimalloc/ide/vs2022/mimalloc.sln | 81 + .../mimalloc/ide/vs2022/mimalloc.vcxproj | 264 ++ .../mimalloc/include/mimalloc-new-delete.h | 66 + .../mimalloc/include/mimalloc-override.h | 68 + ww/managers/mimalloc/include/mimalloc.h | 569 ++++ .../mimalloc/include/mimalloc/atomic.h | 393 +++ .../mimalloc/include/mimalloc/internal.h | 1018 +++++++ ww/managers/mimalloc/include/mimalloc/prim.h | 373 +++ ww/managers/mimalloc/include/mimalloc/track.h | 149 + ww/managers/mimalloc/include/mimalloc/types.h | 705 +++++ ww/managers/mimalloc/mimalloc.pc.in | 11 + ww/managers/mimalloc/readme.md | 862 ++++++ ww/managers/mimalloc/src/alloc-aligned.c | 312 ++ ww/managers/mimalloc/src/alloc-override.c | 314 ++ ww/managers/mimalloc/src/alloc-posix.c | 185 ++ ww/managers/mimalloc/src/alloc.c | 598 ++++ ww/managers/mimalloc/src/arena.c | 1108 +++++++ ww/managers/mimalloc/src/bitmap.c | 436 +++ ww/managers/mimalloc/src/bitmap.h | 115 + ww/managers/mimalloc/src/free.c | 530 ++++ ww/managers/mimalloc/src/heap.c | 653 ++++ ww/managers/mimalloc/src/init.c | 714 +++++ ww/managers/mimalloc/src/libc.c | 273 ++ ww/managers/mimalloc/src/options.c | 526 ++++ ww/managers/mimalloc/src/os.c | 678 +++++ ww/managers/mimalloc/src/page-queue.c | 343 +++ ww/managers/mimalloc/src/page.c | 943 ++++++ .../mimalloc/src/prim/emscripten/prim.c | 244 ++ .../src/prim/osx/alloc-override-zone.c | 461 +++ ww/managers/mimalloc/src/prim/osx/prim.c | 9 + ww/managers/mimalloc/src/prim/prim.c | 27 + ww/managers/mimalloc/src/prim/readme.md | 9 + ww/managers/mimalloc/src/prim/unix/prim.c | 878 ++++++ ww/managers/mimalloc/src/prim/wasi/prim.c | 280 ++ .../src/prim/windows/etw-mimalloc.wprp | 61 + ww/managers/mimalloc/src/prim/windows/etw.h | 905 ++++++ ww/managers/mimalloc/src/prim/windows/etw.man | Bin 0 -> 3926 bytes ww/managers/mimalloc/src/prim/windows/prim.c | 663 ++++ .../mimalloc/src/prim/windows/readme.md | 17 + ww/managers/mimalloc/src/random.c | 254 ++ ww/managers/mimalloc/src/segment-map.c | 155 + ww/managers/mimalloc/src/segment.c | 1524 ++++++++++ ww/managers/mimalloc/src/static.c | 41 + ww/managers/mimalloc/src/stats.c | 467 +++ ww/managers/mimalloc/test/CMakeLists.txt | 54 + .../mimalloc/test/main-override-static.c | 415 +++ ww/managers/mimalloc/test/main-override.c | 36 + ww/managers/mimalloc/test/main-override.cpp | 400 +++ ww/managers/mimalloc/test/main.c | 46 + ww/managers/mimalloc/test/readme.md | 16 + ww/managers/mimalloc/test/test-api-fill.c | 343 +++ ww/managers/mimalloc/test/test-api.c | 451 +++ ww/managers/mimalloc/test/test-stress.c | 364 +++ ww/managers/mimalloc/test/test-wrong.c | 92 + ww/managers/mimalloc/test/testhelper.h | 49 + 126 files changed, 50699 insertions(+), 15 deletions(-) create mode 100644 ww/managers/mimalloc/.gitattributes create mode 100644 ww/managers/mimalloc/.gitignore create mode 100644 ww/managers/mimalloc/CMakeLists.txt create mode 100644 ww/managers/mimalloc/LICENSE create mode 100644 ww/managers/mimalloc/SECURITY.md create mode 100644 ww/managers/mimalloc/azure-pipelines.yml create mode 100644 ww/managers/mimalloc/bin/mimalloc-redirect.dll create mode 100644 
ww/managers/mimalloc/bin/mimalloc-redirect.lib create mode 100644 ww/managers/mimalloc/bin/mimalloc-redirect32.dll create mode 100644 ww/managers/mimalloc/bin/mimalloc-redirect32.lib create mode 100644 ww/managers/mimalloc/bin/readme.md create mode 100644 ww/managers/mimalloc/cmake/JoinPaths.cmake create mode 100644 ww/managers/mimalloc/cmake/mimalloc-config-version.cmake create mode 100644 ww/managers/mimalloc/cmake/mimalloc-config.cmake create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-a.svg create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-b.svg create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-a.svg create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-rss-b.svg create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-r5a-1.svg create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-a.svg create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-r5a-12xlarge-2020-01-16-b.svg create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-r5a-2.svg create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-r5a-rss-1.svg create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-r5a-rss-2.svg create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-spec-rss.svg create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-spec.svg create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-z4-1.svg create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-z4-2.svg create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-z4-rss-1.svg create mode 100644 ww/managers/mimalloc/doc/bench-2020/bench-z4-rss-2.svg create mode 100644 ww/managers/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-a.svg create mode 100644 ww/managers/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-b.svg create mode 100644 ww/managers/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-a.svg create mode 100644 ww/managers/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-b.svg create mode 100644 ww/managers/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-a.svg create mode 100644 ww/managers/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-b.svg create mode 100644 ww/managers/mimalloc/doc/bench-2021/bench-macmini-2021-01-30.svg create mode 100644 ww/managers/mimalloc/doc/doxyfile create mode 100644 ww/managers/mimalloc/doc/ds-logo.jpg create mode 100644 ww/managers/mimalloc/doc/ds-logo.png create mode 100644 ww/managers/mimalloc/doc/mimalloc-doc.h create mode 100644 ww/managers/mimalloc/doc/mimalloc-doxygen.css create mode 100644 ww/managers/mimalloc/doc/mimalloc-logo-100.png create mode 100644 ww/managers/mimalloc/doc/mimalloc-logo.png create mode 100644 ww/managers/mimalloc/doc/mimalloc-logo.svg create mode 100644 ww/managers/mimalloc/doc/spades-logo.png create mode 100644 ww/managers/mimalloc/doc/unreal-logo.svg create mode 100644 ww/managers/mimalloc/docker/alpine-arm32v7/Dockerfile create mode 100644 ww/managers/mimalloc/docker/alpine/Dockerfile create mode 100644 ww/managers/mimalloc/docker/manylinux-x64/Dockerfile create mode 100644 ww/managers/mimalloc/docker/readme.md create mode 100644 ww/managers/mimalloc/ide/vs2017/mimalloc-override-test.vcxproj create mode 100644 ww/managers/mimalloc/ide/vs2017/mimalloc-override.vcxproj create mode 100644 ww/managers/mimalloc/ide/vs2017/mimalloc-test-stress.vcxproj create mode 100644 ww/managers/mimalloc/ide/vs2017/mimalloc-test.vcxproj create mode 100644 
ww/managers/mimalloc/ide/vs2017/mimalloc.sln create mode 100644 ww/managers/mimalloc/ide/vs2017/mimalloc.vcxproj create mode 100644 ww/managers/mimalloc/ide/vs2019/mimalloc-override-test.vcxproj create mode 100644 ww/managers/mimalloc/ide/vs2019/mimalloc-override.vcxproj create mode 100644 ww/managers/mimalloc/ide/vs2019/mimalloc-test-api.vcxproj create mode 100644 ww/managers/mimalloc/ide/vs2019/mimalloc-test-stress.vcxproj create mode 100644 ww/managers/mimalloc/ide/vs2019/mimalloc-test.vcxproj create mode 100644 ww/managers/mimalloc/ide/vs2019/mimalloc.sln create mode 100644 ww/managers/mimalloc/ide/vs2019/mimalloc.vcxproj create mode 100644 ww/managers/mimalloc/ide/vs2022/mimalloc-override-test.vcxproj create mode 100644 ww/managers/mimalloc/ide/vs2022/mimalloc-override.vcxproj create mode 100644 ww/managers/mimalloc/ide/vs2022/mimalloc-test-api.vcxproj create mode 100644 ww/managers/mimalloc/ide/vs2022/mimalloc-test-stress.vcxproj create mode 100644 ww/managers/mimalloc/ide/vs2022/mimalloc-test.vcxproj create mode 100644 ww/managers/mimalloc/ide/vs2022/mimalloc.sln create mode 100644 ww/managers/mimalloc/ide/vs2022/mimalloc.vcxproj create mode 100644 ww/managers/mimalloc/include/mimalloc-new-delete.h create mode 100644 ww/managers/mimalloc/include/mimalloc-override.h create mode 100644 ww/managers/mimalloc/include/mimalloc.h create mode 100644 ww/managers/mimalloc/include/mimalloc/atomic.h create mode 100644 ww/managers/mimalloc/include/mimalloc/internal.h create mode 100644 ww/managers/mimalloc/include/mimalloc/prim.h create mode 100644 ww/managers/mimalloc/include/mimalloc/track.h create mode 100644 ww/managers/mimalloc/include/mimalloc/types.h create mode 100644 ww/managers/mimalloc/mimalloc.pc.in create mode 100644 ww/managers/mimalloc/readme.md create mode 100644 ww/managers/mimalloc/src/alloc-aligned.c create mode 100644 ww/managers/mimalloc/src/alloc-override.c create mode 100644 ww/managers/mimalloc/src/alloc-posix.c create mode 100644 ww/managers/mimalloc/src/alloc.c create mode 100644 ww/managers/mimalloc/src/arena.c create mode 100644 ww/managers/mimalloc/src/bitmap.c create mode 100644 ww/managers/mimalloc/src/bitmap.h create mode 100644 ww/managers/mimalloc/src/free.c create mode 100644 ww/managers/mimalloc/src/heap.c create mode 100644 ww/managers/mimalloc/src/init.c create mode 100644 ww/managers/mimalloc/src/libc.c create mode 100644 ww/managers/mimalloc/src/options.c create mode 100644 ww/managers/mimalloc/src/os.c create mode 100644 ww/managers/mimalloc/src/page-queue.c create mode 100644 ww/managers/mimalloc/src/page.c create mode 100644 ww/managers/mimalloc/src/prim/emscripten/prim.c create mode 100644 ww/managers/mimalloc/src/prim/osx/alloc-override-zone.c create mode 100644 ww/managers/mimalloc/src/prim/osx/prim.c create mode 100644 ww/managers/mimalloc/src/prim/prim.c create mode 100644 ww/managers/mimalloc/src/prim/readme.md create mode 100644 ww/managers/mimalloc/src/prim/unix/prim.c create mode 100644 ww/managers/mimalloc/src/prim/wasi/prim.c create mode 100644 ww/managers/mimalloc/src/prim/windows/etw-mimalloc.wprp create mode 100644 ww/managers/mimalloc/src/prim/windows/etw.h create mode 100644 ww/managers/mimalloc/src/prim/windows/etw.man create mode 100644 ww/managers/mimalloc/src/prim/windows/prim.c create mode 100644 ww/managers/mimalloc/src/prim/windows/readme.md create mode 100644 ww/managers/mimalloc/src/random.c create mode 100644 ww/managers/mimalloc/src/segment-map.c create mode 100644 ww/managers/mimalloc/src/segment.c create mode 100644 
ww/managers/mimalloc/src/static.c create mode 100644 ww/managers/mimalloc/src/stats.c create mode 100644 ww/managers/mimalloc/test/CMakeLists.txt create mode 100644 ww/managers/mimalloc/test/main-override-static.c create mode 100644 ww/managers/mimalloc/test/main-override.c create mode 100644 ww/managers/mimalloc/test/main-override.cpp create mode 100644 ww/managers/mimalloc/test/main.c create mode 100644 ww/managers/mimalloc/test/readme.md create mode 100644 ww/managers/mimalloc/test/test-api-fill.c create mode 100644 ww/managers/mimalloc/test/test-api.c create mode 100644 ww/managers/mimalloc/test/test-stress.c create mode 100644 ww/managers/mimalloc/test/test-wrong.c create mode 100644 ww/managers/mimalloc/test/testhelper.h diff --git a/.gitattributes b/.gitattributes index db06dd35..95579e22 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,2 +1,3 @@ *.h linguist-language=C -ww/lwip/** linguist-vendored \ No newline at end of file +ww/lwip/** linguist-vendored +ww/managers/mimalloc/** linguist-vendored \ No newline at end of file diff --git a/ww/CMakeLists.txt b/ww/CMakeLists.txt index bd8e7f19..a8253c46 100644 --- a/ww/CMakeLists.txt +++ b/ww/CMakeLists.txt @@ -80,20 +80,7 @@ if(cjson_ADDED) endif() - - -CPMAddPackage( - NAME mimalloc - VERSION 2.1.7 - GITHUB_REPOSITORY microsoft/mimalloc - OPTIONS - "MI_OVERRIDE OFF" - "MI_BUILD_TESTS OFF" - "MI_BUILD_SHARED OFF" - "MI_SKIP_COLLECT_ON_EXIT ON" -) - - +add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/managers/mimalloc) target_link_libraries(ww PUBLIC mimalloc-static) diff --git a/ww/managers/mimalloc/.gitattributes b/ww/managers/mimalloc/.gitattributes new file mode 100644 index 00000000..0332e031 --- /dev/null +++ b/ww/managers/mimalloc/.gitattributes @@ -0,0 +1,12 @@ +# default behavior is to always use unix style line endings +* text eol=lf +*.png binary +*.pdn binary +*.jpg binary +*.sln binary +*.suo binary +*.vcproj binary +*.patch binary +*.dll binary +*.lib binary +*.exe binary diff --git a/ww/managers/mimalloc/.gitignore b/ww/managers/mimalloc/.gitignore new file mode 100644 index 00000000..df1d58eb --- /dev/null +++ b/ww/managers/mimalloc/.gitignore @@ -0,0 +1,11 @@ +ide/vs20??/*.db +ide/vs20??/*.opendb +ide/vs20??/*.user +ide/vs20??/*.vcxproj.filters +ide/vs20??/.vs +ide/vs20??/VTune* +out/ +docs/ +*.zip +*.tar +*.gz diff --git a/ww/managers/mimalloc/CMakeLists.txt b/ww/managers/mimalloc/CMakeLists.txt new file mode 100644 index 00000000..f6aa7f9b --- /dev/null +++ b/ww/managers/mimalloc/CMakeLists.txt @@ -0,0 +1,596 @@ +cmake_minimum_required(VERSION 3.18) +project(libmimalloc C CXX) + +set(CMAKE_C_STANDARD 11) +set(CMAKE_CXX_STANDARD 17) + +option(MI_SECURE "Use full security mitigations (like guard pages, allocation randomization, double-free mitigation, and free-list corruption detection)" OFF) +option(MI_DEBUG_FULL "Use full internal heap invariant checking in DEBUG mode (expensive)" OFF) +option(MI_PADDING "Enable padding to detect heap block overflow (always on in DEBUG or SECURE mode, or with Valgrind/ASAN)" OFF) +option(MI_OVERRIDE "Override the standard malloc interface (e.g. 
define entry points for malloc() etc)" OFF) +option(MI_XMALLOC "Enable abort() call on memory allocation failure by default" OFF) +option(MI_SHOW_ERRORS "Show error and warning messages by default (only enabled by default in DEBUG mode)" OFF) +option(MI_TRACK_VALGRIND "Compile with Valgrind support (adds a small overhead)" OFF) +option(MI_TRACK_ASAN "Compile with address sanitizer support (adds a small overhead)" OFF) +option(MI_TRACK_ETW "Compile with Windows event tracing (ETW) support (adds a small overhead)" OFF) +option(MI_USE_CXX "Use the C++ compiler to compile the library (instead of the C compiler)" OFF) +option(MI_SEE_ASM "Generate assembly files" OFF) +option(MI_OSX_INTERPOSE "Use interpose to override standard malloc on macOS" ON) +option(MI_OSX_ZONE "Use malloc zone to override standard malloc on macOS" ON) +option(MI_WIN_REDIRECT "Use redirection module ('mimalloc-redirect') on Windows if compiling mimalloc as a DLL" ON) +option(MI_LOCAL_DYNAMIC_TLS "Use slightly slower, dlopen-compatible TLS mechanism (Unix)" OFF) +option(MI_LIBC_MUSL "Set this when linking with musl libc" OFF) +option(MI_BUILD_SHARED "Build shared library" OFF) +option(MI_BUILD_STATIC "Build static library" ON) +option(MI_BUILD_OBJECT "Build object library" ON) +option(MI_BUILD_TESTS "Build test executables" OFF) +option(MI_DEBUG_TSAN "Build with thread sanitizer (needs clang)" OFF) +option(MI_DEBUG_UBSAN "Build with undefined-behavior sanitizer (needs clang++)" OFF) +option(MI_SKIP_COLLECT_ON_EXIT "Skip collecting memory on program exit" ON) +option(MI_NO_PADDING "Force no use of padding even in DEBUG mode etc." OFF) +option(MI_INSTALL_TOPLEVEL "Install directly into $CMAKE_INSTALL_PREFIX instead of PREFIX/lib/mimalloc-version" OFF) +option(MI_NO_THP "Disable transparent huge pages support on Linux/Android for the mimalloc process only" OFF) + +# deprecated options +option(MI_CHECK_FULL "Use full internal invariant checking in DEBUG mode (deprecated, use MI_DEBUG_FULL instead)" OFF) +option(MI_USE_LIBATOMIC "Explicitly link with -latomic (on older systems) (deprecated and detected automatically)" OFF) + +include(CheckLinkerFlag) # requires cmake 3.18 +include(CheckIncludeFiles) +include(GNUInstallDirs) +include("cmake/mimalloc-config-version.cmake") + +set(mi_sources + src/alloc.c + src/alloc-aligned.c + src/alloc-posix.c + src/arena.c + src/bitmap.c + src/heap.c + src/init.c + src/libc.c + src/options.c + src/os.c + src/page.c + src/random.c + src/segment.c + src/segment-map.c + src/stats.c + src/prim/prim.c) + +set(mi_cflags "") +set(mi_cflags_static "") # extra flags for a static library build +set(mi_cflags_dynamic "") # extra flags for a shared-object library build +set(mi_defines "") +set(mi_libraries "") + +# ----------------------------------------------------------------------------- +# Convenience: set default build type depending on the build directory +# ----------------------------------------------------------------------------- + +message(STATUS "") +if (NOT CMAKE_BUILD_TYPE) + if ("${CMAKE_BINARY_DIR}" MATCHES ".*(D|d)ebug$" OR MI_DEBUG_FULL) + message(STATUS "No build type selected, default to: Debug") + set(CMAKE_BUILD_TYPE "Debug") + else() + message(STATUS "No build type selected, default to: Release") + set(CMAKE_BUILD_TYPE "Release") + endif() +endif() + +if("${CMAKE_BINARY_DIR}" MATCHES ".*(S|s)ecure$") + message(STATUS "Default to secure build") + set(MI_SECURE "ON") +endif() + + +# ----------------------------------------------------------------------------- +# Process options +# 
----------------------------------------------------------------------------- + +# put -Wall early so other warnings can be disabled selectively +if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang") + list(APPEND mi_cflags -Wall -Wextra -Wpedantic) +endif() +if(CMAKE_C_COMPILER_ID MATCHES "GNU") + list(APPEND mi_cflags -Wall -Wextra) +endif() +if(CMAKE_C_COMPILER_ID MATCHES "Intel") + list(APPEND mi_cflags -Wall) +endif() + +if(CMAKE_C_COMPILER_ID MATCHES "MSVC|Intel") + set(MI_USE_CXX "ON") +endif() + +if(MI_OVERRIDE) + message(STATUS "Override standard malloc (MI_OVERRIDE=ON)") + if(APPLE) + if(MI_OSX_ZONE) + # use zone's on macOS + message(STATUS " Use malloc zone to override malloc (MI_OSX_ZONE=ON)") + list(APPEND mi_sources src/prim/osx/alloc-override-zone.c) + list(APPEND mi_defines MI_OSX_ZONE=1) + if (NOT MI_OSX_INTERPOSE) + message(STATUS " WARNING: zone overriding usually also needs interpose (use -DMI_OSX_INTERPOSE=ON)") + endif() + endif() + if(MI_OSX_INTERPOSE) + # use interpose on macOS + message(STATUS " Use interpose to override malloc (MI_OSX_INTERPOSE=ON)") + list(APPEND mi_defines MI_OSX_INTERPOSE=1) + if (NOT MI_OSX_ZONE) + message(STATUS " WARNING: interpose usually also needs zone overriding (use -DMI_OSX_INTERPOSE=ON)") + endif() + endif() + if(MI_USE_CXX AND MI_OSX_INTERPOSE) + message(STATUS " WARNING: if dynamically overriding malloc/free, it is more reliable to build mimalloc as C code (use -DMI_USE_CXX=OFF)") + endif() + endif() +endif() + +if(WIN32) + if (MI_WIN_REDIRECT) + if (MSVC_C_ARCHITECTURE_ID MATCHES "ARM") + message(STATUS "Cannot use redirection on Windows ARM (MI_WIN_REDIRECT=OFF)") + set(MI_WIN_REDIRECT OFF) + endif() + endif() + if (NOT MI_WIN_REDIRECT) + # use a negative define for backward compatibility + list(APPEND mi_defines MI_WIN_NOREDIRECT=1) + endif() +endif() + +if(MI_SECURE) + message(STATUS "Set full secure build (MI_SECURE=ON)") + list(APPEND mi_defines MI_SECURE=4) +endif() + +if(MI_TRACK_VALGRIND) + CHECK_INCLUDE_FILES("valgrind/valgrind.h;valgrind/memcheck.h" MI_HAS_VALGRINDH) + if (NOT MI_HAS_VALGRINDH) + set(MI_TRACK_VALGRIND OFF) + message(WARNING "Cannot find the 'valgrind/valgrind.h' and 'valgrind/memcheck.h' -- install valgrind first") + message(STATUS "Compile **without** Valgrind support (MI_TRACK_VALGRIND=OFF)") + else() + message(STATUS "Compile with Valgrind support (MI_TRACK_VALGRIND=ON)") + list(APPEND mi_defines MI_TRACK_VALGRIND=1) + endif() +endif() + +if(MI_TRACK_ASAN) + if (APPLE AND MI_OVERRIDE) + set(MI_TRACK_ASAN OFF) + message(WARNING "Cannot enable address sanitizer support on macOS if MI_OVERRIDE is ON (MI_TRACK_ASAN=OFF)") + endif() + if (MI_TRACK_VALGRIND) + set(MI_TRACK_ASAN OFF) + message(WARNING "Cannot enable address sanitizer support with also Valgrind support enabled (MI_TRACK_ASAN=OFF)") + endif() + if(MI_TRACK_ASAN) + CHECK_INCLUDE_FILES("sanitizer/asan_interface.h" MI_HAS_ASANH) + if (NOT MI_HAS_ASANH) + set(MI_TRACK_ASAN OFF) + message(WARNING "Cannot find the 'sanitizer/asan_interface.h' -- install address sanitizer support first") + message(STATUS "Compile **without** address sanitizer support (MI_TRACK_ASAN=OFF)") + else() + message(STATUS "Compile with address sanitizer support (MI_TRACK_ASAN=ON)") + list(APPEND mi_defines MI_TRACK_ASAN=1) + list(APPEND mi_cflags -fsanitize=address) + list(APPEND mi_libraries -fsanitize=address) + endif() + endif() +endif() + +if(MI_TRACK_ETW) + if(NOT WIN32) + set(MI_TRACK_ETW OFF) + message(WARNING "Can only enable ETW support on Windows (MI_TRACK_ETW=OFF)") + 
endif() + if (MI_TRACK_VALGRIND OR MI_TRACK_ASAN) + set(MI_TRACK_ETW OFF) + message(WARNING "Cannot enable ETW support with also Valgrind or ASAN support enabled (MI_TRACK_ETW=OFF)") + endif() + if(MI_TRACK_ETW) + message(STATUS "Compile with Windows event tracing support (MI_TRACK_ETW=ON)") + list(APPEND mi_defines MI_TRACK_ETW=1) + endif() +endif() + +if(MI_SEE_ASM) + message(STATUS "Generate assembly listings (MI_SEE_ASM=ON)") + list(APPEND mi_cflags -save-temps) + if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang") + message(STATUS "No GNU Line marker") + list(APPEND mi_cflags -Wno-gnu-line-marker) + endif() +endif() + +if(MI_CHECK_FULL) + message(STATUS "The MI_CHECK_FULL option is deprecated, use MI_DEBUG_FULL instead") + set(MI_DEBUG_FULL "ON") +endif() + +if (MI_SKIP_COLLECT_ON_EXIT) + message(STATUS "Skip collecting memory on program exit (MI_SKIP_COLLECT_ON_EXIT=ON)") + list(APPEND mi_defines MI_SKIP_COLLECT_ON_EXIT=1) +endif() + +if(MI_DEBUG_FULL) + message(STATUS "Set debug level to full internal invariant checking (MI_DEBUG_FULL=ON)") + list(APPEND mi_defines MI_DEBUG=3) # full invariant checking +endif() + +if(MI_NO_PADDING) + message(STATUS "Suppress any padding of heap blocks (MI_NO_PADDING=ON)") + list(APPEND mi_defines MI_PADDING=0) +else() + if(MI_PADDING) + message(STATUS "Enable explicit padding of heap blocks (MI_PADDING=ON)") + list(APPEND mi_defines MI_PADDING=1) + endif() +endif() + +if(MI_XMALLOC) + message(STATUS "Enable abort() calls on memory allocation failure (MI_XMALLOC=ON)") + list(APPEND mi_defines MI_XMALLOC=1) +endif() + +if(MI_SHOW_ERRORS) + message(STATUS "Enable printing of error and warning messages by default (MI_SHOW_ERRORS=ON)") + list(APPEND mi_defines MI_SHOW_ERRORS=1) +endif() + +if(MI_DEBUG_TSAN) + if(CMAKE_C_COMPILER_ID MATCHES "Clang") + message(STATUS "Build with thread sanitizer (MI_DEBUG_TSAN=ON)") + list(APPEND mi_defines MI_TSAN=1) + list(APPEND mi_cflags -fsanitize=thread -g -O1) + list(APPEND mi_libraries -fsanitize=thread) + else() + message(WARNING "Can only use thread sanitizer with clang (MI_DEBUG_TSAN=ON but ignored)") + endif() +endif() + +if(MI_DEBUG_UBSAN) + if(CMAKE_BUILD_TYPE MATCHES "Debug") + if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") + message(STATUS "Build with undefined-behavior sanitizer (MI_DEBUG_UBSAN=ON)") + list(APPEND mi_cflags -fsanitize=undefined -g -fno-sanitize-recover=undefined) + list(APPEND mi_libraries -fsanitize=undefined) + if (NOT MI_USE_CXX) + message(STATUS "(switch to use C++ due to MI_DEBUG_UBSAN)") + set(MI_USE_CXX "ON") + endif() + else() + message(WARNING "Can only use undefined-behavior sanitizer with clang++ (MI_DEBUG_UBSAN=ON but ignored)") + endif() + else() + message(WARNING "Can only use undefined-behavior sanitizer with a debug build (CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE})") + endif() +endif() + +if(MI_USE_CXX) + message(STATUS "Use the C++ compiler to compile (MI_USE_CXX=ON)") + set_source_files_properties(${mi_sources} PROPERTIES LANGUAGE CXX ) + set_source_files_properties(src/static.c test/test-api.c test/test-api-fill test/test-stress PROPERTIES LANGUAGE CXX ) + if(CMAKE_CXX_COMPILER_ID MATCHES "AppleClang|Clang") + list(APPEND mi_cflags -Wno-deprecated) + endif() + if(CMAKE_CXX_COMPILER_ID MATCHES "Intel" AND NOT CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM") + list(APPEND mi_cflags -Kc++) + endif() +endif() + +if(CMAKE_SYSTEM_NAME MATCHES "Linux|Android") + if(MI_NO_THP) + message(STATUS "Disable transparent huge pages support (MI_NO_THP=ON)") + list(APPEND mi_defines MI_NO_THP=1) + 
endif() +endif() + +if(MI_LIBC_MUSL) + message(STATUS "Assume using musl libc (MI_LIBC_MUSL=ON)") + list(APPEND mi_defines MI_LIBC_MUSL=1) +endif() + +# On Haiku use `-DCMAKE_INSTALL_PREFIX` instead, issue #788 +# if(CMAKE_SYSTEM_NAME MATCHES "Haiku") +# SET(CMAKE_INSTALL_LIBDIR ~/config/non-packaged/lib) +# SET(CMAKE_INSTALL_INCLUDEDIR ~/config/non-packaged/headers) +# endif() + +# Compiler flags +if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU") + list(APPEND mi_cflags -Wno-unknown-pragmas -fvisibility=hidden) + if(NOT MI_USE_CXX) + list(APPEND mi_cflags -Wstrict-prototypes) + endif() + if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang") + list(APPEND mi_cflags -Wno-static-in-inline) + endif() +endif() + +if(CMAKE_C_COMPILER_ID MATCHES "Intel") + list(APPEND mi_cflags -fvisibility=hidden) +endif() + +if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU|Intel" AND NOT CMAKE_SYSTEM_NAME MATCHES "Haiku") + if(MI_LOCAL_DYNAMIC_TLS) + list(APPEND mi_cflags -ftls-model=local-dynamic) + else() + if(MI_LIBC_MUSL) + # with musl we use local-dynamic for the static build, see issue #644 + list(APPEND mi_cflags_static -ftls-model=local-dynamic) + list(APPEND mi_cflags_dynamic -ftls-model=initial-exec) + message(STATUS "Use local dynamic TLS for the static build (since MI_LIBC_MUSL=ON)") + else() + list(APPEND mi_cflags -ftls-model=initial-exec) + endif() + endif() + if(MI_OVERRIDE) + list(APPEND mi_cflags -fno-builtin-malloc) + endif() +endif() + +if (MSVC AND MSVC_VERSION GREATER_EQUAL 1914) + list(APPEND mi_cflags /Zc:__cplusplus) +endif() + +if(MINGW) + add_definitions(-D_WIN32_WINNT=0x600) +endif() + +# extra needed libraries + +# we prefer -l test over `find_library` as sometimes core libraries +# like `libatomic` are not on the system path (see issue #898) +function(find_link_library libname outlibname) + check_linker_flag(C "-l${libname}" mi_has_lib${libname}) + if (mi_has_lib${libname}) + message(VERBOSE "link library: -l${libname}") + set(${outlibname} ${libname} PARENT_SCOPE) + else() + find_library(MI_LIBPATH libname) + if (MI_LIBPATH) + message(VERBOSE "link library ${libname} at ${MI_LIBPATH}") + set(${outlibname} ${MI_LIBPATH} PARENT_SCOPE) + else() + message(VERBOSE "link library not found: ${libname}") + set(${outlibname} "" PARENT_SCOPE) + endif() + endif() +endfunction() + +if(WIN32) + list(APPEND mi_libraries psapi shell32 user32 advapi32 bcrypt) +else() + find_link_library("pthread" MI_LIB_PTHREAD) + if(MI_LIB_PTHREAD) + list(APPEND mi_libraries "${MI_LIB_PTHREAD}") + endif() + find_link_library("rt" MI_LIB_RT) + if(MI_LIB_RT) + list(APPEND mi_libraries "${MI_LIB_RT}") + endif() + find_link_library("atomic-static" MI_LIB_ATOMIC) + if(MI_LIB_ATOMIC) + list(APPEND mi_libraries "${MI_LIB_ATOMIC}") + endif() +endif() + +# ----------------------------------------------------------------------------- +# Install and output names +# ----------------------------------------------------------------------------- + +# dynamic/shared library and symlinks always go to /usr/local/lib equivalent +set(mi_install_libdir "${CMAKE_INSTALL_LIBDIR}") +set(mi_install_bindir "${CMAKE_INSTALL_BINDIR}") + +# static libraries and object files, includes, and cmake config files +# are either installed at top level, or use versioned directories for side-by-side installation (default) +if (MI_INSTALL_TOPLEVEL) + set(mi_install_objdir "${CMAKE_INSTALL_LIBDIR}") + set(mi_install_incdir "${CMAKE_INSTALL_INCLUDEDIR}") + set(mi_install_cmakedir "${CMAKE_INSTALL_LIBDIR}/cmake/mimalloc") +else() + 
set(mi_install_objdir "${CMAKE_INSTALL_LIBDIR}/mimalloc-${mi_version}") # for static library and object files + set(mi_install_incdir "${CMAKE_INSTALL_INCLUDEDIR}/mimalloc-${mi_version}") # for includes + set(mi_install_cmakedir "${CMAKE_INSTALL_LIBDIR}/cmake/mimalloc-${mi_version}") # for cmake package info +endif() + +set(mi_basename "mimalloc") +if(MI_SECURE) + set(mi_basename "${mi_basename}-secure") +endif() +if(MI_TRACK_VALGRIND) + set(mi_basename "${mi_basename}-valgrind") +endif() +if(MI_TRACK_ASAN) + set(mi_basename "${mi_basename}-asan") +endif() +string(TOLOWER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_LC) +if(NOT(CMAKE_BUILD_TYPE_LC MATCHES "^(release|relwithdebinfo|minsizerel|none)$")) + set(mi_basename "${mi_basename}-${CMAKE_BUILD_TYPE_LC}") #append build type (e.g. -debug) if not a release version +endif() + +if(MI_BUILD_SHARED) + list(APPEND mi_build_targets "shared") +endif() +if(MI_BUILD_STATIC) + list(APPEND mi_build_targets "static") +endif() +if(MI_BUILD_OBJECT) + list(APPEND mi_build_targets "object") +endif() +if(MI_BUILD_TESTS) + list(APPEND mi_build_targets "tests") +endif() + +message(STATUS "") +message(STATUS "Library base name: ${mi_basename}") +message(STATUS "Version : ${mi_version}") +message(STATUS "Build type : ${CMAKE_BUILD_TYPE_LC}") +if(MI_USE_CXX) + message(STATUS "C++ Compiler : ${CMAKE_CXX_COMPILER}") +else() + message(STATUS "C Compiler : ${CMAKE_C_COMPILER}") +endif() +message(STATUS "Compiler flags : ${mi_cflags}") +message(STATUS "Compiler defines : ${mi_defines}") +message(STATUS "Link libraries : ${mi_libraries}") +message(STATUS "Build targets : ${mi_build_targets}") +message(STATUS "") + +# ----------------------------------------------------------------------------- +# Main targets +# ----------------------------------------------------------------------------- + +# shared library +if(MI_BUILD_SHARED) + add_library(mimalloc SHARED ${mi_sources}) + set_target_properties(mimalloc PROPERTIES VERSION ${mi_version} SOVERSION ${mi_version_major} OUTPUT_NAME ${mi_basename} ) + target_compile_definitions(mimalloc PRIVATE ${mi_defines} MI_SHARED_LIB MI_SHARED_LIB_EXPORT) + target_compile_options(mimalloc PRIVATE ${mi_cflags} ${mi_cflags_dynamic}) + target_link_libraries(mimalloc PRIVATE ${mi_libraries}) + target_include_directories(mimalloc PUBLIC + $ + $ + ) + if(WIN32 AND MI_WIN_REDIRECT) + # On windows, link and copy the mimalloc redirection dll too. 
+ if(CMAKE_SIZEOF_VOID_P EQUAL 4) + set(MIMALLOC_REDIRECT_SUFFIX "32") + else() + set(MIMALLOC_REDIRECT_SUFFIX "") + endif() + + target_link_libraries(mimalloc PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/bin/mimalloc-redirect${MIMALLOC_REDIRECT_SUFFIX}.lib) + add_custom_command(TARGET mimalloc POST_BUILD + COMMAND "${CMAKE_COMMAND}" -E copy "${CMAKE_CURRENT_SOURCE_DIR}/bin/mimalloc-redirect${MIMALLOC_REDIRECT_SUFFIX}.dll" $ + COMMENT "Copy mimalloc-redirect${MIMALLOC_REDIRECT_SUFFIX}.dll to output directory") + install(FILES "$/mimalloc-redirect${MIMALLOC_REDIRECT_SUFFIX}.dll" DESTINATION ${mi_install_bindir}) + endif() + + install(TARGETS mimalloc EXPORT mimalloc ARCHIVE DESTINATION ${mi_install_libdir} RUNTIME DESTINATION ${mi_install_bindir} LIBRARY DESTINATION ${mi_install_libdir}) + install(EXPORT mimalloc DESTINATION ${mi_install_cmakedir}) +endif() + +# static library +if (MI_BUILD_STATIC) + add_library(mimalloc-static STATIC ${mi_sources}) + set_property(TARGET mimalloc-static PROPERTY POSITION_INDEPENDENT_CODE ON) + target_compile_definitions(mimalloc-static PRIVATE ${mi_defines} MI_STATIC_LIB) + target_compile_options(mimalloc-static PRIVATE ${mi_cflags} ${mi_cflags_static}) + target_link_libraries(mimalloc-static PRIVATE ${mi_libraries}) + target_include_directories(mimalloc-static PUBLIC + $ + $ + ) + if(WIN32) + # When building both static and shared libraries on Windows, a static library should use a + # different output name to avoid the conflict with the import library of a shared one. + string(REPLACE "mimalloc" "mimalloc-static" mi_output_name ${mi_basename}) + set_target_properties(mimalloc-static PROPERTIES OUTPUT_NAME ${mi_output_name}) + else() + set_target_properties(mimalloc-static PROPERTIES OUTPUT_NAME ${mi_basename}) + endif() + + install(TARGETS mimalloc-static EXPORT mimalloc DESTINATION ${mi_install_objdir} LIBRARY) + install(EXPORT mimalloc DESTINATION ${mi_install_cmakedir}) +endif() + +# install include files +install(FILES include/mimalloc.h DESTINATION ${mi_install_incdir}) +install(FILES include/mimalloc-override.h DESTINATION ${mi_install_incdir}) +install(FILES include/mimalloc-new-delete.h DESTINATION ${mi_install_incdir}) +install(FILES cmake/mimalloc-config.cmake DESTINATION ${mi_install_cmakedir}) +install(FILES cmake/mimalloc-config-version.cmake DESTINATION ${mi_install_cmakedir}) + + +# single object file for more predictable static overriding +if (MI_BUILD_OBJECT) + add_library(mimalloc-obj OBJECT src/static.c) + set_property(TARGET mimalloc-obj PROPERTY POSITION_INDEPENDENT_CODE ON) + target_compile_definitions(mimalloc-obj PRIVATE ${mi_defines}) + target_compile_options(mimalloc-obj PRIVATE ${mi_cflags} ${mi_cflags_static}) + target_include_directories(mimalloc-obj PUBLIC + $ + $ + ) + + # Copy the generated object file (`static.o`) to the output directory (as `mimalloc.o`) + if(NOT WIN32) + set(mimalloc-obj-static "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/mimalloc-obj.dir/src/static.c${CMAKE_C_OUTPUT_EXTENSION}") + set(mimalloc-obj-out "${CMAKE_CURRENT_BINARY_DIR}/${mi_basename}${CMAKE_C_OUTPUT_EXTENSION}") + add_custom_command(OUTPUT ${mimalloc-obj-out} DEPENDS mimalloc-obj COMMAND "${CMAKE_COMMAND}" -E copy "${mimalloc-obj-static}" "${mimalloc-obj-out}") + add_custom_target(mimalloc-obj-target ALL DEPENDS ${mimalloc-obj-out}) + endif() + + # the following seems to lead to cmake warnings/errors on some systems, disable for now :-( + # install(TARGETS mimalloc-obj EXPORT mimalloc DESTINATION ${mi_install_objdir}) + + # the FILES expression can also 
be: $ + # but that fails cmake versions less than 3.10 so we leave it as is for now + install(FILES ${mimalloc-obj-static} + DESTINATION ${mi_install_objdir} + RENAME ${mi_basename}${CMAKE_C_OUTPUT_EXTENSION} ) +endif() + +# pkg-config file support +set(pc_libraries "") +foreach(item IN LISTS mi_libraries) + if(item MATCHES " *[-].*") + set(pc_libraries "${pc_libraries} ${item}") + else() + set(pc_libraries "${pc_libraries} -l${item}") + endif() +endforeach() + +include("cmake/JoinPaths.cmake") +join_paths(includedir_for_pc_file "\${prefix}" "${CMAKE_INSTALL_INCLUDEDIR}") +join_paths(libdir_for_pc_file "\${prefix}" "${CMAKE_INSTALL_LIBDIR}") + +configure_file(mimalloc.pc.in mimalloc.pc @ONLY) +install(FILES "${CMAKE_CURRENT_BINARY_DIR}/mimalloc.pc" + DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig/") + + + +# ----------------------------------------------------------------------------- +# API surface testing +# ----------------------------------------------------------------------------- + +if (MI_BUILD_TESTS) + enable_testing() + + foreach(TEST_NAME api api-fill stress) + add_executable(mimalloc-test-${TEST_NAME} test/test-${TEST_NAME}.c) + target_compile_definitions(mimalloc-test-${TEST_NAME} PRIVATE ${mi_defines}) + target_compile_options(mimalloc-test-${TEST_NAME} PRIVATE ${mi_cflags}) + target_include_directories(mimalloc-test-${TEST_NAME} PRIVATE include) + target_link_libraries(mimalloc-test-${TEST_NAME} PRIVATE mimalloc ${mi_libraries}) + + add_test(NAME test-${TEST_NAME} COMMAND mimalloc-test-${TEST_NAME}) + endforeach() +endif() + +# ----------------------------------------------------------------------------- +# Set override properties +# ----------------------------------------------------------------------------- +if (MI_OVERRIDE) + if (MI_BUILD_SHARED) + target_compile_definitions(mimalloc PRIVATE MI_MALLOC_OVERRIDE) + endif() + if(NOT WIN32) + # It is only possible to override malloc on Windows when building as a DLL. + if (MI_BUILD_STATIC) + target_compile_definitions(mimalloc-static PRIVATE MI_MALLOC_OVERRIDE) + endif() + if (MI_BUILD_OBJECT) + target_compile_definitions(mimalloc-obj PRIVATE MI_MALLOC_OVERRIDE) + endif() + endif() +endif() diff --git a/ww/managers/mimalloc/LICENSE b/ww/managers/mimalloc/LICENSE new file mode 100644 index 00000000..670b668a --- /dev/null +++ b/ww/managers/mimalloc/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018-2021 Microsoft Corporation, Daan Leijen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/ww/managers/mimalloc/SECURITY.md b/ww/managers/mimalloc/SECURITY.md new file mode 100644 index 00000000..b3c89efc --- /dev/null +++ b/ww/managers/mimalloc/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd). + + diff --git a/ww/managers/mimalloc/azure-pipelines.yml b/ww/managers/mimalloc/azure-pipelines.yml new file mode 100644 index 00000000..0247c76f --- /dev/null +++ b/ww/managers/mimalloc/azure-pipelines.yml @@ -0,0 +1,197 @@ +# Starter pipeline +# Start with a minimal pipeline that you can customize to build and deploy your code. 
+# Add steps that build, run tests, deploy, and more: +# https://aka.ms/yaml + +trigger: + branches: + include: + - master + - dev + - dev-slice + tags: + include: + - v* + +jobs: +- job: + displayName: Windows + pool: + vmImage: + windows-2022 + strategy: + matrix: + Debug: + BuildType: debug + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON + MSBuildConfiguration: Debug + Release: + BuildType: release + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release + MSBuildConfiguration: Release + Secure: + BuildType: secure + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON + MSBuildConfiguration: Release + steps: + - task: CMake@1 + inputs: + workingDirectory: $(BuildType) + cmakeArgs: .. $(cmakeExtraArgs) + - task: MSBuild@1 + inputs: + solution: $(BuildType)/libmimalloc.sln + configuration: '$(MSBuildConfiguration)' + msbuildArguments: -m + - script: ctest --verbose --timeout 120 -C $(MSBuildConfiguration) + workingDirectory: $(BuildType) + displayName: CTest + #- script: $(BuildType)\$(BuildType)\mimalloc-test-stress + # displayName: TestStress + #- upload: $(Build.SourcesDirectory)/$(BuildType) + # artifact: mimalloc-windows-$(BuildType) + +- job: + displayName: Linux + pool: + vmImage: + ubuntu-22.04 + strategy: + matrix: + Debug: + CC: gcc + CXX: g++ + BuildType: debug + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON + Release: + CC: gcc + CXX: g++ + BuildType: release + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release + Secure: + CC: gcc + CXX: g++ + BuildType: secure + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON + Debug++: + CC: gcc + CXX: g++ + BuildType: debug-cxx + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON + Debug Clang: + CC: clang + CXX: clang++ + BuildType: debug-clang + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON + Release Clang: + CC: clang + CXX: clang++ + BuildType: release-clang + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release + Secure Clang: + CC: clang + CXX: clang++ + BuildType: secure-clang + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON + Debug++ Clang: + CC: clang + CXX: clang++ + BuildType: debug-clang-cxx + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON + Debug ASAN Clang: + CC: clang + CXX: clang++ + BuildType: debug-asan-clang + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_TRACK_ASAN=ON + Debug UBSAN Clang: + CC: clang + CXX: clang++ + BuildType: debug-ubsan-clang + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_DEBUG_UBSAN=ON + Debug TSAN Clang++: + CC: clang + CXX: clang++ + BuildType: debug-tsan-clang-cxx + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_USE_CXX=ON -DMI_DEBUG_TSAN=ON + + steps: + - task: CMake@1 + inputs: + workingDirectory: $(BuildType) + cmakeArgs: .. $(cmakeExtraArgs) + - script: make -j$(nproc) -C $(BuildType) + displayName: Make + - script: ctest --verbose --timeout 180 + workingDirectory: $(BuildType) + displayName: CTest +# - upload: $(Build.SourcesDirectory)/$(BuildType) +# artifact: mimalloc-ubuntu-$(BuildType) + +- job: + displayName: macOS + pool: + vmImage: + macOS-latest + strategy: + matrix: + Debug: + BuildType: debug + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON + Release: + BuildType: release + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release + Secure: + BuildType: secure + cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON + steps: + - task: CMake@1 + inputs: + workingDirectory: $(BuildType) + cmakeArgs: .. 
$(cmakeExtraArgs)
+  - script: make -j$(sysctl -n hw.ncpu) -C $(BuildType)
+    displayName: Make
+  # - script: MIMALLOC_VERBOSE=1 ./mimalloc-test-api
+  #   workingDirectory: $(BuildType)
+  #   displayName: TestAPI
+  # - script: MIMALLOC_VERBOSE=1 ./mimalloc-test-stress
+  #   workingDirectory: $(BuildType)
+  #   displayName: TestStress
+  - script: ctest --verbose --timeout 120
+    workingDirectory: $(BuildType)
+    displayName: CTest
+
+# - upload: $(Build.SourcesDirectory)/$(BuildType)
+#   artifact: mimalloc-macos-$(BuildType)
+
+# - job:
+#   displayName: Windows-2017
+#   pool:
+#     vmImage:
+#       vs2017-win2016
+#   strategy:
+#     matrix:
+#       Debug:
+#         BuildType: debug
+#         cmakeExtraArgs: -A x64 -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON
+#         MSBuildConfiguration: Debug
+#       Release:
+#         BuildType: release
+#         cmakeExtraArgs: -A x64 -DCMAKE_BUILD_TYPE=Release
+#         MSBuildConfiguration: Release
+#       Secure:
+#         BuildType: secure
+#         cmakeExtraArgs: -A x64 -DCMAKE_BUILD_TYPE=Release -DMI_SECURE=ON
+#         MSBuildConfiguration: Release
+#   steps:
+#     - task: CMake@1
+#       inputs:
+#         workingDirectory: $(BuildType)
+#         cmakeArgs: .. $(cmakeExtraArgs)
+#     - task: MSBuild@1
+#       inputs:
+#         solution: $(BuildType)/libmimalloc.sln
+#         configuration: '$(MSBuildConfiguration)'
+#     - script: |
+#         cd $(BuildType)
+#         ctest --verbose --timeout 120
+#       displayName: CTest
diff --git a/ww/managers/mimalloc/bin/mimalloc-redirect.dll b/ww/managers/mimalloc/bin/mimalloc-redirect.dll
new file mode 100644
index 0000000000000000000000000000000000000000..a3a3591ff9b07edada16ad83ca1391963a126b2d
GIT binary patch
literal 68096
[binary patch data for the mimalloc-redirect DLL/LIB files omitted]
zBjP`U=tWKz5HG^WAxA}#INEBljRjG5{3N11dJ?wS6CjI^&bM^I&sfa3pvH`!w6>n5 z_21Ho_21s@KQjwq=ibW347JU^>mmPd?g)OP^-KP_r75Eu^IakGyp-A}V%QtA2nG{FaG_gi!AjcDpn-1r;4T2}tv<(zjW+Li%<f-PFCgqfHrjZW)1D$wUu%@JPR^mB0|K|1+1X&b=vVLFtRM9h1{}$Yy39CJjXRoR%RP zVFsbw!4KyV@U+SI^SVKAi*#rv_9Bjh%%HPH0-A}4mz0{CuDBSC4_);kr) zqL6syB>2w!#YpmGJJ?zl{^|Y?;%=q<6Mp+fi8&d29-xIorXUJc%00hg^28?Qu?}os zJH)pKI>dME9pbzA-h+eDZ}Isau79WNfHpeBUQ35~S=%9o9@|VY6}t;$+`qx7P&Nt% zg8P#o{N)(iUdjZz`dQe9>v&=uH*|=dp!hmCz5xzz;(Hf3{uI|g^L2{Jeg+JYVWkk_r`Ii-ScGJyymxoFyWBH6(zQG6!^<8)rnSgV9(eg z{vK@J2k8&+y&r6GlUDo#$o~q}pmeIR|-4?U>+!&auc{z|!$%LRwLemf3%aH`K{zm}

K+)gTjNQ-T0!chd(Skj4 z6Jq9`_Ke+M<$Z!vKna-)S~e6!OIS2f(<0J(Mkz~>@DvjEn^|;@m=e${z|JjCp)8BA z#SWb0&HaH2y@5w7kfm7P&jJ_0j`4E22(IlS!JXFo6d2xW17-FGd19K~o7b_m;hR5a z-lybLeAqbS{MJva8MlponjG!1@hshO!v8fsz}1cm(u};3S70~exa|EO^KV!`hoFTjmdn##01=Q61d*!QAD0% zn}PGwppQJJ4u{-)8VnOcoscx$)gxk`UO;UA*njASCVIX@(=~Juxkc0?AZT0H zrc6_kG_-9axlrh2!9i=b>sZaN2_=rtzB}QXv}=UOD3<5~m0Aq=H#9?7Na`U{!&x={ z-tjm^!A$W)E;1xYl9ux@gLKM2c9Eq}(r-G80~(SeJldu4!CnrtRvo5Z#~4lqL96_w zKQZeHEk<36?=~7xi5+5C*&*t&Yoa43J?MuME7aK!qSy}H%WloW0g;N(1(s@C*+z#p zKos);W&#Iu6U~sxADdUjT$)LeXSMKbwk6C~bUlwcz%?G{&CHE_nXY0IN$ZdT z38B0B0G4XNHBU8=$-<9s$d$N1JeP|oZWgsnyX`a_iTz1b$zZJq1VG zwWU!|DIY?UoP$VQ=FjXZf$g*LlEF;`mya_4!3dzP$do1b6(Jz7g?+DpXBH_Vq7P>7 zvbk!vL0zxBPU?cj)oS^5R)hRXIP?M=KgG41dLHNX$VNWE?E@L?x1dWJjWyJ@vjCl5 z;N;j%PWA*2EWnb9Ae-SjN<>;Yk&u$qyk0tMaH1o8!~83;n!!D=k1XTNm5djqf}*{B zuohP>43o}s^3$Dcw(#azt|nBvTBwDTm&V6hwF}ixId~e*qb{cq|S_rLz#VTL?V~it-rG%yHa$TK_1CBWwPJ-WE zK@ij<9%C);M!Q_X+H@W1w9iSQaCZG1#R#HISk2?XwT9)kaG$`_me?;!O^T1e5BS-J z9nl?w5wlYM_W$FChU@^&#T1tjpfg$E*4SJJY4vSXx=!=Cvt>w+JR}IWE^E5 zjiaOQ-SR0@MwlNK`x>?x)mY`n_!m>VdOYUsE7bOq#O+^H+wV!-K2B}Fg6skI9mDCr z$E$HZO5EO|w*QcSQPHvU1Mq)WZGSfL_^+z%&HPKwu=&-f?T;lMzd~&z;EBJj&qWSY`%s+8^huZ$b#O>cz+n-I)UP%cVT9!N6246G8L%4loN5PV6bo&K+ z*G;Tr!bI@U{$!H$D--N4zre;&F6)dY?p{Cj$KjtI`$b}=o+IGfPX1KG-Mng0qeI;g z;|GleO*e4O_|Y#MtnyX1 zgYk=_r@H}q+UwEFg4@x-of50cMm5alo0)jNkXH|*nb4kwxB>9q>TBA*$ZL7libn>% zSDC@9LidGc_)ChhybIDD>Q;wrb6_e}DvmtTlNOi{9j1Kaj-`ZFhE_0MGXG%-=Y^yo z_C;UkRp`XHl_%mxmBbB;$4K0id=_r8>5)S^%J%t~+_pG|8M~jRYU~@{(Mx#rgxiuF zW{;s$VwBwy%4Z$4()|qZU-9wCFd;mfEt%e|pgnUwozv(5mj)X0pAcP5Mk7`%*uE@Teft{RVA#+ioZgl79oma{qEr6N+TdtfRxa4A!8*T)5S z)u}$lC-Vp9(gU*v+~gmatzqw4_D+xG49wQE_e}Pl#oni}cLUx(2CHP(qy&-@NJ=0n zfusbI5=crQDS@N}k`hQtASr>Q1dhmDQD4RC9a4j*s=A@x zIkd)I@0O@W9aUBB_BuAyvR<(|XZ;9ORg%+FM<|l78*~B)H!l3LyWW(u&gQIjI=s$n zJ3{e|)yy`yUELV6WUr(Iu9bjsyKrq}DKbbN$3|z7Az_=-<7x1W(6-Itsdv|JqSnMO z%PPx~ii*|Cs+O->vt((7SY;EJud<2D)}s40ZL1#=ZMId*MYLs`*V^o&`4MrAdG&+w zeZ>cdlM#&O6-z4Ch~@`Zn^&(`5l0u~mabkyR;^w2s9068ddc#t)wboL zZ4`diC2NLSBzFK}&h z*H<@e^BT4yR=6ALQTwECkx~e^8Z}txkGo#jfDs-y{#=*Vgu#9bz0Qq353g0nDs>C1 zof~|chWC%FjP>{MNTl+tieFBg^^#`?em~=KdYp!B^v3MNYpC{lUiw9o`=5|#Y(0XS35k_2EN!D z@PEc?cS5^0dr|g0!jqt175?qGDz5hog5dKKe~)vMuh!vFh~8i0aXJ^p5hH@1(^tI& z*(r)A2@*`&Qb6YJCmj;h0b6UR_SHIJzlH`+wbN5Hq>r;&@YOGj4Z)=_n;H883|L^; zfEBLDVB9_%aVVbGNaBF8kw9TFUdL7^{#<%TLwz;b6=b{JUFWMaZ1&YP8ocf&K^A^v zD5ZBnsB&&^gk}ZogEq455*Sp~Q8)ZLjqQGjCbI^b$lTeD^UQ|9hD}b%u+8I^oQ6hh zh}>TC&A4%%1{l}|$Hpxq+v5!IQ+Zd>tmCc;i`o{$#XG4OW1oB;3I&W)=cRo5HhLtY z&J+b&gH!N0y$!Vx+05DA8MD1J$vD}zkHi`Cz9G?#NyOva3KushTTF64?%&LhViac@ z91`Rl)#cF%bAya7dfRvVnvBl|K(xhe-|(sbIF3qJd&> zn8-{)oiC~dugh6mtL%#TK$;ut7K@v)-80#=!RvI^c?mK8q&&_<4UQV@d%|a+b{#HoBY}w-BV7 z29F^&qj(+FAo{B!6@-LKz8Y|B{j0MjG=;>sx^1 z;3!!3-$E=$faCZmSdQI7EbYM2ISQ7}TZrWnaP*CW#klm=*-IEWG~XDRpZ*qN(F4b< zQLtEVA(mX=SUC!o{kIUy!@wbpf~D~mV%Y~AM@GSN>=t5a1CGv7uyo!+EIowdnUVdd z{}y7&1POB@2bx`fs6q497AT7>p>z3Kr`a%Ebx{>rh+@ zmhv&g(gX~5x-f$ zk&9BU;2FzUw-y-Iqqr0-V;SqV149eSA(Z`yx$R@<)5n3~EXqZcc3|lrLoEHk5Jk~F zE9e`6ngzaVFgR0g2jbv_6wbx$Ttyb34B`= zEZqv0!?i^7wJ3*4E@OygH!$oc94OtuQa*-QjswGa6cXIdF~mXqyMZHu zlKC8D11z0m80&I@VI_(cg|Lm~o?$&ONGL5R>mir1T&E8MLmNsLN;|N0kD-pjz@Y!O zqMtFgT_bQ*pwy!1A(Q?wl*<8NIF51=rJQgm=Mu8x|H?1^%ou<4rV39vkSy3^WXjVx z)XpEd;;peu^rN=;k+Dmq&EsTGr?7!&Q!e>9dekb8A6+tf%40MP3C1W-&+sum_JDL1 zJshOcjH}{(>Ydx@c`uizVVl|~u8Q|@d(l+oaylA;X%iluQGkHoHBBUsI2qvSs!jFI zYCZt{711cw!{!~|d2s%CDxA^abk#oUV@3)T(!ubiL^vGMAoihHsvMGvUDbn1P#ttF zCO(MftM+ZEsqs3c*i%r9+sMtZ9pxb|^ijVG>VsXVQ@`T5Bi<2p>Q^>D6|4GuJ2MPS z1pQ(*!fZ$BpjrsphuDg86?;xTHUmye6*uDEH3`6y1QVj%la 
z-x2E+U8*#d;Gr7xHF8m7)Uf`lh|pgR z2Wk%%)P`V*1t=~^HEzW4$J%3}i1*WLS`*XeV`F+y{Iz<+Chi~c*D7LaB)wx2v012f z3c1+$_-oQ_74OK##$PLb7#lb8wF-6c`PF!uF`ta%6JMv=0QW)GiD4P>dYInhQ1dcgBU32PLbgLBhXqJmmIBK1Z#^S=T_ngEkO<^nFlzi2s87 zL0{bld=rExh>BS6t8;pg_Nl;cX`R&;_RCqXpd$p8KBs4gSijZnX{fKmZ!RBlc--u7 zIlMG-1ya=_{nmO#gJ(%~HGcL7n(^78c*!QeCnfMnB#052uh&_(0e^G=0`=aKIX+K)krzKHu5)-7*10!&8oUiP(n9>? zxX9tHTeLNM4t^0|@2+ur)kws-ft0~eEO~rhX;pnqgFC@fC~Ji9RJ5a?#dolOz+{?Ki<__de&hzDc zB`=uwblx+0|CINgyg5bXMK2b8uXuOK-V$%=&r5GFTUYkK%HAylQ3KWmd=wbJV?1FL zvR7sQ_w1i!|0a80PEF31obTkkkaH^Mm7EJXZ|3|m=MOo0(+txb(=yW*(+<<$o1O*l zADGUX-ZZ^sx@?m2cIW*|-uT6Tzu25Vr|6qS-Nk<_)|Ff?nOB-qYA*e0>90%wPzpv^ zpvv*_{p|a4zn-hhv*tgMKal@#1s4js3&g@_3STq7Rh(Z^QL?Y}MCq?e|5CcOELgU` z?0=V?Eqk?0F1u1jgF!+_HO@Ewweb;SR(3&wrQo{-FBd#h`29j%(d|W9MbnFB75%p& zW6|QG;v$+H$?jOe@q*5R;==O6b%o~(FBHm!Yl|K(l8U;EdWr;dt+~;>&m1vF%~{2} zi(8716zfYeON=FlN)DHtESX!npmb&F@zS=^uF~?d^P> z^H%0{=5^(*TfBa;ytr?1|KiAEDSvx@Q~vIJp+HlhEzlL{3-%T4FE~(eu;5TZR^hZl zL*cB#xrIjxj}{&)Y%4rjXe`RbJeL=(EIL`#UUa^wv#6_RrMbe~Wxi;>WbQGqFRm_j z6*m_5755j1i=)NcOPWe{mh3KRDbbW_OZS%^C_PwuxO7^Xp=?&!g0iD!&zBu1yV%b+ z61m0#W4W>2c-DBqSdnebemMJL_9fU~Pqv(0o#V=>&1uY$a{6<^Igy-bj$mps?KHKR zw7I%meQsuMR_=k^gSm%tkK`KiX64PzTaahWdp_@2-toM)ypwqai;EX8Uwn4)`NbC& ecP{Q)Y|USr|8V}g{Pp?ZpL``Hkdy$E!2btt@imG7 literal 0 HcmV?d00001 diff --git a/ww/managers/mimalloc/bin/mimalloc-redirect32.lib b/ww/managers/mimalloc/bin/mimalloc-redirect32.lib new file mode 100644 index 0000000000000000000000000000000000000000..87f19b8ec0f7ae1024ff508a738b517c71f11aad GIT binary patch literal 2928 zcmcImO-~b16g|^{QYspvM%`#iBvFDu=~n_VrYIN*1)5S4HfAWDDx)n!hN>hi`~!vs z8-IuXf^O8UJ9Xj0jXPNpHj?1IGxOT%%v9O{FL_`0-S^IW_ntd<2JRP(;`)Q|Sfsg( zj5p84ENTzp6DfPN8U}C?kPm@i7UAatZCv-8=-Twa-; zS$dS4&E^+#N=2&_%4NMcVw65*m7!+k;Ig{}!nM{b!!D=`o$yn)oe0W=;7$!C_Ax72G+NiKVMF_(AoLEg)_71M} z6PdwhbGd{8?CxBi*NTR|r9Z1i?&gLfnc0OoYq`1YqbYCX13-l5Q9rfa=BNcC(nY

diff --git a/ww/managers/mimalloc/bin/readme.md b/ww/managers/mimalloc/bin/readme.md
new file mode 100644
index 00000000..9b121bda
--- /dev/null
+++ b/ww/managers/mimalloc/bin/readme.md
@@ -0,0 +1,71 @@
+# Windows Override
+
+Dynamically overriding with mimalloc on Windows
+is robust and has the particular advantage of being able to redirect all malloc/free calls that go through
+the (dynamic) C runtime allocator, including those from other DLLs or libraries.
+As it intercepts all allocation calls at a low level, it can be used reliably
+on large programs that include other 3rd-party components.
+There are four requirements to make the overriding work robustly:
+
+1. Use the C runtime library as a DLL (using the `/MD` or `/MDd` switch).
+
+2. Link your program explicitly with the `mimalloc-override.dll` library.
+   To ensure `mimalloc-override.dll` is loaded at run-time, it is easiest to insert some
+   call to the mimalloc API in the `main` function, like `mi_version()`
+   (or use the `/INCLUDE:mi_version` switch on the linker). See the `mimalloc-override-test` project
+   for an example of how to use this.
+
+3. The `mimalloc-redirect.dll` (or `mimalloc-redirect32.dll`) must be put
+   in the same folder as the main `mimalloc-override.dll` at runtime (as it is a dependency of that DLL).
+   The redirection DLL ensures that all calls to the C runtime malloc API get redirected to
+   mimalloc functions (which reside in `mimalloc-override.dll`).
+
+4. Ensure `mimalloc-override.dll` comes as early as possible in the import
+   list of the final executable (so it can intercept all potential allocations).
+
+For best performance on Windows with C++, it
+is also recommended to override the `new`/`delete` operations (by including
+[`mimalloc-new-delete.h`](../include/mimalloc-new-delete.h)
+in a single(!) source file in your project).
+
+The environment variable `MIMALLOC_DISABLE_REDIRECT=1` can be used to disable dynamic
+overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully redirected.
+
+## Minject
+
+We cannot always re-link an executable with `mimalloc-override.dll`, and similarly, we cannot always
+ensure that the DLL comes first in the import table of the final executable.
+In many cases, though, we can patch existing executables without any recompilation
+if they are linked with the dynamic C runtime (`ucrtbase.dll`) -- just put `mimalloc-override.dll`
+into the import table (and put `mimalloc-redirect.dll` in the same folder).
+Such patching can be done for example with [CFF Explorer](https://ntcore.com/?page_id=388).
+
+The `minject` program can also do this from the command line; use `minject --help` for options:
+
+```
+> minject --help
+
+minject:
+  Injects the mimalloc dll into the import table of a 64-bit executable,
+  and/or ensures that it comes first in the import table.
+
+usage:
+  > minject [options] <exe>
+
+options:
+  -h   --help            show this help
+  -v   --verbose         be verbose
+  -l   --list            only list imported modules
+  -i   --inplace         update the exe in-place (make sure there is a backup!)
+  -f   --force           always overwrite without prompting
+       --postfix=<name>  use <name> as a postfix to the mimalloc dll (default is 'override')
+                         e.g. use --postfix=override-debug to link with mimalloc-override-debug.dll
+
+notes:
+  Without '--inplace', an injected copy is generated with the same name ending in '-mi'.
+  Ensure 'mimalloc-redirect.dll' is in the same folder as the mimalloc dll.
+
+examples:
+  > minject --list myprogram.exe
+  > minject --force --inplace myprogram.exe
+```
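Requirement 2 above amounts to keeping at least one reference to the mimalloc API in the executable itself. A minimal sketch of such a program, assuming it is compiled against the headers in `ww/managers/mimalloc/include` and linked against `mimalloc-override.dll` as described above (the `mi_is_redirected()` call is only an optional sanity check, not a requirement):

```c
/*
 * Sketch only (not part of this patch): requirement 2 from the readme above.
 * Referencing the mimalloc API from main() keeps the import of
 * mimalloc-override.dll alive so the redirection DLL can hook the CRT.
 * Assumes the program is built with /MD and linked with mimalloc-override.dll.
 */
#include <stdio.h>
#include <stdlib.h>
#include <mimalloc.h>

int main(void) {
  /* Any mimalloc call works; mi_version() is the conventional choice. */
  printf("mimalloc version: %d\n", mi_version());

  /* Optional sanity check: on Windows, mi_is_redirected() reports whether
     mimalloc-redirect.dll successfully hooked the C runtime allocator. */
  if (!mi_is_redirected()) {
    fprintf(stderr, "warning: malloc/free are not redirected to mimalloc\n");
  }

  void* p = malloc(64);  /* served by mimalloc when redirection is active */
  free(p);
  return 0;
}
```

If the check fails at run time, setting `MIMALLOC_VERBOSE=1` (see above) is the quickest way to see why the redirection did not take effect.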
diff --git a/ww/managers/mimalloc/cmake/JoinPaths.cmake b/ww/managers/mimalloc/cmake/JoinPaths.cmake
new file mode 100644
index 00000000..c68d91b8
--- /dev/null
+++ b/ww/managers/mimalloc/cmake/JoinPaths.cmake
@@ -0,0 +1,23 @@
+# This module provides function for joining paths
+# known from most languages
+#
+# SPDX-License-Identifier: (MIT OR CC0-1.0)
+# Copyright 2020 Jan Tojnar
+# https://github.com/jtojnar/cmake-snips
+#
+# Modelled after Python’s os.path.join
+# https://docs.python.org/3.7/library/os.path.html#os.path.join
+# Windows not supported
+function(join_paths joined_path first_path_segment)
+    set(temp_path "${first_path_segment}")
+    foreach(current_segment IN LISTS ARGN)
+        if(NOT ("${current_segment}" STREQUAL ""))
+            if(IS_ABSOLUTE "${current_segment}")
+                set(temp_path "${current_segment}")
+            else()
+                set(temp_path "${temp_path}/${current_segment}")
+            endif()
+        endif()
+    endforeach()
+    set(${joined_path} "${temp_path}" PARENT_SCOPE)
+endfunction()
diff --git a/ww/managers/mimalloc/cmake/mimalloc-config-version.cmake b/ww/managers/mimalloc/cmake/mimalloc-config-version.cmake
new file mode 100644
index 00000000..81fd3c9d
--- /dev/null
+++ b/ww/managers/mimalloc/cmake/mimalloc-config-version.cmake
@@ -0,0 +1,19 @@
+set(mi_version_major 2)
+set(mi_version_minor 1)
+set(mi_version_patch 7)
+set(mi_version ${mi_version_major}.${mi_version_minor})
+
+set(PACKAGE_VERSION ${mi_version})
+if(PACKAGE_FIND_VERSION_MAJOR)
+  if("${PACKAGE_FIND_VERSION_MAJOR}" EQUAL "${mi_version_major}")
+    if ("${PACKAGE_FIND_VERSION_MINOR}" EQUAL "${mi_version_minor}")
+      set(PACKAGE_VERSION_EXACT TRUE)
+    elseif("${PACKAGE_FIND_VERSION_MINOR}" LESS "${mi_version_minor}")
+      set(PACKAGE_VERSION_COMPATIBLE TRUE)
+    else()
+      set(PACKAGE_VERSION_UNSUITABLE TRUE)
+    endif()
+  else()
+    set(PACKAGE_VERSION_UNSUITABLE TRUE)
+  endif()
+endif()
diff --git a/ww/managers/mimalloc/cmake/mimalloc-config.cmake b/ww/managers/mimalloc/cmake/mimalloc-config.cmake
new file mode 100644
index 00000000..a49b02a2
--- /dev/null
+++ b/ww/managers/mimalloc/cmake/mimalloc-config.cmake
@@ -0,0 +1,14 @@
+include(${CMAKE_CURRENT_LIST_DIR}/mimalloc.cmake)
+get_filename_component(MIMALLOC_CMAKE_DIR "${CMAKE_CURRENT_LIST_DIR}" PATH)   # one up from the cmake dir, e.g. /usr/local/lib/cmake/mimalloc-2.0
+get_filename_component(MIMALLOC_VERSION_DIR "${CMAKE_CURRENT_LIST_DIR}" NAME)
+string(REPLACE "/lib/cmake" "/lib" MIMALLOC_LIBRARY_DIR "${MIMALLOC_CMAKE_DIR}")
+if("${MIMALLOC_VERSION_DIR}" EQUAL "mimalloc")
+  # top level install
+  string(REPLACE "/lib/cmake" "/include" MIMALLOC_INCLUDE_DIR "${MIMALLOC_CMAKE_DIR}")
+  set(MIMALLOC_OBJECT_DIR "${MIMALLOC_LIBRARY_DIR}")
+else()
+  # versioned
+  string(REPLACE "/lib/cmake/" "/include/" MIMALLOC_INCLUDE_DIR "${CMAKE_CURRENT_LIST_DIR}")
+  string(REPLACE "/lib/cmake/" "/lib/" MIMALLOC_OBJECT_DIR "${CMAKE_CURRENT_LIST_DIR}")
+endif()
+set(MIMALLOC_TARGET_DIR "${MIMALLOC_LIBRARY_DIR}")  # legacy
[SVG markup omitted: benchmark charts added as ww/managers/mimalloc/doc/bench-2020/bench-c5-18xlarge-2020-01-20-{a,b,rss-a,rss-b}.svg, bench-r5a-{1,2}.svg, bench-r5a-12xlarge-2020-01-16-{a,b}.svg, bench-r5a-rss-{1,2}.svg, bench-spec.svg, bench-spec-rss.svg, bench-z4-{1,2}.svg, bench-z4-rss-{1,2}.svg, and ww/managers/mimalloc/doc/bench-2021/bench-amd5950x-2021-01-30-{a,b}.svg, bench-c5-18xlarge-2021-01-30-{a,b}.svg.]
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-a.svg b/ww/managers/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-a.svg new file mode 100644 index 00000000..6cd36aaa --- /dev/null +++ b/ww/managers/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-a.svg @@ -0,0 +1,836 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-b.svg b/ww/managers/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-b.svg new file mode 100644 index 00000000..c81072e9 --- /dev/null +++ b/ww/managers/mimalloc/doc/bench-2021/bench-c5-18xlarge-2021-01-30-rss-b.svg @@ -0,0 +1,1131 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/doc/bench-2021/bench-macmini-2021-01-30.svg b/ww/managers/mimalloc/doc/bench-2021/bench-macmini-2021-01-30.svg new file mode 100644 index 00000000..ece64185 --- /dev/null +++ b/ww/managers/mimalloc/doc/bench-2021/bench-macmini-2021-01-30.svg @@ -0,0 +1,766 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/doc/doxyfile b/ww/managers/mimalloc/doc/doxyfile new file mode 100644 index 00000000..d03a70f5 --- /dev/null +++ b/ww/managers/mimalloc/doc/doxyfile @@ -0,0 +1,2659 @@ +# Doxyfile 1.9.1 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the configuration +# file that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# https://www.gnu.org/software/libiconv/ for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = mi-malloc + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = 1.8/2.1 + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. 
+ +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = mimalloc-logo.svg + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = .. + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all generated output in the proper direction. +# Possible values are: None, LTR, RTL and Context. +# The default value is: None. + +OUTPUT_TEXT_DIRECTION = None + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. 
Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = YES + +# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line +# such as +# /*************** +# as being the beginning of a Javadoc-style comment "banner". If set to NO, the +# Javadoc-style will behave just like regular comments and it will not be +# interpreted by doxygen. +# The default value is: NO. 
+ +JAVADOC_BANNER = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# By default Python docstrings are displayed as preformatted text and doxygen's +# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the +# doxygen's special commands can be used and the contents of the docstring +# documentation blocks is shown as doxygen documentation. +# The default value is: YES. + +PYTHON_DOCSTRING = YES + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 2 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines (in the resulting output). You can put ^^ in the value part of an +# alias to insert a newline as if a physical newline was in the original file. +# When you need a literal { or } or , in the value part of an alias you have to +# escape them by means of a backslash (\), this can lead to conflicts with the +# commands \{ and \} for these it is advised to use the version @{ and @} or use +# a double escape (\\{ and \\}) + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = YES + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. 
+ +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice +# sources only. Doxygen will then generate output that is more tailored for that +# language. For instance, namespaces will be presented as modules, types will be +# separated into more groups, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_SLICE = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, +# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL, +# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: +# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser +# tries to guess whether the code is fixed or free formatted code, this is the +# default for Fortran type files). For instance to make doxygen treat .inc files +# as Fortran files (default is PHP), and .f files as C (default is Fortran), +# use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. When specifying no_extension you should add +# * to the FILE_PATTERNS. +# +# Note see also the list of default file extension mappings. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See https://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 5. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +TOC_INCLUDE_HEADINGS = 0 + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) 
but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. 
+ +INLINE_SIMPLE_STRUCTS = YES + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = YES + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +# The NUM_PROC_THREADS specifies the number threads doxygen is allowed to use +# during processing. When set to 0 doxygen will based this on the number of +# cores available in the system. You can set it explicitly to a value larger +# than 0 to get more control over the balance between CPU load and processing +# speed. At this moment only the input processing can be done using multiple +# threads. Since this is still an experimental feature the default is set to 1, +# which effectively disables parallel processing. Please report any issues you +# encounter. Generating dot graphs in parallel is controlled by the +# DOT_NUM_THREADS setting. +# Minimum value: 0, maximum value: 32, default value: 1. + +NUM_PROC_THREADS = 1 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual +# methods of a class will be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIV_VIRTUAL = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. 
+ +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If this flag is set to YES, the name of an unnamed parameter in a declaration +# will be determined by the corresponding definition. By default unnamed +# parameters remain unnamed in the output. +# The default value is: YES. + +RESOLVE_UNNAMED_PARAMS = YES + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# declarations. If set to NO, these declarations will be included in the +# documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# With the correct setting of option CASE_SENSE_NAMES doxygen will better be +# able to match the capabilities of the underlying filesystem. In case the +# filesystem is case sensitive (i.e. it supports files in the same directory +# whose names only differ in casing), the option must be set to YES to properly +# deal with such files in case they appear in the input. For filesystems that +# are not case sensitive the option should be be set to NO to properly deal with +# output files written for symbols that only differ in casing, such as for two +# classes, one named CLASS and the other named Class, and to also support +# references to files without having to specify the exact matching casing. 
On +# Windows (including Cygwin) and MacOS, users should typically set this option +# to NO, whereas on Linux or other Unix flavors it should typically be set to +# YES. +# The default value is: system dependent. + +CASE_SENSE_NAMES = NO + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. 
+ +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 0 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = NO + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = NO + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. 
+ +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. If +# EXTRACT_ALL is set to YES then this flag will automatically be disabled. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS +# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but +# at the end of the doxygen process doxygen will return with a non-zero status. +# Possible values are: NO, YES and FAIL_ON_WARNINGS. +# The default value is: NO. + +WARN_AS_ERROR = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. 
The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +INPUT = mimalloc-doc.h + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: +# https://www.gnu.org/software/libiconv/) for the list of possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# Note the list of default checked file patterns might differ from the list of +# default file extension mappings. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment), +# *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, *.vhdl, +# *.ucf, *.qsf and *.ice. + +FILE_PATTERNS = *.c \ + *.cc \ + *.cxx \ + *.cpp \ + *.c++ \ + *.java \ + *.ii \ + *.ixx \ + *.ipp \ + *.i++ \ + *.inl \ + *.idl \ + *.ddl \ + *.odl \ + *.h \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ + *.inc \ + *.m \ + *.markdown \ + *.md \ + *.mm \ + *.dox \ + *.py \ + *.pyw \ + *.f90 \ + *.f95 \ + *.f03 \ + *.f08 \ + *.f \ + *.for \ + *.tcl \ + *.vhd \ + *.vhdl \ + *.ucf \ + *.qsf + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. 
+ +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = * + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# +# +# where is the value of the INPUT_FILTER tag, and is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. 
+# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# entity all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. 
+ +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see https://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the +# clang parser (see: +# http://clang.llvm.org/) for more accurate parsing at the cost of reduced +# performance. This can be particularly helpful with template rich C++ code for +# which doxygen's built-in parser lacks the necessary type information. +# Note: The availability of this option depends on whether or not doxygen was +# generated with the -Duse_libclang=ON option for CMake. +# The default value is: NO. + +CLANG_ASSISTED_PARSING = NO + +# If clang assisted parsing is enabled and the CLANG_ADD_INC_PATHS tag is set to +# YES then doxygen will add the directory of each input to the include path. +# The default value is: YES. + +CLANG_ADD_INC_PATHS = YES + +# If clang assisted parsing is enabled you can provide the compiler with command +# line options that you would normally use when invoking the compiler. Note that +# the include paths will already be set by doxygen for the files and directories +# specified with INPUT and INCLUDE_PATH. +# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. + +CLANG_OPTIONS = + +# If clang assisted parsing is enabled you can provide the clang parser with the +# path to the directory containing a file called compile_commands.json. This +# file is the compilation database (see: +# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) containing the +# options used when the source files were built. This is equivalent to +# specifying the -p option to a clang tool, such as clang-check. These options +# will then be passed to the parser. Any options specified with CLANG_OPTIONS +# will be added as well. +# Note: The availability of this option depends on whether or not doxygen was +# generated with the -Duse_libclang=ON option for CMake. + +CLANG_DATABASE_PATH = + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. 
+# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = docs + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = mimalloc-doxygen.css + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# https://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 189 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 12 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 240 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = NO + +# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML +# documentation will contain a main index with vertical navigation menus that +# are dynamically created via JavaScript. 
If disabled, the navigation index will +# consists of multiple levels of tabs that are statically embedded in every HTML +# page. Disable this option to support browsers that do not have JavaScript, +# like the Qt help browser. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_MENUS = NO + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: +# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To +# create a documentation set, doxygen will generate a Makefile in the HTML +# output directory. Running make will produce the docset in that directory and +# running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy +# genXcode/_index.html for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. 
+ +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: +# https://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the main .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. 
For more information please see Qt Help Project / Namespace +# (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location (absolute path +# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to +# run qhelpgenerator on the generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = YES + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = YES + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 180 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg +# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see +# https://inkscape.org) to generate formulas as SVG images instead of PNGs for +# the HTML output. These images will generally look nicer at scaled resolutions. +# Possible values are: png (the default) and svg (looks nicer but requires the +# pdf2svg or inkscape tool). +# The default value is: png. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FORMULA_FORMAT = png + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. 
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
+# to create new LaTeX commands to be used in formulas as building blocks. See
+# the section "Including formulas" for details.
+
+FORMULA_MACROFILE =
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# https://www.mathjax.org) which uses client side JavaScript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want to formulas look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from https://www.mathjax.org before deployment.
+# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<cmd>, or both).
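For orientation, the settings above are consumed by an ordinary Doxygen run; the sketch below assumes Doxygen is installed and is invoked from the directory that contains this configuration (assumed here to be saved as `doxyfile`) alongside the `mimalloc-doxygen.css` style sheet it references.

```
# Generate the HTML documentation described by the configuration above.
# The HTML ends up in the "docs" subdirectory of the configured output
# directory (HTML_OUTPUT = docs), with mimalloc-doxygen.css layered on
# top of the default style sheet (HTML_EXTRA_STYLESHEET).
doxygen doxyfile
```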

[... remainder of the Doxygen configuration, GIT binary patch data for the documentation image files, and the opening of the mimalloc documentation header omitted ...]
z;fCP}6O@PrN%B^YxOg}t^P{ThgTJ^E(kgslzl$gCl)SX>*?)F7^6E&BNv?CiiwlOI zR6fpk+#SvI^-q?ji83rBRv({`1dH3fK4tI=8oHPk(w8ph7nHn_mmd|FrDbALlxCBm z8Gc}{o`_Gk=5Py?sr9Wiw7(9Pc0Aps-TVNOgYTUK7M9(#P2Tw+#cV$fn(}dCUqFx3 zBHqn#dR-IAz}PmQm6Io;(QtA4NGmQ)f!cz7t#M!fEk0UHdRx{7n#*8PaA;8Hf`^~Y z^OZdRAwdY?`XcB1r1j2jaKtQ0XJ?~FSk4ytg6Iu-QWa{+7QUnw!B{$$S2mawMjB{E zSshQQ7Wail*=F&$VEHw24A15(Ik}}YHTKkLgz!%FE%{&0d6lzInCh5->UJO&G9%B1 zT5Z^TA%5twqR#ORe4ds^`c*6DIiSAD3ueah=(aWk^ z<$um(>gNTB{au0eTznO-vmkv3+Mt2(bWhSh4*4yGz$7-=nvS0RE-z>gmd~b4lk&5- z^7hX5ukZvhdXRc?mU()&5it(MGTHc0`58wz@xUp5YDJMJX^DL_Df>W(;!nW)NMJJX z<>M29Y~H{^-Q)G~>a5@a6~meHA#&dO<_ogsPf;Nt8UqSy>ZMw(12kT_N%H*TDRkn& z@66ElH{SQc3=HY;RC60U{lf5+>TF3Xwu+JESd#9!+-3ZA2h1)q-SLfwYV1bOw~dwG ze_9r|_yRe>x0k&Ip{LKg?^n~LZqPz`gEbk$A?zKj@*z-#uhU9#Sc) zjz9m?(u}ldhO;}iwYc79b~FUtq{iOd2#%ym5Z)K}j97co=!uky)S1D~lax98xXF25 zI18vY|MB|Oi)khw7xv9@9Tkvd>fJ1F+|{3>sqa1e^TBonBNd@13gc`E?WFseHi zU(~3_POj)oPDsNHIYj`1b?*#q0Xrk0?Z(n;Wz(aH-5{`T=?He`>CpR8(WgWyritux zk`N7Tf(BnQ+4I{F!^J`p)rwK_*?|W@f{XK+)Eb`^_8|7f*M|T2m$^rK2>GlO&&!^6 ze10&7FWc*68Lr8RZMCBn_Ws^UE#fYaiOH1h`bKjOFBAD1Z{jn8?V{p^=!?bnoRdev zg79VnP8|hiMhJU#Q(vFL6i0=jzFQmA+)*F#@Vn*81!)F*y|Y{}=P;(>9LH8)keWDJ zxWrg&HSuSt>UMvdbb8M6irGi1!Zw{zmsUP3wXn=MXR)hRqHEtrjIx0@)aCT$^&1h# z>$&H7;kS^N7+ShNi?9zcTNc&5JPLs!^__d(Q1fJI>5U;Sc_$3Dywml$@WldUo^y*> zxh>kdKU}KG>1IggV|rC0o=G$F9H9%4Lqmi~py6Uge@V{AT^jw|LFGFzG&OS)Ny4TS z@p(tplVs!xsjA5=kb03O&r>wDqepv*Uv%x0q?`bti%Jj47;)RTngr8QK3|!Tw)LFv zDxO6isy&potp>5yWHHqmR^+JP2nR=-W~-_Pkb%`l=e#zMYNs2**YvE|wE`jPm+eIA z^};l5)vCVNRnDRScr~Ep807R=_=ri#I(FmYEv(wPGrCR0+&|nTLgmjiT}5yzhb)JlxJ=MRktQJoDby$!s4{3Y)?Y>C zTi^o|jLrsD>u8C;*>%m*Z64Yn>_?01S!YDveg>OP+i!ZQ?0jPZgNl_5`gPQP%=J+H zt!=D#PH*C})zo~CJ5*xKc;Hg8unByxhWpWR_pN50onpUI?PC=SYZS-=E|ejHzvLmQ#S|bz^8g!DC87Z-#+DUdY zO4iB!6@ja%Y+?9$0DLk^ZEjRr5ZG?7BL9JK{`p<>=J73UvfczIt$+B~)%w3DqJv)z zBB|=;3wg-&3FE0M^zL&*?$2RoyLSIL%+8)}!|t-rVE2H3hsfw3R3{M zYpT7w1KL$ZWGRL`e(gkV*_Qy_7!`>A$Nm2fk6K0Gxvzq>yZPtFWJ;)~Ie2w)x LD_PRELOAD=/usr/bin/libmimalloc.so myprogram +``` + +Notable aspects of the design include: + +- __small and consistent__: the library is about 8k LOC using simple and + consistent data structures. This makes it very suitable + to integrate and adapt in other projects. For runtime systems it + provides hooks for a monotonic _heartbeat_ and deferred freeing (for + bounded worst-case times with reference counting). +- __free list sharding__: instead of one big free list (per size class) we have + many smaller lists per "mimalloc page" which reduces fragmentation and + increases locality -- + things that are allocated close in time get allocated close in memory. + (A mimalloc page contains blocks of one size class and is usually 64KiB on a 64-bit system). +- __free list multi-sharding__: the big idea! Not only do we shard the free list + per mimalloc page, but for each page we have multiple free lists. In particular, there + is one list for thread-local `free` operations, and another one for concurrent `free` + operations. Free-ing from another thread can now be a single CAS without needing + sophisticated coordination between threads. Since there will be + thousands of separate free lists, contention is naturally distributed over the heap, + and the chance of contending on a single location will be low -- this is quite + similar to randomized algorithms like skip lists where adding + a random oracle removes the need for a more complex algorithm. 
+- __eager page reset__: when a "page" becomes empty (with increased chance + due to free list sharding) the memory is marked to the OS as unused ("reset" or "purged") + reducing (real) memory pressure and fragmentation, especially in long running + programs. +- __secure__: _mimalloc_ can be build in secure mode, adding guard pages, + randomized allocation, encrypted free lists, etc. to protect against various + heap vulnerabilities. The performance penalty is only around 5% on average + over our benchmarks. +- __first-class heaps__: efficiently create and use multiple heaps to allocate across different regions. + A heap can be destroyed at once instead of deallocating each object separately. +- __bounded__: it does not suffer from _blowup_ \[1\], has bounded worst-case allocation + times (_wcat_), bounded space overhead (~0.2% meta-data, with low internal fragmentation), + and has no internal points of contention using only atomic operations. +- __fast__: In our benchmarks (see [below](#performance)), + _mimalloc_ outperforms all other leading allocators (_jemalloc_, _tcmalloc_, _Hoard_, etc), + and usually uses less memory (up to 25% more in the worst case). A nice property + is that it does consistently well over a wide range of benchmarks. + +You can read more on the design of _mimalloc_ in the +[technical report](https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action) +which also has detailed benchmark results. + + +Further information: + +- \ref build +- \ref using +- \ref environment +- \ref overrides +- \ref bench +- \ref malloc +- \ref extended +- \ref aligned +- \ref heap +- \ref typed +- \ref analysis +- \ref options +- \ref posix +- \ref cpp + +*/ + + +/// \defgroup malloc Basic Allocation +/// The basic allocation interface. +/// \{ + + +/// Free previously allocated memory. +/// The pointer `p` must have been allocated before (or be \a NULL). +/// @param p pointer to free, or \a NULL. +void mi_free(void* p); + +/// Allocate \a size bytes. +/// @param size number of bytes to allocate. +/// @returns pointer to the allocated memory or \a NULL if out of memory. +/// Returns a unique pointer if called with \a size 0. +void* mi_malloc(size_t size); + +/// Allocate zero-initialized `size` bytes. +/// @param size The size in bytes. +/// @returns Pointer to newly allocated zero initialized memory, +/// or \a NULL if out of memory. +void* mi_zalloc(size_t size); + +/// Allocate zero-initialized \a count elements of \a size bytes. +/// @param count number of elements. +/// @param size size of each element. +/// @returns pointer to the allocated memory +/// of \a size*\a count bytes, or \a NULL if either out of memory +/// or when `count*size` overflows. +/// +/// Returns a unique pointer if called with either \a size or \a count of 0. +/// @see mi_zalloc() +void* mi_calloc(size_t count, size_t size); + +/// Re-allocate memory to \a newsize bytes. +/// @param p pointer to previously allocated memory (or \a NULL). +/// @param newsize the new required size in bytes. +/// @returns pointer to the re-allocated memory +/// of \a newsize bytes, or \a NULL if out of memory. +/// If \a NULL is returned, the pointer \a p is not freed. +/// Otherwise the original pointer is either freed or returned +/// as the reallocated result (in case it fits in-place with the +/// new size). If the pointer \a p is \a NULL, it behaves as +/// \a mi_malloc(\a newsize). 
If \a newsize is larger than the +/// original \a size allocated for \a p, the bytes after \a size +/// are uninitialized. +void* mi_realloc(void* p, size_t newsize); + +/// Re-allocate memory to \a count elements of \a size bytes, with extra memory initialized to zero. +/// @param p Pointer to a previously allocated block (or \a NULL). +/// @param count The number of elements. +/// @param size The size of each element. +/// @returns A pointer to a re-allocated block of \a count * \a size bytes, or \a NULL +/// if out of memory or if \a count * \a size overflows. +/// +/// If there is no overflow, it behaves exactly like `mi_rezalloc(p,count*size)`. +/// @see mi_reallocn() +/// @see [recallocarray()](http://man.openbsd.org/reallocarray) (on BSD). +void* mi_recalloc(void* p, size_t count, size_t size); + +/// Try to re-allocate memory to \a newsize bytes _in place_. +/// @param p pointer to previously allocated memory (or \a NULL). +/// @param newsize the new required size in bytes. +/// @returns pointer to the re-allocated memory +/// of \a newsize bytes (always equal to \a p), +/// or \a NULL if either out of memory or if +/// the memory could not be expanded in place. +/// If \a NULL is returned, the pointer \a p is not freed. +/// Otherwise the original pointer is returned +/// as the reallocated result since it fits in-place with the +/// new size. If \a newsize is larger than the +/// original \a size allocated for \a p, the bytes after \a size +/// are uninitialized. +void* mi_expand(void* p, size_t newsize); + +/// Allocate \a count elements of \a size bytes. +/// @param count The number of elements. +/// @param size The size of each element. +/// @returns A pointer to a block of \a count * \a size bytes, or \a NULL +/// if out of memory or if \a count * \a size overflows. +/// +/// If there is no overflow, it behaves exactly like `mi_malloc(count*size)`. +/// @see mi_calloc() +/// @see mi_zallocn() +void* mi_mallocn(size_t count, size_t size); + +/// Re-allocate memory to \a count elements of \a size bytes. +/// @param p Pointer to a previously allocated block (or \a NULL). +/// @param count The number of elements. +/// @param size The size of each element. +/// @returns A pointer to a re-allocated block of \a count * \a size bytes, or \a NULL +/// if out of memory or if \a count * \a size overflows. +/// +/// If there is no overflow, it behaves exactly like `mi_realloc(p,count*size)`. +/// @see [reallocarray()]() (on BSD) +void* mi_reallocn(void* p, size_t count, size_t size); + +/// Re-allocate memory to \a newsize bytes, +/// @param p pointer to previously allocated memory (or \a NULL). +/// @param newsize the new required size in bytes. +/// @returns pointer to the re-allocated memory +/// of \a newsize bytes, or \a NULL if out of memory. +/// +/// In contrast to mi_realloc(), if \a NULL is returned, the original pointer +/// \a p is freed (if it was not \a NULL itself). +/// Otherwise the original pointer is either freed or returned +/// as the reallocated result (in case it fits in-place with the +/// new size). If the pointer \a p is \a NULL, it behaves as +/// \a mi_malloc(\a newsize). If \a newsize is larger than the +/// original \a size allocated for \a p, the bytes after \a size +/// are uninitialized. +/// +/// @see [reallocf](https://www.freebsd.org/cgi/man.cgi?query=reallocf) (on BSD) +void* mi_reallocf(void* p, size_t newsize); + + +/// Allocate and duplicate a string. +/// @param s string to duplicate (or \a NULL). 
+/// @returns a pointer to newly allocated memory initialized +/// to string \a s, or \a NULL if either out of memory or if +/// \a s is \a NULL. +/// +/// Replacement for the standard [strdup()](http://pubs.opengroup.org/onlinepubs/9699919799/functions/strdup.html) +/// such that mi_free() can be used on the returned result. +char* mi_strdup(const char* s); + +/// Allocate and duplicate a string up to \a n bytes. +/// @param s string to duplicate (or \a NULL). +/// @param n maximum number of bytes to copy (excluding the terminating zero). +/// @returns a pointer to newly allocated memory initialized +/// to string \a s up to the first \a n bytes (and always zero terminated), +/// or \a NULL if either out of memory or if \a s is \a NULL. +/// +/// Replacement for the standard [strndup()](http://pubs.opengroup.org/onlinepubs/9699919799/functions/strndup.html) +/// such that mi_free() can be used on the returned result. +char* mi_strndup(const char* s, size_t n); + +/// Resolve a file path name. +/// @param fname File name. +/// @param resolved_name Should be \a NULL (but can also point to a buffer +/// of at least \a PATH_MAX bytes). +/// @returns If successful a pointer to the resolved absolute file name, or +/// \a NULL on failure (with \a errno set to the error code). +/// +/// If \a resolved_name was \a NULL, the returned result should be freed with +/// mi_free(). +/// +/// Replacement for the standard [realpath()](http://pubs.opengroup.org/onlinepubs/9699919799/functions/realpath.html) +/// such that mi_free() can be used on the returned result (if \a resolved_name was \a NULL). +char* mi_realpath(const char* fname, char* resolved_name); + +/// \} + +// ------------------------------------------------------ +// Extended functionality +// ------------------------------------------------------ + +/// \defgroup extended Extended Functions +/// Extended functionality. +/// \{ + +/// Maximum size allowed for small allocations in +/// #mi_malloc_small and #mi_zalloc_small (usually `128*sizeof(void*)` (= 1KB on 64-bit systems)) +#define MI_SMALL_SIZE_MAX (128*sizeof(void*)) + +/// Allocate a small object. +/// @param size The size in bytes, can be at most #MI_SMALL_SIZE_MAX. +/// @returns a pointer to newly allocated memory of at least \a size +/// bytes, or \a NULL if out of memory. +/// This function is meant for use in run-time systems for best +/// performance and does not check if \a size was indeed small -- use +/// with care! +void* mi_malloc_small(size_t size); + +/// Allocate a zero initialized small object. +/// @param size The size in bytes, can be at most #MI_SMALL_SIZE_MAX. +/// @returns a pointer to newly allocated zero-initialized memory of at +/// least \a size bytes, or \a NULL if out of memory. +/// This function is meant for use in run-time systems for best +/// performance and does not check if \a size was indeed small -- use +/// with care! +void* mi_zalloc_small(size_t size); + +/// Return the available bytes in a memory block. +/// @param p Pointer to previously allocated memory (or \a NULL) +/// @returns Returns the available bytes in the memory block, or +/// 0 if \a p was \a NULL. +/// +/// The returned size can be +/// used to call \a mi_expand successfully. +/// The returned size is always at least equal to the +/// allocated size of \a p, and, in the current design, +/// should be less than 16.7% more. 
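+///
+/// For example (a small sketch; the concrete sizes are illustrative only and
+/// depend on mimalloc's internal size classes):
+/// ```
+/// void* p = mi_malloc(100);
+/// size_t n = mi_usable_size(p);  // n >= 100, e.g. 112 depending on the size class
+/// p = mi_expand(p, n);           // expanding within the usable size succeeds in place
+/// mi_free(p);
+/// ```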
+/// +/// @see [_msize](https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/msize?view=vs-2017) (Windows) +/// @see [malloc_usable_size](http://man7.org/linux/man-pages/man3/malloc_usable_size.3.html) (Linux) +/// @see mi_good_size() +size_t mi_usable_size(void* p); + +/// Return the used allocation size. +/// @param size The minimal required size in bytes. +/// @returns the size `n` that will be allocated, where `n >= size`. +/// +/// Generally, `mi_usable_size(mi_malloc(size)) == mi_good_size(size)`. +/// This can be used to reduce internal wasted space when +/// allocating buffers for example. +/// +/// @see mi_usable_size() +size_t mi_good_size(size_t size); + +/// Eagerly free memory. +/// @param force If \a true, aggressively return memory to the OS (can be expensive!) +/// +/// Regular code should not have to call this function. It can be beneficial +/// in very narrow circumstances; in particular, when a long running thread +/// allocates a lot of blocks that are freed by other threads it may improve +/// resource usage by calling this every once in a while. +void mi_collect(bool force); + +/// Deprecated +/// @param out Ignored, outputs to the registered output function or stderr by default. +/// +/// Most detailed when using a debug build. +void mi_stats_print(void* out); + +/// Print the main statistics. +/// @param out An output function or \a NULL for the default. +/// @param arg Optional argument passed to \a out (if not \a NULL) +/// +/// Most detailed when using a debug build. +void mi_stats_print_out(mi_output_fun* out, void* arg); + +/// Reset statistics. +void mi_stats_reset(void); + +/// Merge thread local statistics with the main statistics and reset. +void mi_stats_merge(void); + +/// Initialize mimalloc on a thread. +/// Should not be used as on most systems (pthreads, windows) this is done +/// automatically. +void mi_thread_init(void); + +/// Uninitialize mimalloc on a thread. +/// Should not be used as on most systems (pthreads, windows) this is done +/// automatically. Ensures that any memory that is not freed yet (but will +/// be freed by other threads in the future) is properly handled. +void mi_thread_done(void); + +/// Print out heap statistics for this thread. +/// @param out An output function or \a NULL for the default. +/// @param arg Optional argument passed to \a out (if not \a NULL) +/// +/// Most detailed when using a debug build. +void mi_thread_stats_print_out(mi_output_fun* out, void* arg); + +/// Type of deferred free functions. +/// @param force If \a true all outstanding items should be freed. +/// @param heartbeat A monotonically increasing count. +/// @param arg Argument that was passed at registration to hold extra state. +/// +/// @see mi_register_deferred_free +typedef void (mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg); + +/// Register a deferred free function. +/// @param deferred_free Address of a deferred free-ing function or \a NULL to unregister. +/// @param arg Argument that will be passed on to the deferred free function. +/// +/// Some runtime systems use deferred free-ing, for example when using +/// reference counting to limit the worst case free time. +/// Such systems can register (re-entrant) deferred free function +/// to free more memory on demand. When the \a force parameter is +/// \a true all possible memory should be freed. 
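+///
+/// A minimal callback could look as follows (a sketch only: the `todo_list_t`
+/// type and the `free_some`/`free_all` helpers are hypothetical application
+/// code, not part of the mimalloc API):
+/// ```
+/// static void my_deferred_free(bool force, unsigned long long heartbeat, void* arg) {
+///   todo_list_t* todo = (todo_list_t*)arg;             // state passed at registration
+///   if (force) { free_all(todo); }                      // release everything still pending
+///   else if (heartbeat % 10 == 0) { free_some(todo); }  // otherwise free a little now and then
+/// }
+/// // at startup: mi_register_deferred_free(&my_deferred_free, &todo);
+/// ```
+///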
+/// The per-thread \a heartbeat parameter is monotonically increasing +/// and guaranteed to be deterministic if the program allocates +/// deterministically. The \a deferred_free function is guaranteed +/// to be called deterministically after some number of allocations +/// (regardless of freeing or available free memory). +/// At most one \a deferred_free function can be active. +void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg); + +/// Type of output functions. +/// @param msg Message to output. +/// @param arg Argument that was passed at registration to hold extra state. +/// +/// @see mi_register_output() +typedef void (mi_output_fun)(const char* msg, void* arg); + +/// Register an output function. +/// @param out The output function, use `NULL` to output to stderr. +/// @param arg Argument that will be passed on to the output function. +/// +/// The `out` function is called to output any information from mimalloc, +/// like verbose or warning messages. +void mi_register_output(mi_output_fun* out, void* arg); + +/// Type of error callback functions. +/// @param err Error code (see mi_register_error() for a complete list). +/// @param arg Argument that was passed at registration to hold extra state. +/// +/// @see mi_register_error() +typedef void (mi_error_fun)(int err, void* arg); + +/// Register an error callback function. +/// @param errfun The error function that is called on an error (use \a NULL for default) +/// @param arg Extra argument that will be passed on to the error function. +/// +/// The \a errfun function is called on an error in mimalloc after emitting +/// an error message (through the output function). It as always legal to just +/// return from the \a errfun function in which case allocation functions generally +/// return \a NULL or ignore the condition. The default function only calls abort() +/// when compiled in secure mode with an \a EFAULT error. The possible error +/// codes are: +/// * \a EAGAIN: Double free was detected (only in debug and secure mode). +/// * \a EFAULT: Corrupted free list or meta-data was detected (only in debug and secure mode). +/// * \a ENOMEM: Not enough memory available to satisfy the request. +/// * \a EOVERFLOW: Too large a request, for example in mi_calloc(), the \a count and \a size parameters are too large. +/// * \a EINVAL: Trying to free or re-allocate an invalid pointer. +void mi_register_error(mi_error_fun* errfun, void* arg); + +/// Is a pointer part of our heap? +/// @param p The pointer to check. +/// @returns \a true if this is a pointer into our heap. +/// This function is relatively fast. +bool mi_is_in_heap_region(const void* p); + +/// Reserve OS memory for use by mimalloc. Reserved areas are used +/// before allocating from the OS again. By reserving a large area upfront, +/// allocation can be more efficient, and can be better managed on systems +/// without `mmap`/`VirtualAlloc` (like WASM for example). +/// @param size The size to reserve. +/// @param commit Commit the memory upfront. +/// @param allow_large Allow large OS pages (2MiB) to be used? +/// @return \a 0 if successful, and an error code otherwise (e.g. `ENOMEM`). +int mi_reserve_os_memory(size_t size, bool commit, bool allow_large); + +/// Manage a particular memory area for use by mimalloc. +/// This is just like `mi_reserve_os_memory` except that the area should already be +/// allocated in some manner and available for use my mimalloc. 
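+///
+/// For example (an illustrative sketch only; it assumes the area is large enough
+/// and suitably aligned for mimalloc to manage -- `arena` is just a hypothetical
+/// pre-allocated region):
+/// ```
+/// static unsigned char arena[64*1024*1024];  // 64 MiB of zero-initialized static storage
+/// bool ok = mi_manage_os_memory(arena, sizeof(arena),
+///                               true  /* is_committed */, false /* is_large */,
+///                               true  /* is_zero */,      -1    /* numa_node */);
+/// ```
+///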
+/// @param start Start of the memory area +/// @param size The size of the memory area. +/// @param commit Is the area already committed? +/// @param is_large Does it consist of large OS pages? Set this to \a true as well for memory +/// that should not be decommitted or protected (like rdma etc.) +/// @param is_zero Does the area consists of zero's? +/// @param numa_node Possible associated numa node or `-1`. +/// @return \a true if successful, and \a false on error. +bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node); + +/// Reserve \a pages of huge OS pages (1GiB) evenly divided over \a numa_nodes nodes, +/// but stops after at most `timeout_msecs` seconds. +/// @param pages The number of 1GiB pages to reserve. +/// @param numa_nodes The number of nodes do evenly divide the pages over, or 0 for using the actual number of NUMA nodes. +/// @param timeout_msecs Maximum number of milli-seconds to try reserving, or 0 for no timeout. +/// @returns 0 if successful, \a ENOMEM if running out of memory, or \a ETIMEDOUT if timed out. +/// +/// The reserved memory is used by mimalloc to satisfy allocations. +/// May quit before \a timeout_msecs are expired if it estimates it will take more than +/// 1.5 times \a timeout_msecs. The time limit is needed because on some operating systems +/// it can take a long time to reserve contiguous memory if the physical memory is +/// fragmented. +int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs); + +/// Reserve \a pages of huge OS pages (1GiB) at a specific \a numa_node, +/// but stops after at most `timeout_msecs` seconds. +/// @param pages The number of 1GiB pages to reserve. +/// @param numa_node The NUMA node where the memory is reserved (start at 0). +/// @param timeout_msecs Maximum number of milli-seconds to try reserving, or 0 for no timeout. +/// @returns 0 if successful, \a ENOMEM if running out of memory, or \a ETIMEDOUT if timed out. +/// +/// The reserved memory is used by mimalloc to satisfy allocations. +/// May quit before \a timeout_msecs are expired if it estimates it will take more than +/// 1.5 times \a timeout_msecs. The time limit is needed because on some operating systems +/// it can take a long time to reserve contiguous memory if the physical memory is +/// fragmented. +int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs); + + +/// Is the C runtime \a malloc API redirected? +/// @returns \a true if all malloc API calls are redirected to mimalloc. +/// +/// Currently only used on Windows. +bool mi_is_redirected(); + +/// Return process information (time and memory usage). +/// @param elapsed_msecs Optional. Elapsed wall-clock time of the process in milli-seconds. +/// @param user_msecs Optional. User time in milli-seconds (as the sum over all threads). +/// @param system_msecs Optional. System time in milli-seconds. +/// @param current_rss Optional. Current working set size (touched pages). +/// @param peak_rss Optional. Peak working set size (touched pages). +/// @param current_commit Optional. Current committed memory (backed by the page file). +/// @param peak_commit Optional. Peak committed memory (backed by the page file). +/// @param page_faults Optional. Count of hard page faults. +/// +/// The \a current_rss is precise on Windows and MacOSX; other systems estimate +/// this using \a current_commit. 
The \a commit is precise on Windows but estimated +/// on other systems as the amount of read/write accessible memory reserved by mimalloc. +void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults); + +/// \} + +// ------------------------------------------------------ +// Aligned allocation +// ------------------------------------------------------ + +/// \defgroup aligned Aligned Allocation +/// +/// Allocating aligned memory blocks. +/// +/// \{ + +/// The maximum supported alignment size (currently 1MiB). +#define MI_BLOCK_ALIGNMENT_MAX (1024*1024UL) + +/// Allocate \a size bytes aligned by \a alignment. +/// @param size number of bytes to allocate. +/// @param alignment the minimal alignment of the allocated memory. Must be less than #MI_BLOCK_ALIGNMENT_MAX. +/// @returns pointer to the allocated memory or \a NULL if out of memory. +/// The returned pointer is aligned by \a alignment, i.e. +/// `(uintptr_t)p % alignment == 0`. +/// +/// Returns a unique pointer if called with \a size 0. +/// @see [_aligned_malloc](https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/aligned-malloc?view=vs-2017) (on Windows) +/// @see [aligned_alloc](http://man.openbsd.org/reallocarray) (on BSD, with switched arguments!) +/// @see [posix_memalign](https://linux.die.net/man/3/posix_memalign) (on Posix, with switched arguments!) +/// @see [memalign](https://linux.die.net/man/3/posix_memalign) (on Linux, with switched arguments!) +void* mi_malloc_aligned(size_t size, size_t alignment); +void* mi_zalloc_aligned(size_t size, size_t alignment); +void* mi_calloc_aligned(size_t count, size_t size, size_t alignment); +void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment); + +/// Allocate \a size bytes aligned by \a alignment at a specified \a offset. +/// @param size number of bytes to allocate. +/// @param alignment the minimal alignment of the allocated memory at \a offset. +/// @param offset the offset that should be aligned. +/// @returns pointer to the allocated memory or \a NULL if out of memory. +/// The returned pointer is aligned by \a alignment at \a offset, i.e. +/// `((uintptr_t)p + offset) % alignment == 0`. +/// +/// Returns a unique pointer if called with \a size 0. +/// @see [_aligned_offset_malloc](https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/aligned-offset-malloc?view=vs-2017) (on Windows) +void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset); +void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset); +void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset); +void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset); + +/// \} + +/// \defgroup heap Heap Allocation +/// +/// First-class heaps that can be destroyed in one go. +/// +/// \{ + +/// Type of first-class heaps. +/// A heap can only be used for allocation in +/// the thread that created this heap! Any allocated +/// blocks can be freed or reallocated by any other thread though. +struct mi_heap_s; + +/// Type of first-class heaps. +/// A heap can only be used for (re)allocation in +/// the thread that created this heap! Any allocated +/// blocks can be freed by any other thread though. +typedef struct mi_heap_s mi_heap_t; + +/// Create a new heap that can be used for allocation. +mi_heap_t* mi_heap_new(); + +/// Delete a previously allocated heap. 
+/// This will release resources and migrate any +/// still allocated blocks in this heap (efficiently) +/// to the default heap. +/// +/// If \a heap is the default heap, the default +/// heap is set to the backing heap. +void mi_heap_delete(mi_heap_t* heap); + +/// Destroy a heap, freeing all its still allocated blocks. +/// Use with care as this will free all blocks still +/// allocated in the heap. However, this can be a very +/// efficient way to free all heap memory in one go. +/// +/// If \a heap is the default heap, the default +/// heap is set to the backing heap. +void mi_heap_destroy(mi_heap_t* heap); + +/// Set the default heap to use for mi_malloc() et al. +/// @param heap The new default heap. +/// @returns The previous default heap. +mi_heap_t* mi_heap_set_default(mi_heap_t* heap); + +/// Get the default heap that is used for mi_malloc() et al. +/// @returns The current default heap. +mi_heap_t* mi_heap_get_default(); + +/// Get the backing heap. +/// The _backing_ heap is the initial default heap for +/// a thread and always available for allocations. +/// It cannot be destroyed or deleted +/// except by exiting the thread. +mi_heap_t* mi_heap_get_backing(); + +/// Release outstanding resources in a specific heap. +void mi_heap_collect(mi_heap_t* heap, bool force); + +/// Allocate in a specific heap. +/// @see mi_malloc() +void* mi_heap_malloc(mi_heap_t* heap, size_t size); + +/// Allocate a small object in a specific heap. +/// \a size must be smaller or equal to MI_SMALL_SIZE_MAX(). +/// @see mi_malloc() +void* mi_heap_malloc_small(mi_heap_t* heap, size_t size); + +/// Allocate zero-initialized in a specific heap. +/// @see mi_zalloc() +void* mi_heap_zalloc(mi_heap_t* heap, size_t size); + +/// Allocate \a count zero-initialized elements in a specific heap. +/// @see mi_calloc() +void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size); + +/// Allocate \a count elements in a specific heap. +/// @see mi_mallocn() +void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size); + +/// Duplicate a string in a specific heap. +/// @see mi_strdup() +char* mi_heap_strdup(mi_heap_t* heap, const char* s); + +/// Duplicate a string of at most length \a n in a specific heap. +/// @see mi_strndup() +char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n); + +/// Resolve a file path name using a specific \a heap to allocate the result. 
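+///
+/// For example, a short sketch that resolves a path into a short-lived heap and
+/// releases everything at once ("some/path" is a placeholder and error handling
+/// is omitted):
+/// ```
+/// mi_heap_t* h = mi_heap_new();
+/// char* full = mi_heap_realpath(h, "some/path", NULL);
+/// // ... use `full` ...
+/// mi_heap_destroy(h);  // frees `full` and all other blocks allocated in `h`
+/// ```
+///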
+/// @see mi_realpath() +char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name); + +void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize); +void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size); +void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize); + +void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment); +void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset); +void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment); +void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset); +void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment); +void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset); +void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment); +void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset); + +/// \} + + +/// \defgroup zeroinit Zero initialized re-allocation +/// +/// The zero-initialized re-allocations are only valid on memory that was +/// originally allocated with zero initialization too. +/// e.g. `mi_calloc`, `mi_zalloc`, `mi_zalloc_aligned` etc. +/// see +/// +/// \{ + +void* mi_rezalloc(void* p, size_t newsize); +void* mi_recalloc(void* p, size_t newcount, size_t size) ; + +void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment); +void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset); +void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment); +void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset); + +void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize); +void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size); + +void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment); +void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset); +void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment); +void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset); + +/// \} + +/// \defgroup typed Typed Macros +/// +/// Typed allocation macros. For example: +/// ``` +/// int* p = mi_malloc_tp(int) +/// ``` +/// +/// \{ + +/// Allocate a block of type \a tp. +/// @param tp The type of the block to allocate. +/// @returns A pointer to an object of type \a tp, or +/// \a NULL if out of memory. +/// +/// **Example:** +/// ``` +/// int* p = mi_malloc_tp(int) +/// ``` +/// +/// @see mi_malloc() +#define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp))) + +/// Allocate a zero-initialized block of type \a tp. +#define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp))) + +/// Allocate \a count zero-initialized blocks of type \a tp. +#define mi_calloc_tp(tp,count) ((tp*)mi_calloc(count,sizeof(tp))) + +/// Allocate \a count blocks of type \a tp. +#define mi_mallocn_tp(tp,count) ((tp*)mi_mallocn(count,sizeof(tp))) + +/// Re-allocate to \a count blocks of type \a tp. +#define mi_reallocn_tp(p,tp,count) ((tp*)mi_reallocn(p,count,sizeof(tp))) + +/// Allocate a block of type \a tp in a heap \a hp. 
+#define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp))) + +/// Allocate a zero-initialized block of type \a tp in a heap \a hp. +#define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp))) + +/// Allocate \a count zero-initialized blocks of type \a tp in a heap \a hp. +#define mi_heap_calloc_tp(hp,tp,count) ((tp*)mi_heap_calloc(hp,count,sizeof(tp))) + +/// Allocate \a count blocks of type \a tp in a heap \a hp. +#define mi_heap_mallocn_tp(hp,tp,count) ((tp*)mi_heap_mallocn(hp,count,sizeof(tp))) + +/// Re-allocate to \a count blocks of type \a tp in a heap \a hp. +#define mi_heap_reallocn_tp(hp,p,tp,count) ((tp*)mi_heap_reallocn(p,count,sizeof(tp))) + +/// Re-allocate to \a count zero initialized blocks of type \a tp in a heap \a hp. +#define mi_heap_recalloc_tp(hp,p,tp,count) ((tp*)mi_heap_recalloc(p,count,sizeof(tp))) + +/// \} + +/// \defgroup analysis Heap Introspection +/// +/// Inspect the heap at runtime. +/// +/// \{ + +/// Does a heap contain a pointer to a previously allocated block? +/// @param heap The heap. +/// @param p Pointer to a previously allocated block (in any heap)-- cannot be some +/// random pointer! +/// @returns \a true if the block pointed to by \a p is in the \a heap. +/// @see mi_heap_check_owned() +bool mi_heap_contains_block(mi_heap_t* heap, const void* p); + +/// Check safely if any pointer is part of a heap. +/// @param heap The heap. +/// @param p Any pointer -- not required to be previously allocated by us. +/// @returns \a true if \a p points to a block in \a heap. +/// +/// Note: expensive function, linear in the pages in the heap. +/// @see mi_heap_contains_block() +/// @see mi_heap_get_default() +bool mi_heap_check_owned(mi_heap_t* heap, const void* p); + +/// Check safely if any pointer is part of the default heap of this thread. +/// @param p Any pointer -- not required to be previously allocated by us. +/// @returns \a true if \a p points to a block in default heap of this thread. +/// +/// Note: expensive function, linear in the pages in the heap. +/// @see mi_heap_contains_block() +/// @see mi_heap_get_default() +bool mi_check_owned(const void* p); + +/// An area of heap space contains blocks of a single size. +/// The bytes in freed blocks are `committed - used`. +typedef struct mi_heap_area_s { + void* blocks; ///< start of the area containing heap blocks + size_t reserved; ///< bytes reserved for this area + size_t committed; ///< current committed bytes of this area + size_t used; ///< bytes in use by allocated blocks + size_t block_size; ///< size in bytes of one block +} mi_heap_area_t; + +/// Visitor function passed to mi_heap_visit_blocks() +/// @returns \a true if ok, \a false to stop visiting (i.e. break) +/// +/// This function is always first called for every \a area +/// with \a block as a \a NULL pointer. If \a visit_all_blocks +/// was \a true, the function is then called for every allocated +/// block in that area. +typedef bool (mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg); + +/// Visit all areas and blocks in a heap. +/// @param heap The heap to visit. +/// @param visit_all_blocks If \a true visits all allocated blocks, otherwise +/// \a visitor is only called for every heap area. +/// @param visitor This function is called for every area in the heap +/// (with \a block as \a NULL). If \a visit_all_blocks is +/// \a true, \a visitor is also called for every allocated +/// block in every area (with `block!=NULL`). 
+/// return \a false from this function to stop visiting early. +/// @param arg Extra argument passed to \a visitor. +/// @returns \a true if all areas and blocks were visited. +bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg); + +/// \} + +/// \defgroup options Runtime Options +/// +/// Set runtime behavior. +/// +/// \{ + +/// Runtime options. +typedef enum mi_option_e { + // stable options + mi_option_show_errors, ///< Print error messages to `stderr`. + mi_option_show_stats, ///< Print statistics to `stderr` when the program is done. + mi_option_verbose, ///< Print verbose messages to `stderr`. + + // the following options are experimental + mi_option_eager_commit, ///< Eagerly commit segments (4MiB) (enabled by default). + mi_option_large_os_pages, ///< Use large OS pages (2MiB in size) if possible + mi_option_reserve_huge_os_pages, ///< The number of huge OS pages (1GiB in size) to reserve at the start of the program. + mi_option_reserve_huge_os_pages_at, ///< Reserve huge OS pages at node N. + mi_option_reserve_os_memory, ///< Reserve specified amount of OS memory at startup, e.g. "1g" or "512m". + mi_option_segment_cache, ///< The number of segments per thread to keep cached (0). + mi_option_page_reset, ///< Reset page memory after \a mi_option_reset_delay milliseconds when it becomes free. + mi_option_abandoned_page_reset, //< Reset free page memory when a thread terminates. + mi_option_use_numa_nodes, ///< Pretend there are at most N NUMA nodes; Use 0 to use the actual detected NUMA nodes at runtime. + mi_option_eager_commit_delay, ///< the first N segments per thread are not eagerly committed (=1). + mi_option_os_tag, ///< OS tag to assign to mimalloc'd memory + mi_option_limit_os_alloc, ///< If set to 1, do not use OS memory for allocation (but only pre-reserved arenas) + + // v1.x specific options + mi_option_eager_region_commit, ///< Eagerly commit large (256MiB) memory regions (enabled by default, except on Windows) + mi_option_segment_reset, ///< Experimental + mi_option_reset_delay, ///< Delay in milli-seconds before resetting a page (100ms by default) + mi_option_purge_decommits, ///< Experimental + + // v2.x specific options + mi_option_allow_purge, ///< Enable decommitting memory (=on) + mi_option_purge_delay, ///< Decommit page memory after N milli-seconds delay (25ms). + mi_option_segment_purge_delay, ///< Decommit large segment memory after N milli-seconds delay (500ms). + + _mi_option_last +} mi_option_t; + + +bool mi_option_is_enabled(mi_option_t option); +void mi_option_enable(mi_option_t option); +void mi_option_disable(mi_option_t option); +void mi_option_set_enabled(mi_option_t option, bool enable); +void mi_option_set_enabled_default(mi_option_t option, bool enable); + +long mi_option_get(mi_option_t option); +void mi_option_set(mi_option_t option, long value); +void mi_option_set_default(mi_option_t option, long value); + + +/// \} + +/// \defgroup posix Posix +/// +/// `mi_` prefixed implementations of various Posix, Unix, and C++ allocation functions. +/// Defined for convenience as all redirect to the regular mimalloc API. +/// +/// \{ + +void* mi_recalloc(void* p, size_t count, size_t size); +size_t mi_malloc_size(const void* p); +size_t mi_malloc_usable_size(const void *p); + +/// Just as `free` but also checks if the pointer `p` belongs to our heap. 
+void mi_cfree(void* p); + +int mi_posix_memalign(void** p, size_t alignment, size_t size); +int mi__posix_memalign(void** p, size_t alignment, size_t size); +void* mi_memalign(size_t alignment, size_t size); +void* mi_valloc(size_t size); + +void* mi_pvalloc(size_t size); +void* mi_aligned_alloc(size_t alignment, size_t size); + +/// Correspond s to [reallocarray](https://www.freebsd.org/cgi/man.cgi?query=reallocarray&sektion=3&manpath=freebsd-release-ports) +/// in FreeBSD. +void* mi_reallocarray(void* p, size_t count, size_t size); + +/// Corresponds to [reallocarr](https://man.netbsd.org/reallocarr.3) in NetBSD. +int mi_reallocarr(void* p, size_t count, size_t size); + +void mi_free_size(void* p, size_t size); +void mi_free_size_aligned(void* p, size_t size, size_t alignment); +void mi_free_aligned(void* p, size_t alignment); + +/// \} + +/// \defgroup cpp C++ wrappers +/// +/// `mi_` prefixed implementations of various allocation functions +/// that use C++ semantics on out-of-memory, generally calling +/// `std::get_new_handler` and raising a `std::bad_alloc` exception on failure. +/// +/// Note: use the `mimalloc-new-delete.h` header to override the \a new +/// and \a delete operators globally. The wrappers here are mostly +/// for convenience for library writers that need to interface with +/// mimalloc from C++. +/// +/// \{ + +/// like mi_malloc(), but when out of memory, use `std::get_new_handler` and raise `std::bad_alloc` exception on failure. +void* mi_new(std::size_t n) noexcept(false); + +/// like mi_mallocn(), but when out of memory, use `std::get_new_handler` and raise `std::bad_alloc` exception on failure. +void* mi_new_n(size_t count, size_t size) noexcept(false); + +/// like mi_malloc_aligned(), but when out of memory, use `std::get_new_handler` and raise `std::bad_alloc` exception on failure. +void* mi_new_aligned(std::size_t n, std::align_val_t alignment) noexcept(false); + +/// like `mi_malloc`, but when out of memory, use `std::get_new_handler` but return \a NULL on failure. +void* mi_new_nothrow(size_t n); + +/// like `mi_malloc_aligned`, but when out of memory, use `std::get_new_handler` but return \a NULL on failure. +void* mi_new_aligned_nothrow(size_t n, size_t alignment); + +/// like mi_realloc(), but when out of memory, use `std::get_new_handler` and raise `std::bad_alloc` exception on failure. +void* mi_new_realloc(void* p, size_t newsize); + +/// like mi_reallocn(), but when out of memory, use `std::get_new_handler` and raise `std::bad_alloc` exception on failure. +void* mi_new_reallocn(void* p, size_t newcount, size_t size); + +/// \a std::allocator implementation for mimalloc for use in STL containers. +/// For example: +/// ``` +/// std::vector > vec; +/// vec.push_back(1); +/// vec.pop_back(); +/// ``` +template struct mi_stl_allocator { } + +/// \} + +/*! \page build Building + +Checkout the sources from GitHub: +``` +git clone https://github.com/microsoft/mimalloc +``` + +## Windows + +Open `ide/vs2019/mimalloc.sln` in Visual Studio 2019 and build (or `ide/vs2017/mimalloc.sln`). +The `mimalloc` project builds a static library (in `out/msvc-x64`), while the +`mimalloc-override` project builds a DLL for overriding malloc +in the entire program. + +## macOS, Linux, BSD, etc. + +We use [`cmake`](https://cmake.org)1 as the build system: + +``` +> mkdir -p out/release +> cd out/release +> cmake ../.. +> make +``` +This builds the library as a shared (dynamic) +library (`.so` or `.dylib`), a static library (`.a`), and +as a single object file (`.o`). 
+ +`> sudo make install` (install the library and header files in `/usr/local/lib` and `/usr/local/include`) + +You can build the debug version which does many internal checks and +maintains detailed statistics as: + +``` +> mkdir -p out/debug +> cd out/debug +> cmake -DCMAKE_BUILD_TYPE=Debug ../.. +> make +``` +This will name the shared library as `libmimalloc-debug.so`. + +Finally, you can build a _secure_ version that uses guard pages, encrypted +free lists, etc, as: +``` +> mkdir -p out/secure +> cd out/secure +> cmake -DMI_SECURE=ON ../.. +> make +``` +This will name the shared library as `libmimalloc-secure.so`. +Use `ccmake`2 instead of `cmake` +to see and customize all the available build options. + +Notes: +1. Install CMake: `sudo apt-get install cmake` +2. Install CCMake: `sudo apt-get install cmake-curses-gui` + +*/ + +/*! \page using Using the library + +### Build + +The preferred usage is including ``, linking with +the shared- or static library, and using the `mi_malloc` API exclusively for allocation. For example, +``` +gcc -o myprogram -lmimalloc myfile.c +``` + +mimalloc uses only safe OS calls (`mmap` and `VirtualAlloc`) and can co-exist +with other allocators linked to the same program. +If you use `cmake`, you can simply use: +``` +find_package(mimalloc 1.0 REQUIRED) +``` +in your `CMakeLists.txt` to find a locally installed mimalloc. Then use either: +``` +target_link_libraries(myapp PUBLIC mimalloc) +``` +to link with the shared (dynamic) library, or: +``` +target_link_libraries(myapp PUBLIC mimalloc-static) +``` +to link with the static library. See `test\CMakeLists.txt` for an example. + +### C++ +For best performance in C++ programs, it is also recommended to override the +global `new` and `delete` operators. For convience, mimalloc provides +[`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) which does this for you -- just include it in a single(!) source file in your project. + +In C++, mimalloc also provides the `mi_stl_allocator` struct which implements the `std::allocator` +interface. 
For example: +``` +std::vector> vec; +vec.push_back(some_struct()); +``` + +### Statistics + +You can pass environment variables to print verbose messages (`MIMALLOC_VERBOSE=1`) +and statistics (`MIMALLOC_SHOW_STATS=1`) (in the debug version): +``` +> env MIMALLOC_SHOW_STATS=1 ./cfrac 175451865205073170563711388363 + +175451865205073170563711388363 = 374456281610909315237213 * 468551 + +heap stats: peak total freed unit +normal 2: 16.4 kb 17.5 mb 17.5 mb 16 b ok +normal 3: 16.3 kb 15.2 mb 15.2 mb 24 b ok +normal 4: 64 b 4.6 kb 4.6 kb 32 b ok +normal 5: 80 b 118.4 kb 118.4 kb 40 b ok +normal 6: 48 b 48 b 48 b 48 b ok +normal 17: 960 b 960 b 960 b 320 b ok + +heap stats: peak total freed unit + normal: 33.9 kb 32.8 mb 32.8 mb 1 b ok + huge: 0 b 0 b 0 b 1 b ok + total: 33.9 kb 32.8 mb 32.8 mb 1 b ok +malloc requested: 32.8 mb + + committed: 58.2 kb 58.2 kb 58.2 kb 1 b ok + reserved: 2.0 mb 2.0 mb 2.0 mb 1 b ok + reset: 0 b 0 b 0 b 1 b ok + segments: 1 1 1 +-abandoned: 0 + pages: 6 6 6 +-abandoned: 0 + mmaps: 3 + mmap fast: 0 + mmap slow: 1 + threads: 0 + elapsed: 2.022s + process: user: 1.781s, system: 0.016s, faults: 756, reclaims: 0, rss: 2.7 mb +``` + +The above model of using the `mi_` prefixed API is not always possible +though in existing programs that already use the standard malloc interface, +and another option is to override the standard malloc interface +completely and redirect all calls to the _mimalloc_ library instead. + +See \ref overrides for more info. + +*/ + +/*! \page environment Environment Options + +You can set further options either programmatically (using [`mi_option_set`](https://microsoft.github.io/mimalloc/group__options.html)), +or via environment variables. + +- `MIMALLOC_SHOW_STATS=1`: show statistics when the program terminates. +- `MIMALLOC_VERBOSE=1`: show verbose messages. +- `MIMALLOC_SHOW_ERRORS=1`: show error and warning messages. +- `MIMALLOC_PAGE_RESET=0`: by default, mimalloc will reset (or purge) OS pages when not in use to signal to the OS + that the underlying physical memory can be reused. This can reduce memory fragmentation in long running (server) + programs. By setting it to `0` no such page resets will be done which can improve performance for programs that are not long + running. As an alternative, the `MIMALLOC_DECOMMIT_DELAY=` can be set higher (100ms by default) to make the page + reset occur less frequently instead of turning it off completely. +- `MIMALLOC_LARGE_OS_PAGES=1`: use large OS pages (2MiB) when available; for some workloads this can significantly + improve performance. Use `MIMALLOC_VERBOSE` to check if the large OS pages are enabled -- usually one needs + to explicitly allow large OS pages (as on [Windows][windows-huge] and [Linux][linux-huge]). However, sometimes + the OS is very slow to reserve contiguous physical memory for large OS pages so use with care on systems that + can have fragmented memory (for that reason, we generally recommend to use `MIMALLOC_RESERVE_HUGE_OS_PAGES` instead when possible). +- `MIMALLOC_RESERVE_HUGE_OS_PAGES=N`: where N is the number of 1GiB _huge_ OS pages. This reserves the huge pages at + startup and sometimes this can give a large (latency) performance improvement on big workloads. + Usually it is better to not use + `MIMALLOC_LARGE_OS_PAGES` in combination with this setting. Just like large OS pages, use with care as reserving + contiguous physical memory can take a long time when memory is fragmented (but reserving the huge pages is done at + startup only once). 
+ Note that we usually need to explicitly enable huge OS pages (as on [Windows][windows-huge] and [Linux][linux-huge])). With huge OS pages, it may be beneficial to set the setting + `MIMALLOC_EAGER_COMMIT_DELAY=N` (`N` is 1 by default) to delay the initial `N` segments (of 4MiB) + of a thread to not allocate in the huge OS pages; this prevents threads that are short lived + and allocate just a little to take up space in the huge OS page area (which cannot be reset). +- `MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=N`: where N is the numa node. This reserves the huge pages at a specific numa node. + (`N` is -1 by default to reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected)) + +Use caution when using `fork` in combination with either large or huge OS pages: on a fork, the OS uses copy-on-write +for all pages in the original process including the huge OS pages. When any memory is now written in that area, the +OS will copy the entire 1GiB huge page (or 2MiB large page) which can cause the memory usage to grow in big increments. + +[linux-huge]: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html/tuning_and_optimizing_red_hat_enterprise_linux_for_oracle_9i_and_10g_databases/sect-oracle_9i_and_10g_tuning_guide-large_memory_optimization_big_pages_and_huge_pages-configuring_huge_pages_in_red_hat_enterprise_linux_4_or_5 +[windows-huge]: https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/enable-the-lock-pages-in-memory-option-windows?view=sql-server-2017 + +*/ + +/*! \page overrides Overriding Malloc + +Overriding the standard `malloc` can be done either _dynamically_ or _statically_. + +## Dynamic override + +This is the recommended way to override the standard malloc interface. + + +### Linux, BSD + +On these systems we preload the mimalloc shared +library so all calls to the standard `malloc` interface are +resolved to the _mimalloc_ library. + +- `env LD_PRELOAD=/usr/lib/libmimalloc.so myprogram` + +You can set extra environment variables to check that mimalloc is running, +like: +``` +env MIMALLOC_VERBOSE=1 LD_PRELOAD=/usr/lib/libmimalloc.so myprogram +``` +or run with the debug version to get detailed statistics: +``` +env MIMALLOC_SHOW_STATS=1 LD_PRELOAD=/usr/lib/libmimalloc-debug.so myprogram +``` + +### MacOS + +On macOS we can also preload the mimalloc shared +library so all calls to the standard `malloc` interface are +resolved to the _mimalloc_ library. + +- `env DYLD_FORCE_FLAT_NAMESPACE=1 DYLD_INSERT_LIBRARIES=/usr/lib/libmimalloc.dylib myprogram` + +Note that certain security restrictions may apply when doing this from +the [shell](https://stackoverflow.com/questions/43941322/dyld-insert-libraries-ignored-when-calling-application-through-bash). + +(Note: macOS support for dynamic overriding is recent, please report any issues.) + + +### Windows + +Overriding on Windows is robust and has the +particular advantage to be able to redirect all malloc/free calls that go through +the (dynamic) C runtime allocator, including those from other DLL's or libraries. + +The overriding on Windows requires that you link your program explicitly with +the mimalloc DLL and use the C-runtime library as a DLL (using the `/MD` or `/MDd` switch). +Also, the `mimalloc-redirect.dll` (or `mimalloc-redirect32.dll`) must be available +in the same folder as the main `mimalloc-override.dll` at runtime (as it is a dependency). 
+The redirection DLL ensures that all calls to the C runtime malloc API get redirected to +mimalloc (in `mimalloc-override.dll`). + +To ensure the mimalloc DLL is loaded at run-time it is easiest to insert some +call to the mimalloc API in the `main` function, like `mi_version()` +(or use the `/INCLUDE:mi_version` switch on the linker). See the `mimalloc-override-test` project +for an example on how to use this. For best performance on Windows with C++, it +is also recommended to also override the `new`/`delete` operations (by including +[`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) a single(!) source file in your project). + +The environment variable `MIMALLOC_DISABLE_REDIRECT=1` can be used to disable dynamic +overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully redirected. + +(Note: in principle, it is possible to even patch existing executables without any recompilation +if they are linked with the dynamic C runtime (`ucrtbase.dll`) -- just put the `mimalloc-override.dll` +into the import table (and put `mimalloc-redirect.dll` in the same folder) +Such patching can be done for example with [CFF Explorer](https://ntcore.com/?page_id=388)). + + +## Static override + +On Unix systems, you can also statically link with _mimalloc_ to override the standard +malloc interface. The recommended way is to link the final program with the +_mimalloc_ single object file (`mimalloc-override.o`). We use +an object file instead of a library file as linkers give preference to +that over archives to resolve symbols. To ensure that the standard +malloc interface resolves to the _mimalloc_ library, link it as the first +object file. For example: + +``` +gcc -o myprogram mimalloc-override.o myfile1.c ... 
+``` + +## List of Overrides: + +The specific functions that get redirected to the _mimalloc_ library are: + +``` +// C +void* malloc(size_t size); +void* calloc(size_t size, size_t n); +void* realloc(void* p, size_t newsize); +void free(void* p); + +void* aligned_alloc(size_t alignment, size_t size); +char* strdup(const char* s); +char* strndup(const char* s, size_t n); +char* realpath(const char* fname, char* resolved_name); + + +// C++ +void operator delete(void* p); +void operator delete[](void* p); + +void* operator new(std::size_t n) noexcept(false); +void* operator new[](std::size_t n) noexcept(false); +void* operator new( std::size_t n, std::align_val_t align) noexcept(false); +void* operator new[]( std::size_t n, std::align_val_t align) noexcept(false); + +void* operator new ( std::size_t count, const std::nothrow_t& tag); +void* operator new[]( std::size_t count, const std::nothrow_t& tag); +void* operator new ( std::size_t count, std::align_val_t al, const std::nothrow_t&); +void* operator new[]( std::size_t count, std::align_val_t al, const std::nothrow_t&); + +// Posix +int posix_memalign(void** p, size_t alignment, size_t size); + +// Linux +void* memalign(size_t alignment, size_t size); +void* valloc(size_t size); +void* pvalloc(size_t size); +size_t malloc_usable_size(void *p); +void* reallocf(void* p, size_t newsize); + +// macOS +void vfree(void* p); +size_t malloc_size(const void* p); +size_t malloc_good_size(size_t size); + +// BSD +void* reallocarray( void* p, size_t count, size_t size ); +void* reallocf(void* p, size_t newsize); +void cfree(void* p); + +// NetBSD +int reallocarr(void* p, size_t count, size_t size); + +// Windows +void* _expand(void* p, size_t newsize); +size_t _msize(void* p); + +void* _malloc_dbg(size_t size, int block_type, const char* fname, int line); +void* _realloc_dbg(void* p, size_t newsize, int block_type, const char* fname, int line); +void* _calloc_dbg(size_t count, size_t size, int block_type, const char* fname, int line); +void* _expand_dbg(void* p, size_t size, int block_type, const char* fname, int line); +size_t _msize_dbg(void* p, int block_type); +void _free_dbg(void* p, int block_type); +``` + +*/ + +/*! \page bench Performance + +We tested _mimalloc_ against many other top allocators over a wide +range of benchmarks, ranging from various real world programs to +synthetic benchmarks that see how the allocator behaves under more +extreme circumstances. + +In our benchmarks, _mimalloc_ always outperforms all other leading +allocators (_jemalloc_, _tcmalloc_, _Hoard_, etc) (Jan 2021), +and usually uses less memory (up to 25% more in the worst case). +A nice property is that it does *consistently* well over the wide +range of benchmarks. + +See the [Performance](https://github.com/microsoft/mimalloc#Performance) +section in the _mimalloc_ repository for benchmark results, +or the the technical report for detailed benchmark results. 
+ +*/ diff --git a/ww/managers/mimalloc/doc/mimalloc-doxygen.css b/ww/managers/mimalloc/doc/mimalloc-doxygen.css new file mode 100644 index 00000000..b24f5643 --- /dev/null +++ b/ww/managers/mimalloc/doc/mimalloc-doxygen.css @@ -0,0 +1,49 @@ +#projectlogo img { + padding: 1ex; +} +tt, code, kbd, samp, div.memproto, div.fragment, div.line, table.memname { + font-family: Consolas, Monaco, Inconsolata, "Courier New", monospace; +} +.image img, .textblock img { + max-width: 99%; + max-height: 350px; +} +table.memname, .memname{ + font-weight: bold; +} +code { + background-color: #EEE; + padding: 0ex 0.25ex; +} +body { + margin: 1ex 1ex 0ex 1ex; + border: 1px solid black; +} +.contents table, .contents div, .contents p, .contents dl { + font-size: 16px; + line-height: 1.44; +} +body #nav-tree .label { + font-size: 14px; +} +a{ + text-decoration: underline; +} +#side-nav { + margin-left: 1ex; + border-left: 1px solid black; +} +#nav-tree { + padding-left: 1ex; +} +#nav-path { + display: none; +} +div.fragment { + background-color: #EEE; + padding: 0.25ex 0.5ex; + border-color: black; +} +#nav-sync img { + display: none; +} diff --git a/ww/managers/mimalloc/doc/mimalloc-logo-100.png b/ww/managers/mimalloc/doc/mimalloc-logo-100.png new file mode 100644 index 0000000000000000000000000000000000000000..96f08259d182513d88708361f3026a76ba074a7c GIT binary patch literal 3532 zcmd^>_dDAQ7r?cI*h1{2;w!O3?Y)x-VnwJCiB+qFM$D>O6^R<5N>!~|MN6$#*Hz;R zI&QUfb$*4g@aB+m&%Ik-FU@!cpD-3tZr@$vI{5lC2GobM1%4j(#v_%Oe~VF4Z>enCM2 z0Ra%t3V{W{V34qgpoplTsF5dlj`iiwFyNJxMsrNEL9VJRNcBGNJ< zGEh;dtSD4YR90S0R$g3QQCvYuLQz>l5hkGolY||SgsDhNOUug2%E`$|!Bi9!6qJ>f zVKA7Aii#8*A$0@+Q2`(bHEC6(jG8(WsR2c5%BpM2Y3j&n>dI^DDd^}cq79Vvjg<6_ zm5v%K8=Arl&EUosa1%?ospSzf>m%kiDi*c~1Ofm6Boc{2q14sYwY0RfwY7D0baZuf z_4M=*mUadP28M=)=H})W78aJ4mNqsvwzjr*c6JC$dxVvPsx=Qsz}5+{#i-e1)f{k0 z2WO-s4?GIvg2K3};|Lng9vXNLO}wX;tGAXLQJdhS?M~A6AnADe>3AK}{SSXMF#zoo zs7DIY^Cj#11?&4!jvk{P^$#%!3^OE07?O`0Q6i10QO4A0FMq5O(YUYB$BVMub-cvzrTM#KtNz%AcaB+2?+@g508k5h>D7eiHV7g zjg5%<~pY3Ii}}3 zX5>3%oOL=?fXQTHP8VXcSlBacTy_yI`pIqO-JeHePe4;oz?I&B=H9@T{-D-@pteCWkM^77tG9wXhJrh9Q@ZX@y6#f1 z-=ki?AJTn4r00*2p5f5m2cf+qVf~|F{bS(+x?mhg(M|(3XEJ@06(|0f=)9yHp51_-E>EAVU(P=)B-?b<*mc@<6lAzz zIwDPIZ@D91g!G;=m6NNvki)K4YS+XA1f^>^Pub_LGE5LsZxr6s@3hh0uKmb-JoGjx z>)>ZGFmi>L-1n54?rS;~GP!hKs@5s@J$C?VP569==nQ1ZpF}r6=PR2rbxnHTo$^0ADj#0} zO_TAhc=u{8$ZYq!TW``X5)+um)$hBetPIWp$9yJG0i}MH%39l2J%m0Ze94ohe)I+~ zB$}e8Yqsw?S=@vCrcog@5YKT~8qhyc@&-tn_>zaQsk9OwSKTnTaIUu}Tt^P#E?5aR z&7nw%CdZiIV!=N>@|O-}{p0arMR!ACC=y}+blh`z_fZjTcNRV^S>|xJPt`hgH5fe+ z`iqD~)i-SD5}RQ;_m-=qU*H-BC*m&UV;w^*x5ulBLzxN+tLl;!XtA_0dh~m z#Cqzt=OYdTq6y;J$3(aYRB(Zc2L@MfP@;#S~mSF90OJ!0|6#5`+Ob$I2OW@zJftf>`x14ZoI zD}Xj;s|<^7prjim+?Jx&b2C}SL?&u6u;!y%gmSV!a3)87$!#flr3cd=;K9-7n6-~e zyST4m?qe33OC_bK)ovnAR~;(?XYzRYUl;A6dw?kXVj(^_8rOTB~q z5mSZy+m@f^L;)Aimd%GtDUO$uK{4> zcs2|@$oO|;H2TC}3r?&3E!=^Qmv*oF+6L8H)5pH6o&MYyw3{yd?#Hm)W<*F^b$HF& z;d@+rBUy?Ykm*Bq6SfN+*L_xt+6Nn6{i$Q;&j4ispl__R%U-(g2>sv}iw&Ug- zn{C%tHHTh$w5thCI=VNXZrC7{y?#0dd!cj=&UrLZS@os!fPnJ3bFmVblM6zQeK>XJ zx=d=P-AZk+O)&^4m;Sup4o>}cjdZC^5E8dWiOW1ZnO!QnEXBm^v_)Gzb5*^mJ~3wh zlYHRtCa%W8=^Ntd0FKqIYHNjGgLKM4#{TY9`hz zy=nzW8dLvp2}ohE`+TQC38p(%0!3YFPj&Z+MPAU$8wLi` z>Zba@kIl`15wx_5m*(3f3up}Rw(XrEMX!mK1M1jbE0OPKi7fWZ08c8qA1YlY19TND zqez^cBQPU2`CT>8Ylf!2GT!E%RM*bBlpwL(7H=i_<7UYg84gVT;A%0&M$q)7@WrpJ zjd^zUx3H#|1#ODqBh=z~&tPrA>0vboZ`&Jq-H+s4YKrBLGJjw%ThaW&t)#fByInHR zIUbwDb42!%WE4S373u{6kN6 zk6%&bMoAm$}QU@g45^o;QDmdR2*~9cyqmR4gsCT?&PupcP&HX?wBN1qq 
zzjV-8eKu(9zJ3nq7+X;3oBY1sZ;R2Ah_2ITrXL2dl&0@ydc0Z#K`R!nO*pqv^`8$Oug{q*L1M2iA4Ox2`X1%0>3y+MgFWlEepk^C;#pp zEcqQgQJT8C79HeA4K86`7T)|NQV$g44S%*fsl#*LBE~EFfyt$3PF^UqQ&v%;ieVw1 z#voTN2z7x?@Q0{{TTehi3o) literal 0 HcmV?d00001 diff --git a/ww/managers/mimalloc/doc/mimalloc-logo.png b/ww/managers/mimalloc/doc/mimalloc-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..e0a5a8ce258c29f6dfb5d0658880a2d339b5a47b GIT binary patch literal 73097 zcmd>Fg@Uww6wHz*8(D?0!qVzgdn9fQj3B}H`0i}(nw0b z_xb%B@8)M`&iS60GiT?_y%Vpmt3gK0NDKe~nU>}QLjZt4|9eC6z!o+g`#SIo-%&+J z1psPONUm%M000lrfAmBRYzJ^~!6yzbE)E_(E)qPItdz{qwIjQb*Q*-dpaPiY{^U?AM(D4h=^9s`Q3(@fj z)A0+_^9j=nh%g9=GYE?@ibybvN-~Q}F^NhuiAyt!Ni$2xut>_YO3SfIE5PA!4h{|; z9v%S!0d^Th5fKpy2?-e)83hFeHd$qMS!K99;;!O@yUObKRn+gPXx>L?ay-!HRDZ;& zsmraY%cZ5yt*y`V$dFgZh+of`Pv4kdAN(}sH+U-W*j(TVQqagk$ka;M*izWkQpm(w z$kbZc#9H{Njfk0@h`Ft(g}u0?gP4_*xTTYXwWFA|lemqu2cPR%?Y4GLb zDf`?<*40rfK;At-!81tlMTnA5n2L(Zg9i`P)z!7Mv~+ZI^!4=(4GoQqj7&{U zkw~P4g@v+rxUx@}wYBxLXV2{I?VX*SU0q$>-QB&synK9o{QUd^0|P@sLc+qrlzbzU z{UTHXq7Xqbsv)rtf@2?q#Hxn8d=UKdL1?^sc%oWFf_h|<1}a4}Dp?biq7nUC1NB-n z`n6`v8?CrB?U$+A@#&9Vy?d1S{$WDKqgNR^NtwFIIl8ZN_21^{y)DpxTVRk@X!!1f zVMg(j%#z2MWlyrojB?72^FEp8SDF-5nHE%;pleJEYo31ij4ZA*FKsX@YcMZsF#FhK z{;}EOQ;TI~n^jGRbyd4{O}lkbjrRf46Jsv1|Nc*VJp*)aTGT;PAEI z;p?DN+mKVoh;!#?WMm`?g^G=hjgOB{N=kbD`t{qlZ`0G$Gcq!=va)h>bJ1vYadB~3 zSy_2`d1Yl~ZEbCReSLFtb6ZOTmCue46=I7^^mzTlV zr~c7=ADjVzkD>14+yCDW;PDxR19S3!5peI9!_`1sqr zum?PCoZWo{-R*tY#RWwKCG0dz!Hje1r}aSPNr2_fb+EblM()OCAwSLNqMA)i0@G`| zkCg6Gwz6%Gyh)`gqbhXUV~7tp&6K<1wzAh*oH{+h!mRK>{I>$XKKr^pY;gUR3K zoA{+vYh=e-9M>7oY*zV`MY-meyXl6kCo+c|e$PtfHgzw2hJS6?(H*3ID}IGE8a}$3 z`Z9IVerxdTRSSRlRO!@Opm9$%sJuKW93KGmRG~OP=Qs=j00-hInDWQ)f7$sVhbv6g z?N?*o!}7k%oecunQXY$SUZDe9=_36}i5WXevGbi zxrbeCZYY%o;{p;OqI1~!uefTK;JxLkqzV%UT>@Rvd%=?zgOEI#!=FrSw*m@xftR5C zv9o@*Tq&E}VE4GK34C2vi=@XQJ+g_HS;{IOI>gc=+LB>am@Nnu-T zQU8vk#F^>Rz#O!8Z%3BE6dwVt84*&F_jBrh4mtBc?~pQR6DFPV;aQvp#e8td{ zUxR!+EQ0kf#>IMT$EmGpQ84SDAeede!ln?feYMg{R@_Ib(5^Yf@YNs2{=0ZOI;XMG zy^1#s0;GTk0@_uf7`!_YL_z0SHE;s4x+D^y2dY(|UBQZSH+89c_(r{^*Arf(s`&e0 z<3mM-2Df_ESlroO)B4?+6z52s02~B#@t`ju+vIK}kFYVD^vz>r0I(Mei`oti2`DNZ zf~4~VcZ;#zCTY|z?lP@Eq4!IV zl7pj`W%Csqaas{HPutgKmPVctdXk}F^M6XKt>yIF*32xsVj{bAK;HQLCHa?cC_@q_ z^_IRp6<@vsg*SCN3jO}7FK6c-u$O8zwghxGgMV!(g+5XWbfBq3sZdqq-C)_U)p*O?P`LEaQtr&F$Ep(PZM%5P;`qIw zD-3S-j1p5uEKb)=yhdQm;BfT*JMWhk>fsTUYI_te0bGM<(D1WE!W_C);hqcI<7vDR zCIoc$@;j4R4F^{LSN2jt$K8l_7-D2jkD~H}FR%2(As*W}8MKm53faAL{fBz`L`gu$3;tSpkIUHXgjRoWx8>|7VKPOG0HC6fA(b$5Tbq;B%R7X-70o(m%%0hNB9|dq|@A znUcq|iIo*mFzP@uv9ib~+oK45d^kSM;>@#=SrTPNTaqeQ3SM>;`;rYQJ-(V~{tabY zKkz{Xs`A-ZT7fVPLCxfr z?XfYwCKUpDH_QiMineYY5MUc)y2H)_g@4$muj0eWGWAVcOanfuLg6|y^!Sx_?$<9D zvj83h6fRJ0Z42|UU3P+hfWT1`p;1XHBacswa~=@?M{QJXEow0GzINR{s0i0I28Pg9 z3}j@AE@<9yo8d>Lv7=r`t%87DKjtHN9N7t=aMRNZr7M!kGDUHwa zxuuD9b8sqPh??Kuf{@LCH01Pklmrxh|LpE*e3TeNg~%%D{WuunjuJ-Ez*87+<2Aw% z3g_SYMu^M@q>a{lq_DfBFggS@q0?xiizZQ`dc=&8og4+ba~{A1BDnA?{j&|S#Tx)I zLfG6zoyc&PiGP`S1`@xdfTIE|yFH0K^9Pcu$C;R|B~dV1jKsUnV$taib&nSjaxg>& zh2xKi1cr*_E3&q694G?n9dbcE^cv^A+4+DTc9&e3432s#jOGr&9O5YzHp(lLBA|_- zHSxw&fP36yO&eMi47xr^sMj|ilDc@Y90-L!3T6Nyr>(^aU?|wLsG@os#WD+-Gn!Y0cmM0X}*$P8cUy?H5>xmh&sSvl3!;c$=$>GtP z8Nx?lbH@9HA&w-*Ns`o5v@Zd`-e)%ssZ zSU5YEl#q~6hqbni!N9BEb0s$(1}hVKhqjGhY!&ORgT}A4z*8DZCVu?ECWzhv}L~5m@!6-vw%! 
z&1%I~A7@V-^-jqxzG^2OyiHJT3pQh2yh$IsyCYEVu%>d67+JxN+B+)3`(bU^K2Kv$ zBj;D|bDT?NKqJrR(@ix&iC_GZMBbA4`YtRsw@=UdbOiwZhKdxJKyR~h?B{7UnEfqGs&ZL znbU=;sg|GCT8e(@oDQx5>53lU(gj@+Qo|aowtBQiywJJx~J&Y_O5faOp9EA*TQR z9ha7FiV&*)5Tdb~*FMTu7X+y&Nny?^3f(0$>bk=$We)WoY=R=Nv_dP(%jSF<0l$_?hbKe$7xN{g!s#o0De z9r%0oiAzYm96j{LEPqHWzy_ACv$I|{#^*5*+%Q%(FR?z$eF%u9xsWO)gqM$nc6C3^WA zcC=}`|AM$;HL={{Gryh~881(&#`eTcV^@jDqhYeC8l}^xYwwID_BMOf(!?bsls4^d z^J0BDSXsO~t{5&NqQ0=BbpH%))`t>wwQyYsvPaE|X)L;aB%%#^(G4@%^}UZ82~4j1 zFq{)dANlsD(yUy>H9DD+afWG5_;{IsOiU_VneB-uIcmhKA;8^xx#bSC0;XL@5TG|Uc#;r^j&wbQyB7U4OHRm(6a z`pc)Y2`$m~ub;P+b{TuPi8M$k{3YCk+7fO`7GEc+#0*8jWcwtAc4dO?w^^=f-sjtGIJ zXHqx{owYLm`y(|P{ZYkv`R<#M)=R&@vKT&!aSghMqT9rQ&@sB= z+s%iSu28b=S8ZdMlj@OPI&qMyo6*CS+#%i7V^%5A>rRzJ-tO~q-oHKZAAU6%qQyYh z9;DGpLXukds@;2>RP=;czwKV$l)cEe7fnL*oESdw z)LQu5sywn%$TTC3XgQl>aQJE2Q?LN;KC=O`vq04o}B2o zl190{zc8zKg%wyQcd;y?0cJ?x6Hyf?YE-Vs0%{ zHRGa$8gUn@`Y;@QbgsQ6=NZXi?b+V17OW{IW$V+n07GQE`3^o6m08nKnBgLRglHSw zHvbYwjmNVS3!CkiaO|Vraq3nyShmY^u70ZGp;X!}BPOW|-Tm{9Pv_At)%U={&Ozqw zS);9Fg%3T1=5H{%P;`=Ht8Jsxa^!lR7E%JICaC&MuYW=0d{+CB;`4i`5lIb`=4Xvg zdwx4q1WYj5uJ(CzW~;G8j{j8@KXIDi zC`qqwf`Iqb`#bd4IF(6&t8KF19=|2R;~}K`HKLUr)gb*lsw=vEtS5D~?k;Z4dzrA$ zqXhRz#JsvJgV|AjGh0^+g*WUDhIo~zi<>`uVL`#Nbg|U4xbQ#zAxW#lZ~H9HJT5SG zH#j2?nmUSq3kD^~8lN`kIjiA>zz}|M%t`^m9p#}Ti6cKD%4{o<4O@tyY+0jqby6O? zIGE^V?J9(#s%%awkxEVF@w%o)()i=1Ee@m^K!nt|j_A*+Pak zSxzUj($kK2iw((kpD@3Qa$pF>>Pr8^l`f&QD@svlvQ4u8bhYo{+0ag@Vi66bEDrWL zFpQ?HJNHLZ<6i+gSkC=-nAkfIJ*nuflSCLoaE@92rQ*A;yo9ZhvIOdNZ!dYCnUAyc_*}*V!kiyDJI+4t#7XHH!D&Q@AHQFE3pM}YyOdow#M@|@ znfo)Z1;$pmW(7>282-nzvo^KwU({ge4|ZuJe@yZMulN*mX*#qF1uH+>`h%~L5w-F-xR$`s?4jI z5oNO~rMjGt$7YULE7gFUA?E$X&5`~YNB2(t`Eoew`L$hwsuYA|J6PBqHvf%z&T3;| zvq@wy@dXrZsGIw!q$cc^02vB2#mLM)-5#J8>=hS#4oC5}VFwJT0_!h@%!_>?5eXSZ zEOLifw>|F6Y{kNK;<5-Bug}PvBF+~sr%Atwsg@1j$DO$j7dN(~+Ew*NBETBlUU6>g zn~M&9c^TbHsv7bMTnfnP5{K;B98M(ZChEcO4lmY)e2}Q%Z%T0lO_I`?+ zG-E+<7ozMX6_?Qw0haFx;&sic`V3P=%BU>i2LzNxdgW_(bh88(mlFZbGTFM5 zWBRHtyG7W6H*zrI0TdmgF)i)pEnE{Sq)S7vPeid>1s#d&a;}79YjHQR#?tH%1oxy!91AOd*?#6X8c5e_j{nr7(zElT`W$vPHXth%n-rw{vumD z=U770`7Y~1n4g%uVD`+2y+$(74V zRi-GT%Lf1%&yp24%M4=p@~>qoLzJlz+$6?KcBtkTW$7P6fng*jh{uF=)SO{)>ks zh=if^qw-lzb@v}$pYzG}V((D^Bo++MNn{D`S-L5!`#LgUK=$6WneJ(b3UB z0ze^#Xb4OLI4whEU4I(B`5Hsyuk@iK?Bi_nX zb^$0lg3ZnE2k2)VdKHS-Eq#k|N1Li2nvI5U6)NAclR?2!>ILPZvhNvwXcZ1S&H9g2 z@@B>HpQ?41QR3_17Auh@&or2Ur(qFOk6z27ge}M!B;QPX@;=CS z0yJ}F7Izc1raw21wGbJ`!b&z@-iy`w$9H>mC6U8V%#L9Wnx~{I{xSc+g-eqLk&)M( zoMGSkPlS{OALd`%xJX2|J21Oqr-^x}N zv2#Fe7{OVKdfM=CwnhfZ{;=uRCFXc^?AMlU-MgqK?5K~I0SS1r8AjuAgDODaKe>5s`9Y-N3Femp)gbE-=L9$QSEK_h|cvPW}S0h%U1Iey4OsSR0 zynox-(}qser8j3znOK9SyxlM!lhZM4x84Z6_<*gB*`u)uAPb<#j zy(?12T1ddHM1hMZXPb5}Jbhphqjvu3>l(x+Ea-Z=`_VAu(WpaP+$fVhi?l4XbMvS3 z0$0}4z>F!Y*Z2shKApi*qyBP*nA&LI>%d>ZK}R-=z?W0jHPWb&S5!=Odj$q#iU>9x zV3~^5iQgQ-rYay1o;77t!;Koj9W13gF*&pGhINv!$0Qd_;qiPH`CCQY)D0Mw5UZYYOnkw6X+#tW;4faA40do%qMiuO3J*XW#&=L&yV_}@$ zPqv*KNRXZkK|pZK#dGgXypQ)Db>%W6GQK6q`j425ifQGm0W}GMr(U@mD??!HiD}}) z&F3j|OMeZy$wq_YO!ny1bvl5ZKs$pJ(ZroA=QKYpqmqvRYE1qecT5Ga$JDOeLl&!; zLs#zJ+&oPZl3JOI+}_8<+L~`iQoDGHy!y_Liod=p=owF-l~;U}uLO*A6}Kaksx@oZ z@hhjnja6ZIt{j%c&+3C78-VSSpDzDP!=sN;1KF;2ctYhe>bJ#*-r|f5*`3LT4ri(m zQK>?y{>|ZC_<4S3HFw5U)>!dpI>g$R=TBCT&62IFLRZI|qB9;6Dx0mF27hc&B{pFz zJz*M%hV`602I1!9?VhJ z9&Jl!;K37Dz1`%n6%0*@pgY=hcBcI_SIWW36#WZ^iBTMHh$e8`z@`!c7sO}Ngjm%I zrZizzlxO5{!}D2{)yBndPelP;>pYjI=*_)RBMoOAh(9~(==#Al`NbBYZ~&|m&RM^p z&;qWkrn}srEAmQglwHKDFWHjg~n#d-Bq9)7f;5?1$lNc=@6WpZ_Mt3 z1;f=2#;oWQ78uGiS+Buv%uG$FJ?p0bcoP?hqrW{X4r1kgKm=C9j+Z`P@>oc(e#tPi 
z;(O-kd!*-irxc>V%Sd0aK_NBxCd&k#ANW*L*2jBIg}7Ikp(`v|$=SkFCo9BXfgwA~ znW7C)4SF>Z5=20D$x$DEyCQ0#5J;xk`Z)4}nLvGNI12Wk;wC#xrNdN!v_U`({Jr83 zQiW(=85&=5oXr!I|fos9cHJo(@M; zs16?nL9L!@c@TvDX~@-p;F$As+Kx%4%b#;#t==VR@#Nb1@iCu;UuR)$e0=LT~0=iPf@J zM(rVeCA_UC8-Ddma!^n?V_Gqp95>)mfO6yZRP`a2V_Sg%(-DsRLQZQd$pBD@#g z6OAE32g}9W@^qN#lPss!0`EiKP-dVrwJPW1U?EKut$Y9vadphWI^%`eHX8p+IZi-c zV3k2ds)ktO=*5_rJS`?g%Bf!A<+vISyThmA2xDU84|z?ON7V<$(C|;w_(kd0?>B7$ zAWyR0tRhpRO>(!a6jAo;+jBXQR&HP*V}~0}6HWV$t+>8KDu4&2q*O4D5>;P)>H`Bb zu>zHXW*vvVNRObXIK;nxRaxJW+d&bq%bn!RSpsslmEt_ONw7$?5*}<>N@V}S&-R@g zkRc#nV^J#8G##S8txIvB3$>bbMr1}MYX$A&0RzHuTex+vMk3=%GX6e-dQ_DtRgv#? zAE*GPoy?=o%IMnksl2_7bXeF|%c9khE`Q+=7(m++WLb(6lL>LU-V9d1X%^iT&IbH@zQ zwfNl-9BWE&CcYcl!i4pwj3Xx&zQQn8%oC;2?bRuT(Ll{h&uVoOZu4_^rG~{I9{j4b z`@&~rDaz&;sFALWovOYOsnl8+z=KaX0X79>-B=vy-(+;FLV7fXOQYvh|79xTK@(bq zb4dYHTXKa@gFY^q!($f{_2#UYj)n3;0CTzc0D{w5Ua`Sgm+3BWX^$*wN$*!J8}5w% z+$i!Nn`5%1i`c^K&lIQhEyF$Xm7`gony(Zr_z`J|jn5xrCY6fQ%jKG7QCxKl(gyLH zV7wTuwk9~yJZnv=Gxp^&iSs18CUP@Th#<#R{>gc^sLDs#$}s0tzoqR?j+W#~oFaQh zuK;|o`TY2Ofyi;I z3+<*uJY;iAii>JlKT%Q7^bi{CXc);Z&N6@JJn2{$yiRr$BJM z3-HwOp!xJzd~KjhBdaI_5cdz!ZErz}vEb>8)sIDjq-a*I#0xOHy~e)C=j8BN`Bbz= zvcf5F%J$D}2sD#@bglh`fHoFGGSw=G3d-S5os>T1>4lUPD zd}QxtE(yS%@XHFVMr6{mi(+^mb5H{Ifa$Ry>ep=nEr-t>k(eL}8f^P#2HLLQHSs5r z;eD>~Y8C9cS7G8U6P6t)Lu(O<J!lGn^2H=CFtU4rf>Qss4&t-pi0lF z35#MlL+OL0i5i~QDu4!266tBj`1s`HLh#nHQ^rlv9>!y_qU&NvjccHZ6D6C&ZM!3z zK#nW@z{%QU+$7p1ftZB&@aXVxUyz%hmuKnRw2`WPFt+ufQ>87}S&s1PJD6g1SG=gY z0+^+t=>3LWZk``x{Mn5*Ye$g7Jsyvpshf%YhZf(D({}%4Q8}33!BF^08G)8m5OO}2!AuhL%KUzfa+H&nXD;lhNGn3kLQ0=H~J4v z(C;0A;e6m9q4Mc}MGC(xHHXL5E1VT}oZO*kLI{-azd)+Y+@D%62_v&9Lfl5d#n=YP(2HE09yyKiT{h6OPRS+e0#xbF<{1r}aJLbt0=*2ri@Ye0O zB}_4@^*BLqPra^e5T$V=Hy;~F&cIwLo9#-o6pH1W@;k#?r~jC$qVD-phw$T#i#sUT zg+RT{xHfm8fGnfOo7$t1A0G6hUr;6x-gl#^bvmayNDjHQFRB3prym>e1hR%Tq03&j zIxqZK1kcCzDQ+V69t$~G>3a@WHyJ4vrGFNakwxWyY6$Kv>)4E{a!ds?g{duVC^F`} zkzy?OdC!i(mo^lWbLGyXWO6hpJ2WM6OpX%p5nsIf7eE^C$|!XF%ko%oJo`Bk5Nq6@ z{O1o$(d`Teu}U#ySuAxIaB}*rv&EAhsIzL&;@F02eYGoyE~*~eUWT#OUjsISFj;6p z?TBS_^iE>1U)=FL=hwR7Q)dm68?wqQmLFYkbNe%?0+tLXCS}JJ!C&K5^u=5fZOm#; zjl1K??gd6)@MNS=OuKu_vThq$Km@&a0ZQ@H;XhtN?4qyOuPnXcQNbQKQbsPtR@Lbe z2V*V7X^c1WOS3-8qd(GOqk8<>#oKrS9nHNZzkz%x5-`;q~OaJKb5_AL=Ab6fWU760ZD|+wBFf3;gq-i;8Y@kPyha=!)1{ zwNr1ddYr7wVCuNvd9lVezBMbtOos^8(ETH>H)2Kx_|QDJLgbs;6VLhNI=N|kj4ZC8 zww--<>2*m~2+jNxFGM0>%Hj9YkK>Jc%NQqp_I7z3{NpQKv{!-3np}(xT;6*72T{>F zf7>tsmX~-nmu9^mLEUl<9d=1!HfCw^`+v=U!t{l$Y+AKoKC%vk=sLXc*x`sOA$_vF!3@yJTFyUOf<3l z$ROxZs9MlZS9#wF(`)}a;^`b!MAKpqOHq3kWsn|D#A(m%YsKcDMmdvV2?w+F6)b$N z=D`l`Xrrd}97;^-&b$ax|7Nkq37*l;N;JW>(=vS{3N0pQY>YyoCPQ5<} z%-Wfx%9J*GAe}Ah8pz9Or)DTA~g|zv%A9lm)y{d8TDrPNDr06XRAV*<{@Sn##Yi2 zHDZFdc&!3Qw$`JuMHvgoss}<&( zW+qq>U&n@xaqou6+IWy`TnRPj3Yss}?TEYPcYk<4hzy!rVrDibUn5!31G zaQWv;`WGBDj#zNbHze}SPXg(v(_k|z(>+k0Kf-JAI4jOmK|q};?e_x)0D^$7CVZWC zS|SqbaX=QkXz#;j1A3ecHaLQAfr#iFo`T_;7v$MtA}^|D+o>GH*^H}w!Gha)PIuQl zhZ}EMGZF4HV_CEvsxqgWAu@H9X1t9YlSQ3cq%T^@Y45WDo@rvPw!JJ;L(*eUUFT$t z_GL7D$ObQj}Iwz7fF0QiA2zW9M!DKy#4UTt3L2aL&^H$EE6uE)*S%XXTh>36Vq8P9& z9qlT28&!&hv)JpQ-7aURZljF%XGbMb*=|gTmv4>%@*a8&4p$Yg;4i5v#Up2An~TOY zjJ096kV-B0{U#Y6knHXIdY9ew98f4JGH0v&yeqs?PT@pQ&7`CH?~NN>?hog@mm^Gs z05b&sh;!Cu&IVz-sx-oDDV7!MnP4HO9xA7fUBCs%eNHSI(lyRok2A=rYWIb0*5SzS zF6zUucBi@IQBg=9gb--72z~G7T{{2q$Q`xYYI$AGHmYqY6cjts&^-CdQzr<}XqAnC zaZ)kN*-V3C_$vRL6xu`GTCoq@kM5JhaHW4u;xtr`u9a?zuy9?lxye1s2t^qmAxA|} zy*n+8)zyp;C{9k22#SJy)K*r{&f|?m>%tT^Ajy(N)oE`2x&4ql5h>s<2End=vi$zq z+n?Q{c2^kvPEJFD6z%3+LF2muz$3=tDB}k`Zmrt;hbB%&)s)wW0B;EaLHA|>X=UiL zPmUSNk>zX8%hPL;R&px$5swmyYlPD@3?pQm0+tKOy>uAgd()dDx!kk0n38CHMsm&x 
zL6p!h-x5;LgeG8B8uPQ3TcPY;W&;bSdF6sNVi(aANlVjMnED@nNaEv9MoBV(2_rq- zs8~M}&Ith*`wC*&R!$2@X(i>iH#BpJ-yPWcu^MDN*(Y1#$FNmhumhtmGK`5)SN z08P6-tfA#bN7G4iA(fO17S`i7m=mQUqjpY^&LR7TpQDKo9wA3cGg^wQSB_q-enPcq zKRz@%5MB3k8f6I&SuE*mI6953QGqTKJN9F3E0JOSYNgeZRrzL)<3k36RgcPyph{2m zNa8a)!oW(+Gp=^`xY@X}Y4z>uG}CA*$LUUtnB?uf%G3G+BPhj;7-|G6>w6~<$nf|Y zsW?-fe%iDWq3tc$#_aHGc=WjyiEPZlq*2|=44T}phU=v_G3c4Pj|W;Io1M?9*rm}sTf#p$o`<)C7a5)i>8oRrV) z>fP78?hvgYr;(&{@u2RhmVSiq28HP?bHT&KBt!r zYj+*a6upT(nh_yh=1d(t-Zbv-Gr5<;cR?qagf>0msl0J3)qzTl7p}y-0dL+tx@Nry z;9FY??2=eQ-xD}ZL<`qtTB*MWwRZfxd$)0C0RUe33g;^>@m9{P7H*ayCi}7Wf05^m z1fA|#sDss$2G^{OI^8iQ9AN%;h(Ktk5pVc=)jJ7ysU0zR3`OghSRoTYrS@#eX~#6obL(?$OmZC@r{+Xs zF*65J)HuPLlo)~29eUq>X?7rE7o&TTSaDR~GQ^NeDZ^CfxPcV|X`CrBbS7g6oBB@< z_+Tu+AD;&n`G=4HeQ%w`HEUqkfBIu9g&VJ7>@Mn*G?)8avJDUDv=~xU&VOMP<(}ag zTvb8#^!T^wrU(iV2UE1|ABmq=c#kCmfW13mk&5fD{z&MxxceLW)Xl%kE#k96!)COi zVPSUtYXR53MgMu-uv_J#pn_6fEoGH+w2G!-hk4=ZEDe{*8HNbvWDvNv!d6oNRaA?5 zY(5dv+SOJ6go?<{+assUzotux8FMU15A8U>Wb4!x{RTJwI6 z5Rkz!p`P!zX&w@$Qnx7>^yZUsn~F?L(gOVIFCmip)1j=;I2$WqzPaVe5jt7z!^u=wM&aGi>43!pu@_Lb;j$>KxKs;h%)yuFJ5n#@6N+p{j$wS}i9236i1lmLjF; zy*#9TH6kdb&$j%bx{%m0ynvTq@6hhHYo2Ysc+{b?YU_{OrbTssT~7l&*_$I`0j<$vr-&Gk7tkO7|&Va>h%g zL`SJe`rwNYO7B;V%D-Yi^AMs=m;0_%7g24NvUBpHtaUaUPU{KOkg$fB0|_QURcys~ zqfMNzz04kzX4Q_@?Q+%18T!XDcbWW>XZ>vubBKTlOW9JZ@VJ|w0Jt-rjEf}sP*md^ zwbv_lSMK`wHv>b1Ma5zjY*wThX`Jmy?uXaOY;w`>tngaYs-`zM?#bPvv$+~A@KOXZ zzEms1shbd8{haRS`X5&tZ&?~J_>q~%MOpBR&_Qn>M@O*LZkpI z^`73UVcSjl4d3x4tF7@|jkiy+&UASr7b@SQ@crPArQbkg9R{`>kisXr?`7e#J?b?o#4 z;pum~`rrM}kT5AhS)Ap-_3s{&rY!@n4QeWsLej(fvj=u1Njc{)&eDjC8&C(ZThpNM z)QzEV!U*YseyW8@Eu_;QBY(Y&#i8YTmubCUo(#mnoX9WtP;8${-w#0Oa)Ai5Fwelk z+uENYNVx&qKPC;QdRa5kKD)@{6t@g#x1%LPZs}i?nCPn|QCz*>g;ap&!*Al%sG*ZO zC8UvYTDC+R#PjJ;^@7=XoSTB9s)@eJi`txUa6}(c6YEN%4)AaPJa3<+bcNw|y1sF7 z7+CXsV5vw6yj#PRhCy&;$@A&0?JEMd8yM>cy8d2nwr|M>Kt!6F*$XFypS`g< zh*KErM!uU|DOKw=4lo95SP98I$u|_7sfjy)wLMX6zYqac0zSZJA8Wc8s_$f@=HVeR z;n6kpCg6cYO}&X&KB}r4N`hz-Z|JQ%Q=7ixjj?$Q3}Dx>z1rxfpC9iRjyssq4nn(z zq^p7%72F_plDqvgSCLsg($9p+_b7hf$3>Cqh6j8Y{$F!@wtPsKhH1CiL78~(pyLA+VaQD*wPmG+#CIxsH z{@GoRS_<^?-2Feq@LNhuA?w8@4iHiH1H4(A&qNXuriEjW#E04`O%!y?`k=%H08pl9 zY=}44gai*HN?@N;;O2(0TIx8!pLj_Er|X}wF8uJv(3BS*u!}kHPlEyg?q^)-?7^w~ zGaZ=Mh``tx7JdGNY&4%70PJ4w*|QHKtgI*4T0O$bxl zeZh7Q6n2;p0LBt&N0ILaS8j!39&jXsak}_6>&VkW&_V znb5(KnurTI+%O6O0VK&w^aWz9ucAq;wp5VmJk16t*wShf{|v~Sy_y?1!0c6 z$+;&d;rdGxW2bf-R;Lt6tA%|DL@5;Ejg76|dS?C89mZ^rO&&LXLDy~$BhBXI7I)aP zI)}n*9{v$((wGD&L3Xy3YM00f~?v{)eL2 z@JD7X6B|~PpWI!yS-<_kHll`4c0F9md4&4TF5d|jb4b`~_1w9z8V_j~qDeQJXv_;a z3)Z~rSz*U&te|sW_RY7s;Hy_5(|89w&my7s|=-g29N-4KGqWT#xTCNIS z3OqJ!qVCWM=U;{?9^E>X^JiH`kF&80)LPv>8%>_FtPYJQqPRY1GfB785l}+j*?f8J z=ioId6)b4I4Ktkpu}s1uEhph$o3kv5s8+X^iaKbxC4&z2c=t0cV}#H#LJrTDK2}@z zQsKSF;+PLI@4Hp<{1u|P#1-kMdlnx5)wdE)`jq%*E?Jqwa-*{<%kLV*EsC?+xTW=u|4(8fj4%lAFWQtE}uO;TBZql0Gx9s zY!3F=-=k5Z`3?`!+T;pDLqwXh5GKPrRX>gXkFBo`h_ZPCeqZwd!J|dW1L+cw2GK(r z=}rLw>F)Xo51=$kN=cW5bfZX#GziktU2-(yx8SE9@Av(CH@iDCJ3BKwGtX=dUPI>{ z4?ls)1c(0FrFnIewLl}m&)s)0mnWM15%hSj39hMJzJaNMBXFB%;CORI{*M3&-Ztvg!ALf+eyyFKVvp^~{5tm?B{i?lc)T@g+ z`H!##@)((%1_pm_z1m1H8ox|-qWWEh8n9r?S|$jy9y1cX4cM$J)M5 zwEs`v_Gvtxn3alL+dGzPJA{L4`%V5er>1PHGDzc49HGJKvo=iR1ll zsj+Q8G#^$bsjkHqN$M$;y}2sZb-_POd^{OvL*Z53Gz({iJ^9vYu(otjbt( z7u!k+3$OIIy$RdKtpcX3n{R&}?C&$WhaxfiUmO*Jdy;UoPV)!iC_SmR-HZx;3)|4$B4dd!Jn*O0UIvR>ISmt7Y@U}*W#I!E%&R55$+O};cu4v9ftYUJks@*{FI>iGIF0o$dxu@GF3RVe}^Dbi)u0y3+eh?W}S7g0jzAE+{5M< z)1dU(b7}qVT=_S%wp$;DVizVd%jw^&76^~u?@kQpT@59_XI#>zwURV}T7mQFB+rCj zliiA1t;$xx6k$8QUQIy~OkHtVKq4!a_;8n-XFyfuk}si(BQ{Io#r?o=AFKwk4LaK8 
zc?~6^Ml~0;T-K`NJ{P~QFVy++9mI1u+JA z?e6ORpTbvu*F@bwmX4Gw0SO3vx%q-^6o0LuM@4hpS7|%v>hIG%ihhAlP1#-jF{$HI z;Yr^Tt&W&}piFFwaWsaw7}kUObZ8wtQ~Bf;6s(E%>yM*7uhw*Y5Voe~H>8qqL!C+gcIoIRE#Hbu z%NDPF?v``W8&OZFirIwghF%S>n9tg++rWC3>t#X6O6zc)g^`v`!SJ>ce&cnu-jeyP z_y7D*AbYDeZ%Zpp6AYc`Jj3F+QIhTwhTUHfcaf`Oh*qU(<7JAtq@VleMVc(Ctdj)Q z83%{{mKw?Ir%7B~2tir7*8zoJ4(NQYw+S8P?rTX(yO>%>2ypN=>oxz-X(P*xslc%2 zOlx8(QDB~5mhg0=hJvTZZ+0)4)NE=__%aRL_!K#|W>V|B#59p1{K6jlNA)ay)HT+V zUM@ZLm85I38ogcB3NG~rR&I~gHrEo~5YKB)t)t{tHh(5z?ea17cL7w4szWr zcPjY2=uV#GplFDW^_J>qmakVc`Ldrd)*I?sg2Kq%v+fbmHtXE z^XDCJV*P^~!}-(c zScWFYb9k;B>;cJJ$?|WU)PJ29?!8j{y6lU5{z`+`lPCM}+U4$dJrt+) z^4}0rW6od@_FtGzd3?$582&vpaesWHFS#*?N(ED{u5F{T#Qv^Z!N^6D1Vt1SD5Q=b zmUGmSimw-y$NK0<&n-Rars~+U8E9)$mM!gDG1MorZ$J1B|6Z1!6O|i>oKcdGVa2`j z7^j*Y?Os2-n^p}L8XZX-VICo|uY;fImSV+DzC7N^P~KTm?`rbE4HQu(bxjZ0qy_AJ z(=TUw@@!-$uh7=`)#hQ(hMTbv#f-Pa_v&Md#H+i36VaoRY@=HpUQ0)-KC+WnGn!N; zg5=vMA0Pyv1`q!vMy#~qDN<<2D6uD6!gfO$<0HigSps4w$i4E_9{l;*v7UWvb3j2- z{u{O1VGV9ggIJHvmZ>(L`5F$8nm!5G6wh0VD~9d%CV$9DCCt5QG9+V(qv~R6Z{y#@ z_pWR3dlG)uzlMoSI0UjDiMnAj2Q0k$#t0<~%o=6MGaTp^UliYYP3H9@Rw8*UNVZAg ztL42Vl=(&`O`%5D;qv#BGKa7vyPq0vKX8cF(}}6oBrCQHov0@-Z7`z%2Dc|cMc1Nh zF}-`h2~3voN3M^g*aE}&Iw)T-edh1_k-p!q%RSzgQ#HZv&?#)V(0KP^+L%ZTM|p>Z z!H+i<8Pjc4d=MS4TD6|d;gzYfEkcyy{eQhfHU&Sa>B5uK`*Oa^^&xF?BLWtq+M@fP z+wNe6Fx?y1XC0=dpY`9|DDUlmqA;?u$r+b+RO^I;#WuOiS(@JOP|%eY@7sUDvh+p* z!`OpDk^0F+r`Aso72`raKiw*F@qbPIVhrxp#_yEcyF>~pMENf1o6!xvzy4Gs>w`y0 z+D z-0oiP<7#VTQ<5_wC@N)ovsTn7RxEa0sf#*U)?yoi3j#s%?g&xe{_f3Cr^nsdc1BC< z!jx~D=H2|a+-1jC40aiRY=g@@ub9S=NqjcHodot2DT+WN(m}w}_s+<-Icj-rDcMX; zf%F3(x~q?vm8W_N zHgzB#sYCkN^kfdgAKEu=m}ZOndOO%~kyVqQIX#J$nhW~8L}Ew3>~OjV>|u|_jAEue z8Du%|U59*?wr=13nHwE?C*?aRv-N4UjkNjq)y}GLPS)qPYlxK`j(A@ndQ7h z^Yq(gmW`l~5^LoXmAH$-W*6J9t3~D2{8MTe=lNDZQ8#0=?G`7Lkkvk0F5m^IYOJRI z@L8%_k$!4_sMa9^gQ>l%H`O*+o>a)u(aV#K8-sYCO;1BAD8RR)-{^^dcoId>nn>wl zB9msAy-{V8l!UZUT-(wbubqHiPCqS$kd|z>Ho0EsWo?Y2;0XIElFA8o zC-HXzRan#dte9x7RS-{y;dKjBrqRs8h<}Q^jW80H>NeH)9&|W$jcen{#Y7M{Qln;h z_Y=GxOfXDptu*IR4`&WEHrvxuH0zIEYEQh|jT*isWS+q&j@x({`Ihc=o!AyG9oy+z z$Ga5?4G(URv}Dk!(wEn=tZl6D+68pPbYV+5?`)18 zM%g2tQMqxwryj0Nt26q_QvE!Dwa>0kw=zDdSW|S3AyjS$vK@3Cq`xG!$3hRv%GUByVux@Ts_``y{q~{-l0-ZaRH6NL zJp3gS0qxQsY#+R0fBf|!rq3Z|OsqxbLhfq?Pm2DK7To%nt&22E4kUaCK`A*PT$7Hi zxMolf?T4zFH=PA#i;ppLODR9P*M}zM+VNW6WwmEppq(NmK(N~(D_>Yg;eG!98S1wB4cN@%PccsFCy@fErkOaLhD~|J%m}# zjJleTaJpv_R|lWZ^bI44=aKM&6qBYmHpyBOZm<;3 zbyqv%pKdPB6hDY19xQneRznNzi>{1nyOi+8%EpBLA?r8SFe31fZb&jG-Zjl(4Ddng z>p1*NqX6Y)Rz0;uW=4th zlcF9W{K+hQ+DJL2s=kUb_DJb^_<~BS(KVV#Tx5%Mf6QKu!JQujP>*bV;!H|3*`aWF z?wHWc4Kh*Kn#sC8*@Jg4k++dZm|kYq$M-fL{a_f#r+N21FE0G+S;4S0Q~`lPKU%&p ze)$&#Br5g^ulc@+_p_3QL9C^=@#_u z$qwq?;B{oSFQSEu>cDOO(3JbIo%j4cK6a7Zy#imQx_uw4 zxoW;&2i>S*hK2R@KbGGrs4;dEhkZC`7vCA>3xu0Bt{MpH%wM8mc;tf^ljueYB?o1aB!a$b zSb~9YB-N|SrEhLnWZuq@yaRbqi%Lz}4QDg}^qvt4jjVx9bi}o?>saD2Xz1dt(_Fi( zS!wkQ?nA0<#&g7t%*O8OH)_h%zz0=YZaB;1b)!02?OWRj=0p*quV{~7tqBXuK zgOC9tJ?s4&gZD=6Vz@|~fHKe5QDASbtSoU5WT0R_cJ0fT?&vq zWkS|xvNYxlQF_RHktADfWUbm6w7XjWHu>XTUDnS3;LE<-4sqC#8Kp3wfBv}`)5*vQ z`lrRfa7@ciS<^<+(r-jBZHjv^XR@^QrRE5uBJor%+!wh{<)Td#{2i(o+lRnw{DRtF zuG%Z_5>6stmQo&)r<)1R`YaE|Zq@`W z_I|0U81*EDz~AClSz_@(re%5MVsI-33-80!deidMIK{BOoHm&Y!wjLF6oZv}SpvZWku)$pka2cF5NE z8zrUjX0ZN%7o@kUb`50@cM#eEzSBNJ)t7^Y5HVODS)bpgoy7P>)1+2E`8Z;WA!cK?8K{m);NG)ij+-2J~t7MF@t zCQxWpB}5mI?gdjp-&mXLUR&T%L9gSO4FzDa^}PXOy%4@Y<+H_0z1F80zs9Q9YpXBTmMFPdxQ7U$B@pN!3{a)XI|b9iVAjS z-yc+hJLZ*LuE@FN%6-i0vah!v!W<3j-}6L=R>%GC?rab3?O>C-P9U>sIxNSn1 zf8;hK-)GIH=08GNJ=*Q8TZVt*o_ALrmu20|r-mcH!`Hr5UiD04>Lr}(#FU`!*{NmU zAMc*@3n_)yh$p0v^ol1l+{JWmG^$?;TG6Y#U)b#KJTe 
zddx_7QoOopp_|E?M8ea4`LL<}aj5L%?VAp~q0iuEt;aZ%@<=J=z`i4{a;7`uUsfBL zaJy_2V1|Uu<|MNAmq9lTiQS=wxMsI{L!8RwWFOA`@$FnkQPXoPOs+qNnk@U?KRQf9D z(Lp!XzWcA-hNjGtUpR##UmC3+wJgp|DE#okhh6q9Hl`_SH8bb=0JnQTN5ro-s)k|E z5>z>vZq&U_FOu;MlD`Gryg5xOB@rR@NVCB)mO`P*hkFR@wFoTc5bI_@-*^^}+U+s5 zl%W@txhy-xgS%4h?g6`zn~5p)u|!7;A`f9)cQrokitf=HG18d&*|B#;d*hYa7fpph%>%2Iup z2%$H2(kofrw9a%U0X8ohiX>Oxf4dN2{_}`ueYYtEsrT(2pyWJBla%F)P27J9w1^0|uPVlbZG zGzK-tXrdG*K6Xg#O|9Gy*V-wb4g0Be1#0-}li`oJRwrEU4HBgzftR?vd6nb+1Stn`^Z$Ww1X_V5ry*dT2$5 zgkkBJ86SD={@dJMJs?u)MV3u&NEcsO-VA>6s#$HI=~y>uOn-s+ zg`2zc95C;Pau(u+*X7^7l@@}AQR=yK)q9Qj{bYnlYWYqNp||oT5zFYOx1L+_5nsdASHIBW=nhi@+#HhJZ~+H~P7Dx3M3z_KL6{G}gsrWC?5E^`fh% zBjb!R+^cYKx{SRzC+pep@ju^A1@qHE z8cvTqt#)Gs>Y)0UyWu*%7*t0VqgC2p4bR-9oHiG}AptwWx%T2n5sB;R+8TvI=6c*X z7%3Z8N34EENx3#o7$uAcVK@`GOU3yR3yQ$v8GM;>tYz0VQ(Gf?na$==rKQJ?Ad6sy zofzz5lrPF-?b|BeO;_yQ^@)l?D9QXR2P*LsColT8g zWO`b$*Znxjt>+yLim)QUOQP^2ZG>!mDE_=7HB@x}$?=1Ny%}T7Xq6oQ@D;~n?vKj4 zyLR_s8@V^Djkg^%<|`825^OZ&=f-A4$v-*b8RIaVhFdWGn#LQ4bT5Po zI>g+VbK@yUf1(j-09$&XtfiUvdC%0b6i5G!!T*vnudv?bTh%0}XWM&vSMS)bI0IG7 zE5vItdTKNaS3X)Ci7{vpWjJkrMBT_G9N}>a328yx{jOPWvM_LKJJwf#%&{?8@1QJ5 zsb8-YN+L<~IBFT7h7JQ|lx;m(1f+V4Y0KaRS*`LcPZxNf&>s5RXqblS22HLqWMs`> z;&c3nsøj5eMx%A-x0o}wB{#0h|KrQCGXO2$SXqtSipCd2t;k6#Hxc9>f|{jF#7y564~bPNY}76`)DtCnWVACOnjB} zXW8@ToMQNg>uY_FaeQ2E9yo^%j@pv6Fu+o}s4i-!r9>;0GfOf+sDE`+)n1oLtoytdvNt}Dthc*_txbQch{FDgN(8;IG@dZG?Wm?(XuNC>NlrOZTk}d3 z(`QK2gLyWOAFjXTKB6quhu@d-_^CMaD$)I`9MAU+abQB=qmIjA@|ABTXrR2M6GG11 zu3jp8t~KQ1&vphkGyXtmluOutK#%Xu%+wbx3ghI*Ed@cq!P5*v*{_l6$W0YfMZx>V z^PJTB)34&-nh`hNU(1dTxN1$BKjfzF`E73}Hl8b@w`@#6JkRpxB|o^?+9Ca@l^bRZ zA;Z|NQML0Isp@*UppGwhvGirr)=Nf~gx=nh&l|mScra3xWx?`p=@t;MGKs~os`}pg z2TR4b{NQ%jN!bHO@ny_ze)2Q@Ppskx_C4b6uW7hNk6@N0rr$r-fgzfDTOA)nUW{bL zQiLJ;zrA={=xWfNt&6DD3Xr}|Yq)!PX~~SvWa0UV^~kF+&U_)U>mAn+16mI0&=Qjd z=0$oiYP`y>wYYsU?Xhy}tvrnF2H%Gy`2rI*-$7Bp zR@%y-`$^;a6m7M2^YIRT(pS!0IT&`9!{*3!k$isLG@gnN2u?(tN^LyEO@8V!5R)^c zUd|pN3RJTj6luGA&9r$!y0lVTHU@sPVd7D@gtwaG*zFEW!Hvj5E6)ybj5R)q=WsJt zj)Ob>JjF6+qa>X3HLG-qxm3hs;6!{`zC%)C!5`4%@t%B zB_U>OyjtM?rRxxBPWeM0Q=LOqq|E^%^CfXDd%jHY55(PeS-yNF9 z(jtpNrew|nSVi{;dHnv+L@t7&CvW}3*Hz1|3UZu{I^L-utK1>)HBKsfTZkiIa}Y{!`l{D`Dfx56dSI95Y-KN3M zCl|xcpLR_76q5JuE%7V8rgHQ@ylAKyZk6V)^7d(k+7&ra5u3mruE5R?dV3^g@;jQ zt0zkzYP51?@qjl(qqWMqbV|qq7fBYg$5Dr^rr7wh=mxS{sX2Omv*Q~^jmTo5{lNXr zVTVfQ6t=H&-7U6WJ)L~E2fMf@r}BUC;nm9euG2wIC58^kfpc8$#+y(l0+nw+_Wp&y z>Lia@XN8{MQkiM0>H+OHxUAl=M=35E&YHEUoY#xo2~+%~Qy6>&k& zjKw=k>)uOn)xq6oxx@PLZ8NtMtZSVBD+GI|3^W6C6~IbMsxkpEwbiYzZy4aRm*?0K z^<*Ycd-ioSRROb!j*S7LBw?vzhrzqO0iOin;L~<#=!+{0`R!7IMiz&#z{?HxQA96^ zzY-y-dB2Tagp^hjtp_T)mP#n&6707JzHiKdPMIH8A7f5&w|`A#_6s*5Na+?(ONuqn zf(6O)+y65~KJQ(;^}RNuE|hX*q?`ADsR=((PoN&$R9~5eT8*2MPZ=PGfWWy5m#3SM zC!)i3Y>Vl8+kvJV`|!^iQT8Y{)^NGTAf^CYzmiQUMT;+VG?8`H2sAGhad^J8vTBAoZ15HeUzdH?$jyeG zRKx&1BXaIA^Xf{<+cxInbH6mjvOtnGZ<^LRdphPxt!A<>L}aziKOS$eC-BpvHu5Vj za#D@_-lb6OWQ!uhYK4Wcgj+G*zSdh@@2&&QOHMV`;t;-RC^byQ3~k;TkJl8N(_l;F zG?RmEIBpW8m53tq`A{!;37u7yx1CtbI zAj*0pISTItez-DTOx0k#yVYH@cJxEv&7qakeSdx&r#Lw?7|wrblGz*#x!v$7z(FrC zYRA>otL(-uob`a5v;aG+U7n_n@m@yVx@$nv%xH;r&VPfXFioww1zV|o%xTHWO=X#|h%!bDmPq9mXiC-{L{QT75`6D99;3+@ z(v`ewsJ z|MjHl6psD@$({vV_EC2eFRfq;Y`0&e#;3}6tyEz$9+-3DI1gySGggvan%Nb$R6t-q zaIHis#ut*bwuROuS@gbS z2bHTaPhn3LABM*}35hrASeVx^Sj8#M6O|-dA7463F@<{AQ!mQclaZ$-&_v#aNSBfZ z+Rb+dXks%r{fkhMHT6$pe{wGDV|-)*+^~zXJ#Nwk4d$PR9!n>QSZmRnV9JFf`AyY&VSZp8sbOp}Jc^0h-H3^dxVtl6GdEE#x2`czH5| zW`;Zgfl^6!9xWC67<>`q2kR*NP0kjc@FKq>0aoPVt#O&Fc?@8Ynt!`RT|K#0x9<#x zazf2QR$8?Y@66mRmr3TVDq&OFi24;d!FHh&JcL8&;;nv3>rbp*@yyWGMX!N&>#Z>a 
zvI~9~ukEZKws>js`Qez*`lz+H-$r}OYKav!)P+kv+GQWU3BSw|@dPm&XInFyUcvg| zt{*$JUI~Kty`TIuP|J}_D`{69aa(YA%En5TPDfP@G}?#TV4sajuTfZau62G6M-bR~ zepe{}_n!xQYb15X(IuW?+4J{SpTYSh!ziv|l)+L8N9@yJxJdR0dCpPSFsp>s4<64g z%}91v?g076BY&cat=NMn`F%Uhfz52r-I@S_q^HGm3tn@$uI679%?6E4+TkLvz6e#_ zTum32m6X0E39IO-%Pw&BSo3AV`{-aIOXk4yQ@i_n&De>I0S^-QM^mG4W_4P8lc-k@UjTfMs*>ee(NlZ7# z_9_mD#X<|315YO0CJ-W><&)TF5M`-v7IBGP7s;RYq!4`g9FF+{5(r%S!T``c(W>vI z1F%GK$J9xlX>1Au(^`U%U`&$D%>U+QPf@U;L(;#uFv6N@;0=Kz4UDhK!FYZ_F~=mS zw|i;E5uIU*1xxv7d-9!Mkp&z9HKqO##DCA`^>1Lah=g;B>KlDBgmQZnVz!2(kV&M? z*=VBA8BoK_uiauF9-c}B3fVkE)*s}jI;}4)mHiJMcpnBDgK^w~@E+1GyJj98g%=YI zr6L3+_TH$PZm4M9PV)H+!x6hCcoic#<85=I^mxzwW!^V7ohh9cebBQ{vqIZL-EZw8WcRLSI zLIp>%!&0unQV4n}l6Yp62+CcEu%;LfJu{BEC~D7RLdE~yo+j?g83^KY=%^r@Zlp6N zlKujMz!%>BKe!Yw`BW9}hJndvW+LRFV_Bd;E$@ueHFN;i4ZSV0+vxz2wr<^wN!8Es zoK~%9Sdh)%OM*|Nmo~qXfN9J}v41Vp%f~`vhOfEA4}B>uvLMJt=tsGt#?AsR@(Ub^ zfTe&pBwjvGnm>4D`D--iQbiQwQ_t@B1(0Miz|W2*QxqxD@K*JcWMO?2 z8ZSfJGKgxrqa}~i;!!nbQP@4jn`CI~-|lP1Z>N&t~0@V3Hwz^QMyJi9O8sVu6;Zmfv5g zD}GqxIuS-t;fHqu%^5)T

!%kaPR9P3?F>D)&Dt=g&k|T*mVc2Ow3CW+rNH=%0tt z>aq*pG;JTffFaUm$KFWj&erR*NrA=-8jj(jNTOUt3^vKwI}j?!ENZvUZu1rzM9bJm zyK-09;G?-He0P_7ct)TlkuH%#EJm%Ay=ffQHcmY05^^(g;t&6GU zF9Z~UseTFPN5}FG+B1OIM-5rZInE0puLMqj0USvJN4j>5pUOqn`_3482xV?|S+2(F zvL*RnOxy82U(HEvV)%;x?AmrlX-OTFCyT)N&SVP#UUh-5Eoe|GPR&biPU@viDl%4l zC>eSTND%$ALaNAv=n^Bm1n$?jpzc0v29 z9%G-<|BID0f%W#pEUIPnw%biVyO}vdt~iAvAm-o4=k0Tx@5kfWXMbAvY|uwda=g$< zL;E{a^<4tXHm;~u!9Kzx)Jr^PI`j_91v%tj8edSQh(${dhAPg^2U0e6q6DO(c6;$Q zY0+fDjP3I zUbWza`M`Z>Hm<*YGncD;95m31J;k=D7TIq3{re>l`2!sKb$FCetG-|>kExBULMsu+ z|ML>bN!=}qk-I4h^ZUIQaa8y_%!p%%gGYP}UN6y*qWmI;P3#6>)Nl39A=;q|b}Lk2 ze*QUWV?K&hD#~-n(R=~4@BDKLm3vvicD?E7#N~nV7I~C8C{U<#sHl|k+{aSQ`4U;h zH|-y8D3{;hM1XkW1IK)Dipt|h4v$$_)H663?CqPdrr4H}!X#A`;7lI~PIhC@cKQ8-OmAil%*9tYWHA(B_yMd>x{9>t{U&AN6 z@(xmh>fvd>%{$||Iepgd?%ni%)mA&!GDJb=#Xz7!vKprnDT^JvZ`aHLrdnQOypB=wb zE62pI-G!U%GDA1*7j9Lc{9%Sd*p%R}+|C7t4vlbmQFVDAxmTAdHnX`Lr-R{O@?S=B zRgv?v=(4wDPCSh)Z1Rm3lgh}#7AQmb3lXwFwL__}Z2SVhk=nsLD)`%*mBCK%c$4UF z8a_>UznZgjja1~3P5@y6t-Ml;Rt*S_kfHp&2=j=ZM38Du7K39+6UNld>M|)lY+zlW zJD$JoZ1JLe_>4u|PGq&lG<4^cRleebl|7r_bCd9^cH%Z#NR2bwA|-fKb1SK3*v3oH zwzFLR)uS0YqLVDW9?Pie!Wi93XT)ELa#&-|3OS)}UJvy{96z_pQy%``ifU&L8Xd!_ zIzVtE4V+sW;S_13xuvLQ2tWn_@J!CsrBO{U=^LZG0o%ZFax9VkyW^@f-kmTcaIJnz zcCl!Kmdt0>91(&V0V02!i{q*p3F54`Kt$r7-$zKxVbPhlD|-0hyBb4%Sjkd3<6MP| zn(V5UI8x3czg!hBaoatJ*koh^&=dkfpSYsjvvzj}}p>sDHDWOa&ShG&5mdlt@LROW)#%ZzTNkR$H;RKoE8 z8me~ZwD`G1$*LO56Yp;~ebCYpFxj;pNL4(Fcgx8*-G{q@v*d z09>Ho0PpJnz-!@DFE1EhOaB6Q2sPN;KhA}oL#5A5X>SaJOGU@^sRz}GQkV0I+0i#| zuEwk0pBn0x-?WOOM22pmMBUkel>f0Kb3vQh&rPv@BcL}Hq7N4Mt$?%t_lYCi=H&U| z8Ft=m{|{!0b)2HnjdMB|b8NBL@ZpczpFgiVFhxa2CySlli+<9fW+2#qEC=SpqoAi-j9k`W3V8j43Ppw@{Fxp=G2X2OG4qq{>K`Tr0&k4Bphl{f+L5pE-3>hG(9@>Gi?X=XI|uR20X z`t#NQ{Q0B8@`ge;B{DcG83_CD!_N5dNUi6)O5jWJ`IAYYa+Dig4oT3Ez5tN+PG9Vd z1&vadxEbf}@p~oezRN;8jhMam%+$-mS!Xcu4x~}vDL=X#f<(<9!W(z6n1`~H1zJad zEtRQI$SES9Ip^bsyCn9AcT1U+->DB?z+`tfVS5U_C9Phj2rq^d$@ zU{h!pJY_2W4vy9lHU{_pd1%uRKpe2Vid8p8XAdrU{xobVE3(ei9_iv^m7@4S3x-HQ zyW&Ybq@|dLO+9L#C_Sz0tdP%nN;x5bj$%kp8rTEBVjfFb7+>a1mV1(HgDYqZ#4hpk zEbaw^CBWeHAxEY;6xy-9+3OUVW0aJ#`Y0a1G2a0zN~*yZE-UiazH*&Z`%*(!=}(EU zp&mzO6hEVvjgCV@(QJ&jDmc1GD*lYnaQ!YzJ%C3dny{LUEq+R&dE4i9MuoO)g?w8B zxl%~ieY9m#Jw;0)6-~+Ecy2oATGsNDFQK=fb9C-q}o~T=|P8quq>T=x4cFiL*7xVyJ5iH$2_) zbp!q?lEAUW+4@^kt8hE*&?1X}v>;A2*@+;f<21$1yL;pVm7HFB3wru)v5HHagAn>x zvx%|uY>I-EWTx4?{qC<+323_?SUdUoon3cw@uOg#dAG_J>zLKRrdUy|zR|LRgg{>))X{8Z<2? 
z;gFqRXhwoLNdTg%+*8%HXktP5BJ{1jGi>`c9lM(WI^Xcf^X$OyS`#8L6SUJp)r&Qz zHgANZ|A49BvotyzR`4BJuTQN@!=|pGY4+P38XN0yqOzF_JLgt~ZmxoR!D1F8;aZK) zd1|Sq#QBi>KT}Z69hg%KD5WS>hu0`Yq3eHTS*5D43AeE!;|wBf2~6VLiv`{j`D0I- zEv7)H+f){+`bWtq5PNqXmN(zhojMb9I8u( zQhFV4fx#74$w?txShV$NXd zLZavz*^9v#~nQ;~ihYHkL|4|GoRa?Y* zsYY-65NE_vya6D2H-ZfcdeMzcdq|QXphoD?8LpNnB|nc9{~}TJgAldnj`frz&*6W4 z#GFi!)>0Pc0eAw~8tpu*q^ zhE9SlRXsz|>XeE?vGY?~q3j_WtTQTkdgTbet0jw`V*}pW!alWC=9$Qd9p)f4^hD23 zFd;m*4j;S)fd=@0uEC7G8x!K6)iWNQ$;#M$VHNtkU2s7}M}6>A@Yn#_3ePwIU-Pq2&36K97U<8x>_2eR71==0i9BGk4rKJk2)D{8Uu) zrvII+e-X849)Ze>BnA^0aM(`epEWC)5rR*kDYV>&0qxjeFbhDt15=;-hjL1S|Mq`2 zOyv4kqDC3O>Z%ap|1hLm^$v6=zK{4Ld7~|KPMif19qFfYB7VrzK$|#dC&7;nx{|@> z&j&q%O@Sw3{`hE8C<_{(qlUsxLiAl8Ti~9<^J|spb3^DY`hG1IdA7p%{N?T%>^Gs$ zgDD4_N#<(j2!&rG#V~4GFAT1hZGt|{sWvCcBlwBAb1E&Dov8jJk^h3**%{kGQ|cV# zvuY4SIxkO0KpdZF!Ai_<&!4fy`wU=3ZpBkwVnYqK?Cop&^IJASx&H_V<1i7@yHn#o z(|UypPt*Q7eyPcfkt!kQ1+umqud(yjDUlIYX=^aNaZX)Im3p&)#mnUnLEHsYod1=7 zp2I+ZFM%+m1*kpvuU=SmUQYq4>HnVl3~=rz!9(15XksxZHi-#vwoE%a+1!97pBeu{ z_}(N#I+)~NDZOjfFi%3?^F&+KcD{4+2s}HVa;KUj%HEqEg}x)ecgq0dAN+piBoe#!|Izh2WE57B5g)LKB-{B@PwLK#ba{0)o(GK2 zU$Y%&)~f%y&m`JKN`5xhJ7$1hV!%_jpP#XL`a8_A<{P=fef?zjsN&Wtn0vi!5TrHcSmLqXJwrO?lkOBA$igCCGdiV zss$mI4a9lpZ}EV99YkJ-B#sQm#hgDLd?vE0<_|g39FVD5zBB`Nd#r@0iB$9H`vl z4ym!qNI**=DO0;eAK5{xh}ygi4c(GJ6v?m1Y=uiNF64^$tBfNXA}`FOTzdsdo_;0{7IH3qDM7X6b;Rl-EoXcEh1 zi|hX0*mo@vb9YCGUgp3|lO1R1V=&AK^vJ*5HD{_P_)RYzlmBJBMg{;0ny zOARTZowpaKhv|PR9B0uT8$J--DgGC-K@V@(0`I$tgXvf3MSpNDxbK7%l8u{~W7H3o?Vh@BJ#zp_{{5Hxt40Ig8xjgK0nb`N-Sh>_;+0t>js7i4siTZl2WS z7YnTPALra}@hPD=OgQ5#OxArGe+n%_*GotT`nJ>C*y;bs=o}-nE(C-gJw%@7g7h-z zT_Is0xtuWcWNfa@OriQ#$F&skL5Bh)HO z@t)W~D;lXDGDBI(JV{;;U>afrKc`%W@?(|I0xaJ}yNk4wPaH8PShhvlI8y5mPlMZk za(LfGK_ItJ#caam@G{~FiG+ISGc>yBCqt5$HLIl*)~!HyNs^38F?8IW4I9Wm!qzbK zKaKL5qov&lACrj13PNPm%W4kVV&D^>Khj*?mO&e8Z!{m4rFR1vin{|c1m|)rto*v> ztuBXtApf9{d{o%-pFcseuQ7}MG{J%Z@$|Qv+7W0;>I}o`n^QrNHKRmOxZ%UP!V7Ak zy3p@;o^HwNScR2EJLAvNBt|!c(Q`B7i^hDWsMcTOjgFh@>R_NE>URuX%GtA!C5_Gk zQ-2GKw$5q;O}P(KO+{yv+C-&(9oLtGVU5}%&|+kfAxl&;s^HN9t6W9h?c3+`W#WuL zx4b?}xml%*<`QI6+K=b*sr+0Z1AP21|j*1zi3>wI2Nc{!P za;eS+W^0h05UWu61WP*2Z~Xf^KTu98#7q}5k^Xi3-5n^o&oYW}P5j*dPwAv#-cn~N zIak)EfwhAze6;!Fht0b0AZw87rF5%1{op&|Kbu+HHp}WRCI?c?;N!#7Y!_fyEO^lr zWIjNx2}%Fri>R}VCJ5VVX51go{%__qwm|4z+*#g~B2_C7T9HGmr+LETrcoed8+-OQ z2>z>L4ke?7Yd=j0f3hp+Fh83=d=4o?BZTS`!uqRtX*fqgq02Wa8Cy_fGO`#*SLZbX z^3uPzvz^d&12&+4fktk@;$sFDjf`RjhmvNj zDYxo6srq((C_LH^yqyV>U@#<%=*Q5Bpk}d@<(r;PA~sMJxGVMJx~{M#I2ykV!koct zyQ5oYDRqwstHJ@#I@EL>+SQb1^dlZ{%FYt%v>>7G&M$ZlaVbI!E|v_Myn)tVF!t4T ziq68j6v#FZR5~jF`0#pBy-bHLqy-!CdH+St%`UyHp^Zmq8>-?!pREQGcyi2n{2<$5 zFNJ1pa-Vwt`@3=gykd2To#B&(i=_Qc;NUS=_o=P|H3QgF55T`tM>fIXAl`MhxR29l)(XCiX+1+tA}%}Fv$Qu_TXG}38HqG+J4%rRL45bYL_A}kB2^(aS6;z>Dp zrY6n7r)&eqw6{d|*?)v8R)vL9pDS~TDS1>7|28p`WC$U)&?e!3rQh3%=t-=fhRc; z(LpZ&z8CuQ&#mg@Iqf)Qk;<1|k8)nV%5=VthnCeAoccdZePvvf&-b=pa{;BKOX*x1 zX^^G6B$w`9B&8J=kp}6OZjf$}MY_9L=}wof=f>aj;{T4%4)>XJ&YYRKXYO;Ziy$7m z3?P#SQlOw)=dn!QH&l?GNB=)yDI2x|bph*YRsR6!!T^Shm)yD#r*B$(!1@2#<+w*P z-^zm`)&BiYBp~F(UUajOr`r7gI7zWO!_qbJPtGWu(k$7`9-GokWg_4qK_M{qqhAk@ z%xcm|WW2I8+J^DdE-;->^gb92?`%21SYAKy0un^EMBZcVP+7cyf&whDAu=1B z!<`^5Mj(moB`_NlbJ6FUy=oPz>psW(JJBND3_<0n*s)@DhsC$T1)+)8Ra=vc=S>UQ zfjE%O;Pg#?Eeq;(`!8@xA&Iua4@T38mc#K4OIkAWVhv;eO$Qz&&qAfjl^XWNQHiW3 zZdHLlzhB$fTF$mo#kK4&3hO#a1|3)euqFRe5Duh{DaQqeTjc`&h6b( zZtaSj<^WO{LXPXlK+19PIt4Y4no?wm-p@CbvD9r7qx z^-(XF0sE%em9VzyEN37WJO_UtNilCn$1%GvkD5XyAbv&mAf2+o6y(YxGe;7wycJ`C zrefq4UGyjukf>V&-{x2};|wxLb1GecDw+yzMaLBro$SrYmu=0s#f!s>8ULj}{&%IH 
zd*iw(>=ng+1!eKOPW(!Wiekkj2a@IUlgra6#xxLif*XPjNFTeyu&_Ry!=W%*4VNg( z^O^C7+{BcZ;ha#tj2n{imHyqVAeXqKgrwH5m$R16 z0ESJF=Af1j1wisGh2+%DvuTn0mq00+iPR`Ai6jU-muTy9Nu+~iV6@puK)~*!yXqx5 z34j)@Z2{X}62^Y99{0vpY{JzxU&%aDSIH#wksBdK^>1N5MrRn!B_5XQVO@;^xU4sqEwq&05 zM_jGP*76acEn8gK=f-P9dY-*qHsct!&gMA~isBe>j&E-FouZCZ%^R1>_yg;-&eI2* zw-8A&NXA+_MVJ2X(Z8s9o4Be&{!yvwRCB-2ZIf~Gd;%3Q20wJ4l?iEW%+;_DPyhV< zN=sy!MAh&k0V!Nf0y&f;S)`d2iqpjcFyd|1O)~8t@6{DqQTJCvm>1k}@))C4F#6vq zUb=7pr@wKcrPKPg!Vu1A(Hu>o2fkP0Rr+8Y#B47V1QN~sm#OP96whdb<#D=*x8Ii? z_~XN+hZgG}nTu!f71F1hfQYZ#A@Fx(RLyP;F%){G{@ub&JKvc(@7Ra6DbS%pp|@e{ zz3bdg%8~_#e9TZ=(91>WOyD{M}sUIzT^0*=fPGFJ3T8~Ig8mNpr+24V3zn%R} zt&cl0fpdfgMy-E`6Sx02+?tk^_VQYanQoEq6BQlqf!R#f-P}{*)ZIjT;Ac|%Sueh0 zY19$Z`a5I37P9H4Pnb9&^2VW-71)(}-`P#~eGf6aF4M1g8p>x|NEgI+wV#>FDn zA=EpT?@nB2PYz6p)gt@H<#Ew?j^Rh*5B3H|ZQ3O@TO1mErqV6Gk-wZ1nf`iowU*sS zXxQ0=GGfImQ)#`cuyw@iZdDzq2g|y|TJqhllxF*_ns|5`P^i7(*LW{4mLx#v*Uyer z6>d{Om#X@?lBn!jZf~B|73Y^b*ksax(j9G_Fh8RlyIsdZIdB=nc@X1gI zq&tl}%qsOx>7!6zzR@3_jvHLDm%~(&R>~X2NT1^C2w8ykcpem6use82~G0bu0_kv>pUJXu5;$4nguDW-^!QfDb6Io zP*clSIx-JaA|yBBWPxbgrkG$lIw3p2sTnV6aM4VHQW5UN`u=r4oCzk*tdS!`CLn9} z7u_XKw>iy`i07IAGPb+_aEkmdiyjEI;}eY?(-gRzw91~#-{;FHy3D>Rs+-cXrMF@B zxNb~~Gz*GYra{g&mQWBk00V{HTVJM>LF0;od9!GloSZbzSGal%5?XUGsCag*%b#6+ z%0_5hMpU8L%kY^D<;pd_6mgKAqu_==EHkBa-n;|R2H)Wi$8iugJXarBprK|vY=_~y=uZPGGXI$V+GvfOv&2ifYOtKY3eSE!S~_ z27%Y;eZwQ!p9^+ZYk(PNm|M_U_X)UYxK`<3y`J~`VFQx3$${5QZ|iB zR`hBQ(?ZN5esI=B4zJq`Rv|x>`5xSa^x05}N)NTWYvf2}q?Shs_T>j9ivd+8y^&g@ zX$2Zn?s4%Uyr;zgjetwObVRekwkk&=`%!)WFi}hAo@ND=?wrMa)9{|E%OmcsV4r6S z11*3R0O$*@3@##n+h9VsVtQ4peow{Gy56Z=PejnEP`v$e+M$Kw5VA~<7wb4J`KmSv zvX8P`W77jrnRjNEAV3O*#$u`)%%mLq{T;q!F^o(nX zKZc*iFRQVyN_S6C2q`lxo##I#tNf-nn%PrxQf$Y=LG3{e2vCmPC2(kvnouprYCJkm zab<&(WY>~nP)DR(P7AFA^8` z%t>DR=<#AH?~S9{!8_xmZmC*8*0#HgY@o-Ln5Rg%ezy4GE{K=LxDEulnAxxq>|0YWUNykuGSig*hj}N#l zJ4?5%EXK@H#n?Zq#v5BNCOpnNNo3Z$K+V_wDvEZ~eXh>rjw8QW}Fw__CCu{*Q#oGJWTn%v!az zzE3`Vp8gF?wwhI3JYZOqI&*~!EMS*>xn?AjK?8Ce9KKIuO^L{Jj zd{xyVqlc%|MNrYA_OGHpl;RS+>Z0{__9NJ8JtRMz&D3;qyDZ83G~4BkZeu-fLb{beSY;i1TY-1&T6KPAvL6sCGJ0=LW9A0$EYvvv}k~K3wx$!-?)uafU zT#a4dR|v{z5loAmHtdPpTD`?qzLyP0PG$)2qSbmh&<7Iw}>xBcCh9C{|A?yPt{Xy%q}*71+R~ z4lHDwk3O)aEIB4^mmnhqrgNG8cLQtdj6KLYVqbm?k=i$eQDF3VL%WjTIa3oIdELc8 z?`@mCeNd|EgbTqTX55m6fW!rEt{n~mK#}j<0d1Z7AW~#ktz5b-JFzXrSzjN09bGyg5ShU{zAh^;D%za|( zDJ0zVI6$SU21bMMg)l0eV$kLivcke z_r9;>Sqf)N(9`x8=RXiaz0KQHNxXj+CZWi0% zPEDbzQNL%Q1I5*UEJpmE6}sDae5s#M0?#wc-QDEU1HP(A}fl0Q&len?a?ocENb&BKuH3i#5yRXlvE7# z-yn?o;-4F*-8@9JuH^Ofu`0#!TC)eO1z^8a34LYPnaEa?mZ7% z|5r@E{H{^CGenU6^W#Uq_Nij54ahmoUoe`4RuKT-A4KpmxJ@2Cd-Qo;@_t$^4HHKf zI2(#VUjBIW$gypUF|Yl#CWiDqT}8;j2M)bFibuar5v2-VQs8i^YBB)Jp*-YPcURI5 z_wl28-8FcSONBZMPionSF7eFaSS?!H#}E{|EX2RBLKglsx~}@nF`bTp}h8Meb)> zQy%!6@!KLz4+4O{$FV|2box*5Fr!hg_N(Rc#K#76&VljxfEPw`yA#jCNMg%#Sb~7- z#wwFb@S$-G_CiC$elaNpcGkt3q#KiJ14R!LXn;$a} zk`E-`aTlDUuQN0DQ?8M`$Jy(chC0Sz3|>07-CZ$zmVe6~*=uwkD$8XfVxFVlwtUoh zeP@s-oXxlvey7Nqa-a(o)$64ZE_Cc+u-=SVPDYMIj^iX1KraG4WoH?^1D;BM3p+Fd zQhYj@`FAN7Am^ot20NiSS1$+NVVIK;9 z5$k0pp6cDkAHv55)o{o|oO_OI$;jxR?n@36j9qKg#QnpH0ixf15+GcD{KT=cN5#=% zK*F0JY@JGAF$ET&m0l-W#C)%1G0f@NROI$;%v=l|z*;}huUWy|BYn0Rs%O`mU+3c8 zK2e9vq7Nk`54LkL-DKDTJ4M?f_QN6>^$BvYA!Pw@UxnsZtd2f>GKX7F9}hCZaHR;4 zZwGp`6Nc_IABl`cOJWlBNIoFz%$MaiY3<*ONQA!p)xg>owfDsEV}yf^25PZ00$Ztr zfBHv*NEuokui8KS9_g(+4d!jc9l2v;kC}Sqs2Xh{X6C21Y^zb>Y>s^*$bY&RJRkRo zeUcF7l1!9qbG#&<@UErWx62~6=&v`RBk3!nM{N$6Bjt1-M3w5Ejf|d-j+q&5m}LT( zLSx}c&sEGmq~``c|MOvV^u+AV#jiG{c)KKRp_a9G}0k);I#%e62K>m7^3Ai~OPMu#-Rj z-&Q9C#6`+#kPn5vR-{01!m+st8nD;qxRMcng7L(AT%*{tZC+A7wXCX?ypRc@^nL^U 
z9s1H~pSZ^RlSy-O#yQ}G}IMmxBV~{9xgZglP0}?HP-Ps7ZB*9t ziFi7GLh~Q&*b#$pgp)=h7f$NShrV9^+$RALg6%6^@0I~ZU zmbb)nHC;K0dSA3|1n5otp!YXDqo&8>BzD!-fC3VDa<4N5S>Zh9!vs{Z;JBUvhA|t!-wVfXg+%5p z%qanrE+0Wa`HLmIgnq)BPah4BEsV{<2X2YK^{v;f zxDY$i0b#K4PG`t9!n}+5Un8Uy2)HgQ{>OY6fPD_6Ci8C2>R{D3cuEQXy?8JuxlD`LC;Oz}-Og53M+(3d3K(^j zuc#E-EK#t&nX~Zc$sDoRsV@Kp$LZu}pW~F}2<5WuV^^l}Cadzu^}qhb{}3RZd&*a3 zaU1Las;qTT6bxx4OZXMnf7J*G$iCd?wPNtc{Mml$-S6_2rauEA$M>Lu6J@0RuvK#Q zw&@raC9-w&MQiA|`7dig9Cio)w2%={hzA#74L7RGy<@uk+kC}$nI7ftgdwoH8?Cr|#%81_34L4q{j zRi8&#wRB&dX!}V6NVp!d#$pC`I6*g8m zVYT*@^ej!YrE0Mg3g58O8gCoOf1%f7sCNCE(O$GXVc6|mJbk^t*8+>D^{JU;gw}e-5a1+1pG4$6`Y~%Nef%0{r!RWK1->;fi+nBwom7C;k z3@JRDImMnF_AuG*_(b~t_dAhI`+}wqqAC|T1B3m@|FN{#r0?4}V5Ht#TFgf{c?MRy zbyT)5JB8^qG%e*Klmkx}ukUs!z^w&|)Lu-FQK>8p1%_tkKHGwk@goH8KbnQt z11206s9v7mkD_~$F+YwYe~n5Z0iEtMWzolnMMEcrgV7>7120$=Gb)0!Vks(s^7+kQ zKQzyb*-BB*HxJhIMv48XX;2W2z84wtXL<^!Ts}S}Ei98h*Y5k3w@x~%cm;b949PTOA&m{5 z5H-)=kPGHkbyNAK#U_|brH!#`-w?~EmW^LS?{&QvoZpZuu2cKu=C6dJQVI(N>{pi& z%(pfCC?Pe|*aPB>`{u#)%4de=&QP_v=8v-k%U;S$DH}n;b;~S^LH2ZKOVY}8`0^Mj zUl;w}OXWJM7a8V{TdI5?qai|sqgW=KkGDB*ZC6W89+NwZ!U@k~^X|*gz3m4~(8~?2 z8d7{2U#;K1%VkF)n(cRD*LYtJ>CBimtvu^4Ao`I3T`mG6M!WPH?_F3*we1YsY&Lqb zWg{`@R?7Dp;Dd;2MS>%HM*GZv*yqyQI_w3BO=V8uJ$FxciB?B-uJ`DY)%j5P%1WVe z#R5_%d)KnN$=)(_w;+( zUr^abh){XQ_Gz6xmimxG`=Pzgk~_semUR@{A$PO=$5eP#zr=I&t@X}|YL6oL#>MoS zcLir_!B{rol_8@B2AdG0DFIUUuWr45 zUyz5cF`@7tWO{h2Z$9#)m%1juv&zc&5QAL```}ckNNelHT8EHjy-6th4TN&B{AH^Y zqqC4U{sB1m1wnkxlkvNJmKzZAdFH2XU>PjUp-22!rUDzE*DqbsdVV}!6rS@cABg5_ z{rWT??pMy=qR#qSTTtS>c*e|Y#}dU>R8rUEGTIRysEPHu5y)c9(LG`Rnbl^t+l$4@ z-7BO{U5++Ro^PeM_OuwUG>a@(kqYjDk8?BEP}V+Pk!^ANjCd4LV3Ir4cttCli-Hp0k3#Q#D_B=RHAcO zUU^Z!X_Hmbx%}6!cF9;~k?8}yg?8V*%QIcC*8<+5M{?x@*#T9=@mN)PgJf6wp6rF* z__wUk{}|0$J1>s$QVi@vK7okyb(pQ(XI<5@DwRGd1h<)=#h-3PNF>B#Yh*dGCC+Lo zVRZ~lTiN>)Tf%U@>f)zw{nS6!9(Rn4vu+<(ZN{QzWI^v0wTqr9#>++p2LYi$Ise~t z)(u@x<7RI~_o3&0@@J#%I_N>$Vrv+#Ox{bOoUJzYUF^~@9Hj|H{m@%aQQ-(&!ud&I zk3EVSo2ZX_@U6E9A~fL8Is8?uSJr($88ln-4wi%aH6`w#tw+&Zsq%w@cg%5EbwgMm zrTh!Dv}Q@Z4|x|yO7E8go4QE1#bS@NxYDrq9RUik^ejBIC~Sn*)~utI<8*qkY_Dal z&RJlRJz|s`b+6GYfBgLwK{yX5gkk!*`gAO>(w^j(rNaltUG-ht=Eot%SS)~rr3Y=9!&8Z@PoEo^!3#;OsQzkDUeF5z8N&1uL+e06 ze)^MkUTDN@J;^*AsHcmx3E!1s*+r_tonKrlhC~tZQ{GHEd8)l!;PyvQ1NK5&z9Cm2 zUZ0d)^kcp1K;8cD_S}Z9;!_W6mL1OM?@8%7(%D%7rZdjxI9)R6%VM6IWK1vgt7Nz0 zur2WVFMzLwtqBK1N@a^wsUhVeWIR@Hu$(#|)a{Q5(k2jxZ9>swyfv3xf z+8(coEGOHle4>N5mX9+N7@f_#v;;RSG)fX((XBGVs)XMz>*8(vBp;t9#ec%E49R@E z%FDzU05UZLYJP;-Fa^IZo}9(tK{AF9htU;|uYGn}zZ12Zqxf|0%o$s!pI8F?f`@kC zMJ=1K8AOurl#Z`o9z1t-42<9odQaLqkkm;EZQO1>r@(m=%x)lg9`}8lw_23h_NpPj z>cl{^_`+XJ$?WdM(349j!}~0Nhll~_H1HZ5G4Fl%epFoXdWnLpg`HpIBtsy@pWpWx zI80c<_h)9Q?Vr7^i?zVax_R5sN;Mr^$H)h+_4Zp4XO-k84pQD={YM^k5p0p3+asQZh4Z_wB-8(=xBn5c8z7b#l@dDgt<`*zXVaX){WI{lgxS7H_zfQ(rfrf z$5n8T&{D~_hro#AlMhce`k9ta&pJIq5o9*Z!-|)o+L}NG!}+*rYud_bX~M-phKm`xX~Ia+udirnE2k9iS8No^B>MEa#imNJ^Myt=*}3-{n0& zo)Z`a(5-|$PZ)sFU;|tQ%%N#5!rGLU(zlfhJ4=!jT?_g~r}l5QswN_zO1X_KYOgYv zh`J{E@}vg?r~=%`pTJ8lH?ra;&?01+&AR1%8;SJqJ+G~@iK(yma%O#jy|I`S-Ra)_ z+AEz7m=o*yCkwZyt5k=!=ltFL{L!g8=6O9Bl$+a}0Nc_9NxU1#r(sgojb7h(%qa`^ zFChnr$dE)P}Ly zKj1`};XM6k7w^r-6G)F|qr%8T@93F042Sth!w)F$lgSN}N;h3W-WyL?X6|`wxyU6G z!!%psA zf1A0|J1Md!$>b_mlK9!-C|eHGgu|u${r6uHcvJG3Ewqd*FE5vyzrq;u!Up-`GwDVT>K* z&H_0U5NftS+j^i|g*y0MA7yBY21hideEz=qzC0|3J&@5n43YPDD#Lx?^Q3rcU4ug? 
z`-0tNz%H|*_1MgoHu^P<+&F29h{d-244$E?EjluNu-ztfxaG&-4h86(OK?noIY)e{ zkkd)L$D?r|VNQq`;`cMsck=s|wxU2#J16IX&8?`f%(<~-&TyYXWUK3TpyP_+C^^kK zvoNueeJ&oc4^5!Bg8D+Oq*bf!rBKemj#<9apk=KtQaE|a?8>-*+TdgnnKm~Mt8Sl* z_?wz{jt#-Ra`N0wLG=h~NI8HaNB){tm?p-z#N-k|sLS~~PrXqtIy=(soMBlX&ebKs z`k!doO2^Sz-JxmHV}uH(1Z%xhhQR5!!9JAzgSqQsoJPh~o^o||&(7GOoGF2bbI%Sz zPo$3uToM%*yd=MWp=J763cN6IL~BgG%~EGg z*L|nzIYYvB0{xT9CRC(zon&yyJYI2FWhMkg-;E~_TI)krQRaM z-)F24Ajo7rHSM7+Cd@s*KX~7F*dkLf*e6ImVzs^iAm?g1I$+BQX-GMN_$?E93!K}% z>lgz^?o~(tyer^}?`jF9m8Q5FaNDL+&rnlN7moe)e0{u}K3XFUVLUj?Dw-sRre7<` zr(^eqqc&cQ}LIbX*ooWfivZQyuEe(y>@|u-KY?c6O@?+hh<_ z1Be?H0fwprkM>KzQ22;p!qg^MKn3D%6y0Vll(t{HW!kIdZm1nCRrnR0PH|XXNl?It z@KR^UY^nu;GZv%fA#Kq`GS%f1D`WZzJJ=ziai|p83+Caxzv=07g1S+thJ>@6s@y#( zts;J2ka;>2H9tJVIh{2|HDf2*>lXMDa9@Uc9wsqR8@rUI13t{Hb!uIA!ddK~a#9`- zFU1hG-ZvxgGrw9rEe~{uww?I{{nCS_nDUc+DP5yUVbB(n%lSf|I-WMgwuIl6FNSg^ z+h-nU*vU5LXOF$IWS05()!B`wFuzX*bGh6gXR`-LY+2<+D-9Y!Mjia>-FnY7?TVGt zl0iS0EL8tx;)Tjft?@(Fq4UqVvd?iFE?)Hqsp;29S{+o?iVrg7sf{(aHsi?lxHam7z&*Hj?CPB|``91#hClJ`2^El9-!a52ht z0n@%j4z+i81G6bB)`dU@LETy?qcZF3C0pwN$8DHTs4u0WX%ou7C^m4s%r8 z_X}!wKD!bxKoZcBG%dsEzA25Fu~E}(t8$qa`p>FAf8jmIcV+WLs3zOb5=2o)!q zXfnvuxH%_@+I&pbn@|9@_gm@@a$%_PF}C6l@wF7m9Qw-_#*!hsIsN*Y8znkwGXnSM z6@|MMpQ;BGq3}#6=3Un8i8*Ei2^zbPbvic~c3tM_RW1qqiXY2`taR4o&Z!PxQm17W zA8@Cx4B(km7V=d-jAZS|U$c*gP)xtqx)_-s+eYv&sgbbS+K_Y6_&eWG#^p6=`ZLCZ z!PXI(w)%q9uN;|dx8nng1kXSJuD1#uxv9qN z3mHw7xrjFWB-H0w2at?r@nlzOWE_u72jH)LHI;k(ts_z$ieWtZL;Dc6SLdQioUuhK zm3^Ojz*w>HC6)r=J>NGIIS?XtjA_vHP7`J=ni*OP>WHWN(|`5VNH$OMn&s81j(|j+ zvIC-zkkHUk=As3M7^dKOG2a72@Fezpt2vDR-a{^k1B|Aqf%0%{;5b`|H6z?`2iH4# zR0M;Xlr!v)b-RXCHRE=?rIDXpB+eDx?UG5@cUJYTB7rf&>dN)KEHFZqS+FV$J-WCX zNzh=t&u&~bsjfW0dKKPD^vr81e;(r-O`_XGUDh*l8jI{xc|N(H(Vo=dd)Rk7{GNF% zVR z9VW=EvF$G1ia7?a&eiN@?rYYHhL%KNVrq@RSh@_EH&afuotORTVP9jW?)eu(M53+CYP&&JgiosLp~`S zrb_z7&)kH*SOvVtTN1RsxlCDeVXu9E#iGyAyHfaPgC9eA$-OP-yz5k6f1TfXiA?^6 zc91o3O_f0X&se^7l#m;hlqbl8p6rx||4O?i5PKu7wzZh~i%bE>wp@o*6ua|L=NhG* zf_W#m!}^CLuQSha5|8FEH?mFT9X@!CggM*B*?tv!I*DrATJc|=#=R8 zCtx+a>vVSc+o{i#JI{vD5}`btHEbdfsj4&9w>}o`%#91zH|_buVo?=$$T!wQ0fdt% zyqTK8dr)bKrWzYjov)d)?s>8;h0#h&4VOJPyDC!WbEj-(yQJ&?zQWpA^D=sAEdWg) z@Np&kVUjBAF@yG6*Q(}MTOn(6Ok3w~4lRi{i?~Cv!QxZzD!sy>{N&0Mv!?VWc<`gA zyPy!GzG;WiZ*kbZ84k>E=Z5eQIoU{`I7XqcIN@vnhQWgrUcpCmZ?PK>hExqh;!YaP z%23%cSlWC)wH(Y9VrL7w*u5skwLgKq@r@u(_FH&HWyFu8+gaKxTR!Y@f@Q-) zQ*`68DoXEl-mThRomnoMlDG!3Py2INNXBe0r5-SFpI~~zRw5O+Fir*At6;BdfWZO1 z9mm?#OIG%6-hqr;gT1Vx|3--b5g`WhbDcp(aQGd0@wck6aNfcfv;4z*$?e*7;#WAbR2UDL_?mg0ryrUXe#I z>kqdPYmqhw`SYw)BWAZP&#CBG49w+mF9GjLbW(5WJkeFv;vUqC1*al;uTnJ?>?#x1SmZzy7O{8NiZgNjx|s z5gx6e2=Y5+*)=fDP-laD0ptv{uH|c##I;&ZZ~e3r4D-vZn6CA#`!iF)w@j{-DM%^nMfBcUpQPbjOHw;3RQxm);ngeQFj%j;{| zpeOc~ymk2!;SIV!>>z7nPlKc>6=O4XJcJ>6&gNi;mCHu% z4lvXz^=ZDe6$^Q?mO&$3U_|eldNW>YF%^9J4oNUXx|n2ETd?*z{K_9!gg0tIXQ^Mw zj4bn4PaYE-#*$AzwAxxn-XQznA36X*g0><{>C)CF(hf(r;i-C3a^%Ey+P#*SJ1Ks5 z+y>fRpIDAY3QeAj;}eKf;L%5Cg=v&dW8pr&E=tYQnz9J{mD@y{dc6m%aXX`X=95Cv^hFytUSwS zv25{G%phvpZ8rC>Ikc2ah)+XU_CMYPmtVZv^^*0>p_-`Oy`yps06$xvK%p%b!*l7E z)cjhb4G0a-Ec)p&j4gvXWzr+!gp2rK-uBgiTSa0T`>8HZFrFTMgah#0$C6-WZf#yPk!s)|) z8j26_W2?o+Hf9xGCJ$_<5>T;A`L{g>&(g`eT&Y8_1oBz*SQKS{)T|(Q-Bu_3XMz?xeSLvN*w_OZK z`3E@VE$oE0EpJ-azER#U(|D!WH_QMZc|%eX_wjxCotSmJuKcLU6ViaAXO{Aa8_Y{N z2w)HVj_X#!TT=hsOE-!_kn+ydyc~YZgwyzwMy?b)eFUBDEvo3^u_6d^9f+cz0h_D4 z*ATyZp4s6urM(aG5jlAeIMz3Rw0aD1P8}BkyxY}Xc|iwupv>jqx#D{T`|a+Bc)ORS z)tOM$`PKG`!)SIJ@uZqts)Ao!xEPU#oV#1UT7#5QEdd5F*h(T2qBsfVS%znC*EDf# z^cy3$$5LM1GRGV%`zhUrUpOg^+iq6J8Q@PhH!D5O+=s+r62jlPYNZ%kVjHmJ_{xh7t#Z%>gnj!Us;EBpSqlg<@5---&6=%O 
z5tt@MG`nvnOSF*eSeo=%1*hwy!Tl#ssGIVqT&fAt^rhOx*Xru`dikQPG(vt^UkvaP z)NX3+Pg$aRoq*w4*9pOg&XB-^Ft(6?qWXNV!py#euw(AmA5^%uccGD{n|J`hD=N=3 z-RiZy*fw=rUS;gLpZabrbVW^$4jb+^UxdsZR>NS_*fX}p7Fe(h&lPnLv*AO-HzsR4 zm{g43+mI!FE*SX%{;H9=@$Vkg(``BbO56$&kmzEG5vzuAKq6zMcYlLxyPvI65!52K z<(R>vIVGR*odz%^^E7r$`FEWCZDF%2Fv`3;2H2Npt~QN7CH<~-va)N3cZUcVa#3nO z6+?Y>V2tx~_QRFDAo7E%t_@y#JvT7f2lz)KzIfhz4ROg&f~H~xek`z7AN@VH>fIN{ zSx=-+Q{tG+3G@b|dK{xWo-I3s;HAuv!G{I)aN~PiA#6+tzG_|U|1h$Gf5vcx2bf~HDI z;SJEOn)nkL$pR#9PERW=wvE3CX$n@|G*WaIZ@nS}LA*{kN1^+%X?P576@nlvfqBbt zaD37EyNCV{sIG$Ro$BV;xpv}09MgQU$K>tVZGl!|1ntW-Z@60U>@w{g&x8!- zF(C6m|KC++8Pv0R9&ATLv}I1~_EFn5*l`jx&}yKY_zVAw@RTHX@O)ZJYnUSFL` z6ctM$bEo~u(?4EZ(AMr?guI@gp@2uXU?#xZOV3K(t4IKw!|XDCi69z?Fd$Fn>Gz-*gTkJvA9m}Zi>x-K@;`g6TCPZU-c&7r_5_xd z6jZ=S*^`}Yv6(z%X#=kqwu!i~rTv5E%%b`16_K}M7ftzOtZ3uVoLP-XAm~e#&!K)l zWv>WqZNmXTgVNn?x&C(GmzD-_V;4p|OhT$jH!Sftm7jhskYaSb5?It5bpQPQw-V;l%+(jDzVU|v73#nmQU1MNP7);y zy3~Lx10tGgXH;Oro?7R`Z~oH1L=bpt==J$i0IWmbMt4xV9=727SpVNjN>7)0G2e{z z$10*(YW@Zmg>;U^4z0-(k=cMKs3hN{GhvGmt6z-0SV7zE7cieqbF)S^@#!sf;0KSE&{zY{@A0CfvA`K=l8ba`4?Kp&tn{V}X)2RH|ooi-H-9F z?L$DU!Vi>0auH?ucH=&c`+sM!hDSm-x!(hVM!u1s)`DQJo@_0LI&wH({J8Mw4y6S6 z7fr@m6(C-vxsK@PM_uPh@s~fyw9Q*q=nG;4(_6WWhwdg)dCRvv-O#kd?mq>^G$z;j zJmY?mOzesTF0qSA)_kN0|NlhnPLPA`XJxjcLyi{QU12?XO}h9bv%(cT%h|6K-QQ%X zNwGbXPrI)e_u7uTjir3&Ob4Q71Lyc9s82`{yWwV1g>}D=ta)Gh>Y83i4Tof_f?zLa zT6Z6Ue@lqFB})cxoc^jNWY^*HWi#;l@MB|^4vIta+aMV_f^Ayh<~frjCz-;x5OxY+ z(I)Xa^?JubdMYDnWhC9wHk;0Xro7`eY$~@>HBo(vYOIgFnJ#I2_x3}!;Q&;ARvOKO zH^`sespnzMU{}aU{{<|%Jk8O*G-ZMzP4}cNGXe&MAVC+uXkBw{oz}|pG?-ris_|mtaL)bbPsI{GobEDY;@)q-EHJo(j@fPxN`)%;wA^4q}=TzojZ_%9j zv&)DqgZ}bYkr-Hun<(r9J+Uo4Y$;5B1Kg$i#;slR2N8cHAcD0MUck<}av{HeKU-HzA+BnCCYGqmKh+nb%Rii z&*$w#kTM}Ilz7L7e_M9^hX=^We~!}XH>=Ufpk0<-bPudJ^&IA|rbhK$Onk?o$&8~b z{wL%G282FfSnmVMyP7 z^Ywace=k!WVJmU&Z^3V$NLr&(FPyTZ-F=asx8~>{04#gG+Y#9;GZtcDGa$gWx!tQ` zCjW5F47J0gkfT&z7;*f(rH`ulAJO7w$@1{S)I07pU0N~eSJrUlxNSd20_cW{dJ?hH z^JOhTa*?8R#&wfAGD%4y3 zhf%3S=SoDae+@%XFsV?m`x+UdnCuk-mhRF>V|FPm(WmzOBZ|3|7GF*6kiawn8(wIs$K8@UYQKV~e+D?YEJ8IdZl{e}Acy#i^zX9-UCJ79UawE+05m7G3)4;UEEv0 z&^Jx>@kxAH#>)>ldeSYA5Ck9w1-*bRE;Cr_36K2J2A5{ITTCR0w(NFt$dPwObjMRU zu;3I|iEz&Shwl}-rjeXs1L2ja-P@CeB(@>Q$Ic za{VSpCY6HBF1sUkf$owxo3f63;g>(IijsV;Quj|oth9H@gUx1K5~3~Q?j z3+gyZ;Ur9>ebV5|^h_QS-H)R-C{NzX{B%Z-ebfmaoc{lj^wa@SK20Bel$>;XC=$|0 zO5P#e(kV!tbW7(^($dY*NQ!g{NH-|mlF}U#-}8IFKksIDpV^t6*_oZ$S$aj$*Xom- z>ly)4ZZSFSKY8EhRXfIX6^|=oA}F6CK)})IO9qOL;nzm^B^`wU+?JY3KueL>T-KewwPU=Gurp8 z-P3B?Lj5<;sJr2lFL4$QWM`fNui$iM(K4ZbTmG^OS(sPP_xle{LN|<_W=mKia?Y1j z3&c^p!0Vag9?b*Sl`AgF=dW?GtTl{<-{_X0Er0*s`lPSB~*0`uCacJ9{n1h2eW`n-Rb5Iq^8UXZhe!A$zqHuCA!Til{TjXxRxGIV9*h-oTb}_*R&h zl}Hk^4$KV$>)gRTM&Is^wbccf9QCAxCui^biL9>u#AzdY#1?0-2PAn4aP_pUYM$Rw z+(X#~E*drWxV1~Ms8tHNCv$v@!u?`n+&c1dxS2HNB~ns>Q~vls9D-IUJ>_B}Dx}7O zOBNRxey~(bbaJUb38TWw1a89%gCPjDf zUb)}7wAn*A>G>`M?rIiiE3OZEH!@R1SF# zCy|+Y$$H6>f2SHQJ$!tLs3=u_RXP>7gFCVK*xE;%~THzPWlEc>DiKhRBzz=i&S z>7o{DI^4TXq3pbZXDB)zZQ`2pf|hp(S&~|drU9Z6KOHf%az^uq4q7?R5|uj2c?11l z_MJwrvenu;BfAwM#S7vb$#bB0&c$a-d%p;i-IQwyUcrslzSD*F<{`w?FJ#_z$4|G6 z%E_jcMd6_Q(waJLAp$%0_M^|ZuiRE$Lpr(c=SBwX%8A;n&rfXXceO3%X?kV;irnob z%&&4!fAtO?Wg)pnW^9CroMz@UogL&yIa1~TdlB%GW?K~20#YOmLWV+n5nAsndy_(| z{9*4?N#D;_MB*A$^u~86_;^WX2|E1R6H+&Ahv0vwnJXT#l)g4VC$k5eHhFjuHQUGK z&DefFV z%xBGYdLAVu?4u^2KHKI=hb_jh?FGPYY`KOBz&@V#CcoRrYsBFEb!Z5)pLGdJNvVAR zxG54t6MZ5w^j@$jU4iB!&vnDfZ=v|b;j-~sY>d7@eU3j+Eaa327V4~zN1844O=R~5%(^ct>S`iyIe}f z(GHI|`LLc}IZQ5vVbr^QZl#5?N)?C+Z0cx^sSo8FCHPh{#VrKyDaf7M7X;P-)3$9? 
zP~S{=fnAP_4fUK4?mac$gvFy^ER_>6rt96}893Ln(n|n)~ zO@9PWtEL?5lHX^)!Em+Mhfjevg9&fCU^D`vXJ0w(=}5wMLa{u6JDr{V)^ zr10dM=RLuH8nGC{WNg?g4675m)t*v2TeeIbZAD?dilsDO7MjzrX%9@CfD**+HUyS( z3rnBaM;?B7@Z7o@j;30KKP9o{rmtmgpcanLHfL@s`L{^Y`(5B#GN*G_-Oe`b1jqcU z`NZ6~f_2B|yB+7qZ%ZfquKux1huYpd|0HJ|V?|(+mW|8T%_?qA8aXKb*Yl1BybA>c zIBSg$>t`L@!m*wxW&fg+j`Sy_6^adMTW){9SVjGoDE9t4>@$Q=a%W!t_`*v2%Z-1% zS)w01YWze-O3dC0lSaedl`VT$Q0QFS4BwMaC!>c8Tvfhttg?Lo`8!1O3T{?QzvONq z2==M6le{vT>TR|r%xZIF>xKzUl+a*W^zHUw8zIJK+CD1TE5zdRr{2IJKDmni8$@&t z0{mZ+nc5ED?M<~sRBAZ?ZFZxmM@Sj4MdfsED_V+u-RUy3te@5oQFW=)y9?qz%{+^q zPSt1l#Y0*GCQM)akcTj3w^_OjL~5Y~F7>725B;u$sh)IAjWojytew9;2+E#QaJw>{ zjC}n0a|5C2XZC5GUq0d9ZnG$xd(-vhGbWV4YqNU&yM$$=%Qgx{$8XurKo)tiG*6xW zK-)@4Zj^^u4KRcF)u(q)&7RA|Gs&tyrRFbR(b$wxxNM$Y{^k+9w-FH&_BkPyEXz~~ z%xvJ6W1D{9&bQ>7WAD|#{4M3(p1^Ev01N)#>~I5D8|L>8H`z@ZBfpC@>#rYb(*2@& z9=<$Y&&y?A8D;D&=-YXqY@$S%WxNTmavLDUTh`x&uuV5YLe8HEmC6<5|F@L(Wdmi^|#V!LP56E@W#z zMm_{t(Xe+Yb^gE@gKkV9qD%Lbj_~S;yW+8K2avxX3 ztUK>nGjp7UT!Sn`m79KG0SHbaqOsC@K4JuZ9&hBim$i418+uhadu*cNviFBW;$yji zPI}6zj!`$P#BH~ot~2@dR*<8dzq)^2T9)4o;=XVo%GcU{x4AjAD0zB%t2dNT@pF{B zc%yu^*l+pt0a>L!k8=m2WXxsSwTRR)OTXi=*KjIXb2h|Xph1BL<@h4#_-1R~4 z0*NXCBd^<3r|sR{SMCI=5^%JLM2SCT4oMV_X{^1CP{)8W#!;sJ*MIU4p3W^Tbt>I$ zY+xm>iX8487sctTWX(!INEqLqvRfd`(sqyKOHCuuT!iR3=#>-<$3 zc*#SH)bPEUiv8b?zISQGpMNK^j)BMT1TRRmzTaO`MB5-lh zyoXZMsZFIoqyt1Id#k!E8!Nq6;~vlYsV34A*{mSU*V34@2F;|OT)Asp-bkWc&NUz_ zCb~;Eww*M`NcsFv^@}*)l?-Y|Ndm5-_sR+uX>&`N5}_XBy=&$81kTy=H!1H$P&wdZ zA_h0yw`E%vD2ckBF1`4x7t&9WyaK9OTi2lcchVfs3g0s4Fa{0HkZz2TlX1<_6Wr$JjFInE- z3>ApHw`M*c`_-WT@S5BQsLfmI&l^@a`MIIV5jC7Ro!&K_x8d|A<>AdT?7D~gi2;kQ zWSrea30Rrhj<;@M?n!U3;~m4&8UATQ#)5>CY2?FMMp40~>Q6-9AT2$nOMR+=Kt_LR zL}+cP#^?DjemPhIO9owLANYc&n$BlV0N+WP-hgEmork(N|2dycW8l=67@H`ZHS>#fRhRwP^n~I@JDbkwTlMHRNsXT?*<{0|Hqs?M_ybE z!esm1-=VLvZvQ0RZ57npd{*&}8v2RoJR`-vJ0|LiO+NG3o1#(C^CV)zB}0AG_s^QZ z&^Ry^1s8jrkNsg2Y)G>5Du-X?XQmA5_xihX;|52QS1nK9ro1`pW5d@sObeb`x_a}F zD5S9ViC~TYE||L}r<;}Z^-{&o)Haw5<+;7W>o%IT2~1lg`1T+3aAk7Z*9m_}@?`B} z(+icCrqvUt+L?aE1IlfxCc)-&dg}DHB=OgtN<(In zd*WX|H+xSH@)%d;1E5i~oCTkLsz2OrP>|_jk(u(=cFHVa#O#}if}?3iyss=1hGZJ{ zo{jvIw8p{6I^g~E#?<-rS=~#eS|E+zMojvl#{JWlNn>PR>3I}KG3}}X5*l?Z6!QGd zr9++LfW9-6t6;dnyQjw75TuwkChE{h$Xfa@U?}~nl1=yI?~TG*GmzoRP-{7)01oa3 zEb_6Rd{C%7xOCI6@{^s?By;$-yETPvw+x{#*;iWsDT}MrF;X02+Ur7!VTk(_nUXx= zz0Oh>^=|_qCkUQ;=oa>}!{gi^Z|jQmssO{6w$lH;!GCzIZkCZCt}Cde@H=3Qp;hNo zozfw>3)z~4@=mPwMXC6Y$y*!SCntKOX{PWO@4e} z8^ZVeM^QUP+9yw4#sHo2?AJ(1@-LC>D1rI8F01x7ZK#zr3_26f$ z>tBMg!dNh`iC+Mo-s&mUMy*peZPv6AL6!c*=MzD(=I;4hb+({t*pV4QtsW`h;bl{c zN6Jp6`$_u;abghR4&5sGwcirqCh2gbDoZR`uB=aFDl>_cr}H=cX}P>Rd8py2Toic9 zFP7WE)A!`LZC3cx7vL9TPIK^hZEN+u6?5giOWEQ2^&j7N1CMqI2}a0=f2f3rTpSke zix0JXA~2K<+qvx@Y=)v5S)NgTU0-;Ar{GocyfGNP6#?R5ZS= z(8VjYi)JmmdXn)_>Wh}!)BjiX9;hhC3+#eZG;FK_Ah#W8Zf@DbWBAs|L zE%w;JCH=uQMP*exT9+6(#ee>9U9Q2_(zSQajEGEc5%)#)`M-!1{cCFFU$2j%rWyUd z=$(+aO*Zas{q)c6SopjyzUAs&_}lGw>%a8S-G&9*ytc0Su>Q`AtiwhAr?2lVsQ{JF zQ?K$f960D__N=9~IQx9)%9DxO`LnLk&QL+>3$Zx#wWn|_2O*|7<(|7b` z`oYY$-lfw9?ys~o@?dYS!V9rXqo!9ht$z#pzig=A)6vl1_Qkct{BEE+}e$P;t`tAlWvM*5*>xQ?-&f$;lBR=;zh==3(ZlJSQFBT5SHY z#V(7#%;ooFkzZJci*>hz(TC2B$}bvqzM9BxYuhtxo>=3BC^Ecp^TcoxWX_2mSGTj^ z*^-n~Iq!0Z$n1xulz+$O28$0?o4)7I*?vlpa*mr%rTje^ZC+2T%37VAE>mRaP5<-K z)T?5yM{Di%!Bwxf)aQEUtI$>#_QOA1{!-QlzqfJ;>-E)iOJ5R-(bB{3x=)HuNw-In zOnU0ry0bhEGELf-_%87!Goc%G#xhMgXY`-{IC;T0`3hA(Nf#P&DreNzh>Sgr7RYr$h8^>buT>Nn)Br1BFwr>kAFO>NLax)v=cP< zc`N1@A7m1G7X}?X|K_tVpW_d8EZF#7G}yz4G;8(5Qv}GG?dIBQR?!xQ4hPyFTTwsn z<^}nS@76{q3?3WR#)_~6RVi47wZ<5mo*L31SX*ug)q^ae`!qbIy39nr);K({-}msl zbMB<>SbuHZC=7smMtO!jWqZKV9q 
zbH4B5GC$${Ez^=dlY*(_lc6xgTD3AMXyudXl~;MLA5n^_U|r9s^#^Nu7k! zK!x>z^356D{5u2cR&8?Z!Sp;Q`)w`SCxqftYVi)%;{?gm^{xv7FYJQ^CZ^tLe0YwZ zw3D+uZ*u@#B~y;oEIw6ZzzRu0%u)oY!6@~*t=giRHz<#|{N{he_x%;roDz-$B3f8leSWd1XGc({w^$6`?WLZ%?}(-zcEYcy=y&fp3df3 zHVY!@vU+&a_ft?7-(tTI_MYGa)>@DBIj1nYFUgD;skyW5#@ol$e&mQ?uDh4(&d-Q7 zo)+2B@!5XK$5q66&L2cmMf8=MsD48&qu@haalv+q_H+Y1-FSwAsEB~;`gKGDhgTV# zIltrJI$}b8Pu#@L_WJGP$%AQ2RoY%{{9#2j|7K4oW+6)A_R`*3c+z28W6c~v;wzH= ztc0aqoYk3zzpSLS{$n2xvn4ljymL?VI~m^=KD->d;Yv?p)>M%>h61sDBw=n|c;=0e zvd|k4Yl3yCM*Hn*Y5Lz92}{59*VdaFugUjTt5w&3KEIQZWv86+l}#b?>}d3~?7Kr1 zmsqNf=DYSvf!oV=ahAXL1Wxz8|8Yp{CAA*Vk`w zYjwB%(KM(JK8l*LI-zp(4a#hHjWz)`+xRIjszm8Dj6r*ymbFO4C7&0nTAN;+;a zjh=%*hwk>Tt8kF*S$xy@Z(9M`-KsSX#X3%ffLhn;{t1gg&4&;L6cGV{J33H&BVcx8 z@E>f^;r8cyXD>;X@o(pX5FW5Fo%Wcn{;rLO5*TEEHL5EH3@PpuQ`iw04tQXil1FJ% zcUmgX@(4kzwQ6j|-%~Ib;X+dV(2e@EIN;oPG6@~D`p&4;9e9`Hp07VrdKl{C|Fo)# z&C~9GBML0N7tX;JEyisOaSToU&!@9-=Bub3I$(Z>5s;#s^HtDS>6;ebM*rLTf54ps5KN~vZaa91Mrn&r?C(PE}0G# z(TD3#1;96M`eqLbqD+_|3Ov<%M{h|Ma|izC_7E9&mke6MUv0`MgrER#1l=7ygq$`H zWh9A?v)0Gvp&V1Qzva|y57b0^es{IJooHvLLr$xpaEi%de=B-`92I-{H+w^Z;25_H23HRW{Z@?0UqX!yiI&`uCmfpy* z+EW9TKL*PA&8bMh!^CfWQ^7-C@(eYE1CsQcf9i*Cfjp>?`S}Fy*Qdsve3rwsbBhLw zGito@Udihm@uV7?jN@eK431l%YEzd!U|>&01L>sFf0Zvrjd~Ld%PA82w8`VNBL{+;R#|yw+vDX*07VO*V zv;&x+$-kRTYP}7iGEU#*f%u3NEwk*L*aFjT5IQJsNCbm6*7%G02hC)_eA^Nd{<^qA zm1lzol4P37zktP%SW|ILKsbfugN{$}VsZ-g0}Jy(P}J^8&W01Mh_A>;Oo}Wb4$cWr&Qn zvyW20b9&#-Ab`qSRbW}?4>x3BJDd!VAj3RWVruRx{_Dhxo1;7C(#c{VW7dGV0mMLD zM44rih}r3|KoLr@zpR`LSzb*1EYSo6r^m%v(tuk(i^Z@qpbiup|F2J&3!NcFR1G?H-6m8EA~SuONoyfI zvhyDbLMs}hZnfv~j^H)I4Lo?ZEQ!pfL+7MADDKC}t4e&Cq&hxJMR+B2!;6o>?CkR= zlU){2IOp(%+%1c|m)-jpdVorE;kcSoXc?VI!UjQ$b|)VBGkV#@c@eG2p|<~_?&eH- z-+(6RcoZeLeX`Fz=;QsdF*(!~cCEnBHsf<$7W)DhP15jNGzoy?V4cec07NPL!;J{5 zrOf_##Uj2@5N_NO}>1WB6Ky5yiPzdoibKJNk1>Kb0#sQ!&$6N-HjrIj8&H(!I zfaoP0UD1=5&5{K%Hyx!Uke&ta2pE)fpDHdx`;CJ=z}|657tK zN$9oO?WYEkdW4TqCc-1LDqv9F_=Ej`4|S%8o=pCCp8`Vop*M@JG27@a7KjJY_e$6S zYM#a?peP3v%r4)&OO4xXP#)(8gMN@*MV~8qW`|nh0%=jtPW+VmKhHaT5CeftazoE# zyo&bo;`L;RaZEP~L(Xv>H5LO&_Rs=aP|fVriH-4BXr_t7z$Te|W!6-oudXh zlF~bjZf6DC3Puln`62=&bDbFdG7Q^qS zVi_?(;(#1}78Xp5zXwp+2R6T<`xdoadk~;Q43(T6xCc|=`QKU#h`PqK$E`Y5r=<`g zcpy>a8&5&X=y|FC2lbmCw&3a=$3Ja;g$BxWIjnPDCY0Iqm!y2@55YSINX3_vw?_>6 z!K$x{b<}Hpnho=KK>;W^E?;_D6q+DNNO|vG{E;^C5kP?^NG+fbiTFVXdNOr8eKrQG zkL@)rZDIgsbmp(rf7$>AHalT`f44V;60{Ws>bmxofQCbRh{e@=sGoB6MF-6)d+*_( zZ>++pgI{jP0P5;bdq&(hZ$}mqkdTE5#!+xeo(~`T(CACp_2#QvLyxW#VC#ts)JJoU zqbqo->&^*i4xFK#;I+~nDnXlvmxEvIq5Z%%Q-*KkW{?m5Y-9(A=yH=N!iQtf12%ipPv7>#F4?0S z?bm9cQHI2(odg;~fY6!nt2>Wt;ahxI1dKH)taEjz5Mb?;;qr3*tqI}fslBy`9M#AgbSXH35Sn5dv z1cx(0oUc9c1Hh*Od)@FerabZq$~|5n+}|($%-7b3wI@rclmhgQvG*oeJ&mucIa)DT zEv_&>ibI@NK7|(~`7Tt@Vj`d`jfNs>)X@g=dBTR{YY7Ixj)AGrznB9s9Xgl?Ndq>D zGK{2mF_;lEpx~nv4F+H8aebJK&XtoLkc~$%8P>NTrV6Y}sPMc=1&eTFdapKxGY~O& z68_CK@f#LUPn(|*WD+V+4NIhT-3^+c;`}d%h@m+~=07E(1;8?=bvP8BG*@@;xY+|R zdsuQzO>44Kb0r!mYSL?_cnaE;Ve=n~Zh);dVbyP>GDT@L2LkmXmK1@<5*@EZ3BMk0 z976BRg?+;pEd-+`>wr;zHsx;u#4@JVFdAF}h-LBAH);{<86|MZRuB|Wsn*v)k}swG zi<(+}X>b$RBv^h1Uho7yBd~T2!~pGp>rzViJRG;%U+g-pT>|?v$Ka&;)_cJKfb&vJ z4W#%9!T2Bf+_+|NOwD7tzvDlA_5;{J$vvmfsPx2gT=(yXLn6TTP!~|I78!cv0$@Ao z*Ak~8R1Kum@=L=Qu+1Jjw`Gp9WBEDl1!}av?%em}6Ad>JjLyhjEd-{eEBEXh zq#6YQRSK)Kjo0H|d;i2;eRz>D}NrlVSkoNdTIrDh0 zS8q`CA6E)cK+4A@GIYBP;1%oWRO7X8fLeTtF!SZZ6^xnm1%Z|e=~y#VgMl};5Uu9~ zRamCxDUvjMREky9T>^NZSds4s;(_(b9_x(D1|FQ?7b4R7xx-s2|B0ih-TI05BtjAr z6eckFezOhO9m!td{{B}Tjs9auhMs)qypx5?uApNu7~nVFZEH{NhC?SX<`}W<0;bv6 zgxu~+dO%t&;`N8$Zt?*%4iNk(wuhV-=$sX&W)|y-{4a+uO57QSDX_|i2jY6;{kC9= 
[... base85-encoded binary image data omitted ...]
literal 0
HcmV?d00001

diff --git a/ww/managers/mimalloc/doc/mimalloc-logo.svg b/ww/managers/mimalloc/doc/mimalloc-logo.svg
new file mode 100644
index 00000000..672c7e41
--- /dev/null
+++ b/ww/managers/mimalloc/doc/mimalloc-logo.svg
@@ -0,0 +1,161 @@
[... 161 lines of SVG image markup omitted ...]
diff --git a/ww/managers/mimalloc/doc/spades-logo.png b/ww/managers/mimalloc/doc/spades-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..d8c73fef436b101d2a89b70466a00ad18633bf50
GIT binary patch
literal 34583
[... base85-encoded binary image data omitted ...]
zN0X=Bxb(US_te82n^uxKzL^Sqsvxpf*43^;t51DZLxfR7*GL;!oSqivM-W$6I)kS1 zFSVtcYT57zOQ54THl3gz0<04vL!ax4#XxAKmZ{xpJnR9HXsw`)07KZ$znFwqtXc_D zS3~LyyLzQ^&HHz>!nWgciGMi$|5D7{Zwv!K>96G@AB@Gd*$3?IHJI)pG6Jwpw zwPtkTR|F(+7p8oP$(I3+Ga5Qgy70+(3KHG_7wF)y5y0zMtrG`yQ%&&&?O^ukS_2@Z zdCXlRmTM`!bHwPERqsQ0Z!R#0PXMsJik~Q3gk+B_^)RF&i@DVCe=>xILi$|j&R}f5BG&L2%#N*%8f0Rb z*jo(Oxk099q%voTMWqhgY{*uQ2!Jk?>-8npTBW0+?JMlyOv$@u7P$5$;xWQ-x7h-3 zf_-dY=3x=IIYQN!t4w~@uD>6G#ICBQmrveYl@c<~&-iwx33XlaBL)q;$FAYXjw`b5 z)ghOo+z!cr_6+11iu1w{Bw-^pr+cJ7*ZN}g@x7`0z5)?42bDvVyL+;`H_1-vM*RZy z!AK(J_SX}H`01sH!OX<^TS(nuz9i|^XrzXLjpImXXaBryI z$qVu-0lJAi<|*U`I!A-?unt^Aa|&WI!Qk64^dt)&9w&V*?%m2WG-`|I#ql{^bgV5e z;H}eC^YRX6-{jWPG8^VKc1`DX15n=Q9CMh{;>-XC?$+URF>DobD~rs0s)BN+hKK?+tDA*JsQ+#^>0Oc;z>ls18HG8nTTnu5 z(K9Dshu-<3eld=UMKePqJYmEcSp(-LDQ;TePT?S)sr(nTIC|J7d`NdPf6&l5M6~q) zRr~vkI=EsuJO>FXS@dw@?35{k1;OSfP!-u9rRYH6A`9Qvg_nn4wI3{_HN3YBE)1k; zy!DWG+WE*EFhYt>`5-%C{q@;M#C#;YF%Jh>Pwfl!0uM;g;XwhAw_B@KLu$<1`l_@u zl*}JD9~a%%_>D-ex7JB!vf3lJ+YAm02utQTYf#*BLC6knb{~80Ig=Q}rGZn|5Yo1U z4G6U=1hK;)D85J)9Z;%8I<_4U8>Y26z;XCctx_<0_1SGDdHz0x^%m*F>) zX0hPKc`H7Zce8SaJCHrBUwSJ1#N#l#@%WS&V{oC`yWCJ2r9eQAYdBkoI=1@`XE(VoNIu)`D3VM0ZrF8w9BoVu#6=GdZmRd-0 z3*fZQspfZ=Z1B6s7{Z%pA zp8Yvs|DV93zWkT(16U~x{s-s3{0Tui5C0^JBRHHow@2EfM8)Ye&RK8Fo#21wzqfn) zxp#tJHQgr5#~NPAU%A-fgjztW((l*v(|TejcKo_FxzD+}%J-45?d2msQ`G|t)Hn}s zyVqZ~{Uva8W_|jidIkoG|2i|LR=>^T3Ff%i{pCh>&7$&vu6og3W$D^qc^blkkNRpv zO+CG4efFKU54ZAo0y!?82Cm1s`!-hLMrP}aJAV#sO1gHh-AnJ>?RnpCtjdY_<>2t2 z*;lr_?gy}9m@VwUt>_ZHWMy#4>WQw|cUEq=$9q@9xJNw0Y`?;RKZ|p39anj^?2uE( z-r)XsS2e5lfijv6-yF7Q=@<5-M^{`*X4>~Dx@L`vt)k1GCq4de3_r<5Pdz)Wb`n!B zg8@g2QN#cALm&5N6jyL5l0Dx7oFAaO-Z#1bzopr0KUAw A-~a#s literal 0 HcmV?d00001 diff --git a/ww/managers/mimalloc/doc/unreal-logo.svg b/ww/managers/mimalloc/doc/unreal-logo.svg new file mode 100644 index 00000000..5d5192a2 --- /dev/null +++ b/ww/managers/mimalloc/doc/unreal-logo.svg @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + diff --git a/ww/managers/mimalloc/docker/alpine-arm32v7/Dockerfile b/ww/managers/mimalloc/docker/alpine-arm32v7/Dockerfile new file mode 100644 index 00000000..56f071db --- /dev/null +++ b/ww/managers/mimalloc/docker/alpine-arm32v7/Dockerfile @@ -0,0 +1,28 @@ +# install from an image +# download first an appropiate tar.gz image into the current directory +# from: +FROM scratch + +# Substitute the image name that was downloaded +ADD alpine-minirootfs-20240329-armv7.tar.gz / + +# Install tools +RUN apk add build-base make cmake +RUN apk add git +RUN apk add vim + +RUN mkdir -p /home/dev +WORKDIR /home/dev + +# Get mimalloc +RUN git clone https://github.com/microsoft/mimalloc -b dev-slice +RUN mkdir -p mimalloc/out/release +RUN mkdir -p mimalloc/out/debug + +# Build mimalloc debug +WORKDIR /home/dev/mimalloc/out/debug +RUN cmake ../.. -DMI_DEBUG_FULL=ON +RUN make -j +RUN make test + +CMD ["/bin/sh"] diff --git a/ww/managers/mimalloc/docker/alpine/Dockerfile b/ww/managers/mimalloc/docker/alpine/Dockerfile new file mode 100644 index 00000000..b222b791 --- /dev/null +++ b/ww/managers/mimalloc/docker/alpine/Dockerfile @@ -0,0 +1,23 @@ +# alpine image +FROM alpine + +# Install tools +RUN apk add build-base make cmake +RUN apk add git +RUN apk add vim + +RUN mkdir -p /home/dev +WORKDIR /home/dev + +# Get mimalloc +RUN git clone https://github.com/microsoft/mimalloc -b dev-slice +RUN mkdir -p mimalloc/out/release +RUN mkdir -p mimalloc/out/debug + +# Build mimalloc debug +WORKDIR /home/dev/mimalloc/out/debug +RUN cmake ../.. 
-DMI_DEBUG_FULL=ON +RUN make -j +RUN make test + +CMD ["/bin/sh"] \ No newline at end of file diff --git a/ww/managers/mimalloc/docker/manylinux-x64/Dockerfile b/ww/managers/mimalloc/docker/manylinux-x64/Dockerfile new file mode 100644 index 00000000..22d37e5a --- /dev/null +++ b/ww/managers/mimalloc/docker/manylinux-x64/Dockerfile @@ -0,0 +1,23 @@ +FROM quay.io/pypa/manylinux2014_x86_64 + +# Install tools +RUN yum install -y openssl-devel +RUN yum install -y gcc gcc-c++ kernel-devel make +RUN yum install -y git cmake +RUN yum install -y vim + +RUN mkdir -p /home/dev +WORKDIR /home/dev + +# Get mimalloc +RUN git clone https://github.com/microsoft/mimalloc -b dev-slice +RUN mkdir -p mimalloc/out/release +RUN mkdir -p mimalloc/out/debug + +# Build mimalloc debug +WORKDIR /home/dev/mimalloc/out/debug +RUN cmake ../.. -DMI_DEBUG_FULL=ON +RUN make -j +RUN make test + +CMD ["/bin/sh"] \ No newline at end of file diff --git a/ww/managers/mimalloc/docker/readme.md b/ww/managers/mimalloc/docker/readme.md new file mode 100644 index 00000000..b3d90094 --- /dev/null +++ b/ww/managers/mimalloc/docker/readme.md @@ -0,0 +1,10 @@ +Various example docker files used for testing. + +Usage: + +``` +> cd +> docker build -t -mimalloc . +> docker run -it -mimalloc +>> make test +``` diff --git a/ww/managers/mimalloc/ide/vs2017/mimalloc-override-test.vcxproj b/ww/managers/mimalloc/ide/vs2017/mimalloc-override-test.vcxproj new file mode 100644 index 00000000..04c16a9f --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2017/mimalloc-override-test.vcxproj @@ -0,0 +1,190 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7868F-750E-4C21-A04D-22707CC66879} + mimalloc-override-test + mimalloc-override-test + 10.0.19041.0 + + + + Application + true + v141 + + + Application + false + v141 + true + + + Application + true + v141 + + + Application + false + v141 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + false + Default + false + + + Console + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + Sync + Default + false + + + Console + + + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + + + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + 
{abb5eae7-b3e6-432e-b636-333449892ea7} + + + + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/ide/vs2017/mimalloc-override.vcxproj b/ww/managers/mimalloc/ide/vs2017/mimalloc-override.vcxproj new file mode 100644 index 00000000..6d20eb57 --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2017/mimalloc-override.vcxproj @@ -0,0 +1,260 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {ABB5EAE7-B3E6-432E-B636-333449892EA7} + mimalloc-override + mimalloc-override + 10.0.19041.0 + + + + DynamicLibrary + true + v141 + + + DynamicLibrary + false + v141 + + + DynamicLibrary + true + v141 + + + DynamicLibrary + false + v141 + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + + Level3 + Disabled + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + Default + + + $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) + + + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect32.dll" "$(OutputPath)" + + + Copy mimalloc-redirect32.dll to the output directory + + + + + Level3 + Disabled + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + Default + + + $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;bcrypt.lib;%(AdditionalDependencies) + + + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect.dll" "$(OutputPath)" + + + copy mimalloc-redirect.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + MultiThreadedDLL + Default + false + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect32.dll" "$(OutputPath)" + + + Copy mimalloc-redirect32.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + MultiThreadedDLL + Default + false + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;bcrypt.lib;%(AdditionalDependencies) + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect.dll" "$(OutputPath)" + + + copy mimalloc-redirect.dll to the output directory + + + + + + + + + + + + + + + + false + false + false + false + + + true + true + true + true + + + + + + + + + + + + + true + true + true + true + + + + + + + + + + + \ No newline at end of file diff --git 
a/ww/managers/mimalloc/ide/vs2017/mimalloc-test-stress.vcxproj b/ww/managers/mimalloc/ide/vs2017/mimalloc-test-stress.vcxproj new file mode 100644 index 00000000..061b8605 --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2017/mimalloc-test-stress.vcxproj @@ -0,0 +1,159 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7958F-750E-4C21-A04D-22707CC66878} + mimalloc-test-stress + mimalloc-test-stress + 10.0.19041.0 + + + + Application + true + v141 + + + Application + false + v141 + true + + + Application + true + v141 + + + Application + false + v141 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + false + false + false + false + + + + + {abb5eae7-b3e6-432e-b636-333449892ea6} + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/ide/vs2017/mimalloc-test.vcxproj b/ww/managers/mimalloc/ide/vs2017/mimalloc-test.vcxproj new file mode 100644 index 00000000..04bd6537 --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2017/mimalloc-test.vcxproj @@ -0,0 +1,158 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7858F-750E-4C21-A04D-22707CC66878} + mimalloctest + mimalloc-test + 10.0.19041.0 + + + + Application + true + v141 + + + Application + false + v141 + true + + + Application + true + v141 + + + Application + false + v141 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + stdcpp17 + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + stdcpp14 + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + stdcpp17 + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + stdcpp17 + + + true + true + Console + + + + + {abb5eae7-b3e6-432e-b636-333449892ea6} + + + + + + + + + \ No newline at end of file diff --git 
a/ww/managers/mimalloc/ide/vs2017/mimalloc.sln b/ww/managers/mimalloc/ide/vs2017/mimalloc.sln new file mode 100644 index 00000000..515c03f2 --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2017/mimalloc.sln @@ -0,0 +1,71 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 15 +VisualStudioVersion = 15.0.26228.102 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override", "mimalloc-override.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32 + 
{FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {4297F93D-486A-4243-995F-7D32F59AE82A} + EndGlobalSection +EndGlobal diff --git a/ww/managers/mimalloc/ide/vs2017/mimalloc.vcxproj b/ww/managers/mimalloc/ide/vs2017/mimalloc.vcxproj new file mode 100644 index 00000000..ece9a14d --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2017/mimalloc.vcxproj @@ -0,0 +1,260 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {ABB5EAE7-B3E6-432E-B636-333449892EA6} + mimalloc + 10.0.19041.0 + mimalloc + + + + StaticLibrary + true + v141 + + + StaticLibrary + false + v141 + true + + + StaticLibrary + true + v141 + + + StaticLibrary + false + v141 + true + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + false + + + false + + + false + + + false + + + + Level3 + Disabled + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;MI_DEBUG=3;%(PreprocessorDefinitions); + CompileAsC + false + stdcpp17 + + + + + + + + + + + Level4 + Disabled + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;MI_DEBUG=3;%(PreprocessorDefinitions); + CompileAsCpp + false + stdcpp14 + + + + + + + + + + + + + + + + + + + Level3 + MaxSpeed + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsC + true + + + true + true + + + + + + + + + + + Level4 + MaxSpeed + true + true + ../../include + _CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsC + true + + + true + true + + + + + + + + + + + + + + + + + false + false + false + false + + + true + true + true + true + + + + + + + + + + + + true + true + true + true + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git 
a/ww/managers/mimalloc/ide/vs2019/mimalloc-override-test.vcxproj b/ww/managers/mimalloc/ide/vs2019/mimalloc-override-test.vcxproj new file mode 100644 index 00000000..7a9202f1 --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2019/mimalloc-override-test.vcxproj @@ -0,0 +1,190 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7868F-750E-4C21-A04D-22707CC66879} + mimalloc-override-test + 10.0 + mimalloc-override-test + + + + Application + true + v142 + + + Application + false + v142 + true + + + Application + true + v142 + + + Application + false + v142 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + Sync + Default + false + + + Console + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + Sync + Default + false + + + Console + + + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + + + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + + + + {abb5eae7-b3e6-432e-b636-333449892ea7} + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/ide/vs2019/mimalloc-override.vcxproj b/ww/managers/mimalloc/ide/vs2019/mimalloc-override.vcxproj new file mode 100644 index 00000000..a84a5178 --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2019/mimalloc-override.vcxproj @@ -0,0 +1,260 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {ABB5EAE7-B3E6-432E-B636-333449892EA7} + mimalloc-override + 10.0 + mimalloc-override + + + + DynamicLibrary + true + v142 + + + DynamicLibrary + false + v142 + + + DynamicLibrary + true + v142 + + + DynamicLibrary + false + v142 + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + 
$(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + + Level3 + Disabled + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + Default + + + $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) + + + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect32.dll" "$(OutputPath)" + + + Copy mimalloc-redirect32.dll to the output directory + + + + + Level3 + Disabled + true + true + ../../include + MI_DEBUG=3;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + Default + + + $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies) + + + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect.dll" "$(OutputPath)" + + + copy mimalloc-redirect.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + MultiThreadedDLL + Default + false + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect32.dll" "$(OutputPath)" + + + Copy mimalloc-redirect32.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + MultiThreadedDLL + Default + false + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies) + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect.dll" "$(OutputPath)" + + + copy mimalloc-redirect.dll to the output directory + + + + + + + + + + + + + + + + false + false + false + false + + + true + true + true + true + + + + + + + + + + + + + true + true + true + true + + + + + + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/ide/vs2019/mimalloc-test-api.vcxproj b/ww/managers/mimalloc/ide/vs2019/mimalloc-test-api.vcxproj new file mode 100644 index 00000000..812a9cb1 --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2019/mimalloc-test-api.vcxproj @@ -0,0 +1,155 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FFF7958F-750E-4C21-A04D-22707CC66878} + mimalloc-test-api + 10.0 + mimalloc-test-api + + + + Application + true + v142 + + + Application + false + v142 + true + + + Application + true + v142 + + + Application + false + v142 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + 
%(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + + + + + {abb5eae7-b3e6-432e-b636-333449892ea6} + + + + + + diff --git a/ww/managers/mimalloc/ide/vs2019/mimalloc-test-stress.vcxproj b/ww/managers/mimalloc/ide/vs2019/mimalloc-test-stress.vcxproj new file mode 100644 index 00000000..ef7ab357 --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2019/mimalloc-test-stress.vcxproj @@ -0,0 +1,159 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7958F-750E-4C21-A04D-22707CC66878} + mimalloc-test-stress + 10.0 + mimalloc-test-stress + + + + Application + true + v142 + + + Application + false + v142 + true + + + Application + true + v142 + + + Application + false + v142 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + false + false + false + false + + + + + {abb5eae7-b3e6-432e-b636-333449892ea6} + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/ide/vs2019/mimalloc-test.vcxproj b/ww/managers/mimalloc/ide/vs2019/mimalloc-test.vcxproj new file mode 100644 index 00000000..13af6ab4 --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2019/mimalloc-test.vcxproj @@ -0,0 +1,158 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7858F-750E-4C21-A04D-22707CC66878} + mimalloctest + 10.0 + mimalloc-test + + + + Application + true + v142 + + + Application + false + v142 + true + + + Application + true + v142 + + + Application + false + v142 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + stdcpp17 + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + stdcpp17 + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + stdcpp17 + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true 
+ true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + stdcpp17 + + + true + true + Console + + + + + {abb5eae7-b3e6-432e-b636-333449892ea6} + + + + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/ide/vs2019/mimalloc.sln b/ww/managers/mimalloc/ide/vs2019/mimalloc.sln new file mode 100644 index 00000000..6ff01d3b --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2019/mimalloc.sln @@ -0,0 +1,81 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.29709.97 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override", "mimalloc-override.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-api", "mimalloc-test-api.vcxproj", "{FFF7958F-750E-4C21-A04D-22707CC66878}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32 + 
{ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {4297F93D-486A-4243-995F-7D32F59AE82A} + EndGlobalSection +EndGlobal diff --git a/ww/managers/mimalloc/ide/vs2019/mimalloc.vcxproj b/ww/managers/mimalloc/ide/vs2019/mimalloc.vcxproj new file mode 100644 index 00000000..0076b1db --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2019/mimalloc.vcxproj @@ -0,0 +1,258 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {ABB5EAE7-B3E6-432E-B636-333449892EA6} + mimalloc + 10.0 + mimalloc + + + + StaticLibrary + true + v142 + + + StaticLibrary + false + v142 + true + + + StaticLibrary + true + v142 + + + StaticLibrary + false + v142 + true + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + + Level4 + Disabled + true + true + ../../include + MI_DEBUG=3;%(PreprocessorDefinitions); + CompileAsCpp + false + Default + + + + + + + + + + + Level4 
+ Disabled + true + Default + ../../include + MI_DEBUG=3;%(PreprocessorDefinitions); + CompileAsCpp + false + Default + + + + + + + + + + + + + + + + + + + Level4 + MaxSpeed + true + true + ../../include + %(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsCpp + true + Default + + + true + true + + + + + + + + + + + Level4 + MaxSpeed + true + true + ../../include + %(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsCpp + true + Default + + + true + true + + + + + + + + + + + + + + + + + false + false + false + false + + + true + true + true + true + + + + + + false + + + + + + + true + true + true + true + + + + true + true + true + true + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/ide/vs2022/mimalloc-override-test.vcxproj b/ww/managers/mimalloc/ide/vs2022/mimalloc-override-test.vcxproj new file mode 100644 index 00000000..a3c56f7b --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2022/mimalloc-override-test.vcxproj @@ -0,0 +1,190 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7868F-750E-4C21-A04D-22707CC66879} + mimalloc-override-test + 10.0 + mimalloc-override-test + + + + Application + true + v143 + + + Application + false + v143 + true + + + Application + true + v143 + + + Application + false + v143 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + Sync + Default + false + + + Console + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + Disabled + true + true + ..\..\include + MultiThreadedDebugDLL + Sync + Default + false + + + Console + + + kernel32.lib;%(AdditionalDependencies) + + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + MultiThreadedDLL + + + true + true + Console + + + kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + + + + {abb5eae7-b3e6-432e-b636-333449892ea7} + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/ide/vs2022/mimalloc-override.vcxproj b/ww/managers/mimalloc/ide/vs2022/mimalloc-override.vcxproj new file mode 100644 index 00000000..df2a0816 --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2022/mimalloc-override.vcxproj @@ -0,0 +1,271 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + 
{ABB5EAE7-B3E6-432E-B636-333449892EA7} + mimalloc-override + 10.0 + mimalloc-override + + + + DynamicLibrary + true + v143 + + + DynamicLibrary + false + v143 + + + DynamicLibrary + true + v143 + + + DynamicLibrary + false + v143 + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .dll + mimalloc-override + + + + Level3 + Disabled + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + Default + + + $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) + + + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect32.dll" "$(OutputPath)" + + + Copy mimalloc-redirect32.dll to the output directory + + + + + Level3 + Disabled + true + true + ../../include + MI_DEBUG=4;MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions); + MultiThreadedDebugDLL + false + Default + + + $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies) + + + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect.dll" "$(OutputPath)" + + + copy mimalloc-redirect.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + MultiThreadedDLL + Default + false + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect32.lib;%(AdditionalDependencies) + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect32.dll" "$(OutputPath)" + + + Copy mimalloc-redirect32.dll to the output directory + + + + + Level3 + MaxSpeed + true + true + true + ../../include + MI_SHARED_LIB;MI_SHARED_LIB_EXPORT;MI_MALLOC_OVERRIDE;%(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + MultiThreadedDLL + Default + false + + + true + true + $(ProjectDir)\..\..\bin\mimalloc-redirect.lib;%(AdditionalDependencies) + + + Default + false + + + COPY /Y "$(ProjectDir)..\..\bin\mimalloc-redirect.dll" "$(OutputPath)" + + + copy mimalloc-redirect.dll to the output directory + + + + + + + + + + + + + + + + + + false + false + false + false + + + true + true + true + true + + + + + + + + + + + true + true + true + true + + + + + true + true + true + true + + + + + + + + + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/ide/vs2022/mimalloc-test-api.vcxproj b/ww/managers/mimalloc/ide/vs2022/mimalloc-test-api.vcxproj new file mode 100644 index 00000000..d9b9cae4 --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2022/mimalloc-test-api.vcxproj @@ -0,0 +1,162 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FFF7958F-750E-4C21-A04D-22707CC66878} + mimalloc-test-api + 10.0 + mimalloc-test-api + + + + Application + true + v143 + + + Application + false + v143 + true + + + 
Application + true + v143 + + + Application + false + v143 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + true + true + true + true + + + false + + + + + {abb5eae7-b3e6-432e-b636-333449892ea6} + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/ide/vs2022/mimalloc-test-stress.vcxproj b/ww/managers/mimalloc/ide/vs2022/mimalloc-test-stress.vcxproj new file mode 100644 index 00000000..14bd3e69 --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2022/mimalloc-test-stress.vcxproj @@ -0,0 +1,159 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7958F-750E-4C21-A04D-22707CC66878} + mimalloc-test-stress + 10.0 + mimalloc-test-stress + + + + Application + true + v143 + + + Application + false + v143 + true + + + Application + true + v143 + + + Application + false + v143 + true + + + + + + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + %(PreprocessorDefinitions);NDEBUG + + + true + true + Console + + + + + false + false + false + false + + + + + {abb5eae7-b3e6-432e-b636-333449892ea7} + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/ide/vs2022/mimalloc-test.vcxproj b/ww/managers/mimalloc/ide/vs2022/mimalloc-test.vcxproj new file mode 100644 index 00000000..506dd7d4 --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2022/mimalloc-test.vcxproj @@ -0,0 +1,158 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {FEF7858F-750E-4C21-A04D-22707CC66878} + mimalloctest + 10.0 + mimalloc-test + + + + Application + true + v143 + + + Application + false + v143 + true + + + Application + true + v143 + + + Application + false + v143 + true + + + + 
+ + + + + + + + + + + + + + + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + $(ProjectDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(ProjectDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + + + + Level3 + Disabled + true + true + ..\..\include + stdcpp17 + + + Console + + + + + Level3 + Disabled + true + true + ..\..\include + stdcpp17 + + + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + stdcpp17 + + + true + true + Console + + + + + Level3 + MaxSpeed + true + true + true + true + ..\..\include + _MBCS;%(PreprocessorDefinitions);NDEBUG + stdcpp17 + + + true + true + Console + + + + + {abb5eae7-b3e6-432e-b636-333449892ea6} + + + + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/ide/vs2022/mimalloc.sln b/ww/managers/mimalloc/ide/vs2022/mimalloc.sln new file mode 100644 index 00000000..6ff01d3b --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2022/mimalloc.sln @@ -0,0 +1,81 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.29709.97 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc", "mimalloc.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA6}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test", "mimalloc-test.vcxproj", "{FEF7858F-750E-4C21-A04D-22707CC66878}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override", "mimalloc-override.vcxproj", "{ABB5EAE7-B3E6-432E-B636-333449892EA7}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-override-test", "mimalloc-override-test.vcxproj", "{FEF7868F-750E-4C21-A04D-22707CC66879}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-stress", "mimalloc-test-stress.vcxproj", "{FEF7958F-750E-4C21-A04D-22707CC66878}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mimalloc-test-api", "mimalloc-test-api.vcxproj", "{FFF7958F-750E-4C21-A04D-22707CC66878}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.ActiveCfg = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x64.Build.0 = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.ActiveCfg = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Debug|x86.Build.0 = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.ActiveCfg = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x64.Build.0 = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.ActiveCfg = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA6}.Release|x86.Build.0 = Release|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + 
{FEF7858F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + {FEF7858F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.ActiveCfg = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x64.Build.0 = Debug|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.ActiveCfg = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Debug|x86.Build.0 = Debug|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.ActiveCfg = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x64.Build.0 = Release|x64 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.ActiveCfg = Release|Win32 + {ABB5EAE7-B3E6-432E-B636-333449892EA7}.Release|x86.Build.0 = Release|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x64.Build.0 = Debug|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Debug|x86.Build.0 = Debug|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.ActiveCfg = Release|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x64.Build.0 = Release|x64 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.ActiveCfg = Release|Win32 + {FEF7868F-750E-4C21-A04D-22707CC66879}.Release|x86.Build.0 = Release|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + {FEF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.ActiveCfg = Debug|x64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x64.Build.0 = Debug|x64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.ActiveCfg = Debug|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Debug|x86.Build.0 = Debug|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.ActiveCfg = Release|x64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x64.Build.0 = Release|x64 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.ActiveCfg = Release|Win32 + {FFF7958F-750E-4C21-A04D-22707CC66878}.Release|x86.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {4297F93D-486A-4243-995F-7D32F59AE82A} + EndGlobalSection +EndGlobal diff --git a/ww/managers/mimalloc/ide/vs2022/mimalloc.vcxproj b/ww/managers/mimalloc/ide/vs2022/mimalloc.vcxproj new file mode 100644 index 00000000..33ad9cef --- /dev/null +++ b/ww/managers/mimalloc/ide/vs2022/mimalloc.vcxproj @@ -0,0 +1,264 @@ + + + + + Debug + Win32 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + 15.0 + {ABB5EAE7-B3E6-432E-B636-333449892EA6} + mimalloc + 10.0 + mimalloc + + + + StaticLibrary + true + v143 + + + StaticLibrary + false + v143 + true + + + StaticLibrary + true + 
v143 + + + StaticLibrary + false + v143 + true + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + $(SolutionDir)..\..\out\msvc-$(Platform)\$(Configuration)\ + $(SolutionDir)..\..\out\msvc-$(Platform)\$(ProjectName)\$(Configuration)\ + .lib + mimalloc-static + + + + Level4 + Disabled + true + Default + ../../include + MI_DEBUG=3;%(PreprocessorDefinitions); + CompileAsCpp + false + stdcpp20 + + + + + + + + + + + Level4 + Disabled + true + Default + ../../include + MI_DEBUG=4;MI_SECURE=0;%(PreprocessorDefinitions); + CompileAsCpp + false + stdcpp20 + + + + + + + + + + + + + + + + + + + Level4 + MaxSpeed + true + Default + ../../include + %(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsCpp + true + stdcpp20 + + + true + true + + + + + + + + + + + Level4 + MaxSpeed + true + Default + ../../include + %(PreprocessorDefinitions);NDEBUG + AssemblyAndSourceCode + $(IntDir) + false + false + Default + CompileAsCpp + true + stdcpp20 + + + true + true + + + + + + + + + + + + + + + + + false + false + false + false + + + true + true + true + true + + + + + + false + + + true + true + true + true + + + + + + + true + true + true + true + + + + true + true + true + true + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/ww/managers/mimalloc/include/mimalloc-new-delete.h b/ww/managers/mimalloc/include/mimalloc-new-delete.h new file mode 100644 index 00000000..c16f4a66 --- /dev/null +++ b/ww/managers/mimalloc/include/mimalloc-new-delete.h @@ -0,0 +1,66 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2020 Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_NEW_DELETE_H +#define MIMALLOC_NEW_DELETE_H + +// ---------------------------------------------------------------------------- +// This header provides convenient overrides for the new and +// delete operations in C++. +// +// This header should be included in only one source file! +// +// On Windows, or when linking dynamically with mimalloc, these +// can be more performant than the standard new-delete operations. 
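A minimal usage sketch of the override header described above, assuming the consumer adds mimalloc's include/ directory to its include path and includes the header in exactly one translation unit (file and variable names are illustrative, not part of the vendored sources):

// main.cpp -- hypothetical consumer; include this header in one source file only
#include <mimalloc-new-delete.h>

int main() {
  int* xs = new int[1000];   // resolves to the overriding operator new[], which calls mi_new
  delete[] xs;               // resolves to the overriding operator delete[], which calls mi_free
  return 0;
}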
+// See +// --------------------------------------------------------------------------- +#if defined(__cplusplus) + #include + #include + + #if defined(_MSC_VER) && defined(_Ret_notnull_) && defined(_Post_writable_byte_size_) + // stay consistent with VCRT definitions + #define mi_decl_new(n) mi_decl_nodiscard mi_decl_restrict _Ret_notnull_ _Post_writable_byte_size_(n) + #define mi_decl_new_nothrow(n) mi_decl_nodiscard mi_decl_restrict _Ret_maybenull_ _Success_(return != NULL) _Post_writable_byte_size_(n) + #else + #define mi_decl_new(n) mi_decl_nodiscard mi_decl_restrict + #define mi_decl_new_nothrow(n) mi_decl_nodiscard mi_decl_restrict + #endif + + void operator delete(void* p) noexcept { mi_free(p); }; + void operator delete[](void* p) noexcept { mi_free(p); }; + + void operator delete (void* p, const std::nothrow_t&) noexcept { mi_free(p); } + void operator delete[](void* p, const std::nothrow_t&) noexcept { mi_free(p); } + + mi_decl_new(n) void* operator new(std::size_t n) noexcept(false) { return mi_new(n); } + mi_decl_new(n) void* operator new[](std::size_t n) noexcept(false) { return mi_new(n); } + + mi_decl_new_nothrow(n) void* operator new (std::size_t n, const std::nothrow_t& tag) noexcept { (void)(tag); return mi_new_nothrow(n); } + mi_decl_new_nothrow(n) void* operator new[](std::size_t n, const std::nothrow_t& tag) noexcept { (void)(tag); return mi_new_nothrow(n); } + + #if (__cplusplus >= 201402L || _MSC_VER >= 1916) + void operator delete (void* p, std::size_t n) noexcept { mi_free_size(p,n); }; + void operator delete[](void* p, std::size_t n) noexcept { mi_free_size(p,n); }; + #endif + + #if (__cplusplus > 201402L || defined(__cpp_aligned_new)) + void operator delete (void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast(al)); } + void operator delete[](void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast(al)); } + void operator delete (void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast(al)); }; + void operator delete[](void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast(al)); }; + void operator delete (void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast(al)); } + void operator delete[](void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast(al)); } + + void* operator new (std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast(al)); } + void* operator new[](std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast(al)); } + void* operator new (std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast(al)); } + void* operator new[](std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast(al)); } + #endif +#endif + +#endif // MIMALLOC_NEW_DELETE_H diff --git a/ww/managers/mimalloc/include/mimalloc-override.h b/ww/managers/mimalloc/include/mimalloc-override.h new file mode 100644 index 00000000..48a8a622 --- /dev/null +++ b/ww/managers/mimalloc/include/mimalloc-override.h @@ -0,0 +1,68 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2020 Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. 
A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_OVERRIDE_H +#define MIMALLOC_OVERRIDE_H + +/* ---------------------------------------------------------------------------- +This header can be used to statically redirect malloc/free and new/delete +to the mimalloc variants. This can be useful if one can include this file on +each source file in a project (but be careful when using external code to +not accidentally mix pointers from different allocators). +-----------------------------------------------------------------------------*/ + +#include + +// Standard C allocation +#define malloc(n) mi_malloc(n) +#define calloc(n,c) mi_calloc(n,c) +#define realloc(p,n) mi_realloc(p,n) +#define free(p) mi_free(p) + +#define strdup(s) mi_strdup(s) +#define strndup(s,n) mi_strndup(s,n) +#define realpath(f,n) mi_realpath(f,n) + +// Microsoft extensions +#define _expand(p,n) mi_expand(p,n) +#define _msize(p) mi_usable_size(p) +#define _recalloc(p,n,c) mi_recalloc(p,n,c) + +#define _strdup(s) mi_strdup(s) +#define _strndup(s,n) mi_strndup(s,n) +#define _wcsdup(s) (wchar_t*)mi_wcsdup((const unsigned short*)(s)) +#define _mbsdup(s) mi_mbsdup(s) +#define _dupenv_s(b,n,v) mi_dupenv_s(b,n,v) +#define _wdupenv_s(b,n,v) mi_wdupenv_s((unsigned short*)(b),n,(const unsigned short*)(v)) + +// Various Posix and Unix variants +#define reallocf(p,n) mi_reallocf(p,n) +#define malloc_size(p) mi_usable_size(p) +#define malloc_usable_size(p) mi_usable_size(p) +#define malloc_good_size(sz) mi_malloc_good_size(sz) +#define cfree(p) mi_free(p) + +#define valloc(n) mi_valloc(n) +#define pvalloc(n) mi_pvalloc(n) +#define reallocarray(p,s,n) mi_reallocarray(p,s,n) +#define reallocarr(p,s,n) mi_reallocarr(p,s,n) +#define memalign(a,n) mi_memalign(a,n) +#define aligned_alloc(a,n) mi_aligned_alloc(a,n) +#define posix_memalign(p,a,n) mi_posix_memalign(p,a,n) +#define _posix_memalign(p,a,n) mi_posix_memalign(p,a,n) + +// Microsoft aligned variants +#define _aligned_malloc(n,a) mi_malloc_aligned(n,a) +#define _aligned_realloc(p,n,a) mi_realloc_aligned(p,n,a) +#define _aligned_recalloc(p,s,n,a) mi_aligned_recalloc(p,s,n,a) +#define _aligned_msize(p,a,o) mi_usable_size(p) +#define _aligned_free(p) mi_free(p) +#define _aligned_offset_malloc(n,a,o) mi_malloc_aligned_at(n,a,o) +#define _aligned_offset_realloc(p,n,a,o) mi_realloc_aligned_at(p,n,a,o) +#define _aligned_offset_recalloc(p,s,n,a,o) mi_recalloc_aligned_at(p,s,n,a,o) + +#endif // MIMALLOC_OVERRIDE_H diff --git a/ww/managers/mimalloc/include/mimalloc.h b/ww/managers/mimalloc/include/mimalloc.h new file mode 100644 index 00000000..c41bcc80 --- /dev/null +++ b/ww/managers/mimalloc/include/mimalloc.h @@ -0,0 +1,569 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
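A minimal sketch of the static redirection pattern this header comment describes, assuming each source file of the consuming project includes the header (util.c and duplicate are illustrative names):

/* util.c -- hypothetical source file in the consuming project */
#include <string.h>
#include <mimalloc-override.h>   /* from here on, malloc/free/realloc expand to the mi_ variants */

char* duplicate(const char* s) {
  size_t n = strlen(s) + 1;
  char* p = (char*)malloc(n);    /* expands to mi_malloc(n) */
  if (p != NULL) memcpy(p, s, n);
  return p;                      /* must later be released with free(), i.e. mi_free() */
}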
+-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_H +#define MIMALLOC_H + +#define MI_MALLOC_VERSION 217 // major + 2 digits minor + +// ------------------------------------------------------ +// Compiler specific attributes +// ------------------------------------------------------ + +#ifdef __cplusplus + #if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 + #define mi_attr_noexcept noexcept + #else + #define mi_attr_noexcept throw() + #endif +#else + #define mi_attr_noexcept +#endif + +#if defined(__cplusplus) && (__cplusplus >= 201703) + #define mi_decl_nodiscard [[nodiscard]] +#elif (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__) // includes clang, icc, and clang-cl + #define mi_decl_nodiscard __attribute__((warn_unused_result)) +#elif defined(_HAS_NODISCARD) + #define mi_decl_nodiscard _NODISCARD +#elif (_MSC_VER >= 1700) + #define mi_decl_nodiscard _Check_return_ +#else + #define mi_decl_nodiscard +#endif + +#if defined(_MSC_VER) || defined(__MINGW32__) + #if !defined(MI_SHARED_LIB) + #define mi_decl_export + #elif defined(MI_SHARED_LIB_EXPORT) + #define mi_decl_export __declspec(dllexport) + #else + #define mi_decl_export __declspec(dllimport) + #endif + #if defined(__MINGW32__) + #define mi_decl_restrict + #define mi_attr_malloc __attribute__((malloc)) + #else + #if (_MSC_VER >= 1900) && !defined(__EDG__) + #define mi_decl_restrict __declspec(allocator) __declspec(restrict) + #else + #define mi_decl_restrict __declspec(restrict) + #endif + #define mi_attr_malloc + #endif + #define mi_cdecl __cdecl + #define mi_attr_alloc_size(s) + #define mi_attr_alloc_size2(s1,s2) + #define mi_attr_alloc_align(p) +#elif defined(__GNUC__) // includes clang and icc + #if defined(MI_SHARED_LIB) && defined(MI_SHARED_LIB_EXPORT) + #define mi_decl_export __attribute__((visibility("default"))) + #else + #define mi_decl_export + #endif + #define mi_cdecl // leads to warnings... 
__attribute__((cdecl)) + #define mi_decl_restrict + #define mi_attr_malloc __attribute__((malloc)) + #if (defined(__clang_major__) && (__clang_major__ < 4)) || (__GNUC__ < 5) + #define mi_attr_alloc_size(s) + #define mi_attr_alloc_size2(s1,s2) + #define mi_attr_alloc_align(p) + #elif defined(__INTEL_COMPILER) + #define mi_attr_alloc_size(s) __attribute__((alloc_size(s))) + #define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2))) + #define mi_attr_alloc_align(p) + #else + #define mi_attr_alloc_size(s) __attribute__((alloc_size(s))) + #define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2))) + #define mi_attr_alloc_align(p) __attribute__((alloc_align(p))) + #endif +#else + #define mi_cdecl + #define mi_decl_export + #define mi_decl_restrict + #define mi_attr_malloc + #define mi_attr_alloc_size(s) + #define mi_attr_alloc_size2(s1,s2) + #define mi_attr_alloc_align(p) +#endif + +// ------------------------------------------------------ +// Includes +// ------------------------------------------------------ + +#include // size_t +#include // bool +#include // INTPTR_MAX + +#ifdef __cplusplus +extern "C" { +#endif + +// ------------------------------------------------------ +// Standard malloc interface +// ------------------------------------------------------ + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); +mi_decl_nodiscard mi_decl_export void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); +mi_decl_export void* mi_expand(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); + +mi_decl_export void mi_free(void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc; + +// ------------------------------------------------------ +// Extended functionality +// ------------------------------------------------------ +#define MI_SMALL_WSIZE_MAX (128) +#define MI_SMALL_SIZE_MAX (MI_SMALL_WSIZE_MAX*sizeof(void*)) + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); +mi_decl_nodiscard mi_decl_export void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3); +mi_decl_nodiscard mi_decl_export void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); + +mi_decl_nodiscard mi_decl_export size_t mi_usable_size(const void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept; + + +// ------------------------------------------------------ +// 
Internals +// ------------------------------------------------------ + +typedef void (mi_cdecl mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg); +mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg) mi_attr_noexcept; + +typedef void (mi_cdecl mi_output_fun)(const char* msg, void* arg); +mi_decl_export void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept; + +typedef void (mi_cdecl mi_error_fun)(int err, void* arg); +mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg); + +mi_decl_export void mi_collect(bool force) mi_attr_noexcept; +mi_decl_export int mi_version(void) mi_attr_noexcept; +mi_decl_export void mi_stats_reset(void) mi_attr_noexcept; +mi_decl_export void mi_stats_merge(void) mi_attr_noexcept; +mi_decl_export void mi_stats_print(void* out) mi_attr_noexcept; // backward compatibility: `out` is ignored and should be NULL +mi_decl_export void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; + +mi_decl_export void mi_process_init(void) mi_attr_noexcept; +mi_decl_export void mi_thread_init(void) mi_attr_noexcept; +mi_decl_export void mi_thread_done(void) mi_attr_noexcept; +mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept; + +mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, + size_t* current_rss, size_t* peak_rss, + size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept; + +// ------------------------------------------------------------------------------------- +// Aligned allocation +// Note that `alignment` always follows `size` for consistency with unaligned +// allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`. +// ------------------------------------------------------------------------------------- + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2); +mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2); + + +// ------------------------------------------------------------------------------------- +// Heaps: first-class, but can only allocate from the same thread that created it. 
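A short sketch of the first-class heap API declared just below, assuming single-threaded use (heap_sketch is an illustrative name):

#include <mimalloc.h>

static void heap_sketch(void) {
  mi_heap_t* heap = mi_heap_new();                      // create a heap owned by this thread
  int*  xs  = (int*)mi_heap_mallocn(heap, 100, sizeof(int));
  char* tag = mi_heap_strdup(heap, "scratch");
  /* ... use xs and tag from this thread only ... */
  (void)xs; (void)tag;
  mi_heap_destroy(heap);                                // releases every block from this heap at once
}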
+// ------------------------------------------------------------------------------------- + +struct mi_heap_s; +typedef struct mi_heap_s mi_heap_t; + +mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new(void); +mi_decl_export void mi_heap_delete(mi_heap_t* heap); +mi_decl_export void mi_heap_destroy(mi_heap_t* heap); +mi_decl_export mi_heap_t* mi_heap_set_default(mi_heap_t* heap); +mi_decl_export mi_heap_t* mi_heap_get_default(void); +mi_decl_export mi_heap_t* mi_heap_get_backing(void); +mi_decl_export void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept; + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); + +mi_decl_nodiscard mi_decl_export void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); +mi_decl_nodiscard mi_decl_export void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4); +mi_decl_nodiscard mi_decl_export void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); + +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept mi_attr_malloc; +mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc; + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3); 
+mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3); + + +// -------------------------------------------------------------------------------- +// Zero initialized re-allocation. +// Only valid on memory that was originally allocated with zero initialization too. +// e.g. `mi_calloc`, `mi_zalloc`, `mi_zalloc_aligned` etc. +// see +// -------------------------------------------------------------------------------- + +mi_decl_nodiscard mi_decl_export void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export void* mi_recalloc(void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3); + +mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3); +mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(2,3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(2,3); + +mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3); +mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4); + +mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4); +mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3); +mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(3,4) mi_attr_alloc_align(5); +mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(3,4); + + +// ------------------------------------------------------ +// Analysis +// ------------------------------------------------------ + +mi_decl_export bool mi_heap_contains_block(mi_heap_t* heap, const void* p); +mi_decl_export bool mi_heap_check_owned(mi_heap_t* heap, const void* p); +mi_decl_export bool mi_check_owned(const void* p); + +// An area of heap space contains blocks of a single size. +typedef struct mi_heap_area_s { + void* blocks; // start of the area containing heap blocks + size_t reserved; // bytes reserved for this area (virtual) + size_t committed; // current available bytes for this area + size_t used; // number of allocated blocks + size_t block_size; // size in bytes of each block + size_t full_block_size; // size in bytes of a full block including padding and metadata. 
+} mi_heap_area_t; + +typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg); + +mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg); + +// Experimental +mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export bool mi_is_redirected(void) mi_attr_noexcept; + +mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept; +mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept; + +mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept; +mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept; + +mi_decl_export void mi_debug_show_arenas(bool show_inuse, bool show_abandoned, bool show_purge) mi_attr_noexcept; + +// Experimental: heaps associated with specific memory arena's +typedef int mi_arena_id_t; +mi_decl_export void* mi_arena_area(mi_arena_id_t arena_id, size_t* size); +mi_decl_export int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; +mi_decl_export int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; +mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept; + +#if MI_MALLOC_VERSION >= 182 +// Create a heap that only allocates in the specified arena +mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id); +#endif + +// deprecated +mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept; + + +// ------------------------------------------------------ +// Convenience +// ------------------------------------------------------ + +#define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp))) +#define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp))) +#define mi_calloc_tp(tp,n) ((tp*)mi_calloc(n,sizeof(tp))) +#define mi_mallocn_tp(tp,n) ((tp*)mi_mallocn(n,sizeof(tp))) +#define mi_reallocn_tp(p,tp,n) ((tp*)mi_reallocn(p,n,sizeof(tp))) +#define mi_recalloc_tp(p,tp,n) ((tp*)mi_recalloc(p,n,sizeof(tp))) + +#define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp))) +#define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp))) +#define mi_heap_calloc_tp(hp,tp,n) ((tp*)mi_heap_calloc(hp,n,sizeof(tp))) +#define mi_heap_mallocn_tp(hp,tp,n) ((tp*)mi_heap_mallocn(hp,n,sizeof(tp))) +#define mi_heap_reallocn_tp(hp,p,tp,n) ((tp*)mi_heap_reallocn(hp,p,n,sizeof(tp))) +#define mi_heap_recalloc_tp(hp,p,tp,n) ((tp*)mi_heap_recalloc(hp,p,n,sizeof(tp))) + + +// ------------------------------------------------------ +// Options +// ------------------------------------------------------ + +typedef enum mi_option_e { + // stable options + mi_option_show_errors, // print error messages + mi_option_show_stats, // print statistics on termination + mi_option_verbose, // print verbose messages + // advanced options + mi_option_eager_commit, // eager commit segments? 
(after `eager_commit_delay` segments) (=1) + mi_option_arena_eager_commit, // eager commit arenas? Use 2 to enable just on overcommit systems (=2) + mi_option_purge_decommits, // should a memory purge decommit? (=1). Set to 0 to use memory reset on a purge (instead of decommit) + mi_option_allow_large_os_pages, // allow large (2 or 4 MiB) OS pages, implies eager commit. If false, also disables THP for the process. + mi_option_reserve_huge_os_pages, // reserve N huge OS pages (1GiB pages) at startup + mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node + mi_option_reserve_os_memory, // reserve specified amount of OS memory in an arena at startup (internally, this value is in KiB; use `mi_option_get_size`) + mi_option_deprecated_segment_cache, + mi_option_deprecated_page_reset, + mi_option_abandoned_page_purge, // immediately purge delayed purges on thread termination + mi_option_deprecated_segment_reset, + mi_option_eager_commit_delay, // the first N segments per thread are not eagerly committed (but per page in the segment on demand) + mi_option_purge_delay, // memory purging is delayed by N milli seconds; use 0 for immediate purging or -1 for no purging at all. (=10) + mi_option_use_numa_nodes, // 0 = use all available numa nodes, otherwise use at most N nodes. + mi_option_disallow_os_alloc, // 1 = do not use OS memory for allocation (but only programmatically reserved arenas) + mi_option_os_tag, // tag used for OS logging (macOS only for now) (=100) + mi_option_max_errors, // issue at most N error messages + mi_option_max_warnings, // issue at most N warning messages + mi_option_max_segment_reclaim, // max. percentage of the abandoned segments can be reclaimed per try (=10%) + mi_option_destroy_on_exit, // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe + mi_option_arena_reserve, // initial memory size for arena reservation (= 1 GiB on 64-bit) (internally, this value is in KiB; use `mi_option_get_size`) + mi_option_arena_purge_mult, // multiplier for `purge_delay` for the purging delay for arenas (=10) + mi_option_purge_extend_delay, + mi_option_abandoned_reclaim_on_free, // allow to reclaim an abandoned segment on a free (=1) + mi_option_disallow_arena_alloc, // 1 = do not use arena's for allocation (except if using specific arena id's) + mi_option_retry_on_oom, // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. 
(only on windows) + _mi_option_last, + // legacy option names + mi_option_large_os_pages = mi_option_allow_large_os_pages, + mi_option_eager_region_commit = mi_option_arena_eager_commit, + mi_option_reset_decommits = mi_option_purge_decommits, + mi_option_reset_delay = mi_option_purge_delay, + mi_option_abandoned_page_reset = mi_option_abandoned_page_purge, + mi_option_limit_os_alloc = mi_option_disallow_os_alloc +} mi_option_t; + + +mi_decl_nodiscard mi_decl_export bool mi_option_is_enabled(mi_option_t option); +mi_decl_export void mi_option_enable(mi_option_t option); +mi_decl_export void mi_option_disable(mi_option_t option); +mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable); +mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable); + +mi_decl_nodiscard mi_decl_export long mi_option_get(mi_option_t option); +mi_decl_nodiscard mi_decl_export long mi_option_get_clamp(mi_option_t option, long min, long max); +mi_decl_nodiscard mi_decl_export size_t mi_option_get_size(mi_option_t option); +mi_decl_export void mi_option_set(mi_option_t option, long value); +mi_decl_export void mi_option_set_default(mi_option_t option, long value); + + +// ------------------------------------------------------------------------------------------------------- +// "mi" prefixed implementations of various posix, Unix, Windows, and C++ allocation functions. +// (This can be convenient when providing overrides of these functions as done in `mimalloc-override.h`.) +// note: we use `mi_cfree` as "checked free" and it checks if the pointer is in our heap before free-ing. +// ------------------------------------------------------------------------------------------------------- + +mi_decl_export void mi_cfree(void* p) mi_attr_noexcept; +mi_decl_export void* mi__expand(void* p, size_t newsize) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_malloc_size(const void* p) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_malloc_good_size(size_t size) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept; + +mi_decl_export int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1); + +mi_decl_nodiscard mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3); +mi_decl_nodiscard mi_decl_export int mi_reallocarr(void* p, size_t count, size_t size) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept; +mi_decl_nodiscard mi_decl_export void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept; + +mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept mi_attr_malloc; 
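A small sketch of the posix-style wrappers declared in this section (sizes and alignment are illustrative):

#include <stdio.h>
#include <mimalloc.h>

static void posix_style_sketch(void) {
  void* p = NULL;
  if (mi_posix_memalign(&p, 64, 4096) == 0) {   // same contract as posix_memalign: 0 on success
    printf("usable size: %zu\n", mi_malloc_usable_size(p));
    mi_free(p);
  }
}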
+mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept mi_attr_malloc; +mi_decl_export int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept; +mi_decl_export int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name) mi_attr_noexcept; + +mi_decl_export void mi_free_size(void* p, size_t size) mi_attr_noexcept; +mi_decl_export void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept; +mi_decl_export void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept; + +// The `mi_new` wrappers implement C++ semantics on out-of-memory instead of directly returning `NULL`. +// (and call `std::get_new_handler` and potentially raise a `std::bad_alloc` exception). +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new(size_t size) mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1, 2); +mi_decl_nodiscard mi_decl_export void* mi_new_realloc(void* p, size_t newsize) mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_alloc_size2(2, 3); + +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) mi_attr_malloc mi_attr_alloc_size(2); +mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(2, 3); + +#ifdef __cplusplus +} +#endif + +// --------------------------------------------------------------------------------------------- +// Implement the C++ std::allocator interface for use in STL containers. 
+// (note: see `mimalloc-new-delete.h` for overriding the new/delete operators globally) +// --------------------------------------------------------------------------------------------- +#ifdef __cplusplus + +#include // std::size_t +#include // PTRDIFF_MAX +#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11 +#include // std::true_type +#include // std::forward +#endif + +template struct _mi_stl_allocator_common { + typedef T value_type; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + typedef value_type& reference; + typedef value_type const& const_reference; + typedef value_type* pointer; + typedef value_type const* const_pointer; + + #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11 + using propagate_on_container_copy_assignment = std::true_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + template void construct(U* p, Args&& ...args) { ::new(p) U(std::forward(args)...); } + template void destroy(U* p) mi_attr_noexcept { p->~U(); } + #else + void construct(pointer p, value_type const& val) { ::new(p) value_type(val); } + void destroy(pointer p) { p->~value_type(); } + #endif + + size_type max_size() const mi_attr_noexcept { return (PTRDIFF_MAX/sizeof(value_type)); } + pointer address(reference x) const { return &x; } + const_pointer address(const_reference x) const { return &x; } +}; + +template struct mi_stl_allocator : public _mi_stl_allocator_common { + using typename _mi_stl_allocator_common::size_type; + using typename _mi_stl_allocator_common::value_type; + using typename _mi_stl_allocator_common::pointer; + template struct rebind { typedef mi_stl_allocator other; }; + + mi_stl_allocator() mi_attr_noexcept = default; + mi_stl_allocator(const mi_stl_allocator&) mi_attr_noexcept = default; + template mi_stl_allocator(const mi_stl_allocator&) mi_attr_noexcept { } + mi_stl_allocator select_on_container_copy_construction() const { return *this; } + void deallocate(T* p, size_type) { mi_free(p); } + + #if (__cplusplus >= 201703L) // C++17 + mi_decl_nodiscard T* allocate(size_type count) { return static_cast(mi_new_n(count, sizeof(T))); } + mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); } + #else + mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast(mi_new_n(count, sizeof(value_type))); } + #endif + + #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11 + using is_always_equal = std::true_type; + #endif +}; + +template bool operator==(const mi_stl_allocator& , const mi_stl_allocator& ) mi_attr_noexcept { return true; } +template bool operator!=(const mi_stl_allocator& , const mi_stl_allocator& ) mi_attr_noexcept { return false; } + + +#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900) // C++11 +#define MI_HAS_HEAP_STL_ALLOCATOR 1 + +#include // std::shared_ptr + +// Common base class for STL allocators in a specific heap +template struct _mi_heap_stl_allocator_common : public _mi_stl_allocator_common { + using typename _mi_stl_allocator_common::size_type; + using typename _mi_stl_allocator_common::value_type; + using typename _mi_stl_allocator_common::pointer; + + _mi_heap_stl_allocator_common(mi_heap_t* hp) : heap(hp, [](mi_heap_t*) {}) {} /* will not delete nor destroy the passed in heap */ + + #if (__cplusplus >= 201703L) // C++17 + mi_decl_nodiscard T* allocate(size_type count) { return static_cast(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(T))); } + mi_decl_nodiscard 
T* allocate(size_type count, const void*) { return allocate(count); } + #else + mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(value_type))); } + #endif + + #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11 + using is_always_equal = std::false_type; + #endif + + void collect(bool force) { mi_heap_collect(this->heap.get(), force); } + template bool is_equal(const _mi_heap_stl_allocator_common& x) const { return (this->heap == x.heap); } + +protected: + std::shared_ptr heap; + template friend struct _mi_heap_stl_allocator_common; + + _mi_heap_stl_allocator_common() { + mi_heap_t* hp = mi_heap_new(); + this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete)); /* calls heap_delete/destroy when the refcount drops to zero */ + } + _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { } + template _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { } + +private: + static void heap_delete(mi_heap_t* hp) { if (hp != NULL) { mi_heap_delete(hp); } } + static void heap_destroy(mi_heap_t* hp) { if (hp != NULL) { mi_heap_destroy(hp); } } +}; + +// STL allocator allocation in a specific heap +template struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common { + using typename _mi_heap_stl_allocator_common::size_type; + mi_heap_stl_allocator() : _mi_heap_stl_allocator_common() { } // creates fresh heap that is deleted when the destructor is called + mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap + template mi_heap_stl_allocator(const mi_heap_stl_allocator& x) mi_attr_noexcept : _mi_heap_stl_allocator_common(x) { } + + mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; } + void deallocate(T* p, size_type) { mi_free(p); } + template struct rebind { typedef mi_heap_stl_allocator other; }; +}; + +template bool operator==(const mi_heap_stl_allocator& x, const mi_heap_stl_allocator& y) mi_attr_noexcept { return (x.is_equal(y)); } +template bool operator!=(const mi_heap_stl_allocator& x, const mi_heap_stl_allocator& y) mi_attr_noexcept { return (!x.is_equal(y)); } + + +// STL allocator allocation in a specific heap, where `free` does nothing and +// the heap is destroyed in one go on destruction -- use with care! +template struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common { + using typename _mi_heap_stl_allocator_common::size_type; + mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common() { } // creates fresh heap that is destroyed when the destructor is called + mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common(hp) { } // no delete nor destroy on the passed in heap + template mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator& x) mi_attr_noexcept : _mi_heap_stl_allocator_common(x) { } + + mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; } + void deallocate(T*, size_type) { /* do nothing as we destroy the heap on destruct. 
*/ } + template struct rebind { typedef mi_heap_destroy_stl_allocator other; }; +}; + +template bool operator==(const mi_heap_destroy_stl_allocator& x, const mi_heap_destroy_stl_allocator& y) mi_attr_noexcept { return (x.is_equal(y)); } +template bool operator!=(const mi_heap_destroy_stl_allocator& x, const mi_heap_destroy_stl_allocator& y) mi_attr_noexcept { return (!x.is_equal(y)); } + +#endif // C++11 + +#endif // __cplusplus + +#endif diff --git a/ww/managers/mimalloc/include/mimalloc/atomic.h b/ww/managers/mimalloc/include/mimalloc/atomic.h new file mode 100644 index 00000000..d5333dd9 --- /dev/null +++ b/ww/managers/mimalloc/include/mimalloc/atomic.h @@ -0,0 +1,393 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023 Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_ATOMIC_H +#define MIMALLOC_ATOMIC_H + +// -------------------------------------------------------------------------------------------- +// Atomics +// We need to be portable between C, C++, and MSVC. +// We base the primitives on the C/C++ atomics and create a mimimal wrapper for MSVC in C compilation mode. +// This is why we try to use only `uintptr_t` and `*` as atomic types. +// To gain better insight in the range of used atomics, we use explicitly named memory order operations +// instead of passing the memory order as a parameter. +// ----------------------------------------------------------------------------------------------- + +#if defined(__cplusplus) +// Use C++ atomics +#include +#define _Atomic(tp) std::atomic +#define mi_atomic(name) std::atomic_##name +#define mi_memory_order(name) std::memory_order_##name +#if (__cplusplus >= 202002L) // c++20, see issue #571 +#define MI_ATOMIC_VAR_INIT(x) x +#elif !defined(ATOMIC_VAR_INIT) +#define MI_ATOMIC_VAR_INIT(x) x +#else + #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x) +#endif +#elif defined(_MSC_VER) +// Use MSVC C wrapper for C11 atomics +#define _Atomic(tp) tp +#define MI_ATOMIC_VAR_INIT(x) x +#define mi_atomic(name) mi_atomic_##name +#define mi_memory_order(name) mi_memory_order_##name +#else +// Use C11 atomics +#include +#define mi_atomic(name) atomic_##name +#define mi_memory_order(name) memory_order_##name +#if (__STDC_VERSION__ >= 201710L) // c17, see issue #735 + #define MI_ATOMIC_VAR_INIT(x) x +#elif !defined(ATOMIC_VAR_INIT) + #define MI_ATOMIC_VAR_INIT(x) x +#else + #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x) +#endif +#endif + +// Various defines for all used memory orders in mimalloc +#define mi_atomic_cas_weak(p,expected,desired,mem_success,mem_fail) \ + mi_atomic(compare_exchange_weak_explicit)(p,expected,desired,mem_success,mem_fail) + +#define mi_atomic_cas_strong(p,expected,desired,mem_success,mem_fail) \ + mi_atomic(compare_exchange_strong_explicit)(p,expected,desired,mem_success,mem_fail) + +#define mi_atomic_load_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire)) +#define mi_atomic_load_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed)) +#define mi_atomic_store_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release)) +#define mi_atomic_store_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed)) +#define mi_atomic_exchange_release(p,x) 
mi_atomic(exchange_explicit)(p,x,mi_memory_order(release)) +#define mi_atomic_exchange_acq_rel(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(acq_rel)) +#define mi_atomic_cas_weak_release(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed)) +#define mi_atomic_cas_weak_acq_rel(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire)) +#define mi_atomic_cas_strong_release(p,exp,des) mi_atomic_cas_strong(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed)) +#define mi_atomic_cas_strong_acq_rel(p,exp,des) mi_atomic_cas_strong(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire)) + +#define mi_atomic_add_relaxed(p,x) mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(relaxed)) +#define mi_atomic_sub_relaxed(p,x) mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(relaxed)) +#define mi_atomic_add_acq_rel(p,x) mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(acq_rel)) +#define mi_atomic_sub_acq_rel(p,x) mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(acq_rel)) +#define mi_atomic_and_acq_rel(p,x) mi_atomic(fetch_and_explicit)(p,x,mi_memory_order(acq_rel)) +#define mi_atomic_or_acq_rel(p,x) mi_atomic(fetch_or_explicit)(p,x,mi_memory_order(acq_rel)) + +#define mi_atomic_increment_relaxed(p) mi_atomic_add_relaxed(p,(uintptr_t)1) +#define mi_atomic_decrement_relaxed(p) mi_atomic_sub_relaxed(p,(uintptr_t)1) +#define mi_atomic_increment_acq_rel(p) mi_atomic_add_acq_rel(p,(uintptr_t)1) +#define mi_atomic_decrement_acq_rel(p) mi_atomic_sub_acq_rel(p,(uintptr_t)1) + +static inline void mi_atomic_yield(void); +static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add); +static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub); + + +#if defined(__cplusplus) || !defined(_MSC_VER) + +// In C++/C11 atomics we have polymorphic atomics so can use the typed `ptr` variants (where `tp` is the type of atomic value) +// We use these macros so we can provide a typed wrapper in MSVC in C compilation mode as well +#define mi_atomic_load_ptr_acquire(tp,p) mi_atomic_load_acquire(p) +#define mi_atomic_load_ptr_relaxed(tp,p) mi_atomic_load_relaxed(p) + +// In C++ we need to add casts to help resolve templates if NULL is passed +#if defined(__cplusplus) +#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release(p,(tp*)x) +#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed(p,(tp*)x) +#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,(tp*)des) +#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,(tp*)des) +#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,(tp*)des) +#define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,(tp*)x) +#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,(tp*)x) +#else +#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release(p,x) +#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed(p,x) +#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,des) +#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,des) +#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,des) +#define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,x) +#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,x) +#endif + +// These are used by the statistics +static 
inline int64_t mi_atomic_addi64_relaxed(volatile int64_t* p, int64_t add) { + return mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed)); +} +static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) { + int64_t current = mi_atomic_load_relaxed((_Atomic(int64_t)*)p); + while (current < x && !mi_atomic_cas_weak_release((_Atomic(int64_t)*)p, ¤t, x)) { /* nothing */ }; +} + +// Used by timers +#define mi_atomic_loadi64_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire)) +#define mi_atomic_loadi64_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed)) +#define mi_atomic_storei64_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release)) +#define mi_atomic_storei64_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed)) + +#define mi_atomic_casi64_strong_acq_rel(p,e,d) mi_atomic_cas_strong_acq_rel(p,e,d) +#define mi_atomic_addi64_acq_rel(p,i) mi_atomic_add_acq_rel(p,i) + + +#elif defined(_MSC_VER) + +// Legacy MSVC plain C compilation wrapper that uses Interlocked operations to model C11 atomics. +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#include +#ifdef _WIN64 +typedef LONG64 msc_intptr_t; +#define MI_64(f) f##64 +#else +typedef LONG msc_intptr_t; +#define MI_64(f) f +#endif + +typedef enum mi_memory_order_e { + mi_memory_order_relaxed, + mi_memory_order_consume, + mi_memory_order_acquire, + mi_memory_order_release, + mi_memory_order_acq_rel, + mi_memory_order_seq_cst +} mi_memory_order; + +static inline uintptr_t mi_atomic_fetch_add_explicit(_Atomic(uintptr_t)*p, uintptr_t add, mi_memory_order mo) { + (void)(mo); + return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, (msc_intptr_t)add); +} +static inline uintptr_t mi_atomic_fetch_sub_explicit(_Atomic(uintptr_t)*p, uintptr_t sub, mi_memory_order mo) { + (void)(mo); + return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, -((msc_intptr_t)sub)); +} +static inline uintptr_t mi_atomic_fetch_and_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) { + (void)(mo); + return (uintptr_t)MI_64(_InterlockedAnd)((volatile msc_intptr_t*)p, (msc_intptr_t)x); +} +static inline uintptr_t mi_atomic_fetch_or_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) { + (void)(mo); + return (uintptr_t)MI_64(_InterlockedOr)((volatile msc_intptr_t*)p, (msc_intptr_t)x); +} +static inline bool mi_atomic_compare_exchange_strong_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) { + (void)(mo1); (void)(mo2); + uintptr_t read = (uintptr_t)MI_64(_InterlockedCompareExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)desired, (msc_intptr_t)(*expected)); + if (read == *expected) { + return true; + } + else { + *expected = read; + return false; + } +} +static inline bool mi_atomic_compare_exchange_weak_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) { + return mi_atomic_compare_exchange_strong_explicit(p, expected, desired, mo1, mo2); +} +static inline uintptr_t mi_atomic_exchange_explicit(_Atomic(uintptr_t)*p, uintptr_t exchange, mi_memory_order mo) { + (void)(mo); + return (uintptr_t)MI_64(_InterlockedExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)exchange); +} +static inline void mi_atomic_thread_fence(mi_memory_order mo) { + (void)(mo); + _Atomic(uintptr_t) x = 0; + mi_atomic_exchange_explicit(&x, 1, mo); +} +static inline uintptr_t 
mi_atomic_load_explicit(_Atomic(uintptr_t) const* p, mi_memory_order mo) { + (void)(mo); +#if defined(_M_IX86) || defined(_M_X64) + return *p; +#else + uintptr_t x = *p; + if (mo > mi_memory_order_relaxed) { + while (!mi_atomic_compare_exchange_weak_explicit((_Atomic(uintptr_t)*)p, &x, x, mo, mi_memory_order_relaxed)) { /* nothing */ }; + } + return x; +#endif +} +static inline void mi_atomic_store_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) { + (void)(mo); +#if defined(_M_IX86) || defined(_M_X64) + *p = x; +#else + mi_atomic_exchange_explicit(p, x, mo); +#endif +} +static inline int64_t mi_atomic_loadi64_explicit(_Atomic(int64_t)*p, mi_memory_order mo) { + (void)(mo); +#if defined(_M_X64) + return *p; +#else + int64_t old = *p; + int64_t x = old; + while ((old = InterlockedCompareExchange64(p, x, old)) != x) { + x = old; + } + return x; +#endif +} +static inline void mi_atomic_storei64_explicit(_Atomic(int64_t)*p, int64_t x, mi_memory_order mo) { + (void)(mo); +#if defined(x_M_IX86) || defined(_M_X64) + *p = x; +#else + InterlockedExchange64(p, x); +#endif +} + +// These are used by the statistics +static inline int64_t mi_atomic_addi64_relaxed(volatile _Atomic(int64_t)*p, int64_t add) { +#ifdef _WIN64 + return (int64_t)mi_atomic_addi((int64_t*)p, add); +#else + int64_t current; + int64_t sum; + do { + current = *p; + sum = current + add; + } while (_InterlockedCompareExchange64(p, sum, current) != current); + return current; +#endif +} +static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t x) { + int64_t current; + do { + current = *p; + } while (current < x && _InterlockedCompareExchange64(p, x, current) != current); +} + +static inline void mi_atomic_addi64_acq_rel(volatile _Atomic(int64_t*)p, int64_t i) { + mi_atomic_addi64_relaxed(p, i); +} + +static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t*)p, int64_t* exp, int64_t des) { + int64_t read = _InterlockedCompareExchange64(p, des, *exp); + if (read == *exp) { + return true; + } + else { + *exp = read; + return false; + } +} + +// The pointer macros cast to `uintptr_t`. 
+#define mi_atomic_load_ptr_acquire(tp,p) (tp*)mi_atomic_load_acquire((_Atomic(uintptr_t)*)(p)) +#define mi_atomic_load_ptr_relaxed(tp,p) (tp*)mi_atomic_load_relaxed((_Atomic(uintptr_t)*)(p)) +#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release((_Atomic(uintptr_t)*)(p),(uintptr_t)(x)) +#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed((_Atomic(uintptr_t)*)(p),(uintptr_t)(x)) +#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) +#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) +#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des) +#define mi_atomic_exchange_ptr_release(tp,p,x) (tp*)mi_atomic_exchange_release((_Atomic(uintptr_t)*)(p),(uintptr_t)x) +#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) (tp*)mi_atomic_exchange_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t)x) + +#define mi_atomic_loadi64_acquire(p) mi_atomic(loadi64_explicit)(p,mi_memory_order(acquire)) +#define mi_atomic_loadi64_relaxed(p) mi_atomic(loadi64_explicit)(p,mi_memory_order(relaxed)) +#define mi_atomic_storei64_release(p,x) mi_atomic(storei64_explicit)(p,x,mi_memory_order(release)) +#define mi_atomic_storei64_relaxed(p,x) mi_atomic(storei64_explicit)(p,x,mi_memory_order(relaxed)) + + +#endif + + +// Atomically add a signed value; returns the previous value. +static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add) { + return (intptr_t)mi_atomic_add_acq_rel((_Atomic(uintptr_t)*)p, (uintptr_t)add); +} + +// Atomically subtract a signed value; returns the previous value. +static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) { + return (intptr_t)mi_atomic_addi(p, -sub); +} + +typedef _Atomic(uintptr_t) mi_atomic_once_t; + +// Returns true only on the first invocation +static inline bool mi_atomic_once( mi_atomic_once_t* once ) { + if (mi_atomic_load_relaxed(once) != 0) return false; // quick test + uintptr_t expected = 0; + return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1 +} + +typedef _Atomic(uintptr_t) mi_atomic_guard_t; + +// Allows only one thread to execute at a time +#define mi_atomic_guard(guard) \ + uintptr_t _mi_guard_expected = 0; \ + for(bool _mi_guard_once = true; \ + _mi_guard_once && mi_atomic_cas_strong_acq_rel(guard,&_mi_guard_expected,(uintptr_t)1); \ + (mi_atomic_store_release(guard,(uintptr_t)0), _mi_guard_once = false) ) + + + +// Yield +#if defined(__cplusplus) +#include +static inline void mi_atomic_yield(void) { + std::this_thread::yield(); +} +#elif defined(_WIN32) +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +static inline void mi_atomic_yield(void) { + YieldProcessor(); +} +#elif defined(__SSE2__) +#include +static inline void mi_atomic_yield(void) { + _mm_pause(); +} +#elif (defined(__GNUC__) || defined(__clang__)) && \ + (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__armel__) || defined(__ARMEL__) || \ + defined(__aarch64__) || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__)) || defined(__POWERPC__) +#if defined(__x86_64__) || defined(__i386__) +static inline void mi_atomic_yield(void) { + __asm__ volatile ("pause" ::: "memory"); +} +#elif defined(__aarch64__) +static inline void mi_atomic_yield(void) { + __asm__ volatile("wfe"); +} +#elif (defined(__arm__) && 
__ARM_ARCH__ >= 7) +static inline void mi_atomic_yield(void) { + __asm__ volatile("yield" ::: "memory"); +} +#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__) +#ifdef __APPLE__ +static inline void mi_atomic_yield(void) { + __asm__ volatile ("or r27,r27,r27" ::: "memory"); +} +#else +static inline void mi_atomic_yield(void) { + __asm__ __volatile__ ("or 27,27,27" ::: "memory"); +} +#endif +#elif defined(__armel__) || defined(__ARMEL__) +static inline void mi_atomic_yield(void) { + __asm__ volatile ("nop" ::: "memory"); +} +#endif +#elif defined(__sun) +// Fallback for other archs +#include +static inline void mi_atomic_yield(void) { + smt_pause(); +} +#elif defined(__wasi__) +#include +static inline void mi_atomic_yield(void) { + sched_yield(); +} +#else +#include +static inline void mi_atomic_yield(void) { + sleep(0); +} +#endif + + +#endif // __MIMALLOC_ATOMIC_H diff --git a/ww/managers/mimalloc/include/mimalloc/internal.h b/ww/managers/mimalloc/include/mimalloc/internal.h new file mode 100644 index 00000000..6c6e5ed0 --- /dev/null +++ b/ww/managers/mimalloc/include/mimalloc/internal.h @@ -0,0 +1,1018 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_INTERNAL_H +#define MIMALLOC_INTERNAL_H + + +// -------------------------------------------------------------------------- +// This file contains the interal API's of mimalloc and various utility +// functions and macros. +// -------------------------------------------------------------------------- + +#include "types.h" +#include "track.h" + +#if (MI_DEBUG>0) +#define mi_trace_message(...) _mi_trace_message(__VA_ARGS__) +#else +#define mi_trace_message(...) 
+#endif + +#define MI_CACHE_LINE 64 +#if defined(_MSC_VER) +#pragma warning(disable:4127) // suppress constant conditional warning (due to MI_SECURE paths) +#pragma warning(disable:26812) // unscoped enum warning +#define mi_decl_noinline __declspec(noinline) +#define mi_decl_thread __declspec(thread) +#define mi_decl_cache_align __declspec(align(MI_CACHE_LINE)) +#define mi_decl_weak +#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc +#define mi_decl_noinline __attribute__((noinline)) +#define mi_decl_thread __thread +#define mi_decl_cache_align __attribute__((aligned(MI_CACHE_LINE))) +#define mi_decl_weak __attribute__((weak)) +#else +#define mi_decl_noinline +#define mi_decl_thread __thread // hope for the best :-) +#define mi_decl_cache_align +#define mi_decl_weak +#endif + +#if defined(__EMSCRIPTEN__) && !defined(__wasi__) +#define __wasi__ +#endif + +#if defined(__cplusplus) +#define mi_decl_externc extern "C" +#else +#define mi_decl_externc +#endif + +// pthreads +#if !defined(_WIN32) && !defined(__wasi__) +#define MI_USE_PTHREADS +#include +#endif + +// "options.c" +void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message); +void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...); +void _mi_warning_message(const char* fmt, ...); +void _mi_verbose_message(const char* fmt, ...); +void _mi_trace_message(const char* fmt, ...); +void _mi_options_init(void); +void _mi_error_message(int err, const char* fmt, ...); + +// random.c +void _mi_random_init(mi_random_ctx_t* ctx); +void _mi_random_init_weak(mi_random_ctx_t* ctx); +void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx); +void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx); +uintptr_t _mi_random_next(mi_random_ctx_t* ctx); +uintptr_t _mi_heap_random_next(mi_heap_t* heap); +uintptr_t _mi_os_random_weak(uintptr_t extra_seed); +static inline uintptr_t _mi_random_shuffle(uintptr_t x); + +// init.c +extern mi_decl_cache_align mi_stats_t _mi_stats_main; +extern mi_decl_cache_align const mi_page_t _mi_page_empty; +bool _mi_is_main_thread(void); +size_t _mi_current_thread_count(void); +bool _mi_preloading(void); // true while the C runtime is not initialized yet +mi_threadid_t _mi_thread_id(void) mi_attr_noexcept; +mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap +void _mi_thread_done(mi_heap_t* heap); +void _mi_thread_data_collect(void); +void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap); + +// os.c +void _mi_os_init(void); // called from process init +void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats); +void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats); +void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats); + +size_t _mi_os_page_size(void); +size_t _mi_os_good_alloc_size(size_t size); +bool _mi_os_has_overcommit(void); +bool _mi_os_has_virtual_reserve(void); + +bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats); +bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats); +bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats); +bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats); +bool _mi_os_protect(void* addr, size_t size); +bool _mi_os_unprotect(void* addr, size_t size); +bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats); +bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats); + +void* _mi_os_alloc_aligned(size_t size, size_t 
alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats); +void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats); + +void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size); +bool _mi_os_use_large_page(size_t size, size_t alignment); +size_t _mi_os_large_page_size(void); + +void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid); + +// arena.c +mi_arena_id_t _mi_arena_id_none(void); +void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid, mi_stats_t* stats); +void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld); +void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld); +bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id); +bool _mi_arena_contains(const void* p); +void _mi_arenas_collect(bool force_purge, mi_stats_t* stats); +void _mi_arena_unsafe_destroy_all(mi_stats_t* stats); + +bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment); +void _mi_arena_segment_mark_abandoned(mi_segment_t* segment); +size_t _mi_arena_segment_abandoned_count(void); + +typedef struct mi_arena_field_cursor_s { // abstract + mi_arena_id_t start; + int count; + size_t bitmap_idx; +} mi_arena_field_cursor_t; +void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_arena_field_cursor_t* current); +mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous); + +// "segment-map.c" +void _mi_segment_map_allocated_at(const mi_segment_t* segment); +void _mi_segment_map_freed_at(const mi_segment_t* segment); + +// "segment.c" +mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld); +void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld); +void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld); +bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld); +void _mi_segment_collect(mi_segment_t* segment, bool force, mi_segments_tld_t* tld); + +#if MI_HUGE_PAGE_ABANDON +void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block); +#else +void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block); +#endif + +uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page +void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld); +void _mi_abandoned_await_readers(void); +void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld); +bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment); + +// "page.c" +void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc; + +void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks +void _mi_page_unfull(mi_page_t* page); +void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page +void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another 
thread... +void _mi_heap_delayed_free_all(mi_heap_t* heap); +bool _mi_heap_delayed_free_partial(mi_heap_t* heap); +void _mi_heap_collect_retired(mi_heap_t* heap, bool force); + +void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never); +bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never); +size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append); +void _mi_deferred_free(mi_heap_t* heap, bool force); + +void _mi_page_free_collect(mi_page_t* page,bool force); +void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments + +size_t _mi_bin_size(uint8_t bin); // for stats +uint8_t _mi_bin(size_t size); // for stats + +// "heap.c" +void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag); +void _mi_heap_destroy_pages(mi_heap_t* heap); +void _mi_heap_collect_abandon(mi_heap_t* heap); +void _mi_heap_set_default_direct(mi_heap_t* heap); +bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid); +void _mi_heap_unsafe_destroy_all(void); +mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag); + +// "stats.c" +void _mi_stats_done(mi_stats_t* stats); +mi_msecs_t _mi_clock_now(void); +mi_msecs_t _mi_clock_end(mi_msecs_t start); +mi_msecs_t _mi_clock_start(void); + +// "alloc.c" +void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept; // called from `_mi_malloc_generic` +void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned` +void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned` +void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept; +void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned` +void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept; +mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p); +bool _mi_free_delayed_block(mi_block_t* block); +void _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration +void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size); + +// "libc.c" +#include +void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args); +void _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...); +char _mi_toupper(char c); +int _mi_strnicmp(const char* s, const char* t, size_t n); +void _mi_strlcpy(char* dest, const char* src, size_t dest_size); +void _mi_strlcat(char* dest, const char* src, size_t dest_size); +size_t _mi_strlen(const char* s); +size_t _mi_strnlen(const char* s, size_t max_len); +bool _mi_getenv(const char* name, char* result, size_t result_size); + +#if MI_DEBUG>1 +bool _mi_page_is_valid(mi_page_t* page); +#endif + + +// ------------------------------------------------------ +// Branches +// ------------------------------------------------------ + +#if defined(__GNUC__) || defined(__clang__) +#define mi_unlikely(x) (__builtin_expect(!!(x),false)) +#define mi_likely(x) (__builtin_expect(!!(x),true)) +#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) +#define mi_unlikely(x) (x) [[unlikely]] +#define mi_likely(x) (x) 
[[likely]] +#else +#define mi_unlikely(x) (x) +#define mi_likely(x) (x) +#endif + +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + + +/* ----------------------------------------------------------- + Error codes passed to `_mi_fatal_error` + All are recoverable but EFAULT is a serious error and aborts by default in secure mode. + For portability define undefined error codes using common Unix codes: + +----------------------------------------------------------- */ +#include +#ifndef EAGAIN // double free +#define EAGAIN (11) +#endif +#ifndef ENOMEM // out of memory +#define ENOMEM (12) +#endif +#ifndef EFAULT // corrupted free-list or meta-data +#define EFAULT (14) +#endif +#ifndef EINVAL // trying to free an invalid pointer +#define EINVAL (22) +#endif +#ifndef EOVERFLOW // count*size overflow +#define EOVERFLOW (75) +#endif + + +/* ----------------------------------------------------------- + Inlined definitions +----------------------------------------------------------- */ +#define MI_UNUSED(x) (void)(x) +#if (MI_DEBUG>0) +#define MI_UNUSED_RELEASE(x) +#else +#define MI_UNUSED_RELEASE(x) MI_UNUSED(x) +#endif + +#define MI_INIT4(x) x(),x(),x(),x() +#define MI_INIT8(x) MI_INIT4(x),MI_INIT4(x) +#define MI_INIT16(x) MI_INIT8(x),MI_INIT8(x) +#define MI_INIT32(x) MI_INIT16(x),MI_INIT16(x) +#define MI_INIT64(x) MI_INIT32(x),MI_INIT32(x) +#define MI_INIT128(x) MI_INIT64(x),MI_INIT64(x) +#define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x) + + +#include +// initialize a local variable to zero; use memset as compilers optimize constant sized memset's +#define _mi_memzero_var(x) memset(&x,0,sizeof(x)) + +// Is `x` a power of two? (0 is considered a power of two) +static inline bool _mi_is_power_of_two(uintptr_t x) { + return ((x & (x - 1)) == 0); +} + +// Is a pointer aligned? +static inline bool _mi_is_aligned(void* p, size_t alignment) { + mi_assert_internal(alignment != 0); + return (((uintptr_t)p % alignment) == 0); +} + +// Align upwards +static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) { + mi_assert_internal(alignment != 0); + uintptr_t mask = alignment - 1; + if ((alignment & mask) == 0) { // power of two? + return ((sz + mask) & ~mask); + } + else { + return (((sz + mask)/alignment)*alignment); + } +} + +// Align downwards +static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) { + mi_assert_internal(alignment != 0); + uintptr_t mask = alignment - 1; + if ((alignment & mask) == 0) { // power of two? + return (sz & ~mask); + } + else { + return ((sz / alignment) * alignment); + } +} + +// Align a pointer upwards +static inline void* mi_align_up_ptr(void* p, size_t alignment) { + return (void*)_mi_align_up((uintptr_t)p, alignment); +} + +// Align a pointer downwards +static inline void* mi_align_down_ptr(void* p, size_t alignment) { + return (void*)_mi_align_down((uintptr_t)p, alignment); +} + + +// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`. +static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) { + mi_assert_internal(divider != 0); + return (divider == 0 ? size : ((size + divider - 1) / divider)); +} + +// Is memory zero initialized? +static inline bool mi_mem_is_zero(const void* p, size_t size) { + for (size_t i = 0; i < size; i++) { + if (((uint8_t*)p)[i] != 0) return false; + } + return true; +} + + +// Align a byte size to a size in _machine words_, +// i.e. byte size == `wsize*sizeof(void*)`. 
+static inline size_t _mi_wsize_from_size(size_t size) { + mi_assert_internal(size <= SIZE_MAX - sizeof(uintptr_t)); + return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t); +} + +// Overflow detecting multiply +#if __has_builtin(__builtin_umul_overflow) || (defined(__GNUC__) && (__GNUC__ >= 5)) +#include // UINT_MAX, ULONG_MAX +#if defined(_CLOCK_T) // for Illumos +#undef _CLOCK_T +#endif +static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) { + #if (SIZE_MAX == ULONG_MAX) + return __builtin_umull_overflow(count, size, (unsigned long *)total); + #elif (SIZE_MAX == UINT_MAX) + return __builtin_umul_overflow(count, size, (unsigned int *)total); + #else + return __builtin_umulll_overflow(count, size, (unsigned long long *)total); + #endif +} +#else /* __builtin_umul_overflow is unavailable */ +static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) { + #define MI_MUL_COULD_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX) + *total = count * size; + // note: gcc/clang optimize this to directly check the overflow flag + return ((size >= MI_MUL_COULD_OVERFLOW || count >= MI_MUL_COULD_OVERFLOW) && size > 0 && (SIZE_MAX / size) < count); +} +#endif + +// Safe multiply `count*size` into `total`; return `true` on overflow. +static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* total) { + if (count==1) { // quick check for the case where count is one (common for C++ allocators) + *total = size; + return false; + } + else if mi_unlikely(mi_mul_overflow(count, size, total)) { + #if MI_DEBUG > 0 + _mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size); + #endif + *total = SIZE_MAX; + return true; + } + else return false; +} + + +/*---------------------------------------------------------------------------------------- + Heap functions +------------------------------------------------------------------------------------------- */ + +extern const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap + +static inline bool mi_heap_is_backing(const mi_heap_t* heap) { + return (heap->tld->heap_backing == heap); +} + +static inline bool mi_heap_is_initialized(mi_heap_t* heap) { + mi_assert_internal(heap != NULL); + return (heap != &_mi_heap_empty); +} + +static inline uintptr_t _mi_ptr_cookie(const void* p) { + extern mi_heap_t _mi_heap_main; + mi_assert_internal(_mi_heap_main.cookie != 0); + return ((uintptr_t)p ^ _mi_heap_main.cookie); +} + +/* ----------------------------------------------------------- + Pages +----------------------------------------------------------- */ + +static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) { + mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE)); + const size_t idx = _mi_wsize_from_size(size); + mi_assert_internal(idx < MI_PAGES_DIRECT); + return heap->pages_free_direct[idx]; +} + +// Segment that contains the pointer +// Large aligned blocks may be aligned at N*MI_SEGMENT_SIZE (inside a huge segment > MI_SEGMENT_SIZE), +// and we need align "down" to the segment info which is `MI_SEGMENT_SIZE` bytes before it; +// therefore we align one byte before `p`. +// We check for NULL afterwards on 64-bit systems to improve codegen for `mi_free`. +static inline mi_segment_t* _mi_ptr_segment(const void* p) { + mi_segment_t* const segment = (mi_segment_t*)(((uintptr_t)p - 1) & ~MI_SEGMENT_MASK); + #if MI_INTPTR_SIZE <= 4 + return (p==NULL ? 
NULL : segment); + #else + return ((intptr_t)segment <= 0 ? NULL : segment); + #endif +} + +static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) { + mi_assert_internal(s->slice_offset== 0 && s->slice_count > 0); + return (mi_page_t*)(s); +} + +static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) { + mi_assert_internal(p->slice_offset== 0 && p->slice_count > 0); + return (mi_slice_t*)(p); +} + +// Segment belonging to a page +static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) { + mi_assert_internal(page!=NULL); + mi_segment_t* segment = _mi_ptr_segment(page); + mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries)); + return segment; +} + +static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) { + mi_slice_t* start = (mi_slice_t*)((uint8_t*)slice - slice->slice_offset); + mi_assert_internal(start >= _mi_ptr_segment(slice)->slices); + mi_assert_internal(start->slice_offset == 0); + mi_assert_internal(start + start->slice_count > slice); + return start; +} + +// Get the page containing the pointer (performance critical as it is called in mi_free) +static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) { + mi_assert_internal(p > (void*)segment); + ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment; + mi_assert_internal(diff > 0 && diff <= (ptrdiff_t)MI_SEGMENT_SIZE); + size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT; + mi_assert_internal(idx <= segment->slice_entries); + mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx]; + mi_slice_t* slice = mi_slice_first(slice0); // adjust to the block that holds the page data + mi_assert_internal(slice->slice_offset == 0); + mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_entries); + return mi_slice_to_page(slice); +} + +// Quick page start for initialized pages +static inline uint8_t* mi_page_start(const mi_page_t* page) { + mi_assert_internal(page->page_start != NULL); + mi_assert_expensive(_mi_segment_page_start(_mi_page_segment(page),page,NULL) == page->page_start); + return page->page_start; +} + +// Get the page containing the pointer +static inline mi_page_t* _mi_ptr_page(void* p) { + mi_assert_internal(p!=NULL); + return _mi_segment_page_of(_mi_ptr_segment(p), p); +} + +// Get the block size of a page (special case for huge objects) +static inline size_t mi_page_block_size(const mi_page_t* page) { + mi_assert_internal(page->block_size > 0); + return page->block_size; +} + +static inline bool mi_page_is_huge(const mi_page_t* page) { + mi_assert_internal((page->is_huge && _mi_page_segment(page)->kind == MI_SEGMENT_HUGE) || + (!page->is_huge && _mi_page_segment(page)->kind != MI_SEGMENT_HUGE)); + return page->is_huge; +} + +// Get the usable block size of a page without fixed padding. +// This may still include internal padding due to alignment and rounding up size classes. 
+static inline size_t mi_page_usable_block_size(const mi_page_t* page) { + return mi_page_block_size(page) - MI_PADDING_SIZE; +} + +// size of a segment +static inline size_t mi_segment_size(mi_segment_t* segment) { + return segment->segment_slices * MI_SEGMENT_SLICE_SIZE; +} + +static inline uint8_t* mi_segment_end(mi_segment_t* segment) { + return (uint8_t*)segment + mi_segment_size(segment); +} + +// Thread free access +static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) { + return (mi_block_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & ~3); +} + +static inline mi_delayed_t mi_page_thread_free_flag(const mi_page_t* page) { + return (mi_delayed_t)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & 3); +} + +// Heap access +static inline mi_heap_t* mi_page_heap(const mi_page_t* page) { + return (mi_heap_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xheap)); +} + +static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) { + mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING); + mi_atomic_store_release(&page->xheap,(uintptr_t)heap); + if (heap != NULL) { page->heap_tag = heap->tag; } +} + +// Thread free flag helpers +static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) { + return (mi_block_t*)(tf & ~0x03); +} +static inline mi_delayed_t mi_tf_delayed(mi_thread_free_t tf) { + return (mi_delayed_t)(tf & 0x03); +} +static inline mi_thread_free_t mi_tf_make(mi_block_t* block, mi_delayed_t delayed) { + return (mi_thread_free_t)((uintptr_t)block | (uintptr_t)delayed); +} +static inline mi_thread_free_t mi_tf_set_delayed(mi_thread_free_t tf, mi_delayed_t delayed) { + return mi_tf_make(mi_tf_block(tf),delayed); +} +static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t* block) { + return mi_tf_make(block, mi_tf_delayed(tf)); +} + +// are all blocks in a page freed? +// note: needs up-to-date used count, (as the `xthread_free` list may not be empty). see `_mi_page_collect_free`. +static inline bool mi_page_all_free(const mi_page_t* page) { + mi_assert_internal(page != NULL); + return (page->used == 0); +} + +// are there any available blocks? +static inline bool mi_page_has_any_available(const mi_page_t* page) { + mi_assert_internal(page != NULL && page->reserved > 0); + return (page->used < page->reserved || (mi_page_thread_free(page) != NULL)); +} + +// are there immediately available blocks, i.e. blocks available on the free list. +static inline bool mi_page_immediate_available(const mi_page_t* page) { + mi_assert_internal(page != NULL); + return (page->free != NULL); +} + +// is more than 7/8th of a page in use? 
+static inline bool mi_page_mostly_used(const mi_page_t* page) { + if (page==NULL) return true; + uint16_t frac = page->reserved / 8U; + return (page->reserved - page->used <= frac); +} + +static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size) { + return &((mi_heap_t*)heap)->pages[_mi_bin(size)]; +} + + + +//----------------------------------------------------------- +// Page flags +//----------------------------------------------------------- +static inline bool mi_page_is_in_full(const mi_page_t* page) { + return page->flags.x.in_full; +} + +static inline void mi_page_set_in_full(mi_page_t* page, bool in_full) { + page->flags.x.in_full = in_full; +} + +static inline bool mi_page_has_aligned(const mi_page_t* page) { + return page->flags.x.has_aligned; +} + +static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) { + page->flags.x.has_aligned = has_aligned; +} + + +/* ------------------------------------------------------------------- +Encoding/Decoding the free list next pointers + +This is to protect against buffer overflow exploits where the +free list is mutated. Many hardened allocators xor the next pointer `p` +with a secret key `k1`, as `p^k1`. This prevents overwriting with known +values but might be still too weak: if the attacker can guess +the pointer `p` this can reveal `k1` (since `p^k1^p == k1`). +Moreover, if multiple blocks can be read as well, the attacker can +xor both as `(p1^k1) ^ (p2^k1) == p1^p2` which may reveal a lot +about the pointers (and subsequently `k1`). + +Instead mimalloc uses an extra key `k2` and encodes as `((p^k2)<<> (MI_INTPTR_BITS - shift)))); +} +static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) { + shift %= MI_INTPTR_BITS; + return (shift==0 ? x : ((x >> shift) | (x << (MI_INTPTR_BITS - shift)))); +} + +static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) { + void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]); + return (p==null ? NULL : p); +} + +static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) { + uintptr_t x = (uintptr_t)(p==NULL ? null : p); + return mi_rotl(x ^ keys[1], keys[0]) + keys[0]; +} + +static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) { + mi_track_mem_defined(block,sizeof(mi_block_t)); + mi_block_t* next; + #ifdef MI_ENCODE_FREELIST + next = (mi_block_t*)mi_ptr_decode(null, block->next, keys); + #else + MI_UNUSED(keys); MI_UNUSED(null); + next = (mi_block_t*)block->next; + #endif + mi_track_mem_noaccess(block,sizeof(mi_block_t)); + return next; +} + +static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) { + mi_track_mem_undefined(block,sizeof(mi_block_t)); + #ifdef MI_ENCODE_FREELIST + block->next = mi_ptr_encode(null, next, keys); + #else + MI_UNUSED(keys); MI_UNUSED(null); + block->next = (mi_encoded_t)next; + #endif + mi_track_mem_noaccess(block,sizeof(mi_block_t)); +} + +static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) { + #ifdef MI_ENCODE_FREELIST + mi_block_t* next = mi_block_nextx(page,block,page->keys); + // check for free list corruption: is `next` at least in the same page? + // TODO: check if `next` is `page->block_size` aligned? 
+ if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) { + _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next); + next = NULL; + } + return next; + #else + MI_UNUSED(page); + return mi_block_nextx(page,block,NULL); + #endif +} + +static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) { + #ifdef MI_ENCODE_FREELIST + mi_block_set_nextx(page,block,next, page->keys); + #else + MI_UNUSED(page); + mi_block_set_nextx(page,block,next,NULL); + #endif +} + + +// ------------------------------------------------------------------- +// commit mask +// ------------------------------------------------------------------- + +static inline void mi_commit_mask_create_empty(mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + cm->mask[i] = 0; + } +} + +static inline void mi_commit_mask_create_full(mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + cm->mask[i] = ~((size_t)0); + } +} + +static inline bool mi_commit_mask_is_empty(const mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + if (cm->mask[i] != 0) return false; + } + return true; +} + +static inline bool mi_commit_mask_is_full(const mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + if (cm->mask[i] != ~((size_t)0)) return false; + } + return true; +} + +// defined in `segment.c`: +size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total); +size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx); + +#define mi_commit_mask_foreach(cm,idx,count) \ + idx = 0; \ + while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) { + +#define mi_commit_mask_foreach_end() \ + idx += count; \ + } + + + +/* ----------------------------------------------------------- + memory id's +----------------------------------------------------------- */ + +static inline mi_memid_t _mi_memid_create(mi_memkind_t memkind) { + mi_memid_t memid; + _mi_memzero_var(memid); + memid.memkind = memkind; + return memid; +} + +static inline mi_memid_t _mi_memid_none(void) { + return _mi_memid_create(MI_MEM_NONE); +} + +static inline mi_memid_t _mi_memid_create_os(bool committed, bool is_zero, bool is_large) { + mi_memid_t memid = _mi_memid_create(MI_MEM_OS); + memid.initially_committed = committed; + memid.initially_zero = is_zero; + memid.is_pinned = is_large; + return memid; +} + + +// ------------------------------------------------------------------- +// Fast "random" shuffle +// ------------------------------------------------------------------- + +static inline uintptr_t _mi_random_shuffle(uintptr_t x) { + if (x==0) { x = 17; } // ensure we don't get stuck in generating zeros +#if (MI_INTPTR_SIZE==8) + // by Sebastiano Vigna, see: + x ^= x >> 30; + x *= 0xbf58476d1ce4e5b9UL; + x ^= x >> 27; + x *= 0x94d049bb133111ebUL; + x ^= x >> 31; +#elif (MI_INTPTR_SIZE==4) + // by Chris Wellons, see: + x ^= x >> 16; + x *= 0x7feb352dUL; + x ^= x >> 15; + x *= 0x846ca68bUL; + x ^= x >> 16; +#endif + return x; +} + +// ------------------------------------------------------------------- +// Optimize numa node access for the common case (= one node) +// ------------------------------------------------------------------- + +int _mi_os_numa_node_get(mi_os_tld_t* tld); +size_t _mi_os_numa_node_count_get(void); + +extern _Atomic(size_t) _mi_numa_node_count; +static inline int 
_mi_os_numa_node(mi_os_tld_t* tld) { + if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; } + else return _mi_os_numa_node_get(tld); +} +static inline size_t _mi_os_numa_node_count(void) { + const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count); + if mi_likely(count > 0) { return count; } + else return _mi_os_numa_node_count_get(); +} + + + +// ----------------------------------------------------------------------- +// Count bits: trailing or leading zeros (with MI_INTPTR_BITS on all zero) +// ----------------------------------------------------------------------- + +#if defined(__GNUC__) + +#include // LONG_MAX +#define MI_HAVE_FAST_BITSCAN +static inline size_t mi_clz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; +#if (INTPTR_MAX == LONG_MAX) + return __builtin_clzl(x); +#else + return __builtin_clzll(x); +#endif +} +static inline size_t mi_ctz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; +#if (INTPTR_MAX == LONG_MAX) + return __builtin_ctzl(x); +#else + return __builtin_ctzll(x); +#endif +} + +#elif defined(_MSC_VER) + +#include // LONG_MAX +#include // BitScanReverse64 +#define MI_HAVE_FAST_BITSCAN +static inline size_t mi_clz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; + unsigned long idx; +#if (INTPTR_MAX == LONG_MAX) + _BitScanReverse(&idx, x); +#else + _BitScanReverse64(&idx, x); +#endif + return ((MI_INTPTR_BITS - 1) - idx); +} +static inline size_t mi_ctz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; + unsigned long idx; +#if (INTPTR_MAX == LONG_MAX) + _BitScanForward(&idx, x); +#else + _BitScanForward64(&idx, x); +#endif + return idx; +} + +#else +static inline size_t mi_ctz32(uint32_t x) { + // de Bruijn multiplication, see + static const unsigned char debruijn[32] = { + 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 + }; + if (x==0) return 32; + return debruijn[((x & -(int32_t)x) * 0x077CB531UL) >> 27]; +} +static inline size_t mi_clz32(uint32_t x) { + // de Bruijn multiplication, see + static const uint8_t debruijn[32] = { + 31, 22, 30, 21, 18, 10, 29, 2, 20, 17, 15, 13, 9, 6, 28, 1, + 23, 19, 11, 3, 16, 14, 7, 24, 12, 4, 8, 25, 5, 26, 27, 0 + }; + if (x==0) return 32; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + return debruijn[(uint32_t)(x * 0x07C4ACDDUL) >> 27]; +} + +static inline size_t mi_clz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; +#if (MI_INTPTR_BITS <= 32) + return mi_clz32((uint32_t)x); +#else + size_t count = mi_clz32((uint32_t)(x >> 32)); + if (count < 32) return count; + return (32 + mi_clz32((uint32_t)x)); +#endif +} +static inline size_t mi_ctz(uintptr_t x) { + if (x==0) return MI_INTPTR_BITS; +#if (MI_INTPTR_BITS <= 32) + return mi_ctz32((uint32_t)x); +#else + size_t count = mi_ctz32((uint32_t)x); + if (count < 32) return count; + return (32 + mi_ctz32((uint32_t)(x>>32))); +#endif +} + +#endif + +// "bit scan reverse": Return index of the highest bit (or MI_INTPTR_BITS if `x` is zero) +static inline size_t mi_bsr(uintptr_t x) { + return (x==0 ? MI_INTPTR_BITS : MI_INTPTR_BITS - 1 - mi_clz(x)); +} + + +// --------------------------------------------------------------------------------- +// Provide our own `_mi_memcpy` for potential performance optimizations. +// +// For now, only on Windows with msvc/clang-cl we optimize to `rep movsb` if +// we happen to run on x86/x64 cpu's that have "fast short rep movsb" (FSRM) support +// (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017). 
See also issue #201 and pr #253.
+// ---------------------------------------------------------------------------------
+
+#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
+#include <intrin.h>
+extern bool _mi_cpu_has_fsrm;
+static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
+  if (_mi_cpu_has_fsrm) {
+    __movsb((unsigned char*)dst, (const unsigned char*)src, n);
+  }
+  else {
+    memcpy(dst, src, n);
+  }
+}
+static inline void _mi_memzero(void* dst, size_t n) {
+  if (_mi_cpu_has_fsrm) {
+    __stosb((unsigned char*)dst, 0, n);
+  }
+  else {
+    memset(dst, 0, n);
+  }
+}
+#else
+static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
+  memcpy(dst, src, n);
+}
+static inline void _mi_memzero(void* dst, size_t n) {
+  memset(dst, 0, n);
+}
+#endif
+
+// -------------------------------------------------------------------------------
+// The `_mi_memcpy_aligned` can be used if the pointers are machine-word aligned
+// This is used for example in `mi_realloc`.
+// -------------------------------------------------------------------------------
+
+#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)
+// On GCC/CLang we provide a hint that the pointers are word aligned.
+static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
+  mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
+  void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
+  const void* asrc = __builtin_assume_aligned(src, MI_INTPTR_SIZE);
+  _mi_memcpy(adst, asrc, n);
+}
+
+static inline void _mi_memzero_aligned(void* dst, size_t n) {
+  mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
+  void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
+  _mi_memzero(adst, n);
+}
+#else
+// Default fallback on `_mi_memcpy`
+static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
+  mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
+  _mi_memcpy(dst, src, n);
+}
+
+static inline void _mi_memzero_aligned(void* dst, size_t n) {
+  mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
+  _mi_memzero(dst, n);
+}
+#endif
+
+
+#endif
diff --git a/ww/managers/mimalloc/include/mimalloc/prim.h b/ww/managers/mimalloc/include/mimalloc/prim.h
new file mode 100644
index 00000000..3f4574dd
--- /dev/null
+++ b/ww/managers/mimalloc/include/mimalloc/prim.h
@@ -0,0 +1,373 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#pragma once
+#ifndef MIMALLOC_PRIM_H
+#define MIMALLOC_PRIM_H
+
+
+// --------------------------------------------------------------------------
+// This file specifies the primitive portability API.
+// Each OS/host needs to implement these primitives, see `src/prim`
+// for implementations on Windows, macOS, WASI, and Linux/Unix.
+// +// note: on all primitive functions, we always have result parameters != NULL, and: +// addr != NULL and page aligned +// size > 0 and page aligned +// the return value is an error code as an `int` where 0 is success +// -------------------------------------------------------------------------- + +// OS memory configuration +typedef struct mi_os_mem_config_s { + size_t page_size; // default to 4KiB + size_t large_page_size; // 0 if not supported, usually 2MiB (4MiB on Windows) + size_t alloc_granularity; // smallest allocation size (usually 4KiB, on Windows 64KiB) + bool has_overcommit; // can we reserve more memory than can be actually committed? + bool has_partial_free; // can allocated blocks be freed partially? (true for mmap, false for VirtualAlloc) + bool has_virtual_reserve; // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory) +} mi_os_mem_config_t; + +// Initialize +void _mi_prim_mem_init( mi_os_mem_config_t* config ); + +// Free OS memory +int _mi_prim_free(void* addr, size_t size ); + +// Allocate OS memory. Return NULL on error. +// The `try_alignment` is just a hint and the returned pointer does not have to be aligned. +// If `commit` is false, the virtual memory range only needs to be reserved (with no access) +// which will later be committed explicitly using `_mi_prim_commit`. +// `is_zero` is set to true if the memory was zero initialized (as on most OS's) +// pre: !commit => !allow_large +// try_alignment >= _mi_os_page_size() and a power of 2 +int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr); + +// Commit memory. Returns error code or 0 on success. +// For example, on Linux this would make the memory PROT_READ|PROT_WRITE. +// `is_zero` is set to true if the memory was zero initialized (e.g. on Windows) +int _mi_prim_commit(void* addr, size_t size, bool* is_zero); + +// Decommit memory. Returns error code or 0 on success. The `needs_recommit` result is true +// if the memory would need to be re-committed. For example, on Windows this is always true, +// but on Linux we could use MADV_DONTNEED to decommit which does not need a recommit. +// pre: needs_recommit != NULL +int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit); + +// Reset memory. The range keeps being accessible but the content might be reset. +// Returns error code or 0 on success. +int _mi_prim_reset(void* addr, size_t size); + +// Protect memory. Returns error code or 0 on success. +int _mi_prim_protect(void* addr, size_t size, bool protect); + +// Allocate huge (1GiB) pages possibly associated with a NUMA node. +// `is_zero` is set to true if the memory was zero initialized (as on most OS's) +// pre: size > 0 and a multiple of 1GiB. +// numa_node is either negative (don't care), or a numa node number. 
+int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr); + +// Return the current NUMA node +size_t _mi_prim_numa_node(void); + +// Return the number of logical NUMA nodes +size_t _mi_prim_numa_node_count(void); + +// Clock ticks +mi_msecs_t _mi_prim_clock_now(void); + +// Return process information (only for statistics) +typedef struct mi_process_info_s { + mi_msecs_t elapsed; + mi_msecs_t utime; + mi_msecs_t stime; + size_t current_rss; + size_t peak_rss; + size_t current_commit; + size_t peak_commit; + size_t page_faults; +} mi_process_info_t; + +void _mi_prim_process_info(mi_process_info_t* pinfo); + +// Default stderr output. (only for warnings etc. with verbose enabled) +// msg != NULL && _mi_strlen(msg) > 0 +void _mi_prim_out_stderr( const char* msg ); + +// Get an environment variable. (only for options) +// name != NULL, result != NULL, result_size >= 64 +bool _mi_prim_getenv(const char* name, char* result, size_t result_size); + + +// Fill a buffer with strong randomness; return `false` on error or if +// there is no strong randomization available. +bool _mi_prim_random_buf(void* buf, size_t buf_len); + +// Called on the first thread start, and should ensure `_mi_thread_done` is called on thread termination. +void _mi_prim_thread_init_auto_done(void); + +// Called on process exit and may take action to clean up resources associated with the thread auto done. +void _mi_prim_thread_done_auto_done(void); + +// Called when the default heap for a thread changes +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap); + + +//------------------------------------------------------------------- +// Thread id: `_mi_prim_thread_id()` +// +// Getting the thread id should be performant as it is called in the +// fast path of `_mi_free` and we specialize for various platforms as +// inlined definitions. Regular code should call `init.c:_mi_thread_id()`. +// We only require _mi_prim_thread_id() to return a unique id +// for each thread (unequal to zero). +//------------------------------------------------------------------- + +// On some libc + platform combinations we can directly access a thread-local storage (TLS) slot. +// The TLS layout depends on both the OS and libc implementation so we use specific tests for each main platform. +// If you test on another platform and it works please send a PR :-) +// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register. +// +// Note: we would like to prefer `__builtin_thread_pointer()` nowadays instead of using assembly, +// but unfortunately we can not detect support reliably (see issue #883) +// We also use it on Apple OS as we use a TLS slot for the default heap there. 
+#if defined(__GNUC__) && ( \ + (defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \ + || (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__) || defined(__POWERPC__))) \ + || (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \ + || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \ + || (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \ + ) + +#define MI_HAS_TLS_SLOT + +static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept { + void* res; + const size_t ofs = (slot*sizeof(void*)); + #if defined(__i386__) + __asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86 32-bit always uses GS + #elif defined(__APPLE__) && defined(__x86_64__) + __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS + #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4) + __asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x32 ABI + #elif defined(__x86_64__) + __asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS + #elif defined(__arm__) + void** tcb; MI_UNUSED(ofs); + __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb)); + res = tcb[slot]; + #elif defined(__aarch64__) + void** tcb; MI_UNUSED(ofs); + #if defined(__APPLE__) // M1, issue #343 + __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb)); + #else + __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb)); + #endif + res = tcb[slot]; + #elif defined(__APPLE__) && defined(__POWERPC__) // ppc, issue #781 + MI_UNUSED(ofs); + res = pthread_getspecific(slot); + #endif + return res; +} + +// setting a tls slot is only used on macOS for now +static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept { + const size_t ofs = (slot*sizeof(void*)); + #if defined(__i386__) + __asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS + #elif defined(__APPLE__) && defined(__x86_64__) + __asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOS uses GS + #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4) + __asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x32 ABI + #elif defined(__x86_64__) + __asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS + #elif defined(__arm__) + void** tcb; MI_UNUSED(ofs); + __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb)); + tcb[slot] = value; + #elif defined(__aarch64__) + void** tcb; MI_UNUSED(ofs); + #if defined(__APPLE__) // M1, issue #343 + __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb)); + #else + __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb)); + #endif + tcb[slot] = value; + #elif defined(__APPLE__) && defined(__POWERPC__) // ppc, issue #781 + MI_UNUSED(ofs); + pthread_setspecific(slot, value); + #endif +} + +#endif + +// Do we have __builtin_thread_pointer? This would be the preferred way to get a unique thread id +// but unfortunately, it seems we cannot test for this reliably at this time (see issue #883) +// Nevertheless, it seems needed on older graviton platforms (see issue #851). +// For now, we only enable this for specific platforms. 
+#if !defined(__APPLE__) /* on apple (M1) the wrong register is read (tpidr_el0 instead of tpidrro_el0) so fall back to TLS slot assembly ()*/ \ + && !defined(MI_LIBC_MUSL) \ + && (!defined(__clang_major__) || __clang_major__ >= 14) /* older clang versions emit bad code; fall back to using the TLS slot () */ + #if (defined(__GNUC__) && (__GNUC__ >= 7) && defined(__aarch64__)) /* aarch64 for older gcc versions (issue #851) */ \ + || (defined(__GNUC__) && (__GNUC__ >= 11) && defined(__x86_64__)) \ + || (defined(__clang_major__) && (__clang_major__ >= 14) && (defined(__aarch64__) || defined(__x86_64__))) + #define MI_USE_BUILTIN_THREAD_POINTER 1 + #endif +#endif + + + +// defined in `init.c`; do not use these directly +extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from +extern bool _mi_process_is_initialized; // has mi_process_init been called? + +static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept; + +// Get a unique id for the current thread. +#if defined(MI_PRIM_THREAD_ID) + +static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { + return MI_PRIM_THREAD_ID(); // used for example by CPython for a free threaded build (see python/cpython#115488) +} + +#elif defined(_WIN32) + +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { + // Windows: works on Intel and ARM in both 32- and 64-bit + return (uintptr_t)NtCurrentTeb(); +} + +#elif MI_USE_BUILTIN_THREAD_POINTER + +static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { + // Works on most Unix based platforms with recent compilers + return (uintptr_t)__builtin_thread_pointer(); +} + +#elif defined(MI_HAS_TLS_SLOT) + +static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { + #if defined(__BIONIC__) + // issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id + // see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86 + return (uintptr_t)mi_prim_tls_slot(1); + #else + // in all our other targets, slot 0 is the thread id + // glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h + // apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36 + return (uintptr_t)mi_prim_tls_slot(0); + #endif +} + +#else + +// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms). +static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { + return (uintptr_t)&_mi_heap_default; +} + +#endif + + + +/* ---------------------------------------------------------------------------------------- +The thread local default heap: `_mi_prim_get_default_heap()` +This is inlined here as it is on the fast path for allocation functions. + +On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a +__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures +that the storage will always be available (allocated on the thread stacks). + +On some platforms though we cannot use that when overriding `malloc` since the underlying +TLS implementation (or the loader) will call itself `malloc` on a first access and recurse. +We try to circumvent this in an efficient way: +- macOSX : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). 
On OSX, the + loader itself calls `malloc` even before the modules are initialized. +- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS). +- DragonFly: defaults are working but seem slow compared to freeBSD (see PR #323) +------------------------------------------------------------------------------------------- */ + +static inline mi_heap_t* mi_prim_get_default_heap(void); + +#if defined(MI_MALLOC_OVERRIDE) +#if defined(__APPLE__) // macOS + #define MI_TLS_SLOT 89 // seems unused? + // other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89) + // see +#elif defined(__OpenBSD__) + // use end bytes of a name; goes wrong if anyone uses names > 23 characters (ptrhread specifies 16) + // see + #define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24) + // #elif defined(__DragonFly__) + // #warning "mimalloc is not working correctly on DragonFly yet." + // #define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) +#elif defined(__ANDROID__) + // See issue #381 + #define MI_TLS_PTHREAD +#endif +#endif + + +#if defined(MI_TLS_SLOT) +# if !defined(MI_HAS_TLS_SLOT) +# error "trying to use a TLS slot for the default heap, but the mi_prim_tls_slot primitives are not defined" +# endif + +static inline mi_heap_t* mi_prim_get_default_heap(void) { + mi_heap_t* heap = (mi_heap_t*)mi_prim_tls_slot(MI_TLS_SLOT); + if mi_unlikely(heap == NULL) { + #ifdef __GNUC__ + __asm(""); // prevent conditional load of the address of _mi_heap_empty + #endif + heap = (mi_heap_t*)&_mi_heap_empty; + } + return heap; +} + +#elif defined(MI_TLS_PTHREAD_SLOT_OFS) + +static inline mi_heap_t** mi_prim_tls_pthread_heap_slot(void) { + pthread_t self = pthread_self(); + #if defined(__DragonFly__) + if (self==NULL) return NULL; + #endif + return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS); +} + +static inline mi_heap_t* mi_prim_get_default_heap(void) { + mi_heap_t** pheap = mi_prim_tls_pthread_heap_slot(); + if mi_unlikely(pheap == NULL) return _mi_heap_main_get(); + mi_heap_t* heap = *pheap; + if mi_unlikely(heap == NULL) return (mi_heap_t*)&_mi_heap_empty; + return heap; +} + +#elif defined(MI_TLS_PTHREAD) + +extern pthread_key_t _mi_heap_default_key; +static inline mi_heap_t* mi_prim_get_default_heap(void) { + mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key)); + return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap); +} + +#else // default using a thread local variable; used on most platforms. + +static inline mi_heap_t* mi_prim_get_default_heap(void) { + #if defined(MI_TLS_RECURSE_GUARD) + if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get(); + #endif + return _mi_heap_default; +} + +#endif // mi_prim_get_default_heap() + + + +#endif // MIMALLOC_PRIM_H diff --git a/ww/managers/mimalloc/include/mimalloc/track.h b/ww/managers/mimalloc/include/mimalloc/track.h new file mode 100644 index 00000000..a659d940 --- /dev/null +++ b/ww/managers/mimalloc/include/mimalloc/track.h @@ -0,0 +1,149 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_TRACK_H +#define MIMALLOC_TRACK_H + +/* ------------------------------------------------------------------------------------------------------ +Track memory ranges with macros for tools like Valgrind address sanitizer, or other memory checkers. +These can be defined for tracking allocation: + + #define mi_track_malloc_size(p,reqsize,size,zero) + #define mi_track_free_size(p,_size) + +The macros are set up such that the size passed to `mi_track_free_size` +always matches the size of `mi_track_malloc_size`. (currently, `size == mi_usable_size(p)`). +The `reqsize` is what the user requested, and `size >= reqsize`. +The `size` is either byte precise (and `size==reqsize`) if `MI_PADDING` is enabled, +or otherwise it is the usable block size which may be larger than the original request. +Use `_mi_block_size_of(void* p)` to get the full block size that was allocated (including padding etc). +The `zero` parameter is `true` if the allocated block is zero initialized. + +Optional: + + #define mi_track_align(p,alignedp,offset,size) + #define mi_track_resize(p,oldsize,newsize) + #define mi_track_init() + +The `mi_track_align` is called right after a `mi_track_malloc` for aligned pointers in a block. +The corresponding `mi_track_free` still uses the block start pointer and original size (corresponding to the `mi_track_malloc`). +The `mi_track_resize` is currently unused but could be called on reallocations within a block. +`mi_track_init` is called at program start. + +The following macros are for tools like asan and valgrind to track whether memory is +defined, undefined, or not accessible at all: + + #define mi_track_mem_defined(p,size) + #define mi_track_mem_undefined(p,size) + #define mi_track_mem_noaccess(p,size) + +-------------------------------------------------------------------------------------------------------*/ + +#if MI_TRACK_VALGRIND +// valgrind tool + +#define MI_TRACK_ENABLED 1 +#define MI_TRACK_HEAP_DESTROY 1 // track free of individual blocks on heap_destroy +#define MI_TRACK_TOOL "valgrind" + +#include +#include + +#define mi_track_malloc_size(p,reqsize,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero) +#define mi_track_free_size(p,_size) VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/) +#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/) +#define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size) +#define mi_track_mem_undefined(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size) +#define mi_track_mem_noaccess(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size) + +#elif MI_TRACK_ASAN +// address sanitizer + +#define MI_TRACK_ENABLED 1 +#define MI_TRACK_HEAP_DESTROY 0 +#define MI_TRACK_TOOL "asan" + +#include + +#define mi_track_malloc_size(p,reqsize,size,zero) ASAN_UNPOISON_MEMORY_REGION(p,size) +#define mi_track_free_size(p,size) ASAN_POISON_MEMORY_REGION(p,size) +#define mi_track_mem_defined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size) +#define mi_track_mem_undefined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size) +#define mi_track_mem_noaccess(p,size) ASAN_POISON_MEMORY_REGION(p,size) + +#elif MI_TRACK_ETW +// windows event tracing + +#define MI_TRACK_ENABLED 1 +#define MI_TRACK_HEAP_DESTROY 1 +#define MI_TRACK_TOOL "ETW" + +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#include "../src/prim/windows/etw.h" + +#define mi_track_init() 
EventRegistermicrosoft_windows_mimalloc(); +#define mi_track_malloc_size(p,reqsize,size,zero) EventWriteETW_MI_ALLOC((UINT64)(p), size) +#define mi_track_free_size(p,size) EventWriteETW_MI_FREE((UINT64)(p), size) + +#else +// no tracking + +#define MI_TRACK_ENABLED 0 +#define MI_TRACK_HEAP_DESTROY 0 +#define MI_TRACK_TOOL "none" + +#define mi_track_malloc_size(p,reqsize,size,zero) +#define mi_track_free_size(p,_size) + +#endif + +// ------------------- +// Utility definitions + +#ifndef mi_track_resize +#define mi_track_resize(p,oldsize,newsize) mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false) +#endif + +#ifndef mi_track_align +#define mi_track_align(p,alignedp,offset,size) mi_track_mem_noaccess(p,offset) +#endif + +#ifndef mi_track_init +#define mi_track_init() +#endif + +#ifndef mi_track_mem_defined +#define mi_track_mem_defined(p,size) +#endif + +#ifndef mi_track_mem_undefined +#define mi_track_mem_undefined(p,size) +#endif + +#ifndef mi_track_mem_noaccess +#define mi_track_mem_noaccess(p,size) +#endif + + +#if MI_PADDING +#define mi_track_malloc(p,reqsize,zero) \ + if ((p)!=NULL) { \ + mi_assert_internal(mi_usable_size(p)==(reqsize)); \ + mi_track_malloc_size(p,reqsize,reqsize,zero); \ + } +#else +#define mi_track_malloc(p,reqsize,zero) \ + if ((p)!=NULL) { \ + mi_assert_internal(mi_usable_size(p)>=(reqsize)); \ + mi_track_malloc_size(p,reqsize,mi_usable_size(p),zero); \ + } +#endif + +#endif diff --git a/ww/managers/mimalloc/include/mimalloc/types.h b/ww/managers/mimalloc/include/mimalloc/types.h new file mode 100644 index 00000000..2fdde904 --- /dev/null +++ b/ww/managers/mimalloc/include/mimalloc/types.h @@ -0,0 +1,705 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#pragma once +#ifndef MIMALLOC_TYPES_H +#define MIMALLOC_TYPES_H + +// -------------------------------------------------------------------------- +// This file contains the main type definitions for mimalloc: +// mi_heap_t : all data for a thread-local heap, contains +// lists of all managed heap pages. +// mi_segment_t : a larger chunk of memory (32GiB) from where pages +// are allocated. A segment is divided in slices (64KiB) from +// which pages are allocated. +// mi_page_t : a "mimalloc" page (usually 64KiB or 512KiB) from +// where objects are allocated. +// Note: we write "OS page" for OS memory pages while +// using plain "page" for mimalloc pages (`mi_page_t`). +// -------------------------------------------------------------------------- + + +#include // ptrdiff_t +#include // uintptr_t, uint16_t, etc +#include "atomic.h" // _Atomic + +#ifdef _MSC_VER +#pragma warning(disable:4214) // bitfield is not int +#endif + +// Minimal alignment necessary. On most platforms 16 bytes are needed +// due to SSE registers for example. This must be at least `sizeof(void*)` +#ifndef MI_MAX_ALIGN_SIZE +#define MI_MAX_ALIGN_SIZE 16 // sizeof(max_align_t) +#endif + +// ------------------------------------------------------ +// Variants +// ------------------------------------------------------ + +// Define NDEBUG in the release version to disable assertions. 
+// #define NDEBUG + +// Define MI_TRACK_ to enable tracking support +// #define MI_TRACK_VALGRIND 1 +// #define MI_TRACK_ASAN 1 +// #define MI_TRACK_ETW 1 + +// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance). +// #define MI_STAT 1 + +// Define MI_SECURE to enable security mitigations +// #define MI_SECURE 1 // guard page around metadata +// #define MI_SECURE 2 // guard page around each mimalloc page +// #define MI_SECURE 3 // encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free) +// #define MI_SECURE 4 // checks for double free. (may be more expensive) + +#if !defined(MI_SECURE) +#define MI_SECURE 0 +#endif + +// Define MI_DEBUG for debug mode +// #define MI_DEBUG 1 // basic assertion checks and statistics, check double free, corrupted free list, and invalid pointer free. +// #define MI_DEBUG 2 // + internal assertion checks +// #define MI_DEBUG 3 // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON) +#if !defined(MI_DEBUG) +#if !defined(NDEBUG) || defined(_DEBUG) +#define MI_DEBUG 2 +#else +#define MI_DEBUG 0 +#endif +#endif + +// Reserve extra padding at the end of each block to be more resilient against heap block overflows. +// The padding can detect buffer overflow on free. +#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW)) +#define MI_PADDING 1 +#endif + +// Check padding bytes; allows byte-precise buffer overflow detection +#if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1) +#define MI_PADDING_CHECK 1 +#endif + + +// Encoded free lists allow detection of corrupted free lists +// and can detect buffer overflows, modify after free, and double `free`s. +#if (MI_SECURE>=3 || MI_DEBUG>=1) +#define MI_ENCODE_FREELIST 1 +#endif + + +// We used to abandon huge pages in order to eagerly deallocate it if freed from another thread. +// Unfortunately, that makes it not possible to visit them during a heap walk or include them in a +// `mi_heap_destroy`. We therefore instead reset/decommit the huge blocks nowadays if freed from +// another thread so the memory becomes "virtually" available (and eventually gets properly freed by +// the owning thread). +// #define MI_HUGE_PAGE_ABANDON 1 + + +// ------------------------------------------------------ +// Platform specific values +// ------------------------------------------------------ + +// ------------------------------------------------------ +// Size of a pointer. +// We assume that `sizeof(void*)==sizeof(intptr_t)` +// and it holds for all platforms we know of. +// +// However, the C standard only requires that: +// p == (void*)((intptr_t)p)) +// but we also need: +// i == (intptr_t)((void*)i) +// or otherwise one might define an intptr_t type that is larger than a pointer... 
+// ------------------------------------------------------ + +#if INTPTR_MAX > INT64_MAX +# define MI_INTPTR_SHIFT (4) // assume 128-bit (as on arm CHERI for example) +#elif INTPTR_MAX == INT64_MAX +# define MI_INTPTR_SHIFT (3) +#elif INTPTR_MAX == INT32_MAX +# define MI_INTPTR_SHIFT (2) +#else +#error platform pointers must be 32, 64, or 128 bits +#endif + +#if SIZE_MAX == UINT64_MAX +# define MI_SIZE_SHIFT (3) +typedef int64_t mi_ssize_t; +#elif SIZE_MAX == UINT32_MAX +# define MI_SIZE_SHIFT (2) +typedef int32_t mi_ssize_t; +#else +#error platform objects must be 32 or 64 bits +#endif + +#if (SIZE_MAX/2) > LONG_MAX +# define MI_ZU(x) x##ULL +# define MI_ZI(x) x##LL +#else +# define MI_ZU(x) x##UL +# define MI_ZI(x) x##L +#endif + +#define MI_INTPTR_SIZE (1< 4 +#define MI_SEGMENT_SHIFT ( 9 + MI_SEGMENT_SLICE_SHIFT) // 32MiB +#else +#define MI_SEGMENT_SHIFT ( 7 + MI_SEGMENT_SLICE_SHIFT) // 4MiB on 32-bit +#endif +#endif + +#ifndef MI_SMALL_PAGE_SHIFT +#define MI_SMALL_PAGE_SHIFT (MI_SEGMENT_SLICE_SHIFT) // 64KiB +#endif +#ifndef MI_MEDIUM_PAGE_SHIFT +#define MI_MEDIUM_PAGE_SHIFT ( 3 + MI_SMALL_PAGE_SHIFT) // 512KiB +#endif + +// Derived constants +#define MI_SEGMENT_SIZE (MI_ZU(1)<= 655360) +#error "mimalloc internal: define more bins" +#endif + +// Maximum block size for which blocks are guaranteed to be block size aligned. (see `segment.c:_mi_segment_page_start`) +#define MI_MAX_ALIGN_GUARANTEE (MI_MEDIUM_OBJ_SIZE_MAX) + +// Alignments over MI_BLOCK_ALIGNMENT_MAX are allocated in dedicated huge page segments +#define MI_BLOCK_ALIGNMENT_MAX (MI_SEGMENT_SIZE >> 1) + +// Maximum slice count (255) for which we can find the page for interior pointers +#define MI_MAX_SLICE_OFFSET_COUNT ((MI_BLOCK_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1) + +// we never allocate more than PTRDIFF_MAX (see also ) +// on 64-bit+ systems we also limit the maximum allocation size such that the slice count fits in 32-bits. (issue #877) +#if (PTRDIFF_MAX > INT32_MAX) && (PTRDIFF_MAX >= (MI_SEGMENT_SLIZE_SIZE * UINT32_MAX)) +#define MI_MAX_ALLOC_SIZE (MI_SEGMENT_SLICE_SIZE * (UINT32_MAX-1)) +#else +#define MI_MAX_ALLOC_SIZE PTRDIFF_MAX +#endif + + +// ------------------------------------------------------ +// Mimalloc pages contain allocated blocks +// ------------------------------------------------------ + +// The free lists use encoded next fields +// (Only actually encodes when MI_ENCODED_FREELIST is defined.) +typedef uintptr_t mi_encoded_t; + +// thread id's +typedef size_t mi_threadid_t; + +// free lists contain blocks +typedef struct mi_block_s { + mi_encoded_t next; +} mi_block_t; + + +// The delayed flags are used for efficient multi-threaded free-ing +typedef enum mi_delayed_e { + MI_USE_DELAYED_FREE = 0, // push on the owning heap thread delayed list + MI_DELAYED_FREEING = 1, // temporary: another thread is accessing the owning heap + MI_NO_DELAYED_FREE = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list + MI_NEVER_DELAYED_FREE = 3 // sticky: used for abondoned pages without a owning heap; this only resets on page reclaim +} mi_delayed_t; + + +// The `in_full` and `has_aligned` page flags are put in a union to efficiently +// test if both are false (`full_aligned == 0`) in the `mi_free` routine. 
+#if !MI_TSAN +typedef union mi_page_flags_s { + uint8_t full_aligned; + struct { + uint8_t in_full : 1; + uint8_t has_aligned : 1; + } x; +} mi_page_flags_t; +#else +// under thread sanitizer, use a byte for each flag to suppress warning, issue #130 +typedef union mi_page_flags_s { + uint16_t full_aligned; + struct { + uint8_t in_full; + uint8_t has_aligned; + } x; +} mi_page_flags_t; +#endif + +// Thread free list. +// We use the bottom 2 bits of the pointer for mi_delayed_t flags +typedef uintptr_t mi_thread_free_t; + +// A page contains blocks of one specific size (`block_size`). +// Each page has three list of free blocks: +// `free` for blocks that can be allocated, +// `local_free` for freed blocks that are not yet available to `mi_malloc` +// `thread_free` for freed blocks by other threads +// The `local_free` and `thread_free` lists are migrated to the `free` list +// when it is exhausted. The separate `local_free` list is necessary to +// implement a monotonic heartbeat. The `thread_free` list is needed for +// avoiding atomic operations in the common case. +// +// `used - |thread_free|` == actual blocks that are in use (alive) +// `used - |thread_free| + |free| + |local_free| == capacity` +// +// We don't count `freed` (as |free|) but use `used` to reduce +// the number of memory accesses in the `mi_page_all_free` function(s). +// +// Notes: +// - Access is optimized for `free.c:mi_free` and `alloc.c:mi_page_alloc` +// - Using `uint16_t` does not seem to slow things down +// - The size is 12 words on 64-bit which helps the page index calculations +// (and 14 words on 32-bit, and encoded free lists add 2 words) +// - `xthread_free` uses the bottom bits as a delayed-free flags to optimize +// concurrent frees where only the first concurrent free adds to the owning +// heap `thread_delayed_free` list (see `free.c:mi_free_block_mt`). +// The invariant is that no-delayed-free is only set if there is +// at least one block that will be added, or as already been added, to +// the owning heap `thread_delayed_free` list. This guarantees that pages +// will be freed correctly even if only other threads free blocks. 
+typedef struct mi_page_s { + // "owned" by the segment + uint32_t slice_count; // slices in this page (0 if not a page) + uint32_t slice_offset; // distance from the actual page data slice (0 if a page) + uint8_t is_committed:1; // `true` if the page virtual memory is committed + uint8_t is_zero_init:1; // `true` if the page was initially zero initialized + uint8_t is_huge:1; // `true` if the page is in a huge segment (`segment->kind == MI_SEGMENT_HUGE`) + // padding + // layout like this to optimize access in `mi_malloc` and `mi_free` + uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear` + uint16_t reserved; // number of blocks reserved in memory + mi_page_flags_t flags; // `in_full` and `has_aligned` flags (8 bits) + uint8_t free_is_zero:1; // `true` if the blocks in the free list are zero initialized + uint8_t retire_expire:7; // expiration count for retired blocks + + mi_block_t* free; // list of available free blocks (`malloc` allocates from this list) + mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`) + uint16_t used; // number of blocks in use (including blocks in `thread_free`) + uint8_t block_size_shift; // if not zero, then `(1 << block_size_shift) == block_size` (only used for fast path in `free.c:_mi_page_ptr_unalign`) + uint8_t heap_tag; // tag of the owning heap, used for separated heaps by object type + // padding + size_t block_size; // size available in each block (always `>0`) + uint8_t* page_start; // start of the page area containing the blocks + + #if (MI_ENCODE_FREELIST || MI_PADDING) + uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`) or padding canary + #endif + + _Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads + _Atomic(uintptr_t) xheap; + + struct mi_page_s* next; // next page owned by this thread with the same `block_size` + struct mi_page_s* prev; // previous page owned by this thread with the same `block_size` + + // 64-bit 11 words, 32-bit 13 words, (+2 for secure) + void* padding[1]; +} mi_page_t; + + + +// ------------------------------------------------------ +// Mimalloc segments contain mimalloc pages +// ------------------------------------------------------ + +typedef enum mi_page_kind_e { + MI_PAGE_SMALL, // small blocks go into 64KiB pages inside a segment + MI_PAGE_MEDIUM, // medium blocks go into 512KiB pages inside a segment + MI_PAGE_LARGE, // larger blocks go into a single page spanning a whole segment + MI_PAGE_HUGE // a huge page is a single page in a segment of variable size + // used for blocks `> MI_LARGE_OBJ_SIZE_MAX` or an aligment `> MI_BLOCK_ALIGNMENT_MAX`. +} mi_page_kind_t; + +typedef enum mi_segment_kind_e { + MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside. + MI_SEGMENT_HUGE, // segment with just one huge page inside. +} mi_segment_kind_t; + +// ------------------------------------------------------ +// A segment holds a commit mask where a bit is set if +// the corresponding MI_COMMIT_SIZE area is committed. +// The MI_COMMIT_SIZE must be a multiple of the slice +// size. If it is equal we have the most fine grained +// decommit (but setting it higher can be more efficient). 
+// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will +// be committed in one go which can be set higher than +// MI_COMMIT_SIZE for efficiency (while the decommit mask +// is still tracked in fine-grained MI_COMMIT_SIZE chunks) +// ------------------------------------------------------ + +#define MI_MINIMAL_COMMIT_SIZE (1*MI_SEGMENT_SLICE_SIZE) +#define MI_COMMIT_SIZE (MI_SEGMENT_SLICE_SIZE) // 64KiB +#define MI_COMMIT_MASK_BITS (MI_SEGMENT_SIZE / MI_COMMIT_SIZE) +#define MI_COMMIT_MASK_FIELD_BITS MI_SIZE_BITS +#define MI_COMMIT_MASK_FIELD_COUNT (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS) + +#if (MI_COMMIT_MASK_BITS != (MI_COMMIT_MASK_FIELD_COUNT * MI_COMMIT_MASK_FIELD_BITS)) +#error "the segment size must be exactly divisible by the (commit size * size_t bits)" +#endif + +typedef struct mi_commit_mask_s { + size_t mask[MI_COMMIT_MASK_FIELD_COUNT]; +} mi_commit_mask_t; + +typedef mi_page_t mi_slice_t; +typedef int64_t mi_msecs_t; + + +// --------------------------------------------------------------- +// a memory id tracks the provenance of arena/OS allocated memory +// --------------------------------------------------------------- + +// Memory can reside in arena's, direct OS allocated, or statically allocated. The memid keeps track of this. +typedef enum mi_memkind_e { + MI_MEM_NONE, // not allocated + MI_MEM_EXTERNAL, // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example) + MI_MEM_STATIC, // allocated in a static area and should not be freed (for arena meta data for example) + MI_MEM_OS, // allocated from the OS + MI_MEM_OS_HUGE, // allocated as huge OS pages (usually 1GiB, pinned to physical memory) + MI_MEM_OS_REMAP, // allocated in a remapable area (i.e. using `mremap`) + MI_MEM_ARENA // allocated from an arena (the usual case) +} mi_memkind_t; + +static inline bool mi_memkind_is_os(mi_memkind_t memkind) { + return (memkind >= MI_MEM_OS && memkind <= MI_MEM_OS_REMAP); +} + +typedef struct mi_memid_os_info { + void* base; // actual base address of the block (used for offset aligned allocations) + size_t alignment; // alignment at allocation +} mi_memid_os_info_t; + +typedef struct mi_memid_arena_info { + size_t block_index; // index in the arena + mi_arena_id_t id; // arena id (>= 1) + bool is_exclusive; // this arena can only be used for specific arena allocations +} mi_memid_arena_info_t; + +typedef struct mi_memid_s { + union { + mi_memid_os_info_t os; // only used for MI_MEM_OS + mi_memid_arena_info_t arena; // only used for MI_MEM_ARENA + } mem; + bool is_pinned; // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large (2Mib) or huge (1GiB) OS pages) + bool initially_committed;// `true` if the memory was originally allocated as committed + bool initially_zero; // `true` if the memory was originally zero initialized + mi_memkind_t memkind; +} mi_memid_t; + + +// ----------------------------------------------------------------------------------------- +// Segments are large allocated memory blocks (8mb on 64 bit) from arenas or the OS. +// +// Inside segments we allocated fixed size mimalloc pages (`mi_page_t`) that contain blocks. +// The start of a segment is this structure with a fixed number of slice entries (`slices`) +// usually followed by a guard OS page and the actual allocation area with pages. +// While a page is not allocated, we view it's data as a `mi_slice_t` (instead of a `mi_page_t`). 
+// Of any free area, the first slice has the info and `slice_offset == 0`; for any subsequent +// slices part of the area, the `slice_offset` is the byte offset back to the first slice +// (so we can quickly find the page info on a free, `internal.h:_mi_segment_page_of`). +// For slices, the `block_size` field is repurposed to signify if a slice is used (`1`) or not (`0`). +// Small and medium pages use a fixed amount of slices to reduce slice fragmentation, while +// large and huge pages span a variable amount of slices. +typedef struct mi_segment_s { + // constant fields + mi_memid_t memid; // memory id for arena/OS allocation + bool allow_decommit; // can we decommmit the memory + bool allow_purge; // can we purge the memory (reset or decommit) + size_t segment_size; + + // segment fields + mi_msecs_t purge_expire; // purge slices in the `purge_mask` after this time + mi_commit_mask_t purge_mask; // slices that can be purged + mi_commit_mask_t commit_mask; // slices that are currently committed + + // from here is zero initialized + struct mi_segment_s* next; // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`) + bool was_reclaimed; // true if it was reclaimed (used to limit on-free reclamation) + + size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`) + size_t abandoned_visits; // count how often this segment is visited during abondoned reclamation (to force reclaim if it takes too long) + size_t used; // count of pages in use + uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie` + + size_t segment_slices; // for huge segments this may be different from `MI_SLICES_PER_SEGMENT` + size_t segment_info_slices; // initial count of slices that we are using for segment info and possible guard pages. + + // layout like this to optimize access in `mi_free` + mi_segment_kind_t kind; + size_t slice_entries; // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT` + _Atomic(mi_threadid_t) thread_id; // unique id of the thread owning this segment + + mi_slice_t slices[MI_SLICES_PER_SEGMENT+1]; // one extra final entry for huge blocks with large alignment +} mi_segment_t; + + +// ------------------------------------------------------ +// Heaps +// Provide first-class heaps to allocate from. +// A heap just owns a set of pages for allocation and +// can only be allocate/reallocate from the thread that created it. +// Freeing blocks can be done from any thread though. +// Per thread, the segments are shared among its heaps. +// Per thread, there is always a default heap that is +// used for allocation; it is initialized to statically +// point to an empty heap to avoid initialization checks +// in the fast path. +// ------------------------------------------------------ + +// Thread local data +typedef struct mi_tld_s mi_tld_t; + +// Pages of a certain block size are held in a queue. 
+typedef struct mi_page_queue_s { + mi_page_t* first; + mi_page_t* last; + size_t block_size; +} mi_page_queue_t; + +#define MI_BIN_FULL (MI_BIN_HUGE+1) + +// Random context +typedef struct mi_random_cxt_s { + uint32_t input[16]; + uint32_t output[16]; + int output_available; + bool weak; +} mi_random_ctx_t; + + +// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows +#if (MI_PADDING) +typedef struct mi_padding_s { + uint32_t canary; // encoded block value to check validity of the padding (in case of overflow) + uint32_t delta; // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes) +} mi_padding_t; +#define MI_PADDING_SIZE (sizeof(mi_padding_t)) +#define MI_PADDING_WSIZE ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE) +#else +#define MI_PADDING_SIZE 0 +#define MI_PADDING_WSIZE 0 +#endif + +#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1) + + +// A heap owns a set of pages. +struct mi_heap_s { + mi_tld_t* tld; + _Atomic(mi_block_t*) thread_delayed_free; + mi_threadid_t thread_id; // thread this heap belongs too + mi_arena_id_t arena_id; // arena id if the heap belongs to a specific arena (or 0) + uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`) + uintptr_t keys[2]; // two random keys used to encode the `thread_delayed_free` list + mi_random_ctx_t random; // random number context used for secure allocation + size_t page_count; // total number of pages in the `pages` queues. + size_t page_retired_min; // smallest retired index (retired pages are fully free, but still in the page queues) + size_t page_retired_max; // largest retired index into the `pages` array. + mi_heap_t* next; // list of heaps per thread + bool no_reclaim; // `true` if this heap should not reclaim abandoned pages + uint8_t tag; // custom tag, can be used for separating heaps based on the object types + mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size. + mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin") +}; + + + +// ------------------------------------------------------ +// Debug +// ------------------------------------------------------ + +#if !defined(MI_DEBUG_UNINIT) +#define MI_DEBUG_UNINIT (0xD0) +#endif +#if !defined(MI_DEBUG_FREED) +#define MI_DEBUG_FREED (0xDF) +#endif +#if !defined(MI_DEBUG_PADDING) +#define MI_DEBUG_PADDING (0xDE) +#endif + +#if (MI_DEBUG) +// use our own assertion to print without memory allocation +void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func ); +#define mi_assert(expr) ((expr) ? 
(void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__)) +#else +#define mi_assert(x) +#endif + +#if (MI_DEBUG>1) +#define mi_assert_internal mi_assert +#else +#define mi_assert_internal(x) +#endif + +#if (MI_DEBUG>2) +#define mi_assert_expensive mi_assert +#else +#define mi_assert_expensive(x) +#endif + +// ------------------------------------------------------ +// Statistics +// ------------------------------------------------------ + +#ifndef MI_STAT +#if (MI_DEBUG>0) +#define MI_STAT 2 +#else +#define MI_STAT 0 +#endif +#endif + +typedef struct mi_stat_count_s { + int64_t allocated; + int64_t freed; + int64_t peak; + int64_t current; +} mi_stat_count_t; + +typedef struct mi_stat_counter_s { + int64_t total; + int64_t count; +} mi_stat_counter_t; + +typedef struct mi_stats_s { + mi_stat_count_t segments; + mi_stat_count_t pages; + mi_stat_count_t reserved; + mi_stat_count_t committed; + mi_stat_count_t reset; + mi_stat_count_t purged; + mi_stat_count_t page_committed; + mi_stat_count_t segments_abandoned; + mi_stat_count_t pages_abandoned; + mi_stat_count_t threads; + mi_stat_count_t normal; + mi_stat_count_t huge; + mi_stat_count_t large; + mi_stat_count_t malloc; + mi_stat_count_t segments_cache; + mi_stat_counter_t pages_extended; + mi_stat_counter_t mmap_calls; + mi_stat_counter_t commit_calls; + mi_stat_counter_t reset_calls; + mi_stat_counter_t purge_calls; + mi_stat_counter_t page_no_retire; + mi_stat_counter_t searches; + mi_stat_counter_t normal_count; + mi_stat_counter_t huge_count; + mi_stat_counter_t large_count; + mi_stat_counter_t arena_count; + mi_stat_counter_t arena_crossover_count; + mi_stat_counter_t arena_rollback_count; +#if MI_STAT>1 + mi_stat_count_t normal_bins[MI_BIN_HUGE+1]; +#endif +} mi_stats_t; + + +void _mi_stat_increase(mi_stat_count_t* stat, size_t amount); +void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount); +void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount); + +#if (MI_STAT) +#define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount) +#define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount) +#define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount) +#else +#define mi_stat_increase(stat,amount) (void)0 +#define mi_stat_decrease(stat,amount) (void)0 +#define mi_stat_counter_increase(stat,amount) (void)0 +#endif + +#define mi_heap_stat_counter_increase(heap,stat,amount) mi_stat_counter_increase( (heap)->tld->stats.stat, amount) +#define mi_heap_stat_increase(heap,stat,amount) mi_stat_increase( (heap)->tld->stats.stat, amount) +#define mi_heap_stat_decrease(heap,stat,amount) mi_stat_decrease( (heap)->tld->stats.stat, amount) + + +// ------------------------------------------------------ +// Thread Local data +// ------------------------------------------------------ + +// A "span" is is an available range of slices. The span queues keep +// track of slice spans of at most the given `slice_count` (but more than the previous size class). 
+typedef struct mi_span_queue_s { + mi_slice_t* first; + mi_slice_t* last; + size_t slice_count; +} mi_span_queue_t; + +#define MI_SEGMENT_BIN_MAX (35) // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT) + +// OS thread local data +typedef struct mi_os_tld_s { + size_t region_idx; // start point for next allocation + mi_stats_t* stats; // points to tld stats +} mi_os_tld_t; + + +// Segments thread local data +typedef struct mi_segments_tld_s { + mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1]; // free slice spans inside segments + size_t count; // current number of segments; + size_t peak_count; // peak number of segments + size_t current_size; // current size of all segments + size_t peak_size; // peak size of all segments + size_t reclaim_count;// number of reclaimed (abandoned) segments + mi_stats_t* stats; // points to tld stats + mi_os_tld_t* os; // points to os stats +} mi_segments_tld_t; + +// Thread local data +struct mi_tld_s { + unsigned long long heartbeat; // monotonic heartbeat count + bool recurse; // true if deferred was called; used to prevent infinite recursion. + mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted) + mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates) + mi_segments_tld_t segments; // segment tld + mi_os_tld_t os; // os tld + mi_stats_t stats; // statistics +}; + +#endif diff --git a/ww/managers/mimalloc/mimalloc.pc.in b/ww/managers/mimalloc/mimalloc.pc.in new file mode 100644 index 00000000..36da2038 --- /dev/null +++ b/ww/managers/mimalloc/mimalloc.pc.in @@ -0,0 +1,11 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +libdir=@libdir_for_pc_file@ +includedir=@includedir_for_pc_file@ + +Name: @PROJECT_NAME@ +Description: A compact general purpose allocator with excellent performance +Version: @PACKAGE_VERSION@ +URL: https://github.com/microsoft/mimalloc/ +Libs: -L${libdir} -lmimalloc +Libs.private: @pc_libraries@ +Cflags: -I${includedir} diff --git a/ww/managers/mimalloc/readme.md b/ww/managers/mimalloc/readme.md new file mode 100644 index 00000000..a0296b43 --- /dev/null +++ b/ww/managers/mimalloc/readme.md @@ -0,0 +1,862 @@ + + + +[](https://dev.azure.com/Daan0324/mimalloc/_build?definitionId=1&_a=summary) + +# mimalloc + +  + +mimalloc (pronounced "me-malloc") +is a general purpose allocator with excellent [performance](#performance) characteristics. +Initially developed by Daan Leijen for the runtime systems of the +[Koka](https://koka-lang.github.io) and [Lean](https://github.com/leanprover/lean) languages. + +Latest release tag: `v2.1.7` (2024-05-21). +Latest v1 tag: `v1.8.7` (2024-05-21). + +mimalloc is a drop-in replacement for `malloc` and can be used in other programs +without code changes, for example, on dynamically linked ELF-based systems (Linux, BSD, etc.) you can use it as: +``` +> LD_PRELOAD=/usr/lib/libmimalloc.so myprogram +``` +It also includes a robust way to override the default allocator in [Windows](#override_on_windows). Notable aspects of the design include: + +- __small and consistent__: the library is about 8k LOC using simple and + consistent data structures. This makes it very suitable + to integrate and adapt in other projects. For runtime systems it + provides hooks for a monotonic _heartbeat_ and deferred freeing (for + bounded worst-case times with reference counting). + Partly due to its simplicity, mimalloc has been ported to many systems (Windows, macOS, + Linux, WASM, various BSD's, Haiku, MUSL, etc) and has excellent support for dynamic overriding. 
+ At the same time, it is an industrial strength allocator that runs (very) large scale + distributed services on thousands of machines with excellent worst case latencies. +- __free list sharding__: instead of one big free list (per size class) we have + many smaller lists per "mimalloc page" which reduces fragmentation and + increases locality -- + things that are allocated close in time get allocated close in memory. + (A mimalloc page contains blocks of one size class and is usually 64KiB on a 64-bit system). +- __free list multi-sharding__: the big idea! Not only do we shard the free list + per mimalloc page, but for each page we have multiple free lists. In particular, there + is one list for thread-local `free` operations, and another one for concurrent `free` + operations. Free-ing from another thread can now be a single CAS without needing + sophisticated coordination between threads. Since there will be + thousands of separate free lists, contention is naturally distributed over the heap, + and the chance of contending on a single location will be low -- this is quite + similar to randomized algorithms like skip lists where adding + a random oracle removes the need for a more complex algorithm. +- __eager page purging__: when a "page" becomes empty (with increased chance + due to free list sharding) the memory is marked to the OS as unused (reset or decommitted) + reducing (real) memory pressure and fragmentation, especially in long running + programs. +- __secure__: _mimalloc_ can be built in secure mode, adding guard pages, + randomized allocation, encrypted free lists, etc. to protect against various + heap vulnerabilities. The performance penalty is usually around 10% on average + over our benchmarks. +- __first-class heaps__: efficiently create and use multiple heaps to allocate across different regions. + A heap can be destroyed at once instead of deallocating each object separately. +- __bounded__: it does not suffer from _blowup_ \[1\], has bounded worst-case allocation + times (_wcat_) (upto OS primitives), bounded space overhead (~0.2% meta-data, with low + internal fragmentation), and has no internal points of contention using only atomic operations. +- __fast__: In our benchmarks (see [below](#performance)), + _mimalloc_ outperforms other leading allocators (_jemalloc_, _tcmalloc_, _Hoard_, etc), + and often uses less memory. A nice property is that it does consistently well over a wide range + of benchmarks. There is also good huge OS page support for larger server programs. + +The [documentation](https://microsoft.github.io/mimalloc) gives a full overview of the API. +You can read more on the design of _mimalloc_ in the [technical report](https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action) which also has detailed benchmark results. + +Enjoy! + +### Branches + +* `master`: latest stable release (based on `dev-slice`). +* `dev`: development branch for mimalloc v1. Use this branch for submitting PR's. +* `dev-slice`: development branch for mimalloc v2. This branch is downstream of `dev` (and is essentially equal to `dev` except for +`src/segment.c`) + +### Releases + +Note: the `v2.x` version has a different algorithm for managing internal mimalloc pages (as slices) that tends to use reduce +memory usage + and fragmentation compared to mimalloc `v1.x` (especially for large workloads). Should otherwise have similar performance + (see [below](#performance)); please report if you observe any significant performance regression. 
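The first-class heaps mentioned in the design notes above can be illustrated with a minimal sketch using the `mi_heap_*` calls from `mimalloc.h` (assuming the program is compiled and linked against the mimalloc library):

```c
#include <mimalloc.h>

int main(void) {
  mi_heap_t* heap = mi_heap_new();                // create a separate heap
  char* buf = (char*)mi_heap_malloc(heap, 64);    // allocate from that heap
  buf[0] = 'x';                                   // ... use the allocation ...
  mi_heap_destroy(heap);                          // frees buf and every other object in this heap at once
  return 0;
}
```

Destroying the heap releases all objects allocated from it in one operation instead of freeing each object separately; `mi_heap_delete` can be used instead if the remaining objects should migrate to the default heap and stay valid.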
+ +* 2024-05-21, `v1.8.7`, `v2.1.7`: Fix build issues on less common platforms. Started upstreaming patches + from the CPython [integration](https://github.com/python/cpython/issues/113141#issuecomment-2119255217). Upstream `vcpkg` patches. +* 2024-05-13, `v1.8.6`, `v2.1.6`: Fix build errors on various (older) platforms. Refactored aligned allocation. +* 2024-04-22, `v1.8.4`, `v2.1.4`: Fixes various bugs and build issues. Add `MI_LIBC_MUSL` cmake flag for musl builds. + Free-ing code is refactored into a separate module (`free.c`). Mimalloc page info is simplified with the block size + directly available (and new `block_size_shift` to improve aligned block free-ing). + New approach to collection of abandoned segments: When + a thread terminates the segments it owns are abandoned (containing still live objects) and these can be + reclaimed by other threads. We no longer use a list of abandoned segments but this is now done using bitmaps in arena's + which is more concurrent (and more aggressive). Abandoned memory can now also be reclaimed if a thread frees an object in + an abandoned page (which can be disabled using `mi_option_abandoned_reclaim_on_free`). The option `mi_option_max_segment_reclaim` + gives a maximum percentage of abandoned segments that can be reclaimed per try (=10%). + +* 2023-04-24, `v1.8.2`, `v2.1.2`: Fixes build issues on freeBSD, musl, and C17 (UE 5.1.1). Reduce code size/complexity + by removing regions and segment-cache's and only use arenas with improved memory purging -- this may improve memory + usage as well for larger services. Renamed options for consistency. Improved Valgrind and ASAN checking. + +* 2023-04-03, `v1.8.1`, `v2.1.1`: Fixes build issues on some platforms. + +* 2023-03-29, `v1.8.0`, `v2.1.0`: Improved support dynamic overriding on Windows 11. Improved tracing precision + with [asan](#asan) and [Valgrind](#valgrind), and added Windows event tracing [ETW](#ETW) (contributed by Xinglong He). Created an OS + abstraction layer to make it easier to port and separate platform dependent code (in `src/prim`). Fixed C++ STL compilation on older Microsoft C++ compilers, and various small bug fixes. + +* 2022-12-23, `v1.7.9`, `v2.0.9`: Supports building with [asan](#asan) and improved [Valgrind](#valgrind) support. + Support arbitrary large alignments (in particular for `std::pmr` pools). + Added C++ STL allocators attached to a specific heap (thanks @vmarkovtsev). + Heap walks now visit all object (including huge objects). Support Windows nano server containers (by Johannes Schindelin,@dscho). + Various small bug fixes. + +* 2022-11-03, `v1.7.7`, `v2.0.7`: Initial support for [Valgrind](#valgrind) for leak testing and heap block overflow + detection. Initial + support for attaching heaps to a speficic memory area (only in v2). Fix `realloc` behavior for zero size blocks, remove restriction to integral multiple of the alignment in `alloc_align`, improved aligned allocation performance, reduced contention with many threads on few processors (thank you @dposluns!), vs2022 support, support `pkg-config`, . 
+ +* 2022-04-14, `v1.7.6`, `v2.0.6`: fix fallback path for aligned OS allocation on Windows, improve Windows aligned allocation + even when compiling with older SDK's, fix dynamic overriding on macOS Monterey, fix MSVC C++ dynamic overriding, fix + warnings under Clang 14, improve performance if many OS threads are created and destroyed, fix statistics for large object + allocations, using MIMALLOC_VERBOSE=1 has no maximum on the number of error messages, various small fixes. + +* 2022-02-14, `v1.7.5`, `v2.0.5` (alpha): fix malloc override on + Windows 11, fix compilation with musl, potentially reduced + committed memory, add `bin/minject` for Windows, + improved wasm support, faster aligned allocation, + various small fixes. + +* [Older release notes](#older-release-notes) + +Special thanks to: + +* [David Carlier](https://devnexen.blogspot.com/) (@devnexen) for his many contributions, and making + mimalloc work better on many less common operating systems, like Haiku, Dragonfly, etc. +* Mary Feofanova (@mary3000), Evgeniy Moiseenko, and Manuel Pöter (@mpoeter) for making mimalloc TSAN checkable, and finding + memory model bugs using the [genMC] model checker. +* Weipeng Liu (@pongba), Zhuowei Li, Junhua Wang, and Jakub Szymanski, for their early support of mimalloc and deployment + at large scale services, leading to many improvements in the mimalloc algorithms for large workloads. +* Jason Gibson (@jasongibson) for exhaustive testing on large scale workloads and server environments, and finding complex bugs + in (early versions of) `mimalloc`. +* Manuel Pöter (@mpoeter) and Sam Gross(@colesbury) for finding an ABA concurrency issue in abandoned segment reclamation. Sam also created the [no GIL](https://github.com/colesbury/nogil) Python fork which + uses mimalloc internally. + + +[genMC]: https://plv.mpi-sws.org/genmc/ + +### Usage + +mimalloc is used in various large scale low-latency services and programs, for example: + + + + + + + + +# Building + +## Windows + +Open `ide/vs2022/mimalloc.sln` in Visual Studio 2022 and build. +The `mimalloc` project builds a static library (in `out/msvc-x64`), while the +`mimalloc-override` project builds a DLL for overriding malloc +in the entire program. + +## macOS, Linux, BSD, etc. + +We use [`cmake`](https://cmake.org)1 as the build system: + +``` +> mkdir -p out/release +> cd out/release +> cmake ../.. +> make +``` +This builds the library as a shared (dynamic) +library (`.so` or `.dylib`), a static library (`.a`), and +as a single object file (`.o`). + +`> sudo make install` (install the library and header files in `/usr/local/lib` and `/usr/local/include`) + +You can build the debug version which does many internal checks and +maintains detailed statistics as: + +``` +> mkdir -p out/debug +> cd out/debug +> cmake -DCMAKE_BUILD_TYPE=Debug ../.. +> make +``` +This will name the shared library as `libmimalloc-debug.so`. + +Finally, you can build a _secure_ version that uses guard pages, encrypted +free lists, etc., as: +``` +> mkdir -p out/secure +> cd out/secure +> cmake -DMI_SECURE=ON ../.. +> make +``` +This will name the shared library as `libmimalloc-secure.so`. +Use `ccmake`2 instead of `cmake` +to see and customize all the available build options. + +Notes: +1. Install CMake: `sudo apt-get install cmake` +2. Install CCMake: `sudo apt-get install cmake-curses-gui` + + +## Single source + +You can also directly build the single `src/static.c` file as part of your project without +needing `cmake` at all. 
Make sure to also add the mimalloc `include` directory to the include path. + + +# Using the library + +The preferred usage is including ``, linking with +the shared- or static library, and using the `mi_malloc` API exclusively for allocation. For example, +``` +> gcc -o myprogram -lmimalloc myfile.c +``` + +mimalloc uses only safe OS calls (`mmap` and `VirtualAlloc`) and can co-exist +with other allocators linked to the same program. +If you use `cmake`, you can simply use: +``` +find_package(mimalloc 1.4 REQUIRED) +``` +in your `CMakeLists.txt` to find a locally installed mimalloc. Then use either: +``` +target_link_libraries(myapp PUBLIC mimalloc) +``` +to link with the shared (dynamic) library, or: +``` +target_link_libraries(myapp PUBLIC mimalloc-static) +``` +to link with the static library. See `test\CMakeLists.txt` for an example. + +For best performance in C++ programs, it is also recommended to override the +global `new` and `delete` operators. For convenience, mimalloc provides +[`mimalloc-new-delete.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-new-delete.h) which does this for you -- just include it in a single(!) source file in your project. +In C++, mimalloc also provides the `mi_stl_allocator` struct which implements the `std::allocator` +interface. + +You can pass environment variables to print verbose messages (`MIMALLOC_VERBOSE=1`) +and statistics (`MIMALLOC_SHOW_STATS=1`) (in the debug version): +``` +> env MIMALLOC_SHOW_STATS=1 ./cfrac 175451865205073170563711388363 + +175451865205073170563711388363 = 374456281610909315237213 * 468551 + +heap stats: peak total freed unit +normal 2: 16.4 kb 17.5 mb 17.5 mb 16 b ok +normal 3: 16.3 kb 15.2 mb 15.2 mb 24 b ok +normal 4: 64 b 4.6 kb 4.6 kb 32 b ok +normal 5: 80 b 118.4 kb 118.4 kb 40 b ok +normal 6: 48 b 48 b 48 b 48 b ok +normal 17: 960 b 960 b 960 b 320 b ok + +heap stats: peak total freed unit + normal: 33.9 kb 32.8 mb 32.8 mb 1 b ok + huge: 0 b 0 b 0 b 1 b ok + total: 33.9 kb 32.8 mb 32.8 mb 1 b ok +malloc requested: 32.8 mb + + committed: 58.2 kb 58.2 kb 58.2 kb 1 b ok + reserved: 2.0 mb 2.0 mb 2.0 mb 1 b ok + reset: 0 b 0 b 0 b 1 b ok + segments: 1 1 1 +-abandoned: 0 + pages: 6 6 6 +-abandoned: 0 + mmaps: 3 + mmap fast: 0 + mmap slow: 1 + threads: 0 + elapsed: 2.022s + process: user: 1.781s, system: 0.016s, faults: 756, reclaims: 0, rss: 2.7 mb +``` + +The above model of using the `mi_` prefixed API is not always possible +though in existing programs that already use the standard malloc interface, +and another option is to override the standard malloc interface +completely and redirect all calls to the _mimalloc_ library instead . + +## Environment Options + +You can set further options either programmatically (using [`mi_option_set`](https://microsoft.github.io/mimalloc/group__options.html)), or via environment variables: + +- `MIMALLOC_SHOW_STATS=1`: show statistics when the program terminates. +- `MIMALLOC_VERBOSE=1`: show verbose messages. +- `MIMALLOC_SHOW_ERRORS=1`: show error and warning messages. + +Advanced options: + +- `MIMALLOC_ARENA_EAGER_COMMIT=2`: turns on eager commit for the large arenas (usually 1GiB) from which mimalloc + allocates segments and pages. Set this to 2 (default) to + only enable this on overcommit systems (e.g. Linux). Set this to 1 to enable explicitly on other systems + as well (like Windows or macOS) which may improve performance (as the whole arena is committed at once). 
+ Note that eager commit only increases the commit but not the actual the peak resident set + (rss) so it is generally ok to enable this. +- `MIMALLOC_PURGE_DELAY=N`: the delay in `N` milli-seconds (by default `10`) after which mimalloc will purge + OS pages that are not in use. This signals to the OS that the underlying physical memory can be reused which + can reduce memory fragmentation especially in long running (server) programs. Setting `N` to `0` purges immediately when + a page becomes unused which can improve memory usage but also decreases performance. Setting `N` to a higher + value like `100` can improve performance (sometimes by a lot) at the cost of potentially using more memory at times. + Setting it to `-1` disables purging completely. +- `MIMALLOC_PURGE_DECOMMITS=1`: By default "purging" memory means unused memory is decommitted (`MEM_DECOMMIT` on Windows, + `MADV_DONTNEED` (which decresease rss immediately) on `mmap` systems). Set this to 0 to instead "reset" unused + memory on a purge (`MEM_RESET` on Windows, generally `MADV_FREE` (which does not decrease rss immediately) on `mmap` systems). + Mimalloc generally does not "free" OS memory but only "purges" OS memory, in other words, it tries to keep virtual + address ranges and decommits within those ranges (to make the underlying physical memory available to other processes). + +Further options for large workloads and services: + +- `MIMALLOC_USE_NUMA_NODES=N`: pretend there are at most `N` NUMA nodes. If not set, the actual NUMA nodes are detected + at runtime. Setting `N` to 1 may avoid problems in some virtual environments. Also, setting it to a lower number than + the actual NUMA nodes is fine and will only cause threads to potentially allocate more memory across actual NUMA + nodes (but this can happen in any case as NUMA local allocation is always a best effort but not guaranteed). +- `MIMALLOC_ALLOW_LARGE_OS_PAGES=1`: use large OS pages (2 or 4MiB) when available; for some workloads this can significantly + improve performance. When this option is disabled, it also disables transparent huge pages (THP) for the process + (on Linux and Android). Use `MIMALLOC_VERBOSE` to check if the large OS pages are enabled -- usually one needs + to explicitly give permissions for large OS pages (as on [Windows][windows-huge] and [Linux][linux-huge]). However, sometimes + the OS is very slow to reserve contiguous physical memory for large OS pages so use with care on systems that + can have fragmented memory (for that reason, we generally recommend to use `MIMALLOC_RESERVE_HUGE_OS_PAGES` instead whenever possible). +- `MIMALLOC_RESERVE_HUGE_OS_PAGES=N`: where `N` is the number of 1GiB _huge_ OS pages. This reserves the huge pages at + startup and sometimes this can give a large (latency) performance improvement on big workloads. + Usually it is better to not use `MIMALLOC_ALLOW_LARGE_OS_PAGES=1` in combination with this setting. Just like large + OS pages, use with care as reserving + contiguous physical memory can take a long time when memory is fragmented (but reserving the huge pages is done at + startup only once). + Note that we usually need to explicitly give permission for huge OS pages (as on [Windows][windows-huge] and [Linux][linux-huge])). 
+ With huge OS pages, it may be beneficial to set the setting + `MIMALLOC_EAGER_COMMIT_DELAY=N` (`N` is 1 by default) to delay the initial `N` segments (of 4MiB) + of a thread to not allocate in the huge OS pages; this prevents threads that are short lived + and allocate just a little to take up space in the huge OS page area (which cannot be purged as huge OS pages are pinned + to physical memory). + The huge pages are usually allocated evenly among NUMA nodes. + We can use `MIMALLOC_RESERVE_HUGE_OS_PAGES_AT=N` where `N` is the numa node (starting at 0) to allocate all + the huge pages at a specific numa node instead. + +Use caution when using `fork` in combination with either large or huge OS pages: on a fork, the OS uses copy-on-write +for all pages in the original process including the huge OS pages. When any memory is now written in that area, the +OS will copy the entire 1GiB huge page (or 2MiB large page) which can cause the memory usage to grow in large increments. + +[linux-huge]: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html/tuning_and_optimizing_red_hat_enterprise_linux_for_oracle_9i_and_10g_databases/sect-oracle_9i_and_10g_tuning_guide-large_memory_optimization_big_pages_and_huge_pages-configuring_huge_pages_in_red_hat_enterprise_linux_4_or_5 +[windows-huge]: https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/enable-the-lock-pages-in-memory-option-windows?view=sql-server-2017 + +## Secure Mode + +_mimalloc_ can be build in secure mode by using the `-DMI_SECURE=ON` flags in `cmake`. This build enables various mitigations +to make mimalloc more robust against exploits. In particular: + +- All internal mimalloc pages are surrounded by guard pages and the heap metadata is behind a guard page as well (so a buffer overflow + exploit cannot reach into the metadata). +- All free list pointers are + [encoded](https://github.com/microsoft/mimalloc/blob/783e3377f79ee82af43a0793910a9f2d01ac7863/include/mimalloc-internal.h#L396) + with per-page keys which is used both to prevent overwrites with a known pointer, as well as to detect heap corruption. +- Double free's are detected (and ignored). +- The free lists are initialized in a random order and allocation randomly chooses between extension and reuse within a page to + mitigate against attacks that rely on a predicable allocation order. Similarly, the larger heap blocks allocated by mimalloc + from the OS are also address randomized. + +As always, evaluate with care as part of an overall security strategy as all of the above are mitigations but not guarantees. + +## Debug Mode + +When _mimalloc_ is built using debug mode, various checks are done at runtime to catch development errors. + +- Statistics are maintained in detail for each object size. They can be shown using `MIMALLOC_SHOW_STATS=1` at runtime. +- All objects have padding at the end to detect (byte precise) heap block overflows. +- Double free's, and freeing invalid heap pointers are detected. +- Corrupted free-lists and some forms of use-after-free are detected. + + +# Overriding Standard Malloc + +Overriding the standard `malloc` (and `new`) can be done either _dynamically_ or _statically_. + +## Dynamic override + +This is the recommended way to override the standard malloc interface. + +### Dynamic Override on Linux, BSD + +On these ELF-based systems we preload the mimalloc shared +library so all calls to the standard `malloc` interface are +resolved to the _mimalloc_ library. 
+```
+> env LD_PRELOAD=/usr/lib/libmimalloc.so myprogram
+```
+
+You can set extra environment variables to check that mimalloc is running,
+like:
+```
+> env MIMALLOC_VERBOSE=1 LD_PRELOAD=/usr/lib/libmimalloc.so myprogram
+```
+or run with the debug version to get detailed statistics:
+```
+> env MIMALLOC_SHOW_STATS=1 LD_PRELOAD=/usr/lib/libmimalloc-debug.so myprogram
+```
+
+### Dynamic Override on macOS
+
+On macOS we can also preload the mimalloc shared
+library so all calls to the standard `malloc` interface are
+resolved to the _mimalloc_ library.
+```
+> env DYLD_INSERT_LIBRARIES=/usr/lib/libmimalloc.dylib myprogram
+```
+
+Note that certain security restrictions may apply when doing this from
+the [shell](https://stackoverflow.com/questions/43941322/dyld-insert-libraries-ignored-when-calling-application-through-bash).
+
+
+### Dynamic Override on Windows
+
+Dynamically overriding malloc with mimalloc on Windows
+is robust and has the particular advantage of being able to redirect all malloc/free calls that go through
+the (dynamic) C runtime allocator, including those from other DLL's or libraries.
+As it intercepts all allocation calls at a low level, it can be used reliably
+on large programs that include other 3rd party components.
+There are four requirements to make the overriding work robustly:
+
+1. Use the C-runtime library as a DLL (using the `/MD` or `/MDd` switch).
+2. Link your program explicitly with the `mimalloc-override.dll` library.
+   To ensure the `mimalloc-override.dll` is loaded at run-time it is easiest to insert some
+   call to the mimalloc API in the `main` function, like `mi_version()`
+   (or use the `/INCLUDE:mi_version` switch on the linker). See the `mimalloc-override-test` project
+   for an example on how to use this.
+3. The [`mimalloc-redirect.dll`](bin) (or `mimalloc-redirect32.dll`) must be put
+   in the same folder as the main `mimalloc-override.dll` at runtime (as it is a dependency of that DLL).
+   The redirection DLL ensures that all calls to the C runtime malloc API get redirected to
+   mimalloc functions (which reside in `mimalloc-override.dll`).
+4. Ensure the `mimalloc-override.dll` comes as early as possible in the import
+   list of the final executable (so it can intercept all potential allocations).
+
+For best performance on Windows with C++, it
+is also recommended to override the `new`/`delete` operations (by including
+[`mimalloc-new-delete.h`](include/mimalloc-new-delete.h)
+in a single(!) source file in your project).
+
+The environment variable `MIMALLOC_DISABLE_REDIRECT=1` can be used to disable dynamic
+overriding at run-time. Use `MIMALLOC_VERBOSE=1` to check if mimalloc was successfully redirected.
+
+We cannot always re-link an executable with `mimalloc-override.dll`, and similarly, we cannot always
+ensure the DLL comes first in the import table of the final executable.
+In many cases though we can patch existing executables without any recompilation
+if they are linked with the dynamic C runtime (`ucrtbase.dll`) -- just put the `mimalloc-override.dll`
+into the import table (and put `mimalloc-redirect.dll` in the same folder).
+Such patching can be done for example with [CFF Explorer](https://ntcore.com/?page_id=388) or
+the [`minject`](bin) program.
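As a small illustration of requirement 2 in the list above, the easiest way to force `mimalloc-override.dll` to be loaded is to reference the mimalloc API somewhere in `main`. A minimal sketch (assuming `mimalloc.h` is on the include path):

```c
#include <stdio.h>
#include <mimalloc.h>

int main(void) {
  // Calling into the mimalloc API keeps mimalloc-override.dll in the import
  // list so the redirection DLL can take effect for the whole process.
  printf("mimalloc version: %d\n", mi_version());
  // ... the rest of the program can keep using plain malloc/free ...
  return 0;
}
```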
+
+## Static override
+
+On Unix-like systems, you can also statically link with _mimalloc_ to override the standard
+malloc interface. The recommended way is to link the final program with the
+_mimalloc_ single object file (`mimalloc.o`). We use
+an object file instead of a library file as linkers give preference to
+that over archives to resolve symbols. To ensure that the standard
+malloc interface resolves to the _mimalloc_ library, link it as the first
+object file. For example:
+```
+> gcc -o myprogram mimalloc.o myfile1.c ...
+```
+
+Another way to override statically that works on all platforms is to
+link statically to mimalloc (as shown in the introduction) and include a
+header file in each source file that re-defines `malloc` etc. to `mi_malloc`.
+This is provided by [`mimalloc-override.h`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc-override.h). This only works reliably though if all sources are
+under your control; otherwise pointers from different heaps may get mixed!
+
+
+# Tools
+
+Generally, we recommend using the standard allocator with memory tracking tools, but mimalloc
+can also be built to support the [address sanitizer][asan] or the excellent [Valgrind] tool.
+Moreover, it can be built to support Windows event tracing ([ETW]).
+This has a small performance overhead but does allow detecting memory leaks and byte-precise
+buffer overflows directly on final executables. See also the `test/test-wrong.c` file to test with various tools.
+
+## Valgrind
+
+To build with [valgrind] support, use the `MI_TRACK_VALGRIND=ON` cmake option:
+
+```
+> cmake ../.. -DMI_TRACK_VALGRIND=ON
+```
+
+This can also be combined with secure mode or debug mode.
+You can then run your programs directly under valgrind:
+
+```
+> valgrind <myprogram>
+```
+
+If you rely on overriding `malloc`/`free` by mimalloc (instead of using the `mi_malloc`/`mi_free` API directly),
+you also need to tell `valgrind` to not intercept those calls itself, and use:
+
+```
+> MIMALLOC_SHOW_STATS=1 valgrind --soname-synonyms=somalloc=*mimalloc* -- <myprogram>
+```
+
+By setting the `MIMALLOC_SHOW_STATS` environment variable you can check that mimalloc is indeed
+used and not the standard allocator. Even though the [Valgrind option][valgrind-soname]
+is called `--soname-synonyms`, this also
+works when overriding with a static library or object file. Unfortunately, it is not possible to
+dynamically override mimalloc using `LD_PRELOAD` together with `valgrind`.
+See also the `test/test-wrong.c` file to test with `valgrind`.
+
+Valgrind support is in its initial development -- please report any issues.
+
+[Valgrind]: https://valgrind.org/
+[valgrind-soname]: https://valgrind.org/docs/manual/manual-core.html#opt.soname-synonyms
+
+## ASAN
+
+To build with the address sanitizer, use the `-DMI_TRACK_ASAN=ON` cmake option:
+
+```
+> cmake ../.. -DMI_TRACK_ASAN=ON
+```
+
+This can also be combined with secure mode or debug mode.
+You can then run your programs as:
+
+```
+> ASAN_OPTIONS=verbosity=1 <myprogram>
+```
+
+When you link a program with an address sanitizer build of mimalloc, you should
+generally compile that program too with the address sanitizer enabled.
+For example, assuming you build mimalloc in `out/debug`:
+
+```
+clang -g -o test-wrong -Iinclude test/test-wrong.c out/debug/libmimalloc-asan-debug.a -lpthread -fsanitize=address -fsanitize-recover=address
+```
+
+Since the address sanitizer redirects the standard allocation functions, on some platforms (macOS for example)
+it is required to compile mimalloc with `-DMI_OVERRIDE=OFF`.
+Address sanitizer support is in its initial development -- please report any issues.
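In the spirit of `test/test-wrong.c` (but not taken from it), a minimal sketch of the kind of defect these tool builds are meant to catch, assuming mimalloc was built with one of the tracking options above:

```c
#include <string.h>
#include <mimalloc.h>

int main(void) {
  char* p = (char*)mi_malloc(16);
  memset(p, 'x', 17);   // deliberate one-byte heap block overflow
  mi_free(p);           // ASAN/Valgrind builds (or debug-mode padding) should report the overflow
  return 0;
}
```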
+ +[asan]: https://github.com/google/sanitizers/wiki/AddressSanitizer + +## ETW + +Event tracing for Windows ([ETW]) provides a high performance way to capture all allocations though +mimalloc and analyze them later. To build with ETW support, use the `-DMI_TRACK_ETW=ON` cmake option. + +You can then capture an allocation trace using the Windows performance recorder (WPR), using the +`src/prim/windows/etw-mimalloc.wprp` profile. In an admin prompt, you can use: +``` +> wpr -start src\prim\windows\etw-mimalloc.wprp -filemode +> +> wpr -stop .etl +``` +and then open `.etl` in the Windows Performance Analyzer (WPA), or +use a tool like [TraceControl] that is specialized for analyzing mimalloc traces. + +[ETW]: https://learn.microsoft.com/en-us/windows-hardware/test/wpt/event-tracing-for-windows +[TraceControl]: https://github.com/xinglonghe/TraceControl + + +# Performance + +Last update: 2021-01-30 + +We tested _mimalloc_ against many other top allocators over a wide +range of benchmarks, ranging from various real world programs to +synthetic benchmarks that see how the allocator behaves under more +extreme circumstances. In our benchmark suite, _mimalloc_ outperforms other leading +allocators (_jemalloc_, _tcmalloc_, _Hoard_, etc), and has a similar memory footprint. A nice property is that it +does consistently well over the wide range of benchmarks. + +General memory allocators are interesting as there exists no algorithm that is +optimal -- for a given allocator one can usually construct a workload +where it does not do so well. The goal is thus to find an allocation +strategy that performs well over a wide range of benchmarks without +suffering from (too much) underperformance in less common situations. + +As always, interpret these results with care since some benchmarks test synthetic +or uncommon situations that may never apply to your workloads. For example, most +allocators do not do well on `xmalloc-testN` but that includes even the best +industrial allocators like _jemalloc_ and _tcmalloc_ that are used in some of +the world's largest systems (like Chrome or FreeBSD). + +Also, the benchmarks here do not measure the behaviour on very large and long-running server workloads, +or worst-case latencies of allocation. Much work has gone into `mimalloc` to work well on such +workloads (for example, to reduce virtual memory fragmentation on long-running services) +but such optimizations are not always reflected in the current benchmark suite. + +We show here only an overview -- for +more specific details and further benchmarks we refer to the +[technical report](https://www.microsoft.com/en-us/research/publication/mimalloc-free-list-sharding-in-action). +The benchmark suite is automated and available separately +as [mimalloc-bench](https://github.com/daanx/mimalloc-bench). + + +## Benchmark Results on a 16-core AMD 5950x (Zen3) + +Testing on the 16-core AMD 5950x processor at 3.4Ghz (4.9Ghz boost), with +with 32GiB memory at 3600Mhz, running Ubuntu 20.04 with glibc 2.31 and GCC 9.3.0. + +We measure three versions of _mimalloc_: the main version `mi` (tag:v1.7.0), +the new v2.0 beta version as `xmi` (tag:v2.0.0), and the main version in secure mode as `smi` (tag:v1.7.0). 
+ +The other allocators are +Google's [_tcmalloc_](https://github.com/gperftools/gperftools) (`tc`, tag:gperftools-2.8.1) used in Chrome, +Facebook's [_jemalloc_](https://github.com/jemalloc/jemalloc) (`je`, tag:5.2.1) by Jason Evans used in Firefox and FreeBSD, +the Intel thread building blocks [allocator](https://github.com/intel/tbb) (`tbb`, tag:v2020.3), +[rpmalloc](https://github.com/mjansson/rpmalloc) (`rp`,tag:1.4.1) by Mattias Jansson, +the original scalable [_Hoard_](https://github.com/emeryberger/Hoard) (git:d880f72) allocator by Emery Berger \[1], +the memory compacting [_Mesh_](https://github.com/plasma-umass/Mesh) (git:67ff31a) allocator by +Bobby Powers _et al_ \[8], +and finally the default system allocator (`glibc`, 2.31) (based on _PtMalloc2_). + + + + +Any benchmarks ending in `N` run on all 32 logical cores in parallel. +Results are averaged over 10 runs and reported relative +to mimalloc (where 1.2 means it took 1.2× longer to run). +The legend also contains the _overall relative score_ between the +allocators where 100 points is the maximum if an allocator is fastest on +all benchmarks. + +The single threaded _cfrac_ benchmark by Dave Barrett is an implementation of +continued fraction factorization which uses many small short-lived allocations. +All allocators do well on such common usage, where _mimalloc_ is just a tad +faster than _tcmalloc_ and +_jemalloc_. + +The _leanN_ program is interesting as a large realistic and +concurrent workload of the [Lean](https://github.com/leanprover/lean) +theorem prover compiling its own standard library, and there is a 13% +speedup over _tcmalloc_. This is +quite significant: if Lean spends 20% of its time in the +allocator that means that _mimalloc_ is 1.6× faster than _tcmalloc_ +here. (This is surprising as that is not measured in a pure +allocation benchmark like _alloc-test_. We conjecture that we see this +outsized improvement here because _mimalloc_ has better locality in +the allocation which improves performance for the *other* computations +in a program as well). + +The single threaded _redis_ benchmark again show that most allocators do well on such workloads. + +The _larsonN_ server benchmark by Larson and Krishnan \[2] allocates and frees between threads. They observed this +behavior (which they call _bleeding_) in actual server applications, and the benchmark simulates this. +Here, _mimalloc_ is quite a bit faster than _tcmalloc_ and _jemalloc_ probably due to the object migration between different threads. + +The _mstressN_ workload performs many allocations and re-allocations, +and migrates objects between threads (as in _larsonN_). However, it also +creates and destroys the _N_ worker threads a few times keeping some objects +alive beyond the life time of the allocating thread. We observed this +behavior in many larger server applications. + +The [_rptestN_](https://github.com/mjansson/rpmalloc-benchmark) benchmark +by Mattias Jansson is a allocator test originally designed +for _rpmalloc_, and tries to simulate realistic allocation patterns over +multiple threads. Here the differences between allocators become more apparent. + +The second benchmark set tests specific aspects of the allocators and +shows even more extreme differences between them. + +The _alloc-test_, by +[OLogN Technologies AG](http://ithare.com/testing-memory-allocators-ptmalloc2-tcmalloc-hoard-jemalloc-while-trying-to-simulate-real-world-loads/), is a very allocation intensive benchmark doing millions of +allocations in various size classes. 
The test is scaled such that when an +allocator performs almost identically on _alloc-test1_ as _alloc-testN_ it +means that it scales linearly. + +The _sh6bench_ and _sh8bench_ benchmarks are +developed by [MicroQuill](http://www.microquill.com/) as part of SmartHeap. +In _sh6bench_ _mimalloc_ does much +better than the others (more than 2.5× faster than _jemalloc_). +We cannot explain this well but believe it is +caused in part by the "reverse" free-ing pattern in _sh6bench_. +The _sh8bench_ is a variation with object migration +between threads; whereas _tcmalloc_ did well on _sh6bench_, the addition of object migration causes it to be 10× slower than before. + +The _xmalloc-testN_ benchmark by Lever and Boreham \[5] and Christian Eder, simulates an asymmetric workload where +some threads only allocate, and others only free -- they observed this pattern in +larger server applications. Here we see that +the _mimalloc_ technique of having non-contended sharded thread free +lists pays off as it outperforms others by a very large margin. Only _rpmalloc_, _tbb_, and _glibc_ also scale well on this benchmark. + +The _cache-scratch_ benchmark by Emery Berger \[1], and introduced with +the Hoard allocator to test for _passive-false_ sharing of cache lines. +With a single thread they all +perform the same, but when running with multiple threads the potential allocator +induced false sharing of the cache lines can cause large run-time differences. +Crundal \[6] describes in detail why the false cache line sharing occurs in the _tcmalloc_ design, and also discusses how this +can be avoided with some small implementation changes. +Only the _tbb_, _rpmalloc_ and _mesh_ allocators also avoid the +cache line sharing completely, while _Hoard_ and _glibc_ seem to mitigate +the effects. Kukanov and Voss \[7] describe in detail +how the design of _tbb_ avoids the false cache line sharing. + + +## On a 36-core Intel Xeon + +For completeness, here are the results on a big Amazon +[c5.18xlarge](https://aws.amazon.com/ec2/instance-types/#Compute_Optimized) instance +consisting of a 2×18-core Intel Xeon (Cascade Lake) at 3.4GHz (boost 3.5GHz) +with 144GiB ECC memory, running Ubuntu 20.04 with glibc 2.31, GCC 9.3.0, and +Clang 10.0.0. This time, the mimalloc allocators (mi, xmi, and smi) were +compiled with the Clang compiler instead of GCC. +The results are similar to the AMD results but it is interesting to +see the differences in the _larsonN_, _mstressN_, and _xmalloc-testN_ benchmarks. + + + + + +## Peak Working Set + +The following figure shows the peak working set (rss) of the allocators +on the benchmarks (on the c5.18xlarge instance). + + + + +Note that the _xmalloc-testN_ memory usage should be disregarded as it +allocates more the faster the program runs. Similarly, memory usage of +_larsonN_, _mstressN_, _rptestN_ and _sh8bench_ can vary depending on scheduling and +speed. Nevertheless, we hope to improve the memory usage on _mstressN_ +and _rptestN_ (just as _cfrac_, _larsonN_ and _sh8bench_ have a small working set which skews the results). + + + + +# References + +- \[1] Emery D. Berger, Kathryn S. McKinley, Robert D. Blumofe, and Paul R. Wilson. + _Hoard: A Scalable Memory Allocator for Multithreaded Applications_ + the Ninth International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS-IX). Cambridge, MA, November 2000. + [pdf](http://www.cs.utexas.edu/users/mckinley/papers/asplos-2000.pdf) + +- \[2] P. Larson and M. Krishnan. 
_Memory allocation for long-running server applications_. + In ISMM, Vancouver, B.C., Canada, 1998. [pdf](http://citeseer.ist.psu.edu/viewdoc/download?doi=10.1.1.45.1947&rep=rep1&type=pdf) + +- \[3] D. Grunwald, B. Zorn, and R. Henderson. + _Improving the cache locality of memory allocation_. In R. Cartwright, editor, + Proceedings of the Conference on Programming Language Design and Implementation, pages 177–186, New York, NY, USA, June 1993. [pdf](http://citeseer.ist.psu.edu/viewdoc/download?doi=10.1.1.43.6621&rep=rep1&type=pdf) + +- \[4] J. Barnes and P. Hut. _A hierarchical O(n*log(n)) force-calculation algorithm_. Nature, 324:446-449, 1986. + +- \[5] C. Lever, and D. Boreham. _Malloc() Performance in a Multithreaded Linux Environment._ + In USENIX Annual Technical Conference, Freenix Session. San Diego, CA. Jun. 2000. + Available at + +- \[6] Timothy Crundal. _Reducing Active-False Sharing in TCMalloc_. 2016. CS16S1 project at the Australian National University. [pdf](http://courses.cecs.anu.edu.au/courses/CSPROJECTS/16S1/Reports/Timothy_Crundal_Report.pdf) + +- \[7] Alexey Kukanov, and Michael J Voss. + _The Foundations for Scalable Multi-Core Software in Intel Threading Building Blocks._ + Intel Technology Journal 11 (4). 2007 + +- \[8] Bobby Powers, David Tench, Emery D. Berger, and Andrew McGregor. + _Mesh: Compacting Memory Management for C/C++_ + In Proceedings of the 40th ACM SIGPLAN Conference on Programming Language Design and Implementation (PLDI'19), June 2019, pages 333-–346. + + + +# Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a +Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us +the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide +a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions +provided by the bot. You will only need to do this once across all repos using our CLA. + + +# Older Release Notes + +* 2021-11-14, `v1.7.3`, `v2.0.3` (beta): improved WASM support, improved macOS support and performance (including + M1), improved performance for v2 for large objects, Python integration improvements, more standard + installation directories, various small fixes. +* 2021-06-17, `v1.7.2`, `v2.0.2` (beta): support M1, better installation layout on Linux, fix + thread_id on Android, prefer 2-6TiB area for aligned allocation to work better on pre-windows 8, various small fixes. +* 2021-04-06, `v1.7.1`, `v2.0.1` (beta): fix bug in arena allocation for huge pages, improved aslr on large allocations, initial M1 support (still experimental). +* 2021-01-31, `v2.0.0`: beta release 2.0: new slice algorithm for managing internal mimalloc pages. +* 2021-01-31, `v1.7.0`: stable release 1.7: support explicit user provided memory regions, more precise statistics, + improve macOS overriding, initial support for Apple M1, improved DragonFly support, faster memcpy on Windows, various small fixes. + +* 2020-09-24, `v1.6.7`: stable release 1.6: using standard C atomics, passing tsan testing, improved + handling of failing to commit on Windows, add [`mi_process_info`](https://github.com/microsoft/mimalloc/blob/master/include/mimalloc.h#L156) api call. 
+* 2020-08-06, `v1.6.4`: stable release 1.6: improved error recovery in low-memory situations, + support for IllumOS and Haiku, NUMA support for Vista/XP, improved NUMA detection for AMD Ryzen, ubsan support. +* 2020-05-05, `v1.6.3`: stable release 1.6: improved behavior in out-of-memory situations, improved malloc zones on macOS, + build PIC static libraries by default, add option to abort on out-of-memory, line buffered statistics. +* 2020-04-20, `v1.6.2`: stable release 1.6: fix compilation on Android, MingW, Raspberry, and Conda, + stability fix for Windows 7, fix multiple mimalloc instances in one executable, fix `strnlen` overload, + fix aligned debug padding. +* 2020-02-17, `v1.6.1`: stable release 1.6: minor updates (build with clang-cl, fix alignment issue for small objects). +* 2020-02-09, `v1.6.0`: stable release 1.6: fixed potential memory leak, improved overriding + and thread local support on FreeBSD, NetBSD, DragonFly, and macOSX. New byte-precise + heap block overflow detection in debug mode (besides the double-free detection and free-list + corruption detection). Add `nodiscard` attribute to most allocation functions. + Enable `MIMALLOC_PAGE_RESET` by default. New reclamation strategy for abandoned heap pages + for better memory footprint. +* 2020-02-09, `v1.5.0`: stable release 1.5: improved free performance, small bug fixes. +* 2020-01-22, `v1.4.0`: stable release 1.4: improved performance for delayed OS page reset, +more eager concurrent free, addition of STL allocator, fixed potential memory leak. +* 2020-01-15, `v1.3.0`: stable release 1.3: bug fixes, improved randomness and [stronger +free list encoding](https://github.com/microsoft/mimalloc/blob/783e3377f79ee82af43a0793910a9f2d01ac7863/include/mimalloc-internal.h#L396) in secure mode. + +* 2019-12-22, `v1.2.2`: stable release 1.2: minor updates. +* 2019-11-22, `v1.2.0`: stable release 1.2: bug fixes, improved secure mode (free list corruption checks, double free mitigation). Improved dynamic overriding on Windows. +* 2019-10-07, `v1.1.0`: stable release 1.1. +* 2019-09-01, `v1.0.8`: pre-release 8: more robust windows dynamic overriding, initial huge page support. +* 2019-08-10, `v1.0.6`: pre-release 6: various performance improvements. diff --git a/ww/managers/mimalloc/src/alloc-aligned.c b/ww/managers/mimalloc/src/alloc-aligned.c new file mode 100644 index 00000000..ba629ef3 --- /dev/null +++ b/ww/managers/mimalloc/src/alloc-aligned.c @@ -0,0 +1,312 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2021, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/prim.h" // mi_prim_get_default_heap + +#include // memset + +// ------------------------------------------------------ +// Aligned Allocation +// ------------------------------------------------------ + +static bool mi_malloc_is_naturally_aligned( size_t size, size_t alignment ) { + // objects up to `MI_MAX_ALIGN_GUARANTEE` are allocated aligned to their size (see `segment.c:_mi_segment_page_start`). 
+ mi_assert_internal(_mi_is_power_of_two(alignment) && (alignment > 0)); + if (alignment > size) return false; + if (alignment <= MI_MAX_ALIGN_SIZE) return true; + const size_t bsize = mi_good_size(size); + return (bsize <= MI_MAX_ALIGN_GUARANTEE && (bsize & (alignment-1)) == 0); +} + +// Fallback aligned allocation that over-allocates -- split out for better codegen +static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept +{ + mi_assert_internal(size <= (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)); + mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment)); + + void* p; + size_t oversize; + if mi_unlikely(alignment > MI_BLOCK_ALIGNMENT_MAX) { + // use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page) + // This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the + // first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down) + if mi_unlikely(offset != 0) { + // todo: cannot support offset alignment for very large alignments yet + #if MI_DEBUG > 0 + _mi_error_message(EOVERFLOW, "aligned allocation with a very large alignment cannot be used with an alignment offset (size %zu, alignment %zu, offset %zu)\n", size, alignment, offset); + #endif + return NULL; + } + oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size); + p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment); // the page block size should be large enough to align in the single huge page block + // zero afterwards as only the area from the aligned_p may be committed! + if (p == NULL) return NULL; + } + else { + // otherwise over-allocate + oversize = size + alignment - 1; + p = _mi_heap_malloc_zero(heap, oversize, zero); + if (p == NULL) return NULL; + } + + // .. and align within the allocation + const uintptr_t align_mask = alignment - 1; // for any x, `(x & align_mask) == (x % alignment)` + const uintptr_t poffset = ((uintptr_t)p + offset) & align_mask; + const uintptr_t adjust = (poffset == 0 ? 0 : alignment - poffset); + mi_assert_internal(adjust < alignment); + void* aligned_p = (void*)((uintptr_t)p + adjust); + if (aligned_p != p) { + mi_page_t* page = _mi_ptr_page(p); + mi_page_set_has_aligned(page, true); + _mi_padding_shrink(page, (mi_block_t*)p, adjust + size); + } + // todo: expand padding if overallocated ? 
+ + mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size); + mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_page(aligned_p), aligned_p)); + mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0); + mi_assert_internal(mi_usable_size(aligned_p)>=size); + mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust); + + // now zero the block if needed + if (alignment > MI_BLOCK_ALIGNMENT_MAX) { + // for the tracker, on huge aligned allocations only the memory from the start of the large block is defined + mi_track_mem_undefined(aligned_p, size); + if (zero) { + _mi_memzero_aligned(aligned_p, mi_usable_size(aligned_p)); + } + } + + if (p != aligned_p) { + mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p)); + } + return aligned_p; +} + +// Generic primitive aligned allocation -- split out for better codegen +static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_generic(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept +{ + mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment)); + // we don't allocate more than MI_MAX_ALLOC_SIZE (see ) + if mi_unlikely(size > (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)) { + #if MI_DEBUG > 0 + _mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment); + #endif + return NULL; + } + + // use regular allocation if it is guaranteed to fit the alignment constraints. + // this is important to try as the fast path in `mi_heap_malloc_zero_aligned` only works when there exist + // a page with the right block size, and if we always use the over-alloc fallback that would never happen. + if (offset == 0 && mi_malloc_is_naturally_aligned(size,alignment)) { + void* p = _mi_heap_malloc_zero(heap, size, zero); + mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0); + const bool is_aligned_or_null = (((uintptr_t)p) & (alignment-1))==0; + if mi_likely(is_aligned_or_null) { + return p; + } + else { + // this should never happen if the `mi_malloc_is_naturally_aligned` check is correct.. + mi_assert(false); + mi_free(p); + } + } + + // fall back to over-allocation + return mi_heap_malloc_zero_aligned_at_overalloc(heap,size,alignment,offset,zero); +} + +// Primitive aligned allocation +static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept +{ + // note: we don't require `size > offset`, we just guarantee that the address at offset is aligned regardless of the allocated size. + if mi_unlikely(alignment == 0 || !_mi_is_power_of_two(alignment)) { // require power-of-two (see ) + #if MI_DEBUG > 0 + _mi_error_message(EOVERFLOW, "aligned allocation requires the alignment to be a power-of-two (size %zu, alignment %zu)\n", size, alignment); + #endif + return NULL; + } + + // try first if there happens to be a small block available with just the right alignment + if mi_likely(size <= MI_SMALL_SIZE_MAX && alignment <= size) { + const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)` + const size_t padsize = size + MI_PADDING_SIZE; + mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize); + if mi_likely(page->free != NULL) { + const bool is_aligned = (((uintptr_t)page->free + offset) & align_mask)==0; + if mi_likely(is_aligned) + { + #if MI_STAT>1 + mi_heap_stat_increase(heap, malloc, size); + #endif + void* p = (zero ? 
_mi_page_malloc_zeroed(heap,page,padsize) : _mi_page_malloc(heap,page,padsize)); // call specific page malloc for better codegen + mi_assert_internal(p != NULL); + mi_assert_internal(((uintptr_t)p + offset) % alignment == 0); + mi_track_malloc(p,size,zero); + return p; + } + } + } + + // fallback to generic aligned allocation + return mi_heap_malloc_zero_aligned_at_generic(heap, size, alignment, offset, zero); +} + + +// ------------------------------------------------------ +// Optimized mi_heap_malloc_aligned / mi_malloc_aligned +// ------------------------------------------------------ + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, false); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept { + return mi_heap_malloc_aligned_at(heap, size, alignment, 0); +} + +// ensure a definition is emitted +#if defined(__cplusplus) +void* _mi_extern_heap_malloc_aligned = (void*)&mi_heap_malloc_aligned; +#endif + +// ------------------------------------------------------ +// Aligned Allocation +// ------------------------------------------------------ + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_malloc_zero_aligned_at(heap, size, alignment, offset, true); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept { + return mi_heap_zalloc_aligned_at(heap, size, alignment, 0); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + size_t total; + if (mi_count_size_overflow(count, size, &total)) return NULL; + return mi_heap_zalloc_aligned_at(heap, total, alignment, offset); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept { + return mi_heap_calloc_aligned_at(heap,count,size,alignment,0); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_malloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept { + return mi_heap_malloc_aligned(mi_prim_get_default_heap(), size, alignment); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_zalloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept { + return mi_heap_zalloc_aligned(mi_prim_get_default_heap(), size, alignment); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_calloc_aligned_at(mi_prim_get_default_heap(), count, size, alignment, offset); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept { + return mi_heap_calloc_aligned(mi_prim_get_default_heap(), count, size, 
alignment); +} + + +// ------------------------------------------------------ +// Aligned re-allocation +// ------------------------------------------------------ + +static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset, bool zero) mi_attr_noexcept { + mi_assert(alignment > 0); + if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero); + if (p == NULL) return mi_heap_malloc_zero_aligned_at(heap,newsize,alignment,offset,zero); + size_t size = mi_usable_size(p); + if (newsize <= size && newsize >= (size - (size / 2)) + && (((uintptr_t)p + offset) % alignment) == 0) { + return p; // reallocation still fits, is aligned and not more than 50% waste + } + else { + // note: we don't zero allocate upfront so we only zero initialize the expanded part + void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset); + if (newp != NULL) { + if (zero && newsize > size) { + // also set last word in the previous allocation to zero to ensure any padding is zero-initialized + size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0); + _mi_memzero((uint8_t*)newp + start, newsize - start); + } + _mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize)); + mi_free(p); // only free if successful + } + return newp; + } +} + +static void* mi_heap_realloc_zero_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, bool zero) mi_attr_noexcept { + mi_assert(alignment > 0); + if (alignment <= sizeof(uintptr_t)) return _mi_heap_realloc_zero(heap,p,newsize,zero); + size_t offset = ((uintptr_t)p % alignment); // use offset of previous allocation (p can be NULL) + return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,zero); +} + +mi_decl_nodiscard void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_realloc_zero_aligned_at(heap,p,newsize,alignment,offset,false); +} + +mi_decl_nodiscard void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept { + return mi_heap_realloc_zero_aligned(heap,p,newsize,alignment,false); +} + +mi_decl_nodiscard void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_realloc_zero_aligned_at(heap, p, newsize, alignment, offset, true); +} + +mi_decl_nodiscard void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept { + return mi_heap_realloc_zero_aligned(heap, p, newsize, alignment, true); +} + +mi_decl_nodiscard void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + size_t total; + if (mi_count_size_overflow(newcount, size, &total)) return NULL; + return mi_heap_rezalloc_aligned_at(heap, p, total, alignment, offset); +} + +mi_decl_nodiscard void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { + size_t total; + if (mi_count_size_overflow(newcount, size, &total)) return NULL; + return mi_heap_rezalloc_aligned(heap, p, total, alignment); +} + +mi_decl_nodiscard void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_realloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset); +} + +mi_decl_nodiscard void* mi_realloc_aligned(void* p, 
size_t newsize, size_t alignment) mi_attr_noexcept { + return mi_heap_realloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment); +} + +mi_decl_nodiscard void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_rezalloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset); +} + +mi_decl_nodiscard void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept { + return mi_heap_rezalloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment); +} + +mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { + return mi_heap_recalloc_aligned_at(mi_prim_get_default_heap(), p, newcount, size, alignment, offset); +} + +mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { + return mi_heap_recalloc_aligned(mi_prim_get_default_heap(), p, newcount, size, alignment); +} diff --git a/ww/managers/mimalloc/src/alloc-override.c b/ww/managers/mimalloc/src/alloc-override.c new file mode 100644 index 00000000..12837cdd --- /dev/null +++ b/ww/managers/mimalloc/src/alloc-override.c @@ -0,0 +1,314 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2021, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +#if !defined(MI_IN_ALLOC_C) +#error "this file should be included from 'alloc.c' (so aliases can work)" +#endif + +#if defined(MI_MALLOC_OVERRIDE) && defined(_WIN32) && !(defined(MI_SHARED_LIB) && defined(_DLL)) +#error "It is only possible to override "malloc" on Windows when building as a DLL (and linking the C runtime as a DLL)" +#endif + +#if defined(MI_MALLOC_OVERRIDE) && !(defined(_WIN32)) + +#if defined(__APPLE__) +#include +mi_decl_externc void vfree(void* p); +mi_decl_externc size_t malloc_size(const void* p); +mi_decl_externc size_t malloc_good_size(size_t size); +#endif + +// helper definition for C override of C++ new +typedef void* mi_nothrow_t; + +// ------------------------------------------------------ +// Override system malloc +// ------------------------------------------------------ + +#if (defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__) && !MI_TRACK_ENABLED + // gcc, clang: use aliasing to alias the exported function to one of our `mi_` functions + #if (defined(__GNUC__) && __GNUC__ >= 9) + #pragma GCC diagnostic ignored "-Wattributes" // or we get warnings that nodiscard is ignored on a forward + #define MI_FORWARD(fun) __attribute__((alias(#fun), used, visibility("default"), copy(fun))); + #else + #define MI_FORWARD(fun) __attribute__((alias(#fun), used, visibility("default"))); + #endif + #define MI_FORWARD1(fun,x) MI_FORWARD(fun) + #define MI_FORWARD2(fun,x,y) MI_FORWARD(fun) + #define MI_FORWARD3(fun,x,y,z) MI_FORWARD(fun) + #define MI_FORWARD0(fun,x) MI_FORWARD(fun) + #define MI_FORWARD02(fun,x,y) MI_FORWARD(fun) +#else + // otherwise use forwarding by calling our `mi_` function + #define MI_FORWARD1(fun,x) { return fun(x); } + #define MI_FORWARD2(fun,x,y) { return fun(x,y); } + #define MI_FORWARD3(fun,x,y,z) { return fun(x,y,z); } + #define MI_FORWARD0(fun,x) { fun(x); } + #define MI_FORWARD02(fun,x,y) { 
fun(x,y); } +#endif + + +#if defined(__APPLE__) && defined(MI_SHARED_LIB_EXPORT) && defined(MI_OSX_INTERPOSE) + // define MI_OSX_IS_INTERPOSED as we should not provide forwarding definitions for + // functions that are interposed (or the interposing does not work) + #define MI_OSX_IS_INTERPOSED + + mi_decl_externc size_t mi_malloc_size_checked(void *p) { + if (!mi_is_in_heap_region(p)) return 0; + return mi_usable_size(p); + } + + // use interposing so `DYLD_INSERT_LIBRARIES` works without `DYLD_FORCE_FLAT_NAMESPACE=1` + // See: + struct mi_interpose_s { + const void* replacement; + const void* target; + }; + #define MI_INTERPOSE_FUN(oldfun,newfun) { (const void*)&newfun, (const void*)&oldfun } + #define MI_INTERPOSE_MI(fun) MI_INTERPOSE_FUN(fun,mi_##fun) + + __attribute__((used)) static struct mi_interpose_s _mi_interposes[] __attribute__((section("__DATA, __interpose"))) = + { + MI_INTERPOSE_MI(malloc), + MI_INTERPOSE_MI(calloc), + MI_INTERPOSE_MI(realloc), + MI_INTERPOSE_MI(strdup), + #if defined(MAC_OS_X_VERSION_10_7) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_7 + MI_INTERPOSE_MI(strndup), + #endif + MI_INTERPOSE_MI(realpath), + MI_INTERPOSE_MI(posix_memalign), + MI_INTERPOSE_MI(reallocf), + MI_INTERPOSE_MI(valloc), + MI_INTERPOSE_FUN(malloc_size,mi_malloc_size_checked), + MI_INTERPOSE_MI(malloc_good_size), + #if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15 + MI_INTERPOSE_MI(aligned_alloc), + #endif + #ifdef MI_OSX_ZONE + // we interpose malloc_default_zone in alloc-override-osx.c so we can use mi_free safely + MI_INTERPOSE_MI(free), + MI_INTERPOSE_FUN(vfree,mi_free), + #else + // sometimes code allocates from default zone but deallocates using plain free :-( (like NxHashResizeToCapacity ) + MI_INTERPOSE_FUN(free,mi_cfree), // use safe free that checks if pointers are from us + MI_INTERPOSE_FUN(vfree,mi_cfree), + #endif + }; + + #ifdef __cplusplus + extern "C" { + #endif + void _ZdlPv(void* p); // delete + void _ZdaPv(void* p); // delete[] + void _ZdlPvm(void* p, size_t n); // delete + void _ZdaPvm(void* p, size_t n); // delete[] + void* _Znwm(size_t n); // new + void* _Znam(size_t n); // new[] + void* _ZnwmRKSt9nothrow_t(size_t n, mi_nothrow_t tag); // new nothrow + void* _ZnamRKSt9nothrow_t(size_t n, mi_nothrow_t tag); // new[] nothrow + #ifdef __cplusplus + } + #endif + __attribute__((used)) static struct mi_interpose_s _mi_cxx_interposes[] __attribute__((section("__DATA, __interpose"))) = + { + MI_INTERPOSE_FUN(_ZdlPv,mi_free), + MI_INTERPOSE_FUN(_ZdaPv,mi_free), + MI_INTERPOSE_FUN(_ZdlPvm,mi_free_size), + MI_INTERPOSE_FUN(_ZdaPvm,mi_free_size), + MI_INTERPOSE_FUN(_Znwm,mi_new), + MI_INTERPOSE_FUN(_Znam,mi_new), + MI_INTERPOSE_FUN(_ZnwmRKSt9nothrow_t,mi_new_nothrow), + MI_INTERPOSE_FUN(_ZnamRKSt9nothrow_t,mi_new_nothrow), + }; + +#elif defined(_MSC_VER) + // cannot override malloc unless using a dll. + // we just override new/delete which does work in a static library. 
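Editor's note: as context for the forwarding macros and interpose tables above, here is a minimal standalone sketch of the alias-based route on GCC/clang ELF targets. It is not part of the patch; `heap_buf`, `my_malloc`, and `my_free` are illustrative names, and the toy bump allocator merely stands in for a real replacement allocator.

// Standalone sketch: overriding malloc/free via the GCC/clang `alias`
// attribute, the same mechanism the MI_FORWARD macros above rely on.
#include <stddef.h>

// Toy replacement allocator: a bump pointer over a static buffer
// (never frees, not thread safe) -- just enough to make the aliases meaningful.
static unsigned char heap_buf[1 << 20];
static size_t heap_top = 0;

void* my_malloc(size_t n) {
  n = (n + 15u) & ~(size_t)15u;                      // keep 16-byte alignment
  if (n > sizeof(heap_buf) - heap_top) return NULL;  // out of toy memory
  void* p = &heap_buf[heap_top];
  heap_top += n;
  return p;
}

void my_free(void* p) { (void)p; }                   // leaks; fine for a sketch

// `malloc` and `free` become aliases of the toy functions, so once this object
// is linked (or LD_PRELOADed as a shared library) every call resolves to them.
void* malloc(size_t n) __attribute__((alias("my_malloc"), used, visibility("default")));
void  free(void* p)    __attribute__((alias("my_free"),  used, visibility("default")));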
+#else + // On all other systems forward allocation primitives to our API + mi_decl_export void* malloc(size_t size) MI_FORWARD1(mi_malloc, size) + mi_decl_export void* calloc(size_t size, size_t n) MI_FORWARD2(mi_calloc, size, n) + mi_decl_export void* realloc(void* p, size_t newsize) MI_FORWARD2(mi_realloc, p, newsize) + mi_decl_export void free(void* p) MI_FORWARD0(mi_free, p) + // In principle we do not need to forward `strdup`/`strndup` but on some systems these do not use `malloc` internally (but a more primitive call) + // We only override if `strdup` is not a macro (as on some older libc's, see issue #885) + #if !defined(strdup) + mi_decl_export char* strdup(const char* str) MI_FORWARD1(mi_strdup, str) + #endif + #if !defined(strndup) && (!defined(__APPLE__) || (defined(MAC_OS_X_VERSION_10_7) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_7)) + mi_decl_export char* strndup(const char* str, size_t n) MI_FORWARD2(mi_strndup, str, n) + #endif +#endif + +#if (defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__) +#pragma GCC visibility push(default) +#endif + +// ------------------------------------------------------ +// Override new/delete +// This is not really necessary as they usually call +// malloc/free anyway, but it improves performance. +// ------------------------------------------------------ +#ifdef __cplusplus + // ------------------------------------------------------ + // With a C++ compiler we override the new/delete operators. + // see + // ------------------------------------------------------ + #include + + #ifndef MI_OSX_IS_INTERPOSED + void operator delete(void* p) noexcept MI_FORWARD0(mi_free,p) + void operator delete[](void* p) noexcept MI_FORWARD0(mi_free,p) + + void* operator new(std::size_t n) noexcept(false) MI_FORWARD1(mi_new,n) + void* operator new[](std::size_t n) noexcept(false) MI_FORWARD1(mi_new,n) + + void* operator new (std::size_t n, const std::nothrow_t& tag) noexcept { MI_UNUSED(tag); return mi_new_nothrow(n); } + void* operator new[](std::size_t n, const std::nothrow_t& tag) noexcept { MI_UNUSED(tag); return mi_new_nothrow(n); } + + #if (__cplusplus >= 201402L || _MSC_VER >= 1916) + void operator delete (void* p, std::size_t n) noexcept MI_FORWARD02(mi_free_size,p,n) + void operator delete[](void* p, std::size_t n) noexcept MI_FORWARD02(mi_free_size,p,n) + #endif + #endif + + #if (__cplusplus > 201402L && defined(__cpp_aligned_new)) && (!defined(__GNUC__) || (__GNUC__ > 5)) + void operator delete (void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast(al)); } + void operator delete[](void* p, std::align_val_t al) noexcept { mi_free_aligned(p, static_cast(al)); } + void operator delete (void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast(al)); }; + void operator delete[](void* p, std::size_t n, std::align_val_t al) noexcept { mi_free_size_aligned(p, n, static_cast(al)); }; + void operator delete (void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast(al)); } + void operator delete[](void* p, std::align_val_t al, const std::nothrow_t&) noexcept { mi_free_aligned(p, static_cast(al)); } + + void* operator new( std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast(al)); } + void* operator new[]( std::size_t n, std::align_val_t al) noexcept(false) { return mi_new_aligned(n, static_cast(al)); } + void* operator new (std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return 
mi_new_aligned_nothrow(n, static_cast(al)); } + void* operator new[](std::size_t n, std::align_val_t al, const std::nothrow_t&) noexcept { return mi_new_aligned_nothrow(n, static_cast(al)); } + #endif + +#elif (defined(__GNUC__) || defined(__clang__)) + // ------------------------------------------------------ + // Override by defining the mangled C++ names of the operators (as + // used by GCC and CLang). + // See + // ------------------------------------------------------ + + void _ZdlPv(void* p) MI_FORWARD0(mi_free,p) // delete + void _ZdaPv(void* p) MI_FORWARD0(mi_free,p) // delete[] + void _ZdlPvm(void* p, size_t n) MI_FORWARD02(mi_free_size,p,n) + void _ZdaPvm(void* p, size_t n) MI_FORWARD02(mi_free_size,p,n) + + void _ZdlPvSt11align_val_t(void* p, size_t al) { mi_free_aligned(p,al); } + void _ZdaPvSt11align_val_t(void* p, size_t al) { mi_free_aligned(p,al); } + void _ZdlPvmSt11align_val_t(void* p, size_t n, size_t al) { mi_free_size_aligned(p,n,al); } + void _ZdaPvmSt11align_val_t(void* p, size_t n, size_t al) { mi_free_size_aligned(p,n,al); } + + void _ZdlPvRKSt9nothrow_t(void* p, mi_nothrow_t tag) { MI_UNUSED(tag); mi_free(p); } // operator delete(void*, std::nothrow_t const&) + void _ZdaPvRKSt9nothrow_t(void* p, mi_nothrow_t tag) { MI_UNUSED(tag); mi_free(p); } // operator delete[](void*, std::nothrow_t const&) + void _ZdlPvSt11align_val_tRKSt9nothrow_t(void* p, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); mi_free_aligned(p,al); } // operator delete(void*, std::align_val_t, std::nothrow_t const&) + void _ZdaPvSt11align_val_tRKSt9nothrow_t(void* p, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); mi_free_aligned(p,al); } // operator delete[](void*, std::align_val_t, std::nothrow_t const&) + + #if (MI_INTPTR_SIZE==8) + void* _Znwm(size_t n) MI_FORWARD1(mi_new,n) // new 64-bit + void* _Znam(size_t n) MI_FORWARD1(mi_new,n) // new[] 64-bit + void* _ZnwmRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); } + void* _ZnamRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); } + void* _ZnwmSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al) + void* _ZnamSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al) + void* _ZnwmSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); } + void* _ZnamSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); } + #elif (MI_INTPTR_SIZE==4) + void* _Znwj(size_t n) MI_FORWARD1(mi_new,n) // new 64-bit + void* _Znaj(size_t n) MI_FORWARD1(mi_new,n) // new[] 64-bit + void* _ZnwjRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); } + void* _ZnajRKSt9nothrow_t(size_t n, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_nothrow(n); } + void* _ZnwjSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al) + void* _ZnajSt11align_val_t(size_t n, size_t al) MI_FORWARD2(mi_new_aligned, n, al) + void* _ZnwjSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); } + void* _ZnajSt11align_val_tRKSt9nothrow_t(size_t n, size_t al, mi_nothrow_t tag) { MI_UNUSED(tag); return mi_new_aligned_nothrow(n,al); } + #else + #error "define overloads for new/delete for this platform (just for performance, can be skipped)" + #endif +#endif // __cplusplus + +// ------------------------------------------------------ +// Further Posix & 
Unix functions definitions +// ------------------------------------------------------ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef MI_OSX_IS_INTERPOSED + // Forward Posix/Unix calls as well + void* reallocf(void* p, size_t newsize) MI_FORWARD2(mi_reallocf,p,newsize) + size_t malloc_size(const void* p) MI_FORWARD1(mi_usable_size,p) + #if !defined(__ANDROID__) && !defined(__FreeBSD__) + size_t malloc_usable_size(void *p) MI_FORWARD1(mi_usable_size,p) + #else + size_t malloc_usable_size(const void *p) MI_FORWARD1(mi_usable_size,p) + #endif + + // No forwarding here due to aliasing/name mangling issues + void* valloc(size_t size) { return mi_valloc(size); } + void vfree(void* p) { mi_free(p); } + size_t malloc_good_size(size_t size) { return mi_malloc_good_size(size); } + int posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_memalign(p, alignment, size); } + + // `aligned_alloc` is only available when __USE_ISOC11 is defined. + // Note: it seems __USE_ISOC11 is not defined in musl (and perhaps other libc's) so we only check + // for it if using glibc. + // Note: Conda has a custom glibc where `aligned_alloc` is declared `static inline` and we cannot + // override it, but both _ISOC11_SOURCE and __USE_ISOC11 are undefined in Conda GCC7 or GCC9. + // Fortunately, in the case where `aligned_alloc` is declared as `static inline` it + // uses internally `memalign`, `posix_memalign`, or `_aligned_malloc` so we can avoid overriding it ourselves. + #if !defined(__GLIBC__) || __USE_ISOC11 + void* aligned_alloc(size_t alignment, size_t size) { return mi_aligned_alloc(alignment, size); } + #endif +#endif + +// no forwarding here due to aliasing/name mangling issues +void cfree(void* p) { mi_free(p); } +void* pvalloc(size_t size) { return mi_pvalloc(size); } +void* memalign(size_t alignment, size_t size) { return mi_memalign(alignment, size); } +void* _aligned_malloc(size_t alignment, size_t size) { return mi_aligned_alloc(alignment, size); } +void* reallocarray(void* p, size_t count, size_t size) { return mi_reallocarray(p, count, size); } +// some systems define reallocarr so mark it as a weak symbol (#751) +mi_decl_weak int reallocarr(void* p, size_t count, size_t size) { return mi_reallocarr(p, count, size); } + +#if defined(__wasi__) + // forward __libc interface (see PR #667) + void* __libc_malloc(size_t size) MI_FORWARD1(mi_malloc, size) + void* __libc_calloc(size_t count, size_t size) MI_FORWARD2(mi_calloc, count, size) + void* __libc_realloc(void* p, size_t size) MI_FORWARD2(mi_realloc, p, size) + void __libc_free(void* p) MI_FORWARD0(mi_free, p) + void* __libc_memalign(size_t alignment, size_t size) { return mi_memalign(alignment, size); } + +#elif defined(__GLIBC__) && defined(__linux__) + // forward __libc interface (needed for glibc-based Linux distributions) + void* __libc_malloc(size_t size) MI_FORWARD1(mi_malloc,size) + void* __libc_calloc(size_t count, size_t size) MI_FORWARD2(mi_calloc,count,size) + void* __libc_realloc(void* p, size_t size) MI_FORWARD2(mi_realloc,p,size) + void __libc_free(void* p) MI_FORWARD0(mi_free,p) + void __libc_cfree(void* p) MI_FORWARD0(mi_free,p) + + void* __libc_valloc(size_t size) { return mi_valloc(size); } + void* __libc_pvalloc(size_t size) { return mi_pvalloc(size); } + void* __libc_memalign(size_t alignment, size_t size) { return mi_memalign(alignment,size); } + int __posix_memalign(void** p, size_t alignment, size_t size) { return mi_posix_memalign(p,alignment,size); } +#endif + +#ifdef __cplusplus +} +#endif + +#if 
(defined(__GNUC__) || defined(__clang__)) && !defined(__APPLE__) +#pragma GCC visibility pop +#endif + +#endif // MI_MALLOC_OVERRIDE && !_WIN32 diff --git a/ww/managers/mimalloc/src/alloc-posix.c b/ww/managers/mimalloc/src/alloc-posix.c new file mode 100644 index 00000000..225752fd --- /dev/null +++ b/ww/managers/mimalloc/src/alloc-posix.c @@ -0,0 +1,185 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2021, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// ------------------------------------------------------------------------ +// mi prefixed publi definitions of various Posix, Unix, and C++ functions +// for convenience and used when overriding these functions. +// ------------------------------------------------------------------------ +#include "mimalloc.h" +#include "mimalloc/internal.h" + +// ------------------------------------------------------ +// Posix & Unix functions definitions +// ------------------------------------------------------ + +#include +#include // memset +#include // getenv + +#ifdef _MSC_VER +#pragma warning(disable:4996) // getenv _wgetenv +#endif + +#ifndef EINVAL +#define EINVAL 22 +#endif +#ifndef ENOMEM +#define ENOMEM 12 +#endif + + +mi_decl_nodiscard size_t mi_malloc_size(const void* p) mi_attr_noexcept { + // if (!mi_is_in_heap_region(p)) return 0; + return mi_usable_size(p); +} + +mi_decl_nodiscard size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept { + // if (!mi_is_in_heap_region(p)) return 0; + return mi_usable_size(p); +} + +mi_decl_nodiscard size_t mi_malloc_good_size(size_t size) mi_attr_noexcept { + return mi_good_size(size); +} + +void mi_cfree(void* p) mi_attr_noexcept { + if (mi_is_in_heap_region(p)) { + mi_free(p); + } +} + +int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept { + // Note: The spec dictates we should not modify `*p` on an error. (issue#27) + // + if (p == NULL) return EINVAL; + if ((alignment % sizeof(void*)) != 0) return EINVAL; // natural alignment + // it is also required that alignment is a power of 2 and > 0; this is checked in `mi_malloc_aligned` + if (alignment==0 || !_mi_is_power_of_two(alignment)) return EINVAL; // not a power of 2 + void* q = mi_malloc_aligned(size, alignment); + if (q==NULL && size != 0) return ENOMEM; + mi_assert_internal(((uintptr_t)q % alignment) == 0); + *p = q; + return 0; +} + +mi_decl_nodiscard mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept { + void* p = mi_malloc_aligned(size, alignment); + mi_assert_internal(((uintptr_t)p % alignment) == 0); + return p; +} + +mi_decl_nodiscard mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept { + return mi_memalign( _mi_os_page_size(), size ); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept { + size_t psize = _mi_os_page_size(); + if (size >= SIZE_MAX - psize) return NULL; // overflow + size_t asize = _mi_align_up(size, psize); + return mi_malloc_aligned(asize, psize); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept { + // C11 requires the size to be an integral multiple of the alignment, see . 
+ // unfortunately, it turns out quite some programs pass a size that is not an integral multiple so skip this check.. + /* if mi_unlikely((size & (alignment - 1)) != 0) { // C11 requires alignment>0 && integral multiple, see + #if MI_DEBUG > 0 + _mi_error_message(EOVERFLOW, "(mi_)aligned_alloc requires the size to be an integral multiple of the alignment (size %zu, alignment %zu)\n", size, alignment); + #endif + return NULL; + } + */ + // C11 also requires alignment to be a power-of-two (and > 0) which is checked in mi_malloc_aligned + void* p = mi_malloc_aligned(size, alignment); + mi_assert_internal(((uintptr_t)p % alignment) == 0); + return p; +} + +mi_decl_nodiscard void* mi_reallocarray( void* p, size_t count, size_t size ) mi_attr_noexcept { // BSD + void* newp = mi_reallocn(p,count,size); + if (newp==NULL) { errno = ENOMEM; } + return newp; +} + +mi_decl_nodiscard int mi_reallocarr( void* p, size_t count, size_t size ) mi_attr_noexcept { // NetBSD + mi_assert(p != NULL); + if (p == NULL) { + errno = EINVAL; + return EINVAL; + } + void** op = (void**)p; + void* newp = mi_reallocarray(*op, count, size); + if mi_unlikely(newp == NULL) { return errno; } + *op = newp; + return 0; +} + +void* mi__expand(void* p, size_t newsize) mi_attr_noexcept { // Microsoft + void* res = mi_expand(p, newsize); + if (res == NULL) { errno = ENOMEM; } + return res; +} + +mi_decl_nodiscard mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept { + if (s==NULL) return NULL; + size_t len; + for(len = 0; s[len] != 0; len++) { } + size_t size = (len+1)*sizeof(unsigned short); + unsigned short* p = (unsigned short*)mi_malloc(size); + if (p != NULL) { + _mi_memcpy(p,s,size); + } + return p; +} + +mi_decl_nodiscard mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept { + return (unsigned char*)mi_strdup((const char*)s); +} + +int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept { + if (buf==NULL || name==NULL) return EINVAL; + if (size != NULL) *size = 0; + char* p = getenv(name); // mscver warning 4996 + if (p==NULL) { + *buf = NULL; + } + else { + *buf = mi_strdup(p); + if (*buf==NULL) return ENOMEM; + if (size != NULL) *size = _mi_strlen(p); + } + return 0; +} + +int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name) mi_attr_noexcept { + if (buf==NULL || name==NULL) return EINVAL; + if (size != NULL) *size = 0; +#if !defined(_WIN32) || (defined(WINAPI_FAMILY) && (WINAPI_FAMILY != WINAPI_FAMILY_DESKTOP_APP)) + // not supported + *buf = NULL; + return EINVAL; +#else + unsigned short* p = (unsigned short*)_wgetenv((const wchar_t*)name); // msvc warning 4996 + if (p==NULL) { + *buf = NULL; + } + else { + *buf = mi_wcsdup(p); + if (*buf==NULL) return ENOMEM; + if (size != NULL) *size = wcslen((const wchar_t*)p); + } + return 0; +#endif +} + +mi_decl_nodiscard void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept { // Microsoft + return mi_recalloc_aligned_at(p, newcount, size, alignment, offset); +} + +mi_decl_nodiscard void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept { // Microsoft + return mi_recalloc_aligned(p, newcount, size, alignment); +} diff --git a/ww/managers/mimalloc/src/alloc.c b/ww/managers/mimalloc/src/alloc.c new file mode 100644 index 00000000..86aaae75 --- /dev/null +++ b/ww/managers/mimalloc/src/alloc.c @@ -0,0 +1,598 @@ +/* 
---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#ifndef _DEFAULT_SOURCE +#define _DEFAULT_SOURCE // for realpath() on Linux +#endif + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" // _mi_prim_thread_id() + +#include // memset, strlen (for mi_strdup) +#include // malloc, abort + +#define MI_IN_ALLOC_C +#include "alloc-override.c" +#include "free.c" +#undef MI_IN_ALLOC_C + +// ------------------------------------------------------ +// Allocation +// ------------------------------------------------------ + +// Fast allocation in a page: just pop from the free list. +// Fall back to generic allocation only if the list is empty. +// Note: in release mode the (inlined) routine is about 7 instructions with a single test. +extern inline void* _mi_page_malloc_zero(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept +{ + mi_assert_internal(page->block_size == 0 /* empty heap */ || mi_page_block_size(page) >= size); + mi_block_t* const block = page->free; + if mi_unlikely(block == NULL) { + return _mi_malloc_generic(heap, size, zero, 0); + } + mi_assert_internal(block != NULL && _mi_ptr_page(block) == page); + // pop from the free list + page->free = mi_block_next(page, block); + page->used++; + mi_assert_internal(page->free == NULL || _mi_ptr_page(page->free) == page); + #if MI_DEBUG>3 + if (page->free_is_zero) { + mi_assert_expensive(mi_mem_is_zero(block+1,size - sizeof(*block))); + } + #endif + + // allow use of the block internally + // note: when tracking we need to avoid ever touching the MI_PADDING since + // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc/track.h`) + mi_track_mem_undefined(block, mi_page_usable_block_size(page)); + + // zero the block? 
note: we need to zero the full block size (issue #63) + if mi_unlikely(zero) { + mi_assert_internal(page->block_size != 0); // do not call with zero'ing for huge blocks (see _mi_malloc_generic) + mi_assert_internal(page->block_size >= MI_PADDING_SIZE); + if (page->free_is_zero) { + block->next = 0; + mi_track_mem_defined(block, page->block_size - MI_PADDING_SIZE); + } + else { + _mi_memzero_aligned(block, page->block_size - MI_PADDING_SIZE); + } + } + + #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN + if (!zero && !mi_page_is_huge(page)) { + memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page)); + } + #elif (MI_SECURE!=0) + if (!zero) { block->next = 0; } // don't leak internal data + #endif + + #if (MI_STAT>0) + const size_t bsize = mi_page_usable_block_size(page); + if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) { + mi_heap_stat_increase(heap, normal, bsize); + mi_heap_stat_counter_increase(heap, normal_count, 1); + #if (MI_STAT>1) + const size_t bin = _mi_bin(bsize); + mi_heap_stat_increase(heap, normal_bins[bin], 1); + #endif + } + #endif + + #if MI_PADDING // && !MI_TRACK_ENABLED + mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page)); + ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE)); + #if (MI_DEBUG>=2) + mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta)); + #endif + mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess + padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys)); + padding->delta = (uint32_t)(delta); + #if MI_PADDING_CHECK + if (!mi_page_is_huge(page)) { + uint8_t* fill = (uint8_t*)padding - delta; + const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes + for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; } + } + #endif + #endif + + return block; +} + +// extra entries for improved efficiency in `alloc-aligned.c`. 
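Editor's note: the fast path above amounts to popping the head of an intrusive free list and falling back to a generic routine when the list is empty. A standalone sketch of that pattern, using illustrative types (`block_t`, `page_t`) rather than mimalloc's own:

#include <stddef.h>

// Each free block stores a pointer to the next free block in its first word
// (an intrusive singly linked list).
typedef struct block_s { struct block_s* next; } block_t;

typedef struct page_s {
  block_t* free;      // head of the page's free list
  size_t   used;      // number of blocks currently handed out
} page_t;

// Stand-in for the slow path: a real allocator would refill the page here.
static void* alloc_generic(page_t* page, size_t size) {
  (void)page; (void)size;
  return NULL;
}

// Fast path: pop the head of the free list; fall back when it is empty.
static inline void* page_alloc(page_t* page, size_t size) {
  block_t* const block = page->free;
  if (block == NULL) return alloc_generic(page, size);
  page->free = block->next;
  page->used++;
  return block;                // the free block itself becomes the allocation
}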
+extern void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept { + return _mi_page_malloc_zero(heap,page,size,false); +} +extern void* _mi_page_malloc_zeroed(mi_heap_t* heap, mi_page_t* page, size_t size) mi_attr_noexcept { + return _mi_page_malloc_zero(heap,page,size,true); +} + +static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept { + mi_assert(heap != NULL); + #if MI_DEBUG + const uintptr_t tid = _mi_thread_id(); + mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local + #endif + mi_assert(size <= MI_SMALL_SIZE_MAX); + #if (MI_PADDING) + if (size == 0) { size = sizeof(void*); } + #endif + + mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE); + void* const p = _mi_page_malloc_zero(heap, page, size + MI_PADDING_SIZE, zero); + mi_track_malloc(p,size,zero); + + #if MI_STAT>1 + if (p != NULL) { + if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } + mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); + } + #endif + #if MI_DEBUG>3 + if (p != NULL && zero) { + mi_assert_expensive(mi_mem_is_zero(p, size)); + } + #endif + return p; +} + +// allocate a small block +mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept { + return mi_heap_malloc_small_zero(heap, size, false); +} + +mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept { + return mi_heap_malloc_small(mi_prim_get_default_heap(), size); +} + +// The main allocation function +extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept { + if mi_likely(size <= MI_SMALL_SIZE_MAX) { + mi_assert_internal(huge_alignment == 0); + return mi_heap_malloc_small_zero(heap, size, zero); + } + else { + mi_assert(heap!=NULL); + mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local + void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment); // note: size can overflow but it is detected in malloc_generic + mi_track_malloc(p,size,zero); + #if MI_STAT>1 + if (p != NULL) { + if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } + mi_heap_stat_increase(heap, malloc, mi_usable_size(p)); + } + #endif + #if MI_DEBUG>3 + if (p != NULL && zero) { + mi_assert_expensive(mi_mem_is_zero(p, size)); + } + #endif + return p; + } +} + +extern inline void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept { + return _mi_heap_malloc_zero_ex(heap, size, zero, 0); +} + +mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept { + return _mi_heap_malloc_zero(heap, size, false); +} + +mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept { + return mi_heap_malloc(mi_prim_get_default_heap(), size); +} + +// zero initialized small block +mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept { + return mi_heap_malloc_small_zero(mi_prim_get_default_heap(), size, true); +} + +mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept { + return _mi_heap_malloc_zero(heap, size, true); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept { + return 
mi_heap_zalloc(mi_prim_get_default_heap(),size); +} + + +mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept { + size_t total; + if (mi_count_size_overflow(count,size,&total)) return NULL; + return mi_heap_zalloc(heap,total); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept { + return mi_heap_calloc(mi_prim_get_default_heap(),count,size); +} + +// Uninitialized `calloc` +mi_decl_nodiscard extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept { + size_t total; + if (mi_count_size_overflow(count, size, &total)) return NULL; + return mi_heap_malloc(heap, total); +} + +mi_decl_nodiscard mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept { + return mi_heap_mallocn(mi_prim_get_default_heap(),count,size); +} + +// Expand (or shrink) in place (or fail) +void* mi_expand(void* p, size_t newsize) mi_attr_noexcept { + #if MI_PADDING + // we do not shrink/expand with padding enabled + MI_UNUSED(p); MI_UNUSED(newsize); + return NULL; + #else + if (p == NULL) return NULL; + const size_t size = _mi_usable_size(p,"mi_expand"); + if (newsize > size) return NULL; + return p; // it fits + #endif +} + +void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept { + // if p == NULL then behave as malloc. + // else if size == 0 then reallocate to a zero-sized block (and don't return NULL, just as mi_malloc(0)). + // (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.) + const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0) + if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0) + mi_assert_internal(p!=NULL); + // todo: do not track as the usable size is still the same in the free; adjust potential padding? + // mi_track_resize(p,size,newsize) + // if (newsize < size) { mi_track_mem_noaccess((uint8_t*)p + newsize, size - newsize); } + return p; // reallocation still fits and not more than 50% waste + } + void* newp = mi_heap_malloc(heap,newsize); + if mi_likely(newp != NULL) { + if (zero && newsize > size) { + // also set last word in the previous allocation to zero to ensure any padding is zero-initialized + const size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0); + _mi_memzero((uint8_t*)newp + start, newsize - start); + } + else if (newsize == 0) { + ((uint8_t*)newp)[0] = 0; // work around for applications that expect zero-reallocation to be zero initialized (issue #725) + } + if mi_likely(p != NULL) { + const size_t copysize = (newsize > size ? size : newsize); + mi_track_mem_defined(p,copysize); // _mi_useable_size may be too large for byte precise memory tracking.. 
+ _mi_memcpy(newp, p, copysize); + mi_free(p); // only free the original pointer if successful + } + } + return newp; +} + +mi_decl_nodiscard void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept { + return _mi_heap_realloc_zero(heap, p, newsize, false); +} + +mi_decl_nodiscard void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept { + size_t total; + if (mi_count_size_overflow(count, size, &total)) return NULL; + return mi_heap_realloc(heap, p, total); +} + + +// Reallocate but free `p` on errors +mi_decl_nodiscard void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept { + void* newp = mi_heap_realloc(heap, p, newsize); + if (newp==NULL && p!=NULL) mi_free(p); + return newp; +} + +mi_decl_nodiscard void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept { + return _mi_heap_realloc_zero(heap, p, newsize, true); +} + +mi_decl_nodiscard void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept { + size_t total; + if (mi_count_size_overflow(count, size, &total)) return NULL; + return mi_heap_rezalloc(heap, p, total); +} + + +mi_decl_nodiscard void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept { + return mi_heap_realloc(mi_prim_get_default_heap(),p,newsize); +} + +mi_decl_nodiscard void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept { + return mi_heap_reallocn(mi_prim_get_default_heap(),p,count,size); +} + +// Reallocate but free `p` on errors +mi_decl_nodiscard void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept { + return mi_heap_reallocf(mi_prim_get_default_heap(),p,newsize); +} + +mi_decl_nodiscard void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept { + return mi_heap_rezalloc(mi_prim_get_default_heap(), p, newsize); +} + +mi_decl_nodiscard void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept { + return mi_heap_recalloc(mi_prim_get_default_heap(), p, count, size); +} + + + +// ------------------------------------------------------ +// strdup, strndup, and realpath +// ------------------------------------------------------ + +// `strdup` using mi_malloc +mi_decl_nodiscard mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept { + if (s == NULL) return NULL; + size_t len = _mi_strlen(s); + char* t = (char*)mi_heap_malloc(heap,len+1); + if (t == NULL) return NULL; + _mi_memcpy(t, s, len); + t[len] = 0; + return t; +} + +mi_decl_nodiscard mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept { + return mi_heap_strdup(mi_prim_get_default_heap(), s); +} + +// `strndup` using mi_malloc +mi_decl_nodiscard mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept { + if (s == NULL) return NULL; + const size_t len = _mi_strnlen(s,n); // len <= n + char* t = (char*)mi_heap_malloc(heap, len+1); + if (t == NULL) return NULL; + _mi_memcpy(t, s, len); + t[len] = 0; + return t; +} + +mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept { + return mi_heap_strndup(mi_prim_get_default_heap(),s,n); +} + +#ifndef __wasi__ +// `realpath` using mi_malloc +#ifdef _WIN32 +#ifndef PATH_MAX +#define PATH_MAX MAX_PATH +#endif +#include +mi_decl_nodiscard mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept { + // todo: use GetFullPathNameW to allow longer file names + char buf[PATH_MAX]; + DWORD res = GetFullPathNameA(fname, 
PATH_MAX, (resolved_name == NULL ? buf : resolved_name), NULL); + if (res == 0) { + errno = GetLastError(); return NULL; + } + else if (res > PATH_MAX) { + errno = EINVAL; return NULL; + } + else if (resolved_name != NULL) { + return resolved_name; + } + else { + return mi_heap_strndup(heap, buf, PATH_MAX); + } +} +#else +/* +#include // pathconf +static size_t mi_path_max(void) { + static size_t path_max = 0; + if (path_max <= 0) { + long m = pathconf("/",_PC_PATH_MAX); + if (m <= 0) path_max = 4096; // guess + else if (m < 256) path_max = 256; // at least 256 + else path_max = m; + } + return path_max; +} +*/ +char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept { + if (resolved_name != NULL) { + return realpath(fname,resolved_name); + } + else { + char* rname = realpath(fname, NULL); + if (rname == NULL) return NULL; + char* result = mi_heap_strdup(heap, rname); + mi_cfree(rname); // use checked free (which may be redirected to our free but that's ok) + // note: with ASAN realpath is intercepted and mi_cfree may leak the returned pointer :-( + return result; + } + /* + const size_t n = mi_path_max(); + char* buf = (char*)mi_malloc(n+1); + if (buf == NULL) { + errno = ENOMEM; + return NULL; + } + char* rname = realpath(fname,buf); + char* result = mi_heap_strndup(heap,rname,n); // ok if `rname==NULL` + mi_free(buf); + return result; + } + */ +} +#endif + +mi_decl_nodiscard mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept { + return mi_heap_realpath(mi_prim_get_default_heap(),fname,resolved_name); +} +#endif + +/*------------------------------------------------------- +C++ new and new_aligned +The standard requires calling into `get_new_handler` and +throwing the bad_alloc exception on failure. If we compile +with a C++ compiler we can implement this precisely. If we +use a C compiler we cannot throw a `bad_alloc` exception +but we call `exit` instead (i.e. not returning). +-------------------------------------------------------*/ + +#ifdef __cplusplus +#include +static bool mi_try_new_handler(bool nothrow) { + #if defined(_MSC_VER) || (__cplusplus >= 201103L) + std::new_handler h = std::get_new_handler(); + #else + std::new_handler h = std::set_new_handler(); + std::set_new_handler(h); + #endif + if (h==NULL) { + _mi_error_message(ENOMEM, "out of memory in 'new'"); + #if defined(_CPPUNWIND) || defined(__cpp_exceptions) // exceptions are not always enabled + if (!nothrow) { + throw std::bad_alloc(); + } + #else + MI_UNUSED(nothrow); + #endif + return false; + } + else { + h(); + return true; + } +} +#else +typedef void (*std_new_handler_t)(void); + +#if (defined(__GNUC__) || (defined(__clang__) && !defined(_MSC_VER))) // exclude clang-cl, see issue #631 +std_new_handler_t __attribute__((weak)) _ZSt15get_new_handlerv(void) { + return NULL; +} +static std_new_handler_t mi_get_new_handler(void) { + return _ZSt15get_new_handlerv(); +} +#else +// note: on windows we could dynamically link to `?get_new_handler@std@@YAP6AXXZXZ`. 
+static std_new_handler_t mi_get_new_handler() { + return NULL; +} +#endif + +static bool mi_try_new_handler(bool nothrow) { + std_new_handler_t h = mi_get_new_handler(); + if (h==NULL) { + _mi_error_message(ENOMEM, "out of memory in 'new'"); + if (!nothrow) { + abort(); // cannot throw in plain C, use abort + } + return false; + } + else { + h(); + return true; + } +} +#endif + +mi_decl_export mi_decl_noinline void* mi_heap_try_new(mi_heap_t* heap, size_t size, bool nothrow ) { + void* p = NULL; + while(p == NULL && mi_try_new_handler(nothrow)) { + p = mi_heap_malloc(heap,size); + } + return p; +} + +static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow) { + return mi_heap_try_new(mi_prim_get_default_heap(), size, nothrow); +} + + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) { + void* p = mi_heap_malloc(heap,size); + if mi_unlikely(p == NULL) return mi_heap_try_new(heap, size, false); + return p; +} + +mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) { + return mi_heap_alloc_new(mi_prim_get_default_heap(), size); +} + + +mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) { + size_t total; + if mi_unlikely(mi_count_size_overflow(count, size, &total)) { + mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc + return NULL; + } + else { + return mi_heap_alloc_new(heap,total); + } +} + +mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) { + return mi_heap_alloc_new_n(mi_prim_get_default_heap(), size, count); +} + + +mi_decl_nodiscard mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept { + void* p = mi_malloc(size); + if mi_unlikely(p == NULL) return mi_try_new(size, true); + return p; +} + +mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) { + void* p; + do { + p = mi_malloc_aligned(size, alignment); + } + while(p == NULL && mi_try_new_handler(false)); + return p; +} + +mi_decl_nodiscard mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept { + void* p; + do { + p = mi_malloc_aligned(size, alignment); + } + while(p == NULL && mi_try_new_handler(true)); + return p; +} + +mi_decl_nodiscard void* mi_new_realloc(void* p, size_t newsize) { + void* q; + do { + q = mi_realloc(p, newsize); + } while (q == NULL && mi_try_new_handler(false)); + return q; +} + +mi_decl_nodiscard void* mi_new_reallocn(void* p, size_t newcount, size_t size) { + size_t total; + if mi_unlikely(mi_count_size_overflow(newcount, size, &total)) { + mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc + return NULL; + } + else { + return mi_new_realloc(p, total); + } +} + +// ------------------------------------------------------ +// ensure explicit external inline definitions are emitted! 
+// ------------------------------------------------------ + +#ifdef __cplusplus +void* _mi_externs[] = { + (void*)&_mi_page_malloc, + (void*)&_mi_heap_malloc_zero, + (void*)&_mi_heap_malloc_zero_ex, + (void*)&mi_malloc, + (void*)&mi_malloc_small, + (void*)&mi_zalloc_small, + (void*)&mi_heap_malloc, + (void*)&mi_heap_zalloc, + (void*)&mi_heap_malloc_small, + // (void*)&mi_heap_alloc_new, + // (void*)&mi_heap_alloc_new_n +}; +#endif diff --git a/ww/managers/mimalloc/src/arena.c b/ww/managers/mimalloc/src/arena.c new file mode 100644 index 00000000..648ee844 --- /dev/null +++ b/ww/managers/mimalloc/src/arena.c @@ -0,0 +1,1108 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2019-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +/* ---------------------------------------------------------------------------- +"Arenas" are fixed area's of OS memory from which we can allocate +large blocks (>= MI_ARENA_MIN_BLOCK_SIZE, 4MiB). +In contrast to the rest of mimalloc, the arenas are shared between +threads and need to be accessed using atomic operations. + +Arenas are used to for huge OS page (1GiB) reservations or for reserving +OS memory upfront which can be improve performance or is sometimes needed +on embedded devices. We can also employ this with WASI or `sbrk` systems +to reserve large arenas upfront and be able to reuse the memory more effectively. + +The arena allocation needs to be thread safe and we use an atomic bitmap to allocate. +-----------------------------------------------------------------------------*/ +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" + +#include // memset +#include // ENOMEM + +#include "bitmap.h" // atomic bitmap + +/* ----------------------------------------------------------- + Arena allocation +----------------------------------------------------------- */ + +// Block info: bit 0 contains the `in_use` bit, the upper bits the +// size in count of arena blocks. 
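Editor's note: the block-info comment above describes a one-word encoding in which the low bit marks a range as in use and the remaining bits carry its size in arena blocks. A small self-contained sketch of such a packing (helper names are illustrative, not mimalloc API):

#include <stdbool.h>
#include <stdint.h>

// Bit 0: in-use flag; upper bits: size measured in arena blocks.
typedef uintptr_t block_info_t;

static inline block_info_t block_info_pack(bool in_use, uintptr_t block_count) {
  return (block_count << 1) | (in_use ? (uintptr_t)1 : 0);
}
static inline bool block_info_is_in_use(block_info_t info) {
  return (info & 1) != 0;
}
static inline uintptr_t block_info_block_count(block_info_t info) {
  return info >> 1;
}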
+typedef uintptr_t mi_block_info_t; +#define MI_ARENA_BLOCK_SIZE (MI_SEGMENT_SIZE) // 64MiB (must be at least MI_SEGMENT_ALIGN) +#define MI_ARENA_MIN_OBJ_SIZE (MI_ARENA_BLOCK_SIZE/2) // 32MiB +#define MI_MAX_ARENAS (112) // not more than 126 (since we use 7 bits in the memid and an arena index + 1) + +// A memory arena descriptor +typedef struct mi_arena_s { + mi_arena_id_t id; // arena id; 0 for non-specific + mi_memid_t memid; // memid of the memory area + _Atomic(uint8_t*) start; // the start of the memory area + size_t block_count; // size of the area in arena blocks (of `MI_ARENA_BLOCK_SIZE`) + size_t field_count; // number of bitmap fields (where `field_count * MI_BITMAP_FIELD_BITS >= block_count`) + size_t meta_size; // size of the arena structure itself (including its bitmaps) + mi_memid_t meta_memid; // memid of the arena structure itself (OS or static allocation) + int numa_node; // associated NUMA node + bool exclusive; // only allow allocations if specifically for this arena + bool is_large; // memory area consists of large- or huge OS pages (always committed) + _Atomic(size_t) search_idx; // optimization to start the search for free blocks + _Atomic(mi_msecs_t) purge_expire; // expiration time when blocks should be decommitted from `blocks_decommit`. + mi_bitmap_field_t* blocks_dirty; // are the blocks potentially non-zero? + mi_bitmap_field_t* blocks_committed; // are the blocks committed? (can be NULL for memory that cannot be decommitted) + mi_bitmap_field_t* blocks_purge; // blocks that can be (reset) decommitted. (can be NULL for memory that cannot be (reset) decommitted) + mi_bitmap_field_t* blocks_abandoned; // blocks that start with an abandoned segment. (This crosses API's but it is convenient to have here) + mi_bitmap_field_t blocks_inuse[1]; // in-place bitmap of in-use blocks (of size `field_count`) + // do not add further fields here as the dirty, committed, purged, and abandoned bitmaps follow the inuse bitmap fields. +} mi_arena_t; + + +// The available arenas +static mi_decl_cache_align _Atomic(mi_arena_t*) mi_arenas[MI_MAX_ARENAS]; +static mi_decl_cache_align _Atomic(size_t) mi_arena_count; // = 0 + + +//static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept; + +/* ----------------------------------------------------------- + Arena id's + id = arena_index + 1 +----------------------------------------------------------- */ + +static size_t mi_arena_id_index(mi_arena_id_t id) { + return (size_t)(id <= 0 ? 
MI_MAX_ARENAS : id - 1); +} + +static mi_arena_id_t mi_arena_id_create(size_t arena_index) { + mi_assert_internal(arena_index < MI_MAX_ARENAS); + return (int)arena_index + 1; +} + +mi_arena_id_t _mi_arena_id_none(void) { + return 0; +} + +static bool mi_arena_id_is_suitable(mi_arena_id_t arena_id, bool arena_is_exclusive, mi_arena_id_t req_arena_id) { + return ((!arena_is_exclusive && req_arena_id == _mi_arena_id_none()) || + (arena_id == req_arena_id)); +} + +bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id) { + if (memid.memkind == MI_MEM_ARENA) { + return mi_arena_id_is_suitable(memid.mem.arena.id, memid.mem.arena.is_exclusive, request_arena_id); + } + else { + return mi_arena_id_is_suitable(_mi_arena_id_none(), false, request_arena_id); + } +} + +bool _mi_arena_memid_is_os_allocated(mi_memid_t memid) { + return (memid.memkind == MI_MEM_OS); +} + +/* ----------------------------------------------------------- + Arena allocations get a (currently) 16-bit memory id where the + lower 8 bits are the arena id, and the upper bits the block index. +----------------------------------------------------------- */ + +static size_t mi_block_count_of_size(size_t size) { + return _mi_divide_up(size, MI_ARENA_BLOCK_SIZE); +} + +static size_t mi_arena_block_size(size_t bcount) { + return (bcount * MI_ARENA_BLOCK_SIZE); +} + +static size_t mi_arena_size(mi_arena_t* arena) { + return mi_arena_block_size(arena->block_count); +} + +static mi_memid_t mi_memid_create_arena(mi_arena_id_t id, bool is_exclusive, mi_bitmap_index_t bitmap_index) { + mi_memid_t memid = _mi_memid_create(MI_MEM_ARENA); + memid.mem.arena.id = id; + memid.mem.arena.block_index = bitmap_index; + memid.mem.arena.is_exclusive = is_exclusive; + return memid; +} + +static bool mi_arena_memid_indices(mi_memid_t memid, size_t* arena_index, mi_bitmap_index_t* bitmap_index) { + mi_assert_internal(memid.memkind == MI_MEM_ARENA); + *arena_index = mi_arena_id_index(memid.mem.arena.id); + *bitmap_index = memid.mem.arena.block_index; + return memid.mem.arena.is_exclusive; +} + + + +/* ----------------------------------------------------------- + Special static area for mimalloc internal structures + to avoid OS calls (for example, for the arena metadata) +----------------------------------------------------------- */ + +#define MI_ARENA_STATIC_MAX (MI_INTPTR_SIZE*MI_KiB) // 8 KiB on 64-bit + +static mi_decl_cache_align uint8_t mi_arena_static[MI_ARENA_STATIC_MAX]; // must be cache aligned, see issue #895 +static mi_decl_cache_align _Atomic(size_t) mi_arena_static_top; + +static void* mi_arena_static_zalloc(size_t size, size_t alignment, mi_memid_t* memid) { + *memid = _mi_memid_none(); + if (size == 0 || size > MI_ARENA_STATIC_MAX) return NULL; + const size_t toplow = mi_atomic_load_relaxed(&mi_arena_static_top); + if ((toplow + size) > MI_ARENA_STATIC_MAX) return NULL; + + // try to claim space + if (alignment < MI_MAX_ALIGN_SIZE) { alignment = MI_MAX_ALIGN_SIZE; } + const size_t oversize = size + alignment - 1; + if (toplow + oversize > MI_ARENA_STATIC_MAX) return NULL; + const size_t oldtop = mi_atomic_add_acq_rel(&mi_arena_static_top, oversize); + size_t top = oldtop + oversize; + if (top > MI_ARENA_STATIC_MAX) { + // try to roll back, ok if this fails + mi_atomic_cas_strong_acq_rel(&mi_arena_static_top, &top, oldtop); + return NULL; + } + + // success + *memid = _mi_memid_create(MI_MEM_STATIC); + memid->initially_zero = true; + const size_t start = _mi_align_up(oldtop, alignment); + uint8_t* const p = 
&mi_arena_static[start]; + _mi_memzero_aligned(p, size); + return p; +} + +static void* mi_arena_meta_zalloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) { + *memid = _mi_memid_none(); + + // try static + void* p = mi_arena_static_zalloc(size, MI_MAX_ALIGN_SIZE, memid); + if (p != NULL) return p; + + // or fall back to the OS + p = _mi_os_alloc(size, memid, stats); + if (p == NULL) return NULL; + + // zero the OS memory if needed + if (!memid->initially_zero) { + _mi_memzero_aligned(p, size); + memid->initially_zero = true; + } + return p; +} + +static void mi_arena_meta_free(void* p, mi_memid_t memid, size_t size, mi_stats_t* stats) { + if (mi_memkind_is_os(memid.memkind)) { + _mi_os_free(p, size, memid, stats); + } + else { + mi_assert(memid.memkind == MI_MEM_STATIC); + } +} + +static void* mi_arena_block_start(mi_arena_t* arena, mi_bitmap_index_t bindex) { + return (arena->start + mi_arena_block_size(mi_bitmap_index_bit(bindex))); +} + + +/* ----------------------------------------------------------- + Thread safe allocation in an arena +----------------------------------------------------------- */ + +// claim the `blocks_inuse` bits +static bool mi_arena_try_claim(mi_arena_t* arena, size_t blocks, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats) +{ + size_t idx = 0; // mi_atomic_load_relaxed(&arena->search_idx); // start from last search; ok to be relaxed as the exact start does not matter + if (_mi_bitmap_try_find_from_claim_across(arena->blocks_inuse, arena->field_count, idx, blocks, bitmap_idx, stats)) { + mi_atomic_store_relaxed(&arena->search_idx, mi_bitmap_index_field(*bitmap_idx)); // start search from found location next time around + return true; + }; + return false; +} + + +/* ----------------------------------------------------------- + Arena Allocation +----------------------------------------------------------- */ + +static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t arena_index, size_t needed_bcount, + bool commit, mi_memid_t* memid, mi_os_tld_t* tld) +{ + MI_UNUSED(arena_index); + mi_assert_internal(mi_arena_id_index(arena->id) == arena_index); + + mi_bitmap_index_t bitmap_index; + if (!mi_arena_try_claim(arena, needed_bcount, &bitmap_index, tld->stats)) return NULL; + + // claimed it! + void* p = mi_arena_block_start(arena, bitmap_index); + *memid = mi_memid_create_arena(arena->id, arena->exclusive, bitmap_index); + memid->is_pinned = arena->memid.is_pinned; + + // none of the claimed blocks should be scheduled for a decommit + if (arena->blocks_purge != NULL) { + // this is thread safe as a potential purge only decommits parts that are not yet claimed as used (in `blocks_inuse`). + _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, needed_bcount, bitmap_index); + } + + // set the dirty bits (todo: no need for an atomic op here?) 
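Editor's note: the static metadata area handled by `mi_arena_static_zalloc` above hands out space with an atomic bump pointer and rolls the pointer back when a claim overshoots. A standalone sketch of that claim-then-roll-back pattern, assuming a power-of-two alignment and using illustrative names (`static_area`, `static_bump_alloc`):

#include <stdatomic.h>
#include <stddef.h>

// Illustrative static metadata pool and its atomic top pointer.
#define STATIC_MAX (8 * 1024)
static unsigned char static_area[STATIC_MAX];
static _Atomic size_t static_top;

// Claim `size` bytes at `alignment` (assumed a power of two) with an atomic
// bump; on overshoot, try to roll the top back and fail.
static void* static_bump_alloc(size_t size, size_t alignment) {
  if (size == 0 || size > STATIC_MAX) return NULL;
  const size_t oversize = size + alignment - 1;   // slack so we can align inside the claim
  size_t old_top = atomic_fetch_add(&static_top, oversize);
  size_t new_top = old_top + oversize;
  if (new_top > STATIC_MAX) {
    // roll back only if no one else bumped in the meantime; leaking the
    // claim on a lost race is acceptable for a small bounded scratch area
    atomic_compare_exchange_strong(&static_top, &new_top, old_top);
    return NULL;
  }
  const size_t start = (old_top + alignment - 1) & ~(alignment - 1);
  return &static_area[start];
}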
+ if (arena->memid.initially_zero && arena->blocks_dirty != NULL) { + memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL); + } + + // set commit state + if (arena->blocks_committed == NULL) { + // always committed + memid->initially_committed = true; + } + else if (commit) { + // commit requested, but the range may not be committed as a whole: ensure it is committed now + memid->initially_committed = true; + bool any_uncommitted; + _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted); + if (any_uncommitted) { + bool commit_zero = false; + if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero, tld->stats)) { + memid->initially_committed = false; + } + else { + if (commit_zero) { memid->initially_zero = true; } + } + } + } + else { + // no need to commit, but check if already fully committed + memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index); + } + + return p; +} + +// allocate in a speficic arena +static void* mi_arena_try_alloc_at_id(mi_arena_id_t arena_id, bool match_numa_node, int numa_node, size_t size, size_t alignment, + bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld ) +{ + MI_UNUSED_RELEASE(alignment); + mi_assert_internal(alignment <= MI_SEGMENT_ALIGN); + const size_t bcount = mi_block_count_of_size(size); + const size_t arena_index = mi_arena_id_index(arena_id); + mi_assert_internal(arena_index < mi_atomic_load_relaxed(&mi_arena_count)); + mi_assert_internal(size <= mi_arena_block_size(bcount)); + + // Check arena suitability + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]); + if (arena == NULL) return NULL; + if (!allow_large && arena->is_large) return NULL; + if (!mi_arena_id_is_suitable(arena->id, arena->exclusive, req_arena_id)) return NULL; + if (req_arena_id == _mi_arena_id_none()) { // in not specific, check numa affinity + const bool numa_suitable = (numa_node < 0 || arena->numa_node < 0 || arena->numa_node == numa_node); + if (match_numa_node) { if (!numa_suitable) return NULL; } + else { if (numa_suitable) return NULL; } + } + + // try to allocate + void* p = mi_arena_try_alloc_at(arena, arena_index, bcount, commit, memid, tld); + mi_assert_internal(p == NULL || _mi_is_aligned(p, alignment)); + return p; +} + + +// allocate from an arena with fallback to the OS +static mi_decl_noinline void* mi_arena_try_alloc(int numa_node, size_t size, size_t alignment, + bool commit, bool allow_large, + mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld ) +{ + MI_UNUSED(alignment); + mi_assert_internal(alignment <= MI_SEGMENT_ALIGN); + const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count); + if mi_likely(max_arena == 0) return NULL; + + if (req_arena_id != _mi_arena_id_none()) { + // try a specific arena if requested + if (mi_arena_id_index(req_arena_id) < max_arena) { + void* p = mi_arena_try_alloc_at_id(req_arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); + if (p != NULL) return p; + } + } + else { + // try numa affine allocation + for (size_t i = 0; i < max_arena; i++) { + void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); + if (p != NULL) return p; + } + + // try from another numa node instead.. 
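Editor's note: the allocation loop around this point is a two-pass search; the pass above only considers arenas matching the requested NUMA node, and the pass below retries the arenas that were skipped. A standalone sketch of that structure, where `arena_fits` is an illustrative stand-in for the real suitability checks and atomic bitmap claim:

#include <stddef.h>

// Illustrative arena record; numa_node < 0 means "no specific node".
typedef struct arena_s { int numa_node; } arena_t;

static void* arena_fits(arena_t* arena, size_t size) {
  (void)arena; (void)size;
  return NULL;                 // a real version would claim blocks here
}

// Pass 1 tries NUMA-local (or node-agnostic) arenas; pass 2 retries the rest.
static void* try_alloc(arena_t** arenas, size_t count, int numa_node, size_t size) {
  for (size_t i = 0; i < count; i++) {
    arena_t* a = arenas[i];
    if (numa_node < 0 || a->numa_node < 0 || a->numa_node == numa_node) {
      void* p = arena_fits(a, size);
      if (p != NULL) return p;
    }
  }
  if (numa_node >= 0) {        // otherwise pass 1 already covered every arena
    for (size_t i = 0; i < count; i++) {
      arena_t* a = arenas[i];
      if (a->numa_node >= 0 && a->numa_node != numa_node) {
        void* p = arena_fits(a, size);
        if (p != NULL) return p;
      }
    }
  }
  return NULL;
}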
+ if (numa_node >= 0) { // if numa_node was < 0 (no specific affinity requested), all arena's have been tried already + for (size_t i = 0; i < max_arena; i++) { + void* p = mi_arena_try_alloc_at_id(mi_arena_id_create(i), false /* only proceed if not numa local */, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); + if (p != NULL) return p; + } + } + } + return NULL; +} + +// try to reserve a fresh arena space +static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t req_arena_id, mi_arena_id_t *arena_id) +{ + if (_mi_preloading()) return false; // use OS only while pre loading + if (req_arena_id != _mi_arena_id_none()) return false; + + const size_t arena_count = mi_atomic_load_acquire(&mi_arena_count); + if (arena_count > (MI_MAX_ARENAS - 4)) return false; + + size_t arena_reserve = mi_option_get_size(mi_option_arena_reserve); + if (arena_reserve == 0) return false; + + if (!_mi_os_has_virtual_reserve()) { + arena_reserve = arena_reserve/4; // be conservative if virtual reserve is not supported (for WASM for example) + } + arena_reserve = _mi_align_up(arena_reserve, MI_ARENA_BLOCK_SIZE); + if (arena_count >= 8 && arena_count <= 128) { + arena_reserve = ((size_t)1<<(arena_count/8)) * arena_reserve; // scale up the arena sizes exponentially + } + if (arena_reserve < req_size) return false; // should be able to at least handle the current allocation size + + // commit eagerly? + bool arena_commit = false; + if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = _mi_os_has_overcommit(); } + else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; } + + return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive? */, arena_id) == 0); +} + + +void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, + mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld) +{ + mi_assert_internal(memid != NULL && tld != NULL); + mi_assert_internal(size > 0); + *memid = _mi_memid_none(); + + const int numa_node = _mi_os_numa_node(tld); // current numa node + + // try to allocate in an arena if the alignment is small enough and the object is not too small (as for heap meta data) + if (!mi_option_is_enabled(mi_option_disallow_arena_alloc) || req_arena_id != _mi_arena_id_none()) { // is arena allocation allowed? 
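    // (descriptive note) only sufficiently large and not overly-aligned requests go
    // through an arena; sizes below MI_ARENA_MIN_OBJ_SIZE, alignments above
    // MI_SEGMENT_ALIGN, or a non-zero align_offset fall through to the OS path further down.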
+ if (size >= MI_ARENA_MIN_OBJ_SIZE && alignment <= MI_SEGMENT_ALIGN && align_offset == 0) { + void* p = mi_arena_try_alloc(numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); + if (p != NULL) return p; + + // otherwise, try to first eagerly reserve a new arena + if (req_arena_id == _mi_arena_id_none()) { + mi_arena_id_t arena_id = 0; + if (mi_arena_reserve(size, allow_large, req_arena_id, &arena_id)) { + // and try allocate in there + mi_assert_internal(req_arena_id == _mi_arena_id_none()); + p = mi_arena_try_alloc_at_id(arena_id, true, numa_node, size, alignment, commit, allow_large, req_arena_id, memid, tld); + if (p != NULL) return p; + } + } + } + } + + // if we cannot use OS allocation, return NULL + if (mi_option_is_enabled(mi_option_disallow_os_alloc) || req_arena_id != _mi_arena_id_none()) { + errno = ENOMEM; + return NULL; + } + + // finally, fall back to the OS + if (align_offset > 0) { + return _mi_os_alloc_aligned_at_offset(size, alignment, align_offset, commit, allow_large, memid, tld->stats); + } + else { + return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, tld->stats); + } +} + +void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld) +{ + return _mi_arena_alloc_aligned(size, MI_ARENA_BLOCK_SIZE, 0, commit, allow_large, req_arena_id, memid, tld); +} + + +void* mi_arena_area(mi_arena_id_t arena_id, size_t* size) { + if (size != NULL) *size = 0; + size_t arena_index = mi_arena_id_index(arena_id); + if (arena_index >= MI_MAX_ARENAS) return NULL; + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_index]); + if (arena == NULL) return NULL; + if (size != NULL) { *size = mi_arena_block_size(arena->block_count); } + return arena->start; +} + + +/* ----------------------------------------------------------- + Arena purge +----------------------------------------------------------- */ + +static long mi_arena_purge_delay(void) { + // <0 = no purging allowed, 0=immediate purging, >0=milli-second delay + return (mi_option_get(mi_option_purge_delay) * mi_option_get(mi_option_arena_purge_mult)); +} + +// reset or decommit in an arena and update the committed/decommit bitmaps +// assumes we own the area (i.e. blocks_in_use is claimed by us) +static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) { + mi_assert_internal(arena->blocks_committed != NULL); + mi_assert_internal(arena->blocks_purge != NULL); + mi_assert_internal(!arena->memid.is_pinned); + const size_t size = mi_arena_block_size(blocks); + void* const p = mi_arena_block_start(arena, bitmap_idx); + bool needs_recommit; + if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) { + // all blocks are committed, we can purge freely + needs_recommit = _mi_os_purge(p, size, stats); + } + else { + // some blocks are not committed -- this can happen when a partially committed block is freed + // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge + // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory), + // and also undo the decommit stats (as it was already adjusted) + mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits)); + needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? 
*/, stats); + if (needs_recommit) { _mi_stat_increase(&_mi_stats_main.committed, size); } + } + + // clear the purged blocks + _mi_bitmap_unclaim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx); + // update committed bitmap + if (needs_recommit) { + _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx); + } +} + +// Schedule a purge. This is usually delayed to avoid repeated decommit/commit calls. +// Note: assumes we (still) own the area as we may purge immediately +static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks, mi_stats_t* stats) { + mi_assert_internal(arena->blocks_purge != NULL); + const long delay = mi_arena_purge_delay(); + if (delay < 0) return; // is purging allowed at all? + + if (_mi_preloading() || delay == 0) { + // decommit directly + mi_arena_purge(arena, bitmap_idx, blocks, stats); + } + else { + // schedule decommit + mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire); + if (expire != 0) { + mi_atomic_addi64_acq_rel(&arena->purge_expire, (mi_msecs_t)(delay/10)); // add smallish extra delay + } + else { + mi_atomic_storei64_release(&arena->purge_expire, _mi_clock_now() + delay); + } + _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL); + } +} + +// purge a range of blocks +// return true if the full range was purged. +// assumes we own the area (i.e. blocks_in_use is claimed by us) +static bool mi_arena_purge_range(mi_arena_t* arena, size_t idx, size_t startidx, size_t bitlen, size_t purge, mi_stats_t* stats) { + const size_t endidx = startidx + bitlen; + size_t bitidx = startidx; + bool all_purged = false; + while (bitidx < endidx) { + // count consequetive ones in the purge mask + size_t count = 0; + while (bitidx + count < endidx && (purge & ((size_t)1 << (bitidx + count))) != 0) { + count++; + } + if (count > 0) { + // found range to be purged + const mi_bitmap_index_t range_idx = mi_bitmap_index_create(idx, bitidx); + mi_arena_purge(arena, range_idx, count, stats); + if (count == bitlen) { + all_purged = true; + } + } + bitidx += (count+1); // +1 to skip the zero bit (or end) + } + return all_purged; +} + +// returns true if anything was purged +static bool mi_arena_try_purge(mi_arena_t* arena, mi_msecs_t now, bool force, mi_stats_t* stats) +{ + if (arena->memid.is_pinned || arena->blocks_purge == NULL) return false; + mi_msecs_t expire = mi_atomic_loadi64_relaxed(&arena->purge_expire); + if (expire == 0) return false; + if (!force && expire > now) return false; + + // reset expire (if not already set concurrently) + mi_atomic_casi64_strong_acq_rel(&arena->purge_expire, &expire, (mi_msecs_t)0); + + // potential purges scheduled, walk through the bitmap + bool any_purged = false; + bool full_purge = true; + for (size_t i = 0; i < arena->field_count; i++) { + size_t purge = mi_atomic_load_relaxed(&arena->blocks_purge[i]); + if (purge != 0) { + size_t bitidx = 0; + while (bitidx < MI_BITMAP_FIELD_BITS) { + // find consequetive range of ones in the purge mask + size_t bitlen = 0; + while (bitidx + bitlen < MI_BITMAP_FIELD_BITS && (purge & ((size_t)1 << (bitidx + bitlen))) != 0) { + bitlen++; + } + // try to claim the longest range of corresponding in_use bits + const mi_bitmap_index_t bitmap_index = mi_bitmap_index_create(i, bitidx); + while( bitlen > 0 ) { + if (_mi_bitmap_try_claim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index)) { + break; + } + bitlen--; + } + // actual claimed bits at `in_use` + if 
(bitlen > 0) { + // read purge again now that we have the in_use bits + purge = mi_atomic_load_acquire(&arena->blocks_purge[i]); + if (!mi_arena_purge_range(arena, i, bitidx, bitlen, purge, stats)) { + full_purge = false; + } + any_purged = true; + // release the claimed `in_use` bits again + _mi_bitmap_unclaim(arena->blocks_inuse, arena->field_count, bitlen, bitmap_index); + } + bitidx += (bitlen+1); // +1 to skip the zero (or end) + } // while bitidx + } // purge != 0 + } + // if not fully purged, make sure to purge again in the future + if (!full_purge) { + const long delay = mi_arena_purge_delay(); + mi_msecs_t expected = 0; + mi_atomic_casi64_strong_acq_rel(&arena->purge_expire,&expected,_mi_clock_now() + delay); + } + return any_purged; +} + +static void mi_arenas_try_purge( bool force, bool visit_all, mi_stats_t* stats ) { + if (_mi_preloading() || mi_arena_purge_delay() <= 0) return; // nothing will be scheduled + + const size_t max_arena = mi_atomic_load_acquire(&mi_arena_count); + if (max_arena == 0) return; + + // allow only one thread to purge at a time + static mi_atomic_guard_t purge_guard; + mi_atomic_guard(&purge_guard) + { + mi_msecs_t now = _mi_clock_now(); + size_t max_purge_count = (visit_all ? max_arena : 1); + for (size_t i = 0; i < max_arena; i++) { + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]); + if (arena != NULL) { + if (mi_arena_try_purge(arena, now, force, stats)) { + if (max_purge_count <= 1) break; + max_purge_count--; + } + } + } + } +} + + +/* ----------------------------------------------------------- + Arena free +----------------------------------------------------------- */ + +void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memid, mi_stats_t* stats) { + mi_assert_internal(size > 0 && stats != NULL); + mi_assert_internal(committed_size <= size); + if (p==NULL) return; + if (size==0) return; + const bool all_committed = (committed_size == size); + + if (mi_memkind_is_os(memid.memkind)) { + // was a direct OS allocation, pass through + if (!all_committed && committed_size > 0) { + // if partially committed, adjust the committed stats (as `_mi_os_free` will increase decommit by the full size) + _mi_stat_decrease(&_mi_stats_main.committed, committed_size); + } + _mi_os_free(p, size, memid, stats); + } + else if (memid.memkind == MI_MEM_ARENA) { + // allocated in an arena + size_t arena_idx; + size_t bitmap_idx; + mi_arena_memid_indices(memid, &arena_idx, &bitmap_idx); + mi_assert_internal(arena_idx < MI_MAX_ARENAS); + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t,&mi_arenas[arena_idx]); + mi_assert_internal(arena != NULL); + const size_t blocks = mi_block_count_of_size(size); + + // checks + if (arena == NULL) { + _mi_error_message(EINVAL, "trying to free from an invalid arena: %p, size %zu, memid: 0x%zx\n", p, size, memid); + return; + } + mi_assert_internal(arena->field_count > mi_bitmap_index_field(bitmap_idx)); + if (arena->field_count <= mi_bitmap_index_field(bitmap_idx)) { + _mi_error_message(EINVAL, "trying to free from an invalid arena block: %p, size %zu, memid: 0x%zx\n", p, size, memid); + return; + } + + // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.) 
+ mi_track_mem_undefined(p,size); + + // potentially decommit + if (arena->memid.is_pinned || arena->blocks_committed == NULL) { + mi_assert_internal(all_committed); + } + else { + mi_assert_internal(arena->blocks_committed != NULL); + mi_assert_internal(arena->blocks_purge != NULL); + + if (!all_committed) { + // mark the entire range as no longer committed (so we recommit the full range when re-using) + _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx); + mi_track_mem_noaccess(p,size); + if (committed_size > 0) { + // if partially committed, adjust the committed stats (is it will be recommitted when re-using) + // in the delayed purge, we now need to not count a decommit if the range is not marked as committed. + _mi_stat_decrease(&_mi_stats_main.committed, committed_size); + } + // note: if not all committed, it may be that the purge will reset/decommit the entire range + // that contains already decommitted parts. Since purge consistently uses reset or decommit that + // works (as we should never reset decommitted parts). + } + // (delay) purge the entire range + mi_arena_schedule_purge(arena, bitmap_idx, blocks, stats); + } + + // and make it available to others again + bool all_inuse = _mi_bitmap_unclaim_across(arena->blocks_inuse, arena->field_count, blocks, bitmap_idx); + if (!all_inuse) { + _mi_error_message(EAGAIN, "trying to free an already freed arena block: %p, size %zu\n", p, size); + return; + }; + } + else { + // arena was none, external, or static; nothing to do + mi_assert_internal(memid.memkind < MI_MEM_OS); + } + + // purge expired decommits + mi_arenas_try_purge(false, false, stats); +} + +// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit` +// for dynamic libraries that are unloaded and need to release all their allocated memory. +static void mi_arenas_unsafe_destroy(void) { + const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count); + size_t new_max_arena = 0; + for (size_t i = 0; i < max_arena; i++) { + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]); + if (arena != NULL) { + if (arena->start != NULL && mi_memkind_is_os(arena->memid.memkind)) { + mi_atomic_store_ptr_release(mi_arena_t, &mi_arenas[i], NULL); + _mi_os_free(arena->start, mi_arena_size(arena), arena->memid, &_mi_stats_main); + } + else { + new_max_arena = i; + } + mi_arena_meta_free(arena, arena->meta_memid, arena->meta_size, &_mi_stats_main); + } + } + + // try to lower the max arena. + size_t expected = max_arena; + mi_atomic_cas_strong_acq_rel(&mi_arena_count, &expected, new_max_arena); +} + +// Purge the arenas; if `force_purge` is true, amenable parts are purged even if not yet expired +void _mi_arenas_collect(bool force_purge, mi_stats_t* stats) { + mi_arenas_try_purge(force_purge, force_purge /* visit all? */, stats); +} + +// destroy owned arenas; this is unsafe and should only be done using `mi_option_destroy_on_exit` +// for dynamic libraries that are unloaded and need to release all their allocated memory. +void _mi_arena_unsafe_destroy_all(mi_stats_t* stats) { + mi_arenas_unsafe_destroy(); + _mi_arenas_collect(true /* force purge */, stats); // purge non-owned arenas +} + +// Is a pointer inside any of our arenas? 
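// (illustrative note, not part of the original sources) this is a linear scan over
// at most MI_MAX_ARENAS registered arenas; a typical use is a cheap ownership test
// before deciding how a pointer should be released, e.g.:
//   if (_mi_arena_contains(p)) { /* p lies inside arena-managed memory */ }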
+bool _mi_arena_contains(const void* p) { + const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count); + for (size_t i = 0; i < max_arena; i++) { + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[i]); + if (arena != NULL && arena->start <= (const uint8_t*)p && arena->start + mi_arena_block_size(arena->block_count) > (const uint8_t*)p) { + return true; + } + } + return false; +} + +/* ----------------------------------------------------------- + Abandoned blocks/segments. + This is used to atomically abandon/reclaim segments + (and crosses the arena API but it is convenient to have here). + Abandoned segments still have live blocks; they get reclaimed + when a thread frees a block in it, or when a thread needs a fresh + segment; these threads scan the abandoned segments through + the arena bitmaps. +----------------------------------------------------------- */ + +// Maintain a count of all abandoned segments +static mi_decl_cache_align _Atomic(size_t)abandoned_count; + +size_t _mi_arena_segment_abandoned_count(void) { + return mi_atomic_load_relaxed(&abandoned_count); +} + +// reclaim a specific abandoned segment; `true` on success. +// sets the thread_id. +bool _mi_arena_segment_clear_abandoned(mi_segment_t* segment ) +{ + if (segment->memid.memkind != MI_MEM_ARENA) { + // not in an arena, consider it un-abandoned now. + // but we need to still claim it atomically -- we use the thread_id for that. + size_t expected = 0; + if (mi_atomic_cas_strong_acq_rel(&segment->thread_id, &expected, _mi_thread_id())) { + mi_atomic_decrement_relaxed(&abandoned_count); + return true; + } + else { + return false; + } + } + // arena segment: use the blocks_abandoned bitmap. + size_t arena_idx; + size_t bitmap_idx; + mi_arena_memid_indices(segment->memid, &arena_idx, &bitmap_idx); + mi_assert_internal(arena_idx < MI_MAX_ARENAS); + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]); + mi_assert_internal(arena != NULL); + bool was_marked = _mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx); + if (was_marked) { + mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0); + mi_atomic_decrement_relaxed(&abandoned_count); + mi_atomic_store_release(&segment->thread_id, _mi_thread_id()); + } + // mi_assert_internal(was_marked); + mi_assert_internal(!was_marked || _mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx)); + //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx)); + return was_marked; +} + +// mark a specific segment as abandoned +// clears the thread_id. 
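// (descriptive note) abandonment is a two-sided protocol: `_mi_arena_segment_mark_abandoned`
// below clears the owning thread id and sets the segment's bit in `blocks_abandoned`,
// while `_mi_arena_segment_clear_abandoned` above atomically clears that bit (or
// CAS-es the thread id for non-arena segments), so only one thread can reclaim a
// given abandoned segment.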
+void _mi_arena_segment_mark_abandoned(mi_segment_t* segment) +{ + mi_atomic_store_release(&segment->thread_id, 0); + mi_assert_internal(segment->used == segment->abandoned); + if (segment->memid.memkind != MI_MEM_ARENA) { + // not in an arena; count it as abandoned and return + mi_atomic_increment_relaxed(&abandoned_count); + return; + } + size_t arena_idx; + size_t bitmap_idx; + mi_arena_memid_indices(segment->memid, &arena_idx, &bitmap_idx); + mi_assert_internal(arena_idx < MI_MAX_ARENAS); + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]); + mi_assert_internal(arena != NULL); + const bool was_unmarked = _mi_bitmap_claim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx, NULL); + if (was_unmarked) { mi_atomic_increment_relaxed(&abandoned_count); } + mi_assert_internal(was_unmarked); + mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx)); +} + +// start a cursor at a randomized arena +void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_arena_field_cursor_t* current) { + const size_t max_arena = mi_atomic_load_relaxed(&mi_arena_count); + current->start = (max_arena == 0 ? 0 : (mi_arena_id_t)( _mi_heap_random_next(heap) % max_arena)); + current->count = 0; + current->bitmap_idx = 0; +} + +// reclaim abandoned segments +// this does not set the thread id (so it appears as still abandoned) +mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous ) +{ + const int max_arena = (int)mi_atomic_load_relaxed(&mi_arena_count); + if (max_arena <= 0 || mi_atomic_load_relaxed(&abandoned_count) == 0) return NULL; + + int count = previous->count; + size_t field_idx = mi_bitmap_index_field(previous->bitmap_idx); + size_t bit_idx = mi_bitmap_index_bit_in_field(previous->bitmap_idx) + 1; + // visit arena's (from previous) + for (; count < max_arena; count++, field_idx = 0, bit_idx = 0) { + mi_arena_id_t arena_idx = previous->start + count; + if (arena_idx >= max_arena) { arena_idx = arena_idx % max_arena; } // wrap around + mi_arena_t* arena = mi_atomic_load_ptr_acquire(mi_arena_t, &mi_arenas[arena_idx]); + if (arena != NULL) { + // visit the abandoned fields (starting at previous_idx) + for ( ; field_idx < arena->field_count; field_idx++, bit_idx = 0) { + size_t field = mi_atomic_load_relaxed(&arena->blocks_abandoned[field_idx]); + if mi_unlikely(field != 0) { // skip zero fields quickly + // visit each set bit in the field (todo: maybe use `ctz` here?) 
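          // (descriptive note) `field` is only a relaxed snapshot used to pre-check bits;
          // the actual reclaim below still goes through the atomic `_mi_bitmap_unclaim`,
          // so two threads can never take the same abandoned segment.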
+ for ( ; bit_idx < MI_BITMAP_FIELD_BITS; bit_idx++) { + // pre-check if the bit is set + size_t mask = ((size_t)1 << bit_idx); + if mi_unlikely((field & mask) == mask) { + mi_bitmap_index_t bitmap_idx = mi_bitmap_index_create(field_idx, bit_idx); + // try to reclaim it atomically + if (_mi_bitmap_unclaim(arena->blocks_abandoned, arena->field_count, 1, bitmap_idx)) { + mi_atomic_decrement_relaxed(&abandoned_count); + previous->bitmap_idx = bitmap_idx; + previous->count = count; + mi_assert_internal(_mi_bitmap_is_claimed(arena->blocks_inuse, arena->field_count, 1, bitmap_idx)); + mi_segment_t* segment = (mi_segment_t*)mi_arena_block_start(arena, bitmap_idx); + mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0); + //mi_assert_internal(arena->blocks_committed == NULL || _mi_bitmap_is_claimed(arena->blocks_committed, arena->field_count, 1, bitmap_idx)); + return segment; + } + } + } + } + } + } + } + // no more found + previous->bitmap_idx = 0; + previous->count = 0; + return NULL; +} + + +/* ----------------------------------------------------------- + Add an arena. +----------------------------------------------------------- */ + +static bool mi_arena_add(mi_arena_t* arena, mi_arena_id_t* arena_id, mi_stats_t* stats) { + mi_assert_internal(arena != NULL); + mi_assert_internal((uintptr_t)mi_atomic_load_ptr_relaxed(uint8_t,&arena->start) % MI_SEGMENT_ALIGN == 0); + mi_assert_internal(arena->block_count > 0); + if (arena_id != NULL) { *arena_id = -1; } + + size_t i = mi_atomic_increment_acq_rel(&mi_arena_count); + if (i >= MI_MAX_ARENAS) { + mi_atomic_decrement_acq_rel(&mi_arena_count); + return false; + } + _mi_stat_counter_increase(&stats->arena_count,1); + arena->id = mi_arena_id_create(i); + mi_atomic_store_ptr_release(mi_arena_t,&mi_arenas[i], arena); + if (arena_id != NULL) { *arena_id = arena->id; } + return true; +} + +static bool mi_manage_os_memory_ex2(void* start, size_t size, bool is_large, int numa_node, bool exclusive, mi_memid_t memid, mi_arena_id_t* arena_id) mi_attr_noexcept +{ + if (arena_id != NULL) *arena_id = _mi_arena_id_none(); + if (size < MI_ARENA_BLOCK_SIZE) return false; + + if (is_large) { + mi_assert_internal(memid.initially_committed && memid.is_pinned); + } + + const size_t bcount = size / MI_ARENA_BLOCK_SIZE; + const size_t fields = _mi_divide_up(bcount, MI_BITMAP_FIELD_BITS); + const size_t bitmaps = (memid.is_pinned ? 3 : 5); + const size_t asize = sizeof(mi_arena_t) + (bitmaps*fields*sizeof(mi_bitmap_field_t)); + mi_memid_t meta_memid; + mi_arena_t* arena = (mi_arena_t*)mi_arena_meta_zalloc(asize, &meta_memid, &_mi_stats_main); // TODO: can we avoid allocating from the OS? + if (arena == NULL) return false; + + // already zero'd due to zalloc + // _mi_memzero(arena, asize); + arena->id = _mi_arena_id_none(); + arena->memid = memid; + arena->exclusive = exclusive; + arena->meta_size = asize; + arena->meta_memid = meta_memid; + arena->block_count = bcount; + arena->field_count = fields; + arena->start = (uint8_t*)start; + arena->numa_node = numa_node; // TODO: or get the current numa node if -1? (now it allows anyone to allocate on -1) + arena->is_large = is_large; + arena->purge_expire = 0; + arena->search_idx = 0; + // consequetive bitmaps + arena->blocks_dirty = &arena->blocks_inuse[fields]; // just after inuse bitmap + arena->blocks_abandoned = &arena->blocks_inuse[2 * fields]; // just after dirty bitmap + arena->blocks_committed = (arena->memid.is_pinned ? 
NULL : &arena->blocks_inuse[3*fields]); // just after abandoned bitmap + arena->blocks_purge = (arena->memid.is_pinned ? NULL : &arena->blocks_inuse[4*fields]); // just after committed bitmap + // initialize committed bitmap? + if (arena->blocks_committed != NULL && arena->memid.initially_committed) { + memset((void*)arena->blocks_committed, 0xFF, fields*sizeof(mi_bitmap_field_t)); // cast to void* to avoid atomic warning + } + + // and claim leftover blocks if needed (so we never allocate there) + ptrdiff_t post = (fields * MI_BITMAP_FIELD_BITS) - bcount; + mi_assert_internal(post >= 0); + if (post > 0) { + // don't use leftover bits at the end + mi_bitmap_index_t postidx = mi_bitmap_index_create(fields - 1, MI_BITMAP_FIELD_BITS - post); + _mi_bitmap_claim(arena->blocks_inuse, fields, post, postidx, NULL); + } + return mi_arena_add(arena, arena_id, &_mi_stats_main); + +} + +bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept { + mi_memid_t memid = _mi_memid_create(MI_MEM_EXTERNAL); + memid.initially_committed = is_committed; + memid.initially_zero = is_zero; + memid.is_pinned = is_large; + return mi_manage_os_memory_ex2(start,size,is_large,numa_node,exclusive,memid, arena_id); +} + +// Reserve a range of regular OS memory +int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept { + if (arena_id != NULL) *arena_id = _mi_arena_id_none(); + size = _mi_align_up(size, MI_ARENA_BLOCK_SIZE); // at least one block + mi_memid_t memid; + void* start = _mi_os_alloc_aligned(size, MI_SEGMENT_ALIGN, commit, allow_large, &memid, &_mi_stats_main); + if (start == NULL) return ENOMEM; + const bool is_large = memid.is_pinned; // todo: use separate is_large field? + if (!mi_manage_os_memory_ex2(start, size, is_large, -1 /* numa node */, exclusive, memid, arena_id)) { + _mi_os_free_ex(start, size, commit, memid, &_mi_stats_main); + _mi_verbose_message("failed to reserve %zu KiB memory\n", _mi_divide_up(size, 1024)); + return ENOMEM; + } + _mi_verbose_message("reserved %zu KiB memory%s\n", _mi_divide_up(size, 1024), is_large ? " (in large os pages)" : ""); + return 0; +} + + +// Manage a range of regular OS memory +bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept { + return mi_manage_os_memory_ex(start, size, is_committed, is_large, is_zero, numa_node, false /* exclusive? */, NULL); +} + +// Reserve a range of regular OS memory +int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept { + return mi_reserve_os_memory_ex(size, commit, allow_large, false, NULL); +} + + +/* ----------------------------------------------------------- + Debugging +----------------------------------------------------------- */ + +static size_t mi_debug_show_bitmap(const char* prefix, const char* header, size_t block_count, mi_bitmap_field_t* fields, size_t field_count ) { + _mi_verbose_message("%s%s:\n", prefix, header); + size_t bcount = 0; + size_t inuse_count = 0; + for (size_t i = 0; i < field_count; i++) { + char buf[MI_BITMAP_FIELD_BITS + 1]; + uintptr_t field = mi_atomic_load_relaxed(&fields[i]); + for (size_t bit = 0; bit < MI_BITMAP_FIELD_BITS; bit++, bcount++) { + if (bcount < block_count) { + bool inuse = ((((uintptr_t)1 << bit) & field) != 0); + if (inuse) inuse_count++; + buf[bit] = (inuse ? 
'x' : '.'); + } + else { + buf[bit] = ' '; + } + } + buf[MI_BITMAP_FIELD_BITS] = 0; + _mi_verbose_message("%s %s\n", prefix, buf); + } + _mi_verbose_message("%s total ('x'): %zu\n", prefix, inuse_count); + return inuse_count; +} + +void mi_debug_show_arenas(bool show_inuse, bool show_abandoned, bool show_purge) mi_attr_noexcept { + size_t max_arenas = mi_atomic_load_relaxed(&mi_arena_count); + size_t inuse_total = 0; + size_t abandoned_total = 0; + size_t purge_total = 0; + for (size_t i = 0; i < max_arenas; i++) { + mi_arena_t* arena = mi_atomic_load_ptr_relaxed(mi_arena_t, &mi_arenas[i]); + if (arena == NULL) break; + _mi_verbose_message("arena %zu: %zu blocks of size %zuMiB (in %zu fields) %s\n", i, arena->block_count, MI_ARENA_BLOCK_SIZE / MI_MiB, arena->field_count, (arena->memid.is_pinned ? ", pinned" : "")); + if (show_inuse) { + inuse_total += mi_debug_show_bitmap(" ", "inuse blocks", arena->block_count, arena->blocks_inuse, arena->field_count); + } + if (arena->blocks_committed != NULL) { + mi_debug_show_bitmap(" ", "committed blocks", arena->block_count, arena->blocks_committed, arena->field_count); + } + if (show_abandoned) { + abandoned_total += mi_debug_show_bitmap(" ", "abandoned blocks", arena->block_count, arena->blocks_abandoned, arena->field_count); + } + if (show_purge && arena->blocks_purge != NULL) { + purge_total += mi_debug_show_bitmap(" ", "purgeable blocks", arena->block_count, arena->blocks_purge, arena->field_count); + } + } + if (show_inuse) _mi_verbose_message("total inuse blocks : %zu\n", inuse_total); + if (show_abandoned) _mi_verbose_message("total abandoned blocks: %zu\n", abandoned_total); + if (show_purge) _mi_verbose_message("total purgeable blocks: %zu\n", purge_total); +} + + +/* ----------------------------------------------------------- + Reserve a huge page arena. +----------------------------------------------------------- */ +// reserve at a specific numa node +int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept { + if (arena_id != NULL) *arena_id = -1; + if (pages==0) return 0; + if (numa_node < -1) numa_node = -1; + if (numa_node >= 0) numa_node = numa_node % _mi_os_numa_node_count(); + size_t hsize = 0; + size_t pages_reserved = 0; + mi_memid_t memid; + void* p = _mi_os_alloc_huge_os_pages(pages, numa_node, timeout_msecs, &pages_reserved, &hsize, &memid); + if (p==NULL || pages_reserved==0) { + _mi_warning_message("failed to reserve %zu GiB huge pages\n", pages); + return ENOMEM; + } + _mi_verbose_message("numa node %i: reserved %zu GiB huge pages (of the %zu GiB requested)\n", numa_node, pages_reserved, pages); + + if (!mi_manage_os_memory_ex2(p, hsize, true, numa_node, exclusive, memid, arena_id)) { + _mi_os_free(p, hsize, memid, &_mi_stats_main); + return ENOMEM; + } + return 0; +} + +int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept { + return mi_reserve_huge_os_pages_at_ex(pages, numa_node, timeout_msecs, false, NULL); +} + +// reserve huge pages evenly among the given number of numa nodes (or use the available ones as detected) +int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept { + if (pages == 0) return 0; + + // pages per numa node + size_t numa_count = (numa_nodes > 0 ? 
numa_nodes : _mi_os_numa_node_count()); + if (numa_count <= 0) numa_count = 1; + const size_t pages_per = pages / numa_count; + const size_t pages_mod = pages % numa_count; + const size_t timeout_per = (timeout_msecs==0 ? 0 : (timeout_msecs / numa_count) + 50); + + // reserve evenly among numa nodes + for (size_t numa_node = 0; numa_node < numa_count && pages > 0; numa_node++) { + size_t node_pages = pages_per; // can be 0 + if (numa_node < pages_mod) node_pages++; + int err = mi_reserve_huge_os_pages_at(node_pages, (int)numa_node, timeout_per); + if (err) return err; + if (pages < node_pages) { + pages = 0; + } + else { + pages -= node_pages; + } + } + + return 0; +} + +int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept { + MI_UNUSED(max_secs); + _mi_warning_message("mi_reserve_huge_os_pages is deprecated: use mi_reserve_huge_os_pages_interleave/at instead\n"); + if (pages_reserved != NULL) *pages_reserved = 0; + int err = mi_reserve_huge_os_pages_interleave(pages, 0, (size_t)(max_secs * 1000.0)); + if (err==0 && pages_reserved!=NULL) *pages_reserved = pages; + return err; +} + diff --git a/ww/managers/mimalloc/src/bitmap.c b/ww/managers/mimalloc/src/bitmap.c new file mode 100644 index 00000000..4b6be66b --- /dev/null +++ b/ww/managers/mimalloc/src/bitmap.c @@ -0,0 +1,436 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2019-2023 Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +/* ---------------------------------------------------------------------------- +Concurrent bitmap that can set/reset sequences of bits atomically, +represented as an array of fields where each field is a machine word (`size_t`) + +There are two api's; the standard one cannot have sequences that cross +between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS). + +The `_across` postfixed functions do allow sequences that can cross over +between the fields. (This is used in arena allocation) +---------------------------------------------------------------------------- */ + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "bitmap.h" + +/* ----------------------------------------------------------- + Bitmap definition +----------------------------------------------------------- */ + +// The bit mask for a given number of blocks at a specified bit index. +static inline size_t mi_bitmap_mask_(size_t count, size_t bitidx) { + mi_assert_internal(count + bitidx <= MI_BITMAP_FIELD_BITS); + mi_assert_internal(count > 0); + if (count >= MI_BITMAP_FIELD_BITS) return MI_BITMAP_FIELD_FULL; + if (count == 0) return 0; + return ((((size_t)1 << count) - 1) << bitidx); +} + + +/* ----------------------------------------------------------- + Claim a bit sequence atomically +----------------------------------------------------------- */ + +// Try to atomically claim a sequence of `count` bits in a single +// field at `idx` in `bitmap`. Returns `true` on success. 
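// (illustrative note, not part of the original sources) each field is one machine
// word; claiming `count` bits at `bitidx` means CAS-ing in the mask computed by
// `mi_bitmap_mask_` above, e.g. count==3 at bitidx==5 gives ((1<<3)-1)<<5 == 0xE0,
// and the loop below only succeeds if all three of those bits were still zero.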
+inline bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx) +{ + mi_assert_internal(bitmap_idx != NULL); + mi_assert_internal(count <= MI_BITMAP_FIELD_BITS); + mi_assert_internal(count > 0); + mi_bitmap_field_t* field = &bitmap[idx]; + size_t map = mi_atomic_load_relaxed(field); + if (map==MI_BITMAP_FIELD_FULL) return false; // short cut + + // search for 0-bit sequence of length count + const size_t mask = mi_bitmap_mask_(count, 0); + const size_t bitidx_max = MI_BITMAP_FIELD_BITS - count; + +#ifdef MI_HAVE_FAST_BITSCAN + size_t bitidx = mi_ctz(~map); // quickly find the first zero bit if possible +#else + size_t bitidx = 0; // otherwise start at 0 +#endif + size_t m = (mask << bitidx); // invariant: m == mask shifted by bitidx + + // scan linearly for a free range of zero bits + while (bitidx <= bitidx_max) { + const size_t mapm = (map & m); + if (mapm == 0) { // are the mask bits free at bitidx? + mi_assert_internal((m >> bitidx) == mask); // no overflow? + const size_t newmap = (map | m); + mi_assert_internal((newmap^map) >> bitidx == mask); + if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { // TODO: use weak cas here? + // no success, another thread claimed concurrently.. keep going (with updated `map`) + continue; + } + else { + // success, we claimed the bits! + *bitmap_idx = mi_bitmap_index_create(idx, bitidx); + return true; + } + } + else { + // on to the next bit range +#ifdef MI_HAVE_FAST_BITSCAN + mi_assert_internal(mapm != 0); + const size_t shift = (count == 1 ? 1 : (MI_INTPTR_BITS - mi_clz(mapm) - bitidx)); + mi_assert_internal(shift > 0 && shift <= count); +#else + const size_t shift = 1; +#endif + bitidx += shift; + m <<= shift; + } + } + // no bits found + return false; +} + +// Find `count` bits of 0 and set them to 1 atomically; returns `true` on success. +// Starts at idx, and wraps around to search in all `bitmap_fields` fields. +// `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields. +bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx) { + size_t idx = start_field_idx; + for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) { + if (idx >= bitmap_fields) { idx = 0; } // wrap + if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) { + return true; + } + } + return false; +} + +// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fullfilled +bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, + const size_t start_field_idx, const size_t count, + mi_bitmap_pred_fun_t pred_fun, void* pred_arg, + mi_bitmap_index_t* bitmap_idx) { + size_t idx = start_field_idx; + for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) { + if (idx >= bitmap_fields) idx = 0; // wrap + if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) { + if (pred_fun == NULL || pred_fun(*bitmap_idx, pred_arg)) { + return true; + } + // predicate returned false, unclaim and look further + _mi_bitmap_unclaim(bitmap, bitmap_fields, count, *bitmap_idx); + } + } + return false; +} + +// Set `count` bits at `bitmap_idx` to 0 atomically +// Returns `true` if all `count` bits were 1 previously. 
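// (illustrative note, not part of the original sources) claim/unclaim is the basic
// pairing used throughout the arena code: blocks are taken with `_mi_bitmap_try_claim`
// or the find-and-claim variants, and released with the unclaim functions; the
// boolean result of an unclaim also doubles as a double-free check (see the
// `all_inuse` test in `_mi_arena_free`).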
+bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { + const size_t idx = mi_bitmap_index_field(bitmap_idx); + const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); + const size_t mask = mi_bitmap_mask_(count, bitidx); + mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields); + // mi_assert_internal((bitmap[idx] & mask) == mask); + const size_t prev = mi_atomic_and_acq_rel(&bitmap[idx], ~mask); + return ((prev & mask) == mask); +} + + +// Set `count` bits at `bitmap_idx` to 1 atomically +// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit. +bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero) { + const size_t idx = mi_bitmap_index_field(bitmap_idx); + const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); + const size_t mask = mi_bitmap_mask_(count, bitidx); + mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields); + //mi_assert_internal(any_zero != NULL || (bitmap[idx] & mask) == 0); + size_t prev = mi_atomic_or_acq_rel(&bitmap[idx], mask); + if (any_zero != NULL) { *any_zero = ((prev & mask) != mask); } + return ((prev & mask) == 0); +} + +// Returns `true` if all `count` bits were 1. `any_ones` is `true` if there was at least one bit set to one. +static bool mi_bitmap_is_claimedx(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_ones) { + const size_t idx = mi_bitmap_index_field(bitmap_idx); + const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); + const size_t mask = mi_bitmap_mask_(count, bitidx); + mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields); + const size_t field = mi_atomic_load_relaxed(&bitmap[idx]); + if (any_ones != NULL) { *any_ones = ((field & mask) != 0); } + return ((field & mask) == mask); +} + +// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically. +// Returns `true` if successful when all previous `count` bits were 0. +bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { + const size_t idx = mi_bitmap_index_field(bitmap_idx); + const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); + const size_t mask = mi_bitmap_mask_(count, bitidx); + mi_assert_internal(bitmap_fields > idx); MI_UNUSED(bitmap_fields); + size_t expected = mi_atomic_load_relaxed(&bitmap[idx]); + do { + if ((expected & mask) != 0) return false; + } + while (!mi_atomic_cas_strong_acq_rel(&bitmap[idx], &expected, expected | mask)); + mi_assert_internal((expected & mask) == 0); + return true; +} + + +bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { + return mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, NULL); +} + +bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { + bool any_ones; + mi_bitmap_is_claimedx(bitmap, bitmap_fields, count, bitmap_idx, &any_ones); + return any_ones; +} + + +//-------------------------------------------------------------------------- +// the `_across` functions work on bitmaps where sequences can cross over +// between the fields. 
This is used in arena allocation +//-------------------------------------------------------------------------- + +// Try to atomically claim a sequence of `count` bits starting from the field +// at `idx` in `bitmap` and crossing into subsequent fields. Returns `true` on success. +// Only needs to consider crossing into the next fields (see `mi_bitmap_try_find_from_claim_across`) +static bool mi_bitmap_try_find_claim_field_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t idx, const size_t count, const size_t retries, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats) +{ + mi_assert_internal(bitmap_idx != NULL); + + // check initial trailing zeros + mi_bitmap_field_t* field = &bitmap[idx]; + size_t map = mi_atomic_load_relaxed(field); + const size_t initial = mi_clz(map); // count of initial zeros starting at idx + mi_assert_internal(initial <= MI_BITMAP_FIELD_BITS); + if (initial == 0) return false; + if (initial >= count) return _mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx); // no need to cross fields (this case won't happen for us) + if (_mi_divide_up(count - initial, MI_BITMAP_FIELD_BITS) >= (bitmap_fields - idx)) return false; // not enough entries + + // scan ahead + size_t found = initial; + size_t mask = 0; // mask bits for the final field + while(found < count) { + field++; + map = mi_atomic_load_relaxed(field); + const size_t mask_bits = (found + MI_BITMAP_FIELD_BITS <= count ? MI_BITMAP_FIELD_BITS : (count - found)); + mi_assert_internal(mask_bits > 0 && mask_bits <= MI_BITMAP_FIELD_BITS); + mask = mi_bitmap_mask_(mask_bits, 0); + if ((map & mask) != 0) return false; // some part is already claimed + found += mask_bits; + } + mi_assert_internal(field < &bitmap[bitmap_fields]); + + // we found a range of contiguous zeros up to the final field; mask contains mask in the final field + // now try to claim the range atomically + mi_bitmap_field_t* const final_field = field; + const size_t final_mask = mask; + mi_bitmap_field_t* const initial_field = &bitmap[idx]; + const size_t initial_idx = MI_BITMAP_FIELD_BITS - initial; + const size_t initial_mask = mi_bitmap_mask_(initial, initial_idx); + + // initial field + size_t newmap; + field = initial_field; + map = mi_atomic_load_relaxed(field); + do { + newmap = (map | initial_mask); + if ((map & initial_mask) != 0) { goto rollback; }; + } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)); + + // intermediate fields + while (++field < final_field) { + newmap = MI_BITMAP_FIELD_FULL; + map = 0; + if (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)) { goto rollback; } + } + + // final field + mi_assert_internal(field == final_field); + map = mi_atomic_load_relaxed(field); + do { + newmap = (map | final_mask); + if ((map & final_mask) != 0) { goto rollback; } + } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)); + + // claimed! 
+ mi_stat_counter_increase(stats->arena_crossover_count,1); + *bitmap_idx = mi_bitmap_index_create(idx, initial_idx); + return true; + +rollback: + // roll back intermediate fields + // (we just failed to claim `field` so decrement first) + while (--field > initial_field) { + newmap = 0; + map = MI_BITMAP_FIELD_FULL; + mi_assert_internal(mi_atomic_load_relaxed(field) == map); + mi_atomic_store_release(field, newmap); + } + if (field == initial_field) { // (if we failed on the initial field, `field + 1 == initial_field`) + map = mi_atomic_load_relaxed(field); + do { + mi_assert_internal((map & initial_mask) == initial_mask); + newmap = (map & ~initial_mask); + } while (!mi_atomic_cas_strong_acq_rel(field, &map, newmap)); + } + mi_stat_counter_increase(stats->arena_rollback_count,1); + // retry? (we make a recursive call instead of goto to be able to use const declarations) + if (retries <= 2) { + return mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, retries+1, bitmap_idx, stats); + } + else { + return false; + } +} + + +// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success. +// Starts at idx, and wraps around to search in all `bitmap_fields` fields. +bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats) { + mi_assert_internal(count > 0); + if (count <= 2) { + // we don't bother with crossover fields for small counts + return _mi_bitmap_try_find_from_claim(bitmap, bitmap_fields, start_field_idx, count, bitmap_idx); + } + + // visit the fields + size_t idx = start_field_idx; + for (size_t visited = 0; visited < bitmap_fields; visited++, idx++) { + if (idx >= bitmap_fields) { idx = 0; } // wrap + // first try to claim inside a field + /* + if (count <= MI_BITMAP_FIELD_BITS) { + if (_mi_bitmap_try_find_claim_field(bitmap, idx, count, bitmap_idx)) { + return true; + } + } + */ + // if that fails, then try to claim across fields + if (mi_bitmap_try_find_claim_field_across(bitmap, bitmap_fields, idx, count, 0, bitmap_idx, stats)) { + return true; + } + } + return false; +} + +// Helper for masks across fields; returns the mid count, post_mask may be 0 +static size_t mi_bitmap_mask_across(mi_bitmap_index_t bitmap_idx, size_t bitmap_fields, size_t count, size_t* pre_mask, size_t* mid_mask, size_t* post_mask) { + MI_UNUSED(bitmap_fields); + const size_t bitidx = mi_bitmap_index_bit_in_field(bitmap_idx); + if mi_likely(bitidx + count <= MI_BITMAP_FIELD_BITS) { + *pre_mask = mi_bitmap_mask_(count, bitidx); + *mid_mask = 0; + *post_mask = 0; + mi_assert_internal(mi_bitmap_index_field(bitmap_idx) < bitmap_fields); + return 0; + } + else { + const size_t pre_bits = MI_BITMAP_FIELD_BITS - bitidx; + mi_assert_internal(pre_bits < count); + *pre_mask = mi_bitmap_mask_(pre_bits, bitidx); + count -= pre_bits; + const size_t mid_count = (count / MI_BITMAP_FIELD_BITS); + *mid_mask = MI_BITMAP_FIELD_FULL; + count %= MI_BITMAP_FIELD_BITS; + *post_mask = (count==0 ? 0 : mi_bitmap_mask_(count, 0)); + mi_assert_internal(mi_bitmap_index_field(bitmap_idx) + mid_count + (count==0 ? 0 : 1) < bitmap_fields); + return mid_count; + } +} + +// Set `count` bits at `bitmap_idx` to 0 atomically +// Returns `true` if all `count` bits were 1 previously. 
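// (illustrative note, not part of the original sources) `mi_bitmap_mask_across` above
// splits a crossing range into a partial mask in the first field, some number of fully
// covered middle fields, and a partial mask in the last field; e.g. with 64-bit fields,
// bitidx==60 and count==70 yields a 4-bit pre mask, one full mid field, and a 2-bit
// post mask (4 + 64 + 2 == 70).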
+bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { + size_t idx = mi_bitmap_index_field(bitmap_idx); + size_t pre_mask; + size_t mid_mask; + size_t post_mask; + size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask); + bool all_one = true; + mi_bitmap_field_t* field = &bitmap[idx]; + size_t prev = mi_atomic_and_acq_rel(field++, ~pre_mask); // clear first part + if ((prev & pre_mask) != pre_mask) all_one = false; + while(mid_count-- > 0) { + prev = mi_atomic_and_acq_rel(field++, ~mid_mask); // clear mid part + if ((prev & mid_mask) != mid_mask) all_one = false; + } + if (post_mask!=0) { + prev = mi_atomic_and_acq_rel(field, ~post_mask); // clear end part + if ((prev & post_mask) != post_mask) all_one = false; + } + return all_one; +} + +// Set `count` bits at `bitmap_idx` to 1 atomically +// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit. +bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero) { + size_t idx = mi_bitmap_index_field(bitmap_idx); + size_t pre_mask; + size_t mid_mask; + size_t post_mask; + size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask); + bool all_zero = true; + bool any_zero = false; + _Atomic(size_t)*field = &bitmap[idx]; + size_t prev = mi_atomic_or_acq_rel(field++, pre_mask); + if ((prev & pre_mask) != 0) all_zero = false; + if ((prev & pre_mask) != pre_mask) any_zero = true; + while (mid_count-- > 0) { + prev = mi_atomic_or_acq_rel(field++, mid_mask); + if ((prev & mid_mask) != 0) all_zero = false; + if ((prev & mid_mask) != mid_mask) any_zero = true; + } + if (post_mask!=0) { + prev = mi_atomic_or_acq_rel(field, post_mask); + if ((prev & post_mask) != 0) all_zero = false; + if ((prev & post_mask) != post_mask) any_zero = true; + } + if (pany_zero != NULL) { *pany_zero = any_zero; } + return all_zero; +} + + +// Returns `true` if all `count` bits were 1. +// `any_ones` is `true` if there was at least one bit set to one. 
+static bool mi_bitmap_is_claimedx_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_ones) { + size_t idx = mi_bitmap_index_field(bitmap_idx); + size_t pre_mask; + size_t mid_mask; + size_t post_mask; + size_t mid_count = mi_bitmap_mask_across(bitmap_idx, bitmap_fields, count, &pre_mask, &mid_mask, &post_mask); + bool all_ones = true; + bool any_ones = false; + mi_bitmap_field_t* field = &bitmap[idx]; + size_t prev = mi_atomic_load_relaxed(field++); + if ((prev & pre_mask) != pre_mask) all_ones = false; + if ((prev & pre_mask) != 0) any_ones = true; + while (mid_count-- > 0) { + prev = mi_atomic_load_relaxed(field++); + if ((prev & mid_mask) != mid_mask) all_ones = false; + if ((prev & mid_mask) != 0) any_ones = true; + } + if (post_mask!=0) { + prev = mi_atomic_load_relaxed(field); + if ((prev & post_mask) != post_mask) all_ones = false; + if ((prev & post_mask) != 0) any_ones = true; + } + if (pany_ones != NULL) { *pany_ones = any_ones; } + return all_ones; +} + +bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { + return mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, NULL); +} + +bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx) { + bool any_ones; + mi_bitmap_is_claimedx_across(bitmap, bitmap_fields, count, bitmap_idx, &any_ones); + return any_ones; +} diff --git a/ww/managers/mimalloc/src/bitmap.h b/ww/managers/mimalloc/src/bitmap.h new file mode 100644 index 00000000..d8316b83 --- /dev/null +++ b/ww/managers/mimalloc/src/bitmap.h @@ -0,0 +1,115 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2019-2023 Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +/* ---------------------------------------------------------------------------- +Concurrent bitmap that can set/reset sequences of bits atomically, +represented as an array of fields where each field is a machine word (`size_t`) + +There are two api's; the standard one cannot have sequences that cross +between the bitmap fields (and a sequence must be <= MI_BITMAP_FIELD_BITS). +(this is used in region allocation) + +The `_across` postfixed functions do allow sequences that can cross over +between the fields. (This is used in arena allocation) +---------------------------------------------------------------------------- */ +#pragma once +#ifndef MI_BITMAP_H +#define MI_BITMAP_H + +/* ----------------------------------------------------------- + Bitmap definition +----------------------------------------------------------- */ + +#define MI_BITMAP_FIELD_BITS (8*MI_SIZE_SIZE) +#define MI_BITMAP_FIELD_FULL (~((size_t)0)) // all bits set + +// An atomic bitmap of `size_t` fields +typedef _Atomic(size_t) mi_bitmap_field_t; +typedef mi_bitmap_field_t* mi_bitmap_t; + +// A bitmap index is the index of the bit in a bitmap. +typedef size_t mi_bitmap_index_t; + +// Create a bit index. +static inline mi_bitmap_index_t mi_bitmap_index_create(size_t idx, size_t bitidx) { + mi_assert_internal(bitidx < MI_BITMAP_FIELD_BITS); + return (idx*MI_BITMAP_FIELD_BITS) + bitidx; +} + +// Create a bit index. 
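// (illustrative note, not part of the original sources) a bitmap index packs the field
// number and the bit position into a single size_t as idx*MI_BITMAP_FIELD_BITS + bitidx;
// e.g. field 2, bit 5 on a 64-bit platform gives 133, and `mi_bitmap_index_field` /
// `mi_bitmap_index_bit_in_field` below recover 2 and 5 again.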
+static inline mi_bitmap_index_t mi_bitmap_index_create_from_bit(size_t full_bitidx) { + return mi_bitmap_index_create(full_bitidx / MI_BITMAP_FIELD_BITS, full_bitidx % MI_BITMAP_FIELD_BITS); +} + +// Get the field index from a bit index. +static inline size_t mi_bitmap_index_field(mi_bitmap_index_t bitmap_idx) { + return (bitmap_idx / MI_BITMAP_FIELD_BITS); +} + +// Get the bit index in a bitmap field +static inline size_t mi_bitmap_index_bit_in_field(mi_bitmap_index_t bitmap_idx) { + return (bitmap_idx % MI_BITMAP_FIELD_BITS); +} + +// Get the full bit index +static inline size_t mi_bitmap_index_bit(mi_bitmap_index_t bitmap_idx) { + return bitmap_idx; +} + +/* ----------------------------------------------------------- + Claim a bit sequence atomically +----------------------------------------------------------- */ + +// Try to atomically claim a sequence of `count` bits in a single +// field at `idx` in `bitmap`. Returns `true` on success. +bool _mi_bitmap_try_find_claim_field(mi_bitmap_t bitmap, size_t idx, const size_t count, mi_bitmap_index_t* bitmap_idx); + +// Starts at idx, and wraps around to search in all `bitmap_fields` fields. +// For now, `count` can be at most MI_BITMAP_FIELD_BITS and will never cross fields. +bool _mi_bitmap_try_find_from_claim(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx); + +// Like _mi_bitmap_try_find_from_claim but with an extra predicate that must be fullfilled +typedef bool (mi_cdecl *mi_bitmap_pred_fun_t)(mi_bitmap_index_t bitmap_idx, void* pred_arg); +bool _mi_bitmap_try_find_from_claim_pred(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_pred_fun_t pred_fun, void* pred_arg, mi_bitmap_index_t* bitmap_idx); + +// Set `count` bits at `bitmap_idx` to 0 atomically +// Returns `true` if all `count` bits were 1 previously. +bool _mi_bitmap_unclaim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); + +// Try to set `count` bits at `bitmap_idx` from 0 to 1 atomically. +// Returns `true` if successful when all previous `count` bits were 0. +bool _mi_bitmap_try_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); + +// Set `count` bits at `bitmap_idx` to 1 atomically +// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit. +bool _mi_bitmap_claim(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* any_zero); + +bool _mi_bitmap_is_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); +bool _mi_bitmap_is_any_claimed(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); + + +//-------------------------------------------------------------------------- +// the `_across` functions work on bitmaps where sequences can cross over +// between the fields. This is used in arena allocation +//-------------------------------------------------------------------------- + +// Find `count` bits of zeros and set them to 1 atomically; returns `true` on success. +// Starts at idx, and wraps around to search in all `bitmap_fields` fields. 
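// (illustrative note, not part of the original sources) a typical caller pattern, as in
// `mi_arena_try_claim` / `_mi_arena_free` in arena.c, is:
//   mi_bitmap_index_t bi;
//   if (_mi_bitmap_try_find_from_claim_across(bitmap, fields, 0, blocks, &bi, stats)) {
//     /* ... use the claimed blocks ... */
//     _mi_bitmap_unclaim_across(bitmap, fields, blocks, bi);  // release them again
//   }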
+bool _mi_bitmap_try_find_from_claim_across(mi_bitmap_t bitmap, const size_t bitmap_fields, const size_t start_field_idx, const size_t count, mi_bitmap_index_t* bitmap_idx, mi_stats_t* stats); + +// Set `count` bits at `bitmap_idx` to 0 atomically +// Returns `true` if all `count` bits were 1 previously. +bool _mi_bitmap_unclaim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); + +// Set `count` bits at `bitmap_idx` to 1 atomically +// Returns `true` if all `count` bits were 0 previously. `any_zero` is `true` if there was at least one zero bit. +bool _mi_bitmap_claim_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx, bool* pany_zero); + +bool _mi_bitmap_is_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); +bool _mi_bitmap_is_any_claimed_across(mi_bitmap_t bitmap, size_t bitmap_fields, size_t count, mi_bitmap_index_t bitmap_idx); + +#endif diff --git a/ww/managers/mimalloc/src/free.c b/ww/managers/mimalloc/src/free.c new file mode 100644 index 00000000..b9cb6346 --- /dev/null +++ b/ww/managers/mimalloc/src/free.c @@ -0,0 +1,530 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#if !defined(MI_IN_ALLOC_C) +#error "this file should be included from 'alloc.c' (so aliases can work from alloc-override)" +// add includes help an IDE +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" // _mi_prim_thread_id() +#endif + +// forward declarations +static void mi_check_padding(const mi_page_t* page, const mi_block_t* block); +static bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block); +static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block); +static void mi_stat_free(const mi_page_t* page, const mi_block_t* block); + + +// ------------------------------------------------------ +// Free +// ------------------------------------------------------ + +// forward declaration of multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON) +static mi_decl_noinline void mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block); + +// regular free of a (thread local) block pointer +// fast path written carefully to prevent spilling on the stack +static inline void mi_free_block_local(mi_page_t* page, mi_block_t* block, bool track_stats, bool check_full) +{ + // checks + if mi_unlikely(mi_check_is_double_free(page, block)) return; + mi_check_padding(page, block); + if (track_stats) { mi_stat_free(page, block); } + #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN + if (!mi_page_is_huge(page)) { // huge page content may be already decommitted + memset(block, MI_DEBUG_FREED, mi_page_block_size(page)); + } + #endif + if (track_stats) { mi_track_free_size(block, mi_page_usable_size_of(page, block)); } // faster then mi_usable_size as we already know the page and that p is unaligned + + // actual free: push on the local free list + mi_block_set_next(page, block, page->local_free); + page->local_free = block; + if mi_unlikely(--page->used == 0) { + 
_mi_page_retire(page); + } + else if mi_unlikely(check_full && mi_page_is_in_full(page)) { + _mi_page_unfull(page); + } +} + +// Adjust a block that was allocated aligned, to the actual start of the block in the page. +// note: this can be called from `mi_free_generic_mt` where a non-owning thread accesses the +// `page_start` and `block_size` fields; however these are constant and the page won't be +// deallocated (as the block we are freeing keeps it alive) and thus safe to read concurrently. +mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) { + mi_assert_internal(page!=NULL && p!=NULL); + + size_t diff = (uint8_t*)p - page->page_start; + size_t adjust; + if mi_likely(page->block_size_shift != 0) { + adjust = diff & (((size_t)1 << page->block_size_shift) - 1); + } + else { + adjust = diff % mi_page_block_size(page); + } + + return (mi_block_t*)((uintptr_t)p - adjust); +} + +// free a local pointer (page parameter comes first for better codegen) +static void mi_decl_noinline mi_free_generic_local(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept { + MI_UNUSED(segment); + mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(page, p) : (mi_block_t*)p); + mi_free_block_local(page, block, true /* track stats */, true /* check for a full page */); +} + +// free a pointer owned by another thread (page parameter comes first for better codegen) +static void mi_decl_noinline mi_free_generic_mt(mi_page_t* page, mi_segment_t* segment, void* p) mi_attr_noexcept { + mi_block_t* const block = _mi_page_ptr_unalign(page, p); // don't check `has_aligned` flag to avoid a race (issue #865) + mi_free_block_mt(page, segment, block); +} + +// generic free (for runtime integration) +void mi_decl_noinline _mi_free_generic(mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept { + if (is_local) mi_free_generic_local(page,segment,p); + else mi_free_generic_mt(page,segment,p); +} + +// Get the segment data belonging to a pointer +// This is just a single `and` in release mode but does further checks in debug mode +// (and secure mode) to see if this was a valid pointer. 
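+// For illustration only -- a sketch of the idea, not the exact helper used below: since
+// segments are allocated at MI_SEGMENT_SIZE-aligned addresses, the owning segment of a
+// pointer can be recovered by masking off the low bits, roughly
+//
+//   mi_segment_t* seg = (mi_segment_t*)((uintptr_t)p & ~((uintptr_t)MI_SEGMENT_SIZE - 1));
+//
+// `_mi_ptr_segment` encapsulates this computation (the "single `and`" mentioned above);
+// the debug/secure checks in `mi_checked_ptr_segment` are layered on top of it.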
+static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* msg) +{ + MI_UNUSED(msg); + +#if (MI_DEBUG>0) + if mi_unlikely(((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) { + _mi_error_message(EINVAL, "%s: invalid (unaligned) pointer: %p\n", msg, p); + return NULL; + } +#endif + + mi_segment_t* const segment = _mi_ptr_segment(p); + if mi_unlikely(segment==NULL) return segment; + +#if (MI_DEBUG>0) + if mi_unlikely(!mi_is_in_heap_region(p)) { + #if (MI_INTPTR_SIZE == 8 && defined(__linux__)) + if (((uintptr_t)p >> 40) != 0x7F) { // linux tends to align large blocks above 0x7F000000000 (issue #640) + #else + { + #endif + _mi_warning_message("%s: pointer might not point to a valid heap region: %p\n" + "(this may still be a valid very large allocation (over 64MiB))\n", msg, p); + if mi_likely(_mi_ptr_cookie(segment) == segment->cookie) { + _mi_warning_message("(yes, the previous pointer %p was valid after all)\n", p); + } + } + } +#endif +#if (MI_DEBUG>0 || MI_SECURE>=4) + if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) { + _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p); + return NULL; + } +#endif + + return segment; +} + +// Free a block +// Fast path written carefully to prevent register spilling on the stack +void mi_free(void* p) mi_attr_noexcept +{ + mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free"); + if mi_unlikely(segment==NULL) return; + + const bool is_local = (_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id)); + mi_page_t* const page = _mi_segment_page_of(segment, p); + + if mi_likely(is_local) { // thread-local free? + if mi_likely(page->flags.full_aligned == 0) { // and it is not a full page (full pages need to move from the full bin), nor has aligned blocks (aligned blocks need to be unaligned) + // thread-local, aligned, and not a full page + mi_block_t* const block = (mi_block_t*)p; + mi_free_block_local(page, block, true /* track stats */, false /* no need to check if the page is full */); + } + else { + // page is full or contains (inner) aligned blocks; use generic path + mi_free_generic_local(page, segment, p); + } + } + else { + // not thread-local; use generic path + mi_free_generic_mt(page, segment, p); + } +} + +// return true if successful +bool _mi_free_delayed_block(mi_block_t* block) { + // get segment and page + mi_assert_internal(block!=NULL); + const mi_segment_t* const segment = _mi_ptr_segment(block); + mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie); + mi_assert_internal(_mi_thread_id() == segment->thread_id); + mi_page_t* const page = _mi_segment_page_of(segment, block); + + // Clear the no-delayed flag so delayed freeing is used again for this page. + // This must be done before collecting the free lists on this page -- otherwise + // some blocks may end up in the page `thread_free` list with no blocks in the + // heap `thread_delayed_free` list which may cause the page to be never freed! 
+  // (it would only be freed if we happen to scan it in `mi_page_queue_find_free_ex`)
+  if (!_mi_page_try_use_delayed_free(page, MI_USE_DELAYED_FREE, false /* don't overwrite never delayed */)) {
+    return false;
+  }
+
+  // collect all other non-local frees (move from `thread_free` to `free`) to ensure up-to-date `used` count
+  _mi_page_free_collect(page, false);
+
+  // and free the block (possibly freeing the page as well since `used` is updated)
+  mi_free_block_local(page, block, false /* stats have already been adjusted */, true /* check for a full page */);
+  return true;
+}
+
+// ------------------------------------------------------
+// Multi-threaded Free (`_mt`)
+// ------------------------------------------------------
+
+// Push a block that is owned by another thread on its page-local thread free
+// list or its heap delayed free list. Such blocks are later collected by
+// the owning thread in `_mi_free_delayed_block`.
+static void mi_decl_noinline mi_free_block_delayed_mt( mi_page_t* page, mi_block_t* block )
+{
+  // Try to put the block on either the page-local thread free list,
+  // or the heap delayed free list (if this is the first non-local free in that page)
+  mi_thread_free_t tfreex;
+  bool use_delayed;
+  mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free);
+  do {
+    use_delayed = (mi_tf_delayed(tfree) == MI_USE_DELAYED_FREE);
+    if mi_unlikely(use_delayed) {
+      // unlikely: this only happens on the first concurrent free in a page that is in the full list
+      tfreex = mi_tf_set_delayed(tfree,MI_DELAYED_FREEING);
+    }
+    else {
+      // usual: directly add to page thread_free list
+      mi_block_set_next(page, block, mi_tf_block(tfree));
+      tfreex = mi_tf_set_block(tfree,block);
+    }
+  } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex));
+
+  // If this was the first non-local free, we need to push it on the heap delayed free list instead
+  if mi_unlikely(use_delayed) {
+    // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`)
+    mi_heap_t* const heap = (mi_heap_t*)(mi_atomic_load_acquire(&page->xheap)); //mi_page_heap(page);
+    mi_assert_internal(heap != NULL);
+    if (heap != NULL) {
+      // add to the delayed free list of this heap.
(do this atomically as the lock only protects heap memory validity) + mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free); + do { + mi_block_set_nextx(heap,block,dfree, heap->keys); + } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block)); + } + + // and reset the MI_DELAYED_FREEING flag + tfree = mi_atomic_load_relaxed(&page->xthread_free); + do { + tfreex = tfree; + mi_assert_internal(mi_tf_delayed(tfree) == MI_DELAYED_FREEING); + tfreex = mi_tf_set_delayed(tfree,MI_NO_DELAYED_FREE); + } while (!mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex)); + } +} + +// Multi-threaded free (`_mt`) (or free in huge block if compiled with MI_HUGE_PAGE_ABANDON) +static void mi_decl_noinline mi_free_block_mt(mi_page_t* page, mi_segment_t* segment, mi_block_t* block) +{ + // first see if the segment was abandoned and if we can reclaim it into our thread + if (mi_option_is_enabled(mi_option_abandoned_reclaim_on_free) && + #if MI_HUGE_PAGE_ABANDON + segment->page_kind != MI_PAGE_HUGE && + #endif + mi_atomic_load_relaxed(&segment->thread_id) == 0) + { + // the segment is abandoned, try to reclaim it into our heap + if (_mi_segment_attempt_reclaim(mi_heap_get_default(), segment)) { + mi_assert_internal(_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id)); + mi_free(block); // recursively free as now it will be a local free in our heap + return; + } + } + + // The padding check may access the non-thread-owned page for the key values. + // that is safe as these are constant and the page won't be freed (as the block is not freed yet). + mi_check_padding(page, block); + + // adjust stats (after padding check and potentially recursive `mi_free` above) + mi_stat_free(page, block); // stat_free may access the padding + mi_track_free_size(block, mi_page_usable_size_of(page,block)); + + // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection + _mi_padding_shrink(page, block, sizeof(mi_block_t)); + + if (segment->kind == MI_SEGMENT_HUGE) { + #if MI_HUGE_PAGE_ABANDON + // huge page segments are always abandoned and can be freed immediately + _mi_segment_huge_page_free(segment, page, block); + return; + #else + // huge pages are special as they occupy the entire segment + // as these are large we reset the memory occupied by the page so it is available to other threads + // (as the owning thread needs to actually free the memory later). 
+ _mi_segment_huge_page_reset(segment, page, block); + #endif + } + else { + #if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN // note: when tracking, cannot use mi_usable_size with multi-threading + memset(block, MI_DEBUG_FREED, mi_usable_size(block)); + #endif + } + + // and finally free the actual block by pushing it on the owning heap + // thread_delayed free list (or heap delayed free list) + mi_free_block_delayed_mt(page,block); +} + + +// ------------------------------------------------------ +// Usable size +// ------------------------------------------------------ + +// Bytes available in a block +static size_t mi_decl_noinline mi_page_usable_aligned_size_of(const mi_page_t* page, const void* p) mi_attr_noexcept { + const mi_block_t* block = _mi_page_ptr_unalign(page, p); + const size_t size = mi_page_usable_size_of(page, block); + const ptrdiff_t adjust = (uint8_t*)p - (uint8_t*)block; + mi_assert_internal(adjust >= 0 && (size_t)adjust <= size); + return (size - adjust); +} + +static inline size_t _mi_usable_size(const void* p, const char* msg) mi_attr_noexcept { + const mi_segment_t* const segment = mi_checked_ptr_segment(p, msg); + if mi_unlikely(segment==NULL) return 0; + const mi_page_t* const page = _mi_segment_page_of(segment, p); + if mi_likely(!mi_page_has_aligned(page)) { + const mi_block_t* block = (const mi_block_t*)p; + return mi_page_usable_size_of(page, block); + } + else { + // split out to separate routine for improved code generation + return mi_page_usable_aligned_size_of(page, p); + } +} + +mi_decl_nodiscard size_t mi_usable_size(const void* p) mi_attr_noexcept { + return _mi_usable_size(p, "mi_usable_size"); +} + + +// ------------------------------------------------------ +// Free variants +// ------------------------------------------------------ + +void mi_free_size(void* p, size_t size) mi_attr_noexcept { + MI_UNUSED_RELEASE(size); + mi_assert(p == NULL || size <= _mi_usable_size(p,"mi_free_size")); + mi_free(p); +} + +void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept { + MI_UNUSED_RELEASE(alignment); + mi_assert(((uintptr_t)p % alignment) == 0); + mi_free_size(p,size); +} + +void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept { + MI_UNUSED_RELEASE(alignment); + mi_assert(((uintptr_t)p % alignment) == 0); + mi_free(p); +} + + +// ------------------------------------------------------ +// Check for double free in secure and debug mode +// This is somewhat expensive so only enabled for secure mode 4 +// ------------------------------------------------------ + +#if (MI_ENCODE_FREELIST && (MI_SECURE>=4 || MI_DEBUG!=0)) +// linear check if the free list contains a specific element +static bool mi_list_contains(const mi_page_t* page, const mi_block_t* list, const mi_block_t* elem) { + while (list != NULL) { + if (elem==list) return true; + list = mi_block_next(page, list); + } + return false; +} + +static mi_decl_noinline bool mi_check_is_double_freex(const mi_page_t* page, const mi_block_t* block) { + // The decoded value is in the same page (or NULL). 
+ // Walk the free lists to verify positively if it is already freed + if (mi_list_contains(page, page->free, block) || + mi_list_contains(page, page->local_free, block) || + mi_list_contains(page, mi_page_thread_free(page), block)) + { + _mi_error_message(EAGAIN, "double free detected of block %p with size %zu\n", block, mi_page_block_size(page)); + return true; + } + return false; +} + +#define mi_track_page(page,access) { size_t psize; void* pstart = _mi_page_start(_mi_page_segment(page),page,&psize); mi_track_mem_##access( pstart, psize); } + +static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) { + bool is_double_free = false; + mi_block_t* n = mi_block_nextx(page, block, page->keys); // pretend it is freed, and get the decoded first field + if (((uintptr_t)n & (MI_INTPTR_SIZE-1))==0 && // quick check: aligned pointer? + (n==NULL || mi_is_in_same_page(block, n))) // quick check: in same page or NULL? + { + // Suspicious: decoded value a in block is in the same page (or NULL) -- maybe a double free? + // (continue in separate function to improve code generation) + is_double_free = mi_check_is_double_freex(page, block); + } + return is_double_free; +} +#else +static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block_t* block) { + MI_UNUSED(page); + MI_UNUSED(block); + return false; +} +#endif + + +// --------------------------------------------------------------------------- +// Check for heap block overflow by setting up padding at the end of the block +// --------------------------------------------------------------------------- + +#if MI_PADDING // && !MI_TRACK_ENABLED +static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) { + *bsize = mi_page_usable_block_size(page); + const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize); + mi_track_mem_defined(padding,sizeof(mi_padding_t)); + *delta = padding->delta; + uint32_t canary = padding->canary; + uintptr_t keys[2]; + keys[0] = page->keys[0]; + keys[1] = page->keys[1]; + bool ok = ((uint32_t)mi_ptr_encode(page,block,keys) == canary && *delta <= *bsize); + mi_track_mem_noaccess(padding,sizeof(mi_padding_t)); + return ok; +} + +// Return the exact usable size of a block. +static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) { + size_t bsize; + size_t delta; + bool ok = mi_page_decode_padding(page, block, &delta, &bsize); + mi_assert_internal(ok); mi_assert_internal(delta <= bsize); + return (ok ? bsize - delta : 0); +} + +// When a non-thread-local block is freed, it becomes part of the thread delayed free +// list that is freed later by the owning heap. If the exact usable size is too small to +// contain the pointer for the delayed list, then shrink the padding (by decreasing delta) +// so it will later not trigger an overflow error in `mi_free_block`. 
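+// A small worked example (hypothetical sizes, for illustration only): suppose the usable
+// block size `bsize` is 32 bytes and the recorded padding `delta` is 28, so the exact
+// usable size is 4 bytes. A cross-thread free needs room for one `mi_block_t` next
+// pointer (8 bytes on 64-bit), so the shrink below lowers `delta` to 32 - 8 = 24,
+// trading 4 bytes of padding for space to link the block instead of reporting a
+// spurious overflow later.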
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) { + size_t bsize; + size_t delta; + bool ok = mi_page_decode_padding(page, block, &delta, &bsize); + mi_assert_internal(ok); + if (!ok || (bsize - delta) >= min_size) return; // usually already enough space + mi_assert_internal(bsize >= min_size); + if (bsize < min_size) return; // should never happen + size_t new_delta = (bsize - min_size); + mi_assert_internal(new_delta < bsize); + mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize); + mi_track_mem_defined(padding,sizeof(mi_padding_t)); + padding->delta = (uint32_t)new_delta; + mi_track_mem_noaccess(padding,sizeof(mi_padding_t)); +} +#else +static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) { + MI_UNUSED(block); + return mi_page_usable_block_size(page); +} + +void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) { + MI_UNUSED(page); + MI_UNUSED(block); + MI_UNUSED(min_size); +} +#endif + +#if MI_PADDING && MI_PADDING_CHECK + +static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) { + size_t bsize; + size_t delta; + bool ok = mi_page_decode_padding(page, block, &delta, &bsize); + *size = *wrong = bsize; + if (!ok) return false; + mi_assert_internal(bsize >= delta); + *size = bsize - delta; + if (!mi_page_is_huge(page)) { + uint8_t* fill = (uint8_t*)block + bsize - delta; + const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // check at most the first N padding bytes + mi_track_mem_defined(fill, maxpad); + for (size_t i = 0; i < maxpad; i++) { + if (fill[i] != MI_DEBUG_PADDING) { + *wrong = bsize - delta + i; + ok = false; + break; + } + } + mi_track_mem_noaccess(fill, maxpad); + } + return ok; +} + +static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) { + size_t size; + size_t wrong; + if (!mi_verify_padding(page,block,&size,&wrong)) { + _mi_error_message(EFAULT, "buffer overflow in heap block %p of size %zu: write after %zu bytes\n", block, size, wrong ); + } +} + +#else + +static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) { + MI_UNUSED(page); + MI_UNUSED(block); +} + +#endif + +// only maintain stats for smaller objects if requested +#if (MI_STAT>0) +static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { + #if (MI_STAT < 2) + MI_UNUSED(block); + #endif + mi_heap_t* const heap = mi_heap_get_default(); + const size_t bsize = mi_page_usable_block_size(page); + #if (MI_STAT>1) + const size_t usize = mi_page_usable_size_of(page, block); + mi_heap_stat_decrease(heap, malloc, usize); + #endif + if (bsize <= MI_MEDIUM_OBJ_SIZE_MAX) { + mi_heap_stat_decrease(heap, normal, bsize); + #if (MI_STAT > 1) + mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], 1); + #endif + } + else if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { + mi_heap_stat_decrease(heap, large, bsize); + } + else { + mi_heap_stat_decrease(heap, huge, bsize); + } +} +#else +static void mi_stat_free(const mi_page_t* page, const mi_block_t* block) { + MI_UNUSED(page); MI_UNUSED(block); +} +#endif diff --git a/ww/managers/mimalloc/src/heap.c b/ww/managers/mimalloc/src/heap.c new file mode 100644 index 00000000..e498fdb2 --- /dev/null +++ b/ww/managers/mimalloc/src/heap.c @@ -0,0 +1,653 @@ +/*---------------------------------------------------------------------------- +Copyright (c) 2018-2021, Microsoft Research, Daan Leijen +This is free software; you can 
redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h"  // mi_prim_get_default_heap
+
+#include <string.h>  // memset, memcpy
+
+#if defined(_MSC_VER) && (_MSC_VER < 1920)
+#pragma warning(disable:4204)  // non-constant aggregate initializer
+#endif
+
+/* -----------------------------------------------------------
+  Helpers
+----------------------------------------------------------- */
+
+// return `true` if ok, `false` to break
+typedef bool (heap_page_visitor_fun)(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2);
+
+// Visit all pages in a heap; returns `false` if break was called.
+static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void* arg1, void* arg2)
+{
+  if (heap==NULL || heap->page_count==0) return 0;
+
+  // visit all pages
+  #if MI_DEBUG>1
+  size_t total = heap->page_count;
+  size_t count = 0;
+  #endif
+
+  for (size_t i = 0; i <= MI_BIN_FULL; i++) {
+    mi_page_queue_t* pq = &heap->pages[i];
+    mi_page_t* page = pq->first;
+    while(page != NULL) {
+      mi_page_t* next = page->next;  // save next in case the page gets removed from the queue
+      mi_assert_internal(mi_page_heap(page) == heap);
+      #if MI_DEBUG>1
+      count++;
+      #endif
+      if (!fn(heap, pq, page, arg1, arg2)) return false;
+      page = next;  // and continue
+    }
+  }
+  mi_assert_internal(count == total);
+  return true;
+}
+
+
+#if MI_DEBUG>=2
+static bool mi_heap_page_is_valid(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) {
+  MI_UNUSED(arg1);
+  MI_UNUSED(arg2);
+  MI_UNUSED(pq);
+  mi_assert_internal(mi_page_heap(page) == heap);
+  mi_segment_t* segment = _mi_page_segment(page);
+  mi_assert_internal(segment->thread_id == heap->thread_id);
+  mi_assert_expensive(_mi_page_is_valid(page));
+  return true;
+}
+#endif
+#if MI_DEBUG>=3
+static bool mi_heap_is_valid(mi_heap_t* heap) {
+  mi_assert_internal(heap!=NULL);
+  mi_heap_visit_pages(heap, &mi_heap_page_is_valid, NULL, NULL);
+  return true;
+}
+#endif
+
+
+
+
+/* -----------------------------------------------------------
+  "Collect" pages by migrating `local_free` and `thread_free`
+  lists and freeing empty pages. This is done when a thread
+  stops (and in that case abandons pages if there are still
+  blocks alive)
+----------------------------------------------------------- */
+
+typedef enum mi_collect_e {
+  MI_NORMAL,
+  MI_FORCE,
+  MI_ABANDON
+} mi_collect_t;
+
+
+static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg_collect, void* arg2 ) {
+  MI_UNUSED(arg2);
+  MI_UNUSED(heap);
+  mi_assert_internal(mi_heap_page_is_valid(heap, pq, page, NULL, NULL));
+  mi_collect_t collect = *((mi_collect_t*)arg_collect);
+  _mi_page_free_collect(page, collect >= MI_FORCE);
+  if (collect == MI_FORCE) {
+    // note: call before a potential `_mi_page_free` as the segment may be freed if this was the last used page in that segment.
+    mi_segment_t* segment = _mi_page_segment(page);
+    _mi_segment_collect(segment, true /* force? */, &heap->tld->segments);
+  }
+  if (mi_page_all_free(page)) {
+    // no more used blocks, free the page.
+    // note: this will free retired pages as well.
+ _mi_page_free(page, pq, collect >= MI_FORCE); + } + else if (collect == MI_ABANDON) { + // still used blocks but the thread is done; abandon the page + _mi_page_abandon(page, pq); + } + return true; // don't break +} + +static bool mi_heap_page_never_delayed_free(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) { + MI_UNUSED(arg1); + MI_UNUSED(arg2); + MI_UNUSED(heap); + MI_UNUSED(pq); + _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false); + return true; // don't break +} + +static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect) +{ + if (heap==NULL || !mi_heap_is_initialized(heap)) return; + + const bool force = (collect >= MI_FORCE); + _mi_deferred_free(heap, force); + + // python/cpython#112532: we may be called from a thread that is not the owner of the heap + const bool is_main_thread = (_mi_is_main_thread() && heap->thread_id == _mi_thread_id()); + + // note: never reclaim on collect but leave it to threads that need storage to reclaim + const bool force_main = + #ifdef NDEBUG + collect == MI_FORCE + #else + collect >= MI_FORCE + #endif + && is_main_thread && mi_heap_is_backing(heap) && !heap->no_reclaim; + + if (force_main) { + // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments. + // if all memory is freed by now, all segments should be freed. + _mi_abandoned_reclaim_all(heap, &heap->tld->segments); + } + + // if abandoning, mark all pages to no longer add to delayed_free + if (collect == MI_ABANDON) { + mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL); + } + + // free all current thread delayed blocks. + // (if abandoning, after this there are no more thread-delayed references into the pages.) + _mi_heap_delayed_free_all(heap); + + // collect retired pages + _mi_heap_collect_retired(heap, force); + + // collect all pages owned by this thread + mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL); + mi_assert_internal( collect != MI_ABANDON || mi_atomic_load_ptr_acquire(mi_block_t,&heap->thread_delayed_free) == NULL ); + + // collect abandoned segments (in particular, purge expired parts of segments in the abandoned segment list) + // note: forced purge can be quite expensive if many threads are created/destroyed so we do not force on abandonment + _mi_abandoned_collect(heap, collect == MI_FORCE /* force? */, &heap->tld->segments); + + // if forced, collect thread data cache on program-exit (or shared library unload) + if (force && is_main_thread && mi_heap_is_backing(heap)) { + _mi_thread_data_collect(); // collect thread data cache + } + + // collect arenas (this is program wide so don't force purges on abandonment of threads) + _mi_arenas_collect(collect == MI_FORCE /* force purge? */, &heap->tld->stats); +} + +void _mi_heap_collect_abandon(mi_heap_t* heap) { + mi_heap_collect_ex(heap, MI_ABANDON); +} + +void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept { + mi_heap_collect_ex(heap, (force ? 
MI_FORCE : MI_NORMAL)); +} + +void mi_collect(bool force) mi_attr_noexcept { + mi_heap_collect(mi_prim_get_default_heap(), force); +} + + +/* ----------------------------------------------------------- + Heap new +----------------------------------------------------------- */ + +mi_heap_t* mi_heap_get_default(void) { + mi_thread_init(); + return mi_prim_get_default_heap(); +} + +static bool mi_heap_is_default(const mi_heap_t* heap) { + return (heap == mi_prim_get_default_heap()); +} + + +mi_heap_t* mi_heap_get_backing(void) { + mi_heap_t* heap = mi_heap_get_default(); + mi_assert_internal(heap!=NULL); + mi_heap_t* bheap = heap->tld->heap_backing; + mi_assert_internal(bheap!=NULL); + mi_assert_internal(bheap->thread_id == _mi_thread_id()); + return bheap; +} + +void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool noreclaim, uint8_t tag) { + _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t)); + heap->tld = tld; + heap->thread_id = _mi_thread_id(); + heap->arena_id = arena_id; + heap->no_reclaim = noreclaim; + heap->tag = tag; + if (heap == tld->heap_backing) { + _mi_random_init(&heap->random); + } + else { + _mi_random_split(&tld->heap_backing->random, &heap->random); + } + heap->cookie = _mi_heap_random_next(heap) | 1; + heap->keys[0] = _mi_heap_random_next(heap); + heap->keys[1] = _mi_heap_random_next(heap); + // push on the thread local heaps list + heap->next = heap->tld->heaps; + heap->tld->heaps = heap; +} + +mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) { + mi_heap_t* bheap = mi_heap_get_backing(); + mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t); // todo: OS allocate in secure mode? + if (heap == NULL) return NULL; + // don't reclaim abandoned pages or otherwise destroy is unsafe + _mi_heap_init(heap, bheap->tld, arena_id, true /* no reclaim */, 0 /* default tag */); + return heap; +} + +mi_decl_nodiscard mi_heap_t* mi_heap_new(void) { + return mi_heap_new_in_arena(_mi_arena_id_none()); +} + +bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid) { + return _mi_arena_memid_is_suitable(memid, heap->arena_id); +} + +uintptr_t _mi_heap_random_next(mi_heap_t* heap) { + return _mi_random_next(&heap->random); +} + +// zero out the page queues +static void mi_heap_reset_pages(mi_heap_t* heap) { + mi_assert_internal(heap != NULL); + mi_assert_internal(mi_heap_is_initialized(heap)); + // TODO: copy full empty heap instead? + memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct)); + _mi_memcpy_aligned(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages)); + heap->thread_delayed_free = NULL; + heap->page_count = 0; +} + +// called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources. 
+static void mi_heap_free(mi_heap_t* heap) { + mi_assert(heap != NULL); + mi_assert_internal(mi_heap_is_initialized(heap)); + if (heap==NULL || !mi_heap_is_initialized(heap)) return; + if (mi_heap_is_backing(heap)) return; // dont free the backing heap + + // reset default + if (mi_heap_is_default(heap)) { + _mi_heap_set_default_direct(heap->tld->heap_backing); + } + + // remove ourselves from the thread local heaps list + // linear search but we expect the number of heaps to be relatively small + mi_heap_t* prev = NULL; + mi_heap_t* curr = heap->tld->heaps; + while (curr != heap && curr != NULL) { + prev = curr; + curr = curr->next; + } + mi_assert_internal(curr == heap); + if (curr == heap) { + if (prev != NULL) { prev->next = heap->next; } + else { heap->tld->heaps = heap->next; } + } + mi_assert_internal(heap->tld->heaps != NULL); + + // and free the used memory + mi_free(heap); +} + +// return a heap on the same thread as `heap` specialized for the specified tag (if it exists) +mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag) { + if (heap->tag == tag) { + return heap; + } + for (mi_heap_t *curr = heap->tld->heaps; curr != NULL; curr = curr->next) { + if (curr->tag == tag) { + return curr; + } + } + return NULL; +} + +/* ----------------------------------------------------------- + Heap destroy +----------------------------------------------------------- */ + +static bool _mi_heap_page_destroy(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* arg1, void* arg2) { + MI_UNUSED(arg1); + MI_UNUSED(arg2); + MI_UNUSED(heap); + MI_UNUSED(pq); + + // ensure no more thread_delayed_free will be added + _mi_page_use_delayed_free(page, MI_NEVER_DELAYED_FREE, false); + + // stats + const size_t bsize = mi_page_block_size(page); + if (bsize > MI_MEDIUM_OBJ_SIZE_MAX) { + if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { + mi_heap_stat_decrease(heap, large, bsize); + } + else { + mi_heap_stat_decrease(heap, huge, bsize); + } + } +#if (MI_STAT) + _mi_page_free_collect(page, false); // update used count + const size_t inuse = page->used; + if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { + mi_heap_stat_decrease(heap, normal, bsize * inuse); +#if (MI_STAT>1) + mi_heap_stat_decrease(heap, normal_bins[_mi_bin(bsize)], inuse); +#endif + } + mi_heap_stat_decrease(heap, malloc, bsize * inuse); // todo: off for aligned blocks... +#endif + + /// pretend it is all free now + mi_assert_internal(mi_page_thread_free(page) == NULL); + page->used = 0; + + // and free the page + // mi_page_free(page,false); + page->next = NULL; + page->prev = NULL; + _mi_segment_page_free(page,false /* no force? 
*/, &heap->tld->segments); + + return true; // keep going +} + +void _mi_heap_destroy_pages(mi_heap_t* heap) { + mi_heap_visit_pages(heap, &_mi_heap_page_destroy, NULL, NULL); + mi_heap_reset_pages(heap); +} + +#if MI_TRACK_HEAP_DESTROY +static bool mi_cdecl mi_heap_track_block_free(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) { + MI_UNUSED(heap); MI_UNUSED(area); MI_UNUSED(arg); MI_UNUSED(block_size); + mi_track_free_size(block,mi_usable_size(block)); + return true; +} +#endif + +void mi_heap_destroy(mi_heap_t* heap) { + mi_assert(heap != NULL); + mi_assert(mi_heap_is_initialized(heap)); + mi_assert(heap->no_reclaim); + mi_assert_expensive(mi_heap_is_valid(heap)); + if (heap==NULL || !mi_heap_is_initialized(heap)) return; + if (!heap->no_reclaim) { + // don't free in case it may contain reclaimed pages + mi_heap_delete(heap); + } + else { + // track all blocks as freed + #if MI_TRACK_HEAP_DESTROY + mi_heap_visit_blocks(heap, true, mi_heap_track_block_free, NULL); + #endif + // free all pages + _mi_heap_destroy_pages(heap); + mi_heap_free(heap); + } +} + +// forcefully destroy all heaps in the current thread +void _mi_heap_unsafe_destroy_all(void) { + mi_heap_t* bheap = mi_heap_get_backing(); + mi_heap_t* curr = bheap->tld->heaps; + while (curr != NULL) { + mi_heap_t* next = curr->next; + if (curr->no_reclaim) { + mi_heap_destroy(curr); + } + else { + _mi_heap_destroy_pages(curr); + } + curr = next; + } +} + +/* ----------------------------------------------------------- + Safe Heap delete +----------------------------------------------------------- */ + +// Transfer the pages from one heap to the other +static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) { + mi_assert_internal(heap!=NULL); + if (from==NULL || from->page_count == 0) return; + + // reduce the size of the delayed frees + _mi_heap_delayed_free_partial(from); + + // transfer all pages by appending the queues; this will set a new heap field + // so threads may do delayed frees in either heap for a while. + // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state + // so after this only the new heap will get delayed frees + for (size_t i = 0; i <= MI_BIN_FULL; i++) { + mi_page_queue_t* pq = &heap->pages[i]; + mi_page_queue_t* append = &from->pages[i]; + size_t pcount = _mi_page_queue_append(heap, pq, append); + heap->page_count += pcount; + from->page_count -= pcount; + } + mi_assert_internal(from->page_count == 0); + + // and do outstanding delayed frees in the `from` heap + // note: be careful here as the `heap` field in all those pages no longer point to `from`, + // turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls a + // the regular `_mi_free_delayed_block` which is safe. + _mi_heap_delayed_free_all(from); + #if !defined(_MSC_VER) || (_MSC_VER > 1900) // somehow the following line gives an error in VS2015, issue #353 + mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t,&from->thread_delayed_free) == NULL); + #endif + + // and reset the `from` heap + mi_heap_reset_pages(from); +} + +// Safe delete a heap without freeing any still allocated blocks in that heap. 
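+// Typical use of the first-class heap API, for illustration (`my_type_t` is a
+// hypothetical user type):
+//
+//   mi_heap_t* h = mi_heap_new();
+//   my_type_t* obj = (my_type_t*)mi_heap_malloc(h, sizeof(my_type_t));
+//   ...
+//   mi_free(obj);        // blocks can still be freed individually
+//   mi_heap_delete(h);   // remaining blocks migrate to the default (backing) heap
+//
+// In contrast, `mi_heap_destroy` above frees all blocks of the heap at once without migrating them.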
+void mi_heap_delete(mi_heap_t* heap) +{ + mi_assert(heap != NULL); + mi_assert(mi_heap_is_initialized(heap)); + mi_assert_expensive(mi_heap_is_valid(heap)); + if (heap==NULL || !mi_heap_is_initialized(heap)) return; + + if (!mi_heap_is_backing(heap)) { + // transfer still used pages to the backing heap + mi_heap_absorb(heap->tld->heap_backing, heap); + } + else { + // the backing heap abandons its pages + _mi_heap_collect_abandon(heap); + } + mi_assert_internal(heap->page_count==0); + mi_heap_free(heap); +} + +mi_heap_t* mi_heap_set_default(mi_heap_t* heap) { + mi_assert(heap != NULL); + mi_assert(mi_heap_is_initialized(heap)); + if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL; + mi_assert_expensive(mi_heap_is_valid(heap)); + mi_heap_t* old = mi_prim_get_default_heap(); + _mi_heap_set_default_direct(heap); + return old; +} + + + + +/* ----------------------------------------------------------- + Analysis +----------------------------------------------------------- */ + +// static since it is not thread safe to access heaps from other threads. +static mi_heap_t* mi_heap_of_block(const void* p) { + if (p == NULL) return NULL; + mi_segment_t* segment = _mi_ptr_segment(p); + bool valid = (_mi_ptr_cookie(segment) == segment->cookie); + mi_assert_internal(valid); + if mi_unlikely(!valid) return NULL; + return mi_page_heap(_mi_segment_page_of(segment,p)); +} + +bool mi_heap_contains_block(mi_heap_t* heap, const void* p) { + mi_assert(heap != NULL); + if (heap==NULL || !mi_heap_is_initialized(heap)) return false; + return (heap == mi_heap_of_block(p)); +} + + +static bool mi_heap_page_check_owned(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* p, void* vfound) { + MI_UNUSED(heap); + MI_UNUSED(pq); + bool* found = (bool*)vfound; + void* start = mi_page_start(page); + void* end = (uint8_t*)start + (page->capacity * mi_page_block_size(page)); + *found = (p >= start && p < end); + return (!*found); // continue if not found +} + +bool mi_heap_check_owned(mi_heap_t* heap, const void* p) { + mi_assert(heap != NULL); + if (heap==NULL || !mi_heap_is_initialized(heap)) return false; + if (((uintptr_t)p & (MI_INTPTR_SIZE - 1)) != 0) return false; // only aligned pointers + bool found = false; + mi_heap_visit_pages(heap, &mi_heap_page_check_owned, (void*)p, &found); + return found; +} + +bool mi_check_owned(const void* p) { + return mi_heap_check_owned(mi_prim_get_default_heap(), p); +} + +/* ----------------------------------------------------------- + Visit all heap blocks and areas + Todo: enable visiting abandoned pages, and + enable visiting all blocks of all heaps across threads +----------------------------------------------------------- */ + +// Separate struct to keep `mi_page_t` out of the public interface +typedef struct mi_heap_area_ex_s { + mi_heap_area_t area; + mi_page_t* page; +} mi_heap_area_ex_t; + +static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_visit_fun* visitor, void* arg) { + mi_assert(xarea != NULL); + if (xarea==NULL) return true; + const mi_heap_area_t* area = &xarea->area; + mi_page_t* page = xarea->page; + mi_assert(page != NULL); + if (page == NULL) return true; + + _mi_page_free_collect(page,true); + mi_assert_internal(page->local_free == NULL); + if (page->used == 0) return true; + + const size_t bsize = mi_page_block_size(page); + const size_t ubsize = mi_page_usable_block_size(page); // without padding + size_t psize; + uint8_t* pstart = _mi_segment_page_start(_mi_page_segment(page), page, &psize); + + if 
(page->capacity == 1) { + // optimize page with one block + mi_assert_internal(page->used == 1 && page->free == NULL); + return visitor(mi_page_heap(page), area, pstart, ubsize, arg); + } + + // create a bitmap of free blocks. + #define MI_MAX_BLOCKS (MI_SMALL_PAGE_SIZE / sizeof(void*)) + uintptr_t free_map[MI_MAX_BLOCKS / sizeof(uintptr_t)]; + memset(free_map, 0, sizeof(free_map)); + + #if MI_DEBUG>1 + size_t free_count = 0; + #endif + for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) { + #if MI_DEBUG>1 + free_count++; + #endif + mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize)); + size_t offset = (uint8_t*)block - pstart; + mi_assert_internal(offset % bsize == 0); + size_t blockidx = offset / bsize; // Todo: avoid division? + mi_assert_internal( blockidx < MI_MAX_BLOCKS); + size_t bitidx = (blockidx / sizeof(uintptr_t)); + size_t bit = blockidx - (bitidx * sizeof(uintptr_t)); + free_map[bitidx] |= ((uintptr_t)1 << bit); + } + mi_assert_internal(page->capacity == (free_count + page->used)); + + // walk through all blocks skipping the free ones + #if MI_DEBUG>1 + size_t used_count = 0; + #endif + for (size_t i = 0; i < page->capacity; i++) { + size_t bitidx = (i / sizeof(uintptr_t)); + size_t bit = i - (bitidx * sizeof(uintptr_t)); + uintptr_t m = free_map[bitidx]; + if (bit == 0 && m == UINTPTR_MAX) { + i += (sizeof(uintptr_t) - 1); // skip a run of free blocks + } + else if ((m & ((uintptr_t)1 << bit)) == 0) { + #if MI_DEBUG>1 + used_count++; + #endif + uint8_t* block = pstart + (i * bsize); + if (!visitor(mi_page_heap(page), area, block, ubsize, arg)) return false; + } + } + mi_assert_internal(page->used == used_count); + return true; +} + +typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ex_t* area, void* arg); + + +static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) { + MI_UNUSED(heap); + MI_UNUSED(pq); + mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun; + mi_heap_area_ex_t xarea; + const size_t bsize = mi_page_block_size(page); + const size_t ubsize = mi_page_usable_block_size(page); + xarea.page = page; + xarea.area.reserved = page->reserved * bsize; + xarea.area.committed = page->capacity * bsize; + xarea.area.blocks = mi_page_start(page); + xarea.area.used = page->used; // number of blocks in use (#553) + xarea.area.block_size = ubsize; + xarea.area.full_block_size = bsize; + return fun(heap, &xarea, arg); +} + +// Visit all heap pages as areas +static bool mi_heap_visit_areas(const mi_heap_t* heap, mi_heap_area_visit_fun* visitor, void* arg) { + if (visitor == NULL) return false; + return mi_heap_visit_pages((mi_heap_t*)heap, &mi_heap_visit_areas_page, (void*)(visitor), arg); // note: function pointer to void* :-{ +} + +// Just to pass arguments +typedef struct mi_visit_blocks_args_s { + bool visit_blocks; + mi_block_visit_fun* visitor; + void* arg; +} mi_visit_blocks_args_t; + +static bool mi_heap_area_visitor(const mi_heap_t* heap, const mi_heap_area_ex_t* xarea, void* arg) { + mi_visit_blocks_args_t* args = (mi_visit_blocks_args_t*)arg; + if (!args->visitor(heap, &xarea->area, NULL, xarea->area.block_size, args->arg)) return false; + if (args->visit_blocks) { + return mi_heap_area_visit_blocks(xarea, args->visitor, args->arg); + } + else { + return true; + } +} + +// Visit all blocks in a heap +bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg) { + 
mi_visit_blocks_args_t args = { visit_blocks, visitor, arg };
+  return mi_heap_visit_areas(heap, &mi_heap_area_visitor, &args);
+}
diff --git a/ww/managers/mimalloc/src/init.c b/ww/managers/mimalloc/src/init.c
new file mode 100644
index 00000000..6f51ca89
--- /dev/null
+++ b/ww/managers/mimalloc/src/init.c
@@ -0,0 +1,714 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/prim.h"
+
+#include <string.h>  // memcpy, memset
+#include <stdlib.h>  // atexit
+
+
+// Empty page used to initialize the small free pages array
+const mi_page_t _mi_page_empty = {
+  0,
+  false, false, false, false,
+  0,       // capacity
+  0,       // reserved capacity
+  { 0 },   // flags
+  false,   // is_zero
+  0,       // retire_expire
+  NULL,    // free
+  NULL,    // local_free
+  0,       // used
+  0,       // block size shift
+  0,       // heap tag
+  0,       // block_size
+  NULL,    // page_start
+  #if (MI_PADDING || MI_ENCODE_FREELIST)
+  { 0, 0 },
+  #endif
+  MI_ATOMIC_VAR_INIT(0), // xthread_free
+  MI_ATOMIC_VAR_INIT(0), // xheap
+  NULL, NULL
+  , { 0 }  // padding
+};
+
+#define MI_PAGE_EMPTY() ((mi_page_t*)&_mi_page_empty)
+
+#if (MI_SMALL_WSIZE_MAX==128)
+#if (MI_PADDING>0) && (MI_INTPTR_SIZE >= 8)
+#define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
+#elif (MI_PADDING>0)
+#define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY(), MI_PAGE_EMPTY(), MI_PAGE_EMPTY() }
+#else
+#define MI_SMALL_PAGES_EMPTY  { MI_INIT128(MI_PAGE_EMPTY), MI_PAGE_EMPTY() }
+#endif
+#else
+#error "define right initialization sizes corresponding to MI_SMALL_WSIZE_MAX"
+#endif
+
+// Empty page queues for every bin
+#define QNULL(sz)  { NULL, NULL, (sz)*sizeof(uintptr_t) }
+#define MI_PAGE_QUEUES_EMPTY \
+  { QNULL(1), \
+    QNULL( 1), QNULL( 2), QNULL( 3), QNULL( 4), QNULL( 5), QNULL( 6), QNULL( 7), QNULL( 8), /* 8 */ \
+    QNULL( 10), QNULL( 12), QNULL( 14), QNULL( 16), QNULL( 20), QNULL( 24), QNULL( 28), QNULL( 32), /* 16 */ \
+    QNULL( 40), QNULL( 48), QNULL( 56), QNULL( 64), QNULL( 80), QNULL( 96), QNULL( 112), QNULL( 128), /* 24 */ \
+    QNULL( 160), QNULL( 192), QNULL( 224), QNULL( 256), QNULL( 320), QNULL( 384), QNULL( 448), QNULL( 512), /* 32 */ \
+    QNULL( 640), QNULL( 768), QNULL( 896), QNULL( 1024), QNULL( 1280), QNULL( 1536), QNULL( 1792), QNULL( 2048), /* 40 */ \
+    QNULL( 2560), QNULL( 3072), QNULL( 3584), QNULL( 4096), QNULL( 5120), QNULL( 6144), QNULL( 7168), QNULL( 8192), /* 48 */ \
+    QNULL( 10240), QNULL( 12288), QNULL( 14336), QNULL( 16384), QNULL( 20480), QNULL( 24576), QNULL( 28672), QNULL( 32768), /* 56 */ \
+    QNULL( 40960), QNULL( 49152), QNULL( 57344), QNULL( 65536), QNULL( 81920), QNULL( 98304), QNULL(114688), QNULL(131072), /* 64 */ \
+    QNULL(163840), QNULL(196608), QNULL(229376), QNULL(262144), QNULL(327680), QNULL(393216), QNULL(458752), QNULL(524288), /* 72 */ \
+    QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 1 /* 655360, Huge queue */), \
+    QNULL(MI_MEDIUM_OBJ_WSIZE_MAX + 2) /* Full queue */ }
+
+#define MI_STAT_COUNT_NULL()  {0,0,0,0}
+
+// Empty statistics
+#if MI_STAT>1
+#define MI_STAT_COUNT_END_NULL()  , { MI_STAT_COUNT_NULL(), MI_INIT32(MI_STAT_COUNT_NULL) }
+#else
+#define MI_STAT_COUNT_END_NULL()
+#endif
+
+#define
MI_STATS_NULL \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), MI_STAT_COUNT_NULL(), \ + MI_STAT_COUNT_NULL(), \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 } \ + MI_STAT_COUNT_END_NULL() + + +// Empty slice span queues for every bin +#define SQNULL(sz) { NULL, NULL, sz } +#define MI_SEGMENT_SPAN_QUEUES_EMPTY \ + { SQNULL(1), \ + SQNULL( 1), SQNULL( 2), SQNULL( 3), SQNULL( 4), SQNULL( 5), SQNULL( 6), SQNULL( 7), SQNULL( 10), /* 8 */ \ + SQNULL( 12), SQNULL( 14), SQNULL( 16), SQNULL( 20), SQNULL( 24), SQNULL( 28), SQNULL( 32), SQNULL( 40), /* 16 */ \ + SQNULL( 48), SQNULL( 56), SQNULL( 64), SQNULL( 80), SQNULL( 96), SQNULL( 112), SQNULL( 128), SQNULL( 160), /* 24 */ \ + SQNULL( 192), SQNULL( 224), SQNULL( 256), SQNULL( 320), SQNULL( 384), SQNULL( 448), SQNULL( 512), SQNULL( 640), /* 32 */ \ + SQNULL( 768), SQNULL( 896), SQNULL( 1024) /* 35 */ } + + +// -------------------------------------------------------- +// Statically allocate an empty heap as the initial +// thread local value for the default heap, +// and statically allocate the backing heap for the main +// thread so it can function without doing any allocation +// itself (as accessing a thread local for the first time +// may lead to allocation itself on some platforms) +// -------------------------------------------------------- + +mi_decl_cache_align const mi_heap_t _mi_heap_empty = { + NULL, + MI_ATOMIC_VAR_INIT(NULL), + 0, // tid + 0, // cookie + 0, // arena id + { 0, 0 }, // keys + { {0}, {0}, 0, true }, // random + 0, // page count + MI_BIN_FULL, 0, // page retired min/max + NULL, // next + false, // can reclaim + 0, // tag + MI_SMALL_PAGES_EMPTY, + MI_PAGE_QUEUES_EMPTY +}; + +#define tld_empty_stats ((mi_stats_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,stats))) +#define tld_empty_os ((mi_os_tld_t*)((uint8_t*)&tld_empty + offsetof(mi_tld_t,os))) + +mi_decl_cache_align static const mi_tld_t tld_empty = { + 0, + false, + NULL, NULL, + { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, tld_empty_stats, tld_empty_os }, // segments + { 0, tld_empty_stats }, // os + { MI_STATS_NULL } // stats +}; + +mi_threadid_t _mi_thread_id(void) mi_attr_noexcept { + return _mi_prim_thread_id(); +} + +// the thread-local default heap for allocation +mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty; + +extern mi_heap_t _mi_heap_main; + +static mi_tld_t tld_main = { + 0, false, + &_mi_heap_main, & _mi_heap_main, + { MI_SEGMENT_SPAN_QUEUES_EMPTY, 0, 0, 0, 0, 0, &tld_main.stats, &tld_main.os }, // segments + { 0, &tld_main.stats }, // os + { MI_STATS_NULL } // stats +}; + +mi_heap_t _mi_heap_main = { + &tld_main, + MI_ATOMIC_VAR_INIT(NULL), + 0, // thread id + 0, // initial cookie + 0, // arena id + { 0, 0 }, // the key of the main heap can be fixed (unlike page keys that need to be secure!) + { {0x846ca68b}, {0}, 0, true }, // random + 0, // page count + MI_BIN_FULL, 0, // page retired min/max + NULL, // next heap + false, // can reclaim + 0, // tag + MI_SMALL_PAGES_EMPTY, + MI_PAGE_QUEUES_EMPTY +}; + +bool _mi_process_is_initialized = false; // set to `true` in `mi_process_init`. 
+ +mi_stats_t _mi_stats_main = { MI_STATS_NULL }; + + +static void mi_heap_main_init(void) { + if (_mi_heap_main.cookie == 0) { + _mi_heap_main.thread_id = _mi_thread_id(); + _mi_heap_main.cookie = 1; + #if defined(_WIN32) && !defined(MI_SHARED_LIB) + _mi_random_init_weak(&_mi_heap_main.random); // prevent allocation failure during bcrypt dll initialization with static linking + #else + _mi_random_init(&_mi_heap_main.random); + #endif + _mi_heap_main.cookie = _mi_heap_random_next(&_mi_heap_main); + _mi_heap_main.keys[0] = _mi_heap_random_next(&_mi_heap_main); + _mi_heap_main.keys[1] = _mi_heap_random_next(&_mi_heap_main); + } +} + +mi_heap_t* _mi_heap_main_get(void) { + mi_heap_main_init(); + return &_mi_heap_main; +} + + +/* ----------------------------------------------------------- + Initialization and freeing of the thread local heaps +----------------------------------------------------------- */ + +// note: in x64 in release build `sizeof(mi_thread_data_t)` is under 4KiB (= OS page size). +typedef struct mi_thread_data_s { + mi_heap_t heap; // must come first due to cast in `_mi_heap_done` + mi_tld_t tld; + mi_memid_t memid; // must come last due to zero'ing +} mi_thread_data_t; + + +// Thread meta-data is allocated directly from the OS. For +// some programs that do not use thread pools and allocate and +// destroy many OS threads, this may causes too much overhead +// per thread so we maintain a small cache of recently freed metadata. + +#define TD_CACHE_SIZE (16) +static _Atomic(mi_thread_data_t*) td_cache[TD_CACHE_SIZE]; + +static mi_thread_data_t* mi_thread_data_zalloc(void) { + // try to find thread metadata in the cache + bool is_zero = false; + mi_thread_data_t* td = NULL; + for (int i = 0; i < TD_CACHE_SIZE; i++) { + td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]); + if (td != NULL) { + // found cached allocation, try use it + td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL); + if (td != NULL) { + break; + } + } + } + + // if that fails, allocate as meta data + if (td == NULL) { + mi_memid_t memid; + td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main); + if (td == NULL) { + // if this fails, try once more. 
(issue #257) + td = (mi_thread_data_t*)_mi_os_alloc(sizeof(mi_thread_data_t), &memid, &_mi_stats_main); + if (td == NULL) { + // really out of memory + _mi_error_message(ENOMEM, "unable to allocate thread local heap metadata (%zu bytes)\n", sizeof(mi_thread_data_t)); + } + } + if (td != NULL) { + td->memid = memid; + is_zero = memid.initially_zero; + } + } + + if (td != NULL && !is_zero) { + _mi_memzero_aligned(td, offsetof(mi_thread_data_t,memid)); + } + return td; +} + +static void mi_thread_data_free( mi_thread_data_t* tdfree ) { + // try to add the thread metadata to the cache + for (int i = 0; i < TD_CACHE_SIZE; i++) { + mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]); + if (td == NULL) { + mi_thread_data_t* expected = NULL; + if (mi_atomic_cas_ptr_weak_acq_rel(mi_thread_data_t, &td_cache[i], &expected, tdfree)) { + return; + } + } + } + // if that fails, just free it directly + _mi_os_free(tdfree, sizeof(mi_thread_data_t), tdfree->memid, &_mi_stats_main); +} + +void _mi_thread_data_collect(void) { + // free all thread metadata from the cache + for (int i = 0; i < TD_CACHE_SIZE; i++) { + mi_thread_data_t* td = mi_atomic_load_ptr_relaxed(mi_thread_data_t, &td_cache[i]); + if (td != NULL) { + td = mi_atomic_exchange_ptr_acq_rel(mi_thread_data_t, &td_cache[i], NULL); + if (td != NULL) { + _mi_os_free(td, sizeof(mi_thread_data_t), td->memid, &_mi_stats_main); + } + } + } +} + +// Initialize the thread local default heap, called from `mi_thread_init` +static bool _mi_thread_heap_init(void) { + if (mi_heap_is_initialized(mi_prim_get_default_heap())) return true; + if (_mi_is_main_thread()) { + // mi_assert_internal(_mi_heap_main.thread_id != 0); // can happen on freeBSD where alloc is called before any initialization + // the main heap is statically allocated + mi_heap_main_init(); + _mi_heap_set_default_direct(&_mi_heap_main); + //mi_assert_internal(_mi_heap_default->tld->heap_backing == mi_prim_get_default_heap()); + } + else { + // use `_mi_os_alloc` to allocate directly from the OS + mi_thread_data_t* td = mi_thread_data_zalloc(); + if (td == NULL) return false; + + mi_tld_t* tld = &td->tld; + mi_heap_t* heap = &td->heap; + _mi_tld_init(tld, heap); // must be before `_mi_heap_init` + _mi_heap_init(heap, tld, _mi_arena_id_none(), false /* can reclaim */, 0 /* default tag */); + _mi_heap_set_default_direct(heap); + } + return false; +} + +// initialize thread local data +void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) { + _mi_memcpy_aligned(tld, &tld_empty, sizeof(mi_tld_t)); + tld->heap_backing = bheap; + tld->heaps = NULL; + tld->segments.stats = &tld->stats; + tld->segments.os = &tld->os; + tld->os.stats = &tld->stats; +} + +// Free the thread local default heap (called from `mi_thread_done`) +static bool _mi_thread_heap_done(mi_heap_t* heap) { + if (!mi_heap_is_initialized(heap)) return true; + + // reset default heap + _mi_heap_set_default_direct(_mi_is_main_thread() ? 
&_mi_heap_main : (mi_heap_t*)&_mi_heap_empty); + + // switch to backing heap + heap = heap->tld->heap_backing; + if (!mi_heap_is_initialized(heap)) return false; + + // delete all non-backing heaps in this thread + mi_heap_t* curr = heap->tld->heaps; + while (curr != NULL) { + mi_heap_t* next = curr->next; // save `next` as `curr` will be freed + if (curr != heap) { + mi_assert_internal(!mi_heap_is_backing(curr)); + mi_heap_delete(curr); + } + curr = next; + } + mi_assert_internal(heap->tld->heaps == heap && heap->next == NULL); + mi_assert_internal(mi_heap_is_backing(heap)); + + // collect if not the main thread + if (heap != &_mi_heap_main) { + _mi_heap_collect_abandon(heap); + } + + // merge stats + _mi_stats_done(&heap->tld->stats); + + // free if not the main thread + if (heap != &_mi_heap_main) { + // the following assertion does not always hold for huge segments as those are always treated + // as abondened: one may allocate it in one thread, but deallocate in another in which case + // the count can be too large or negative. todo: perhaps not count huge segments? see issue #363 + // mi_assert_internal(heap->tld->segments.count == 0 || heap->thread_id != _mi_thread_id()); + mi_thread_data_free((mi_thread_data_t*)heap); + } + else { + #if 0 + // never free the main thread even in debug mode; if a dll is linked statically with mimalloc, + // there may still be delete/free calls after the mi_fls_done is called. Issue #207 + _mi_heap_destroy_pages(heap); + mi_assert_internal(heap->tld->heap_backing == &_mi_heap_main); + #endif + } + return false; +} + + + +// -------------------------------------------------------- +// Try to run `mi_thread_done()` automatically so any memory +// owned by the thread but not yet released can be abandoned +// and re-owned by another thread. +// +// 1. windows dynamic library: +// call from DllMain on DLL_THREAD_DETACH +// 2. windows static library: +// use `FlsAlloc` to call a destructor when the thread is done +// 3. unix, pthreads: +// use a pthread key to call a destructor when a pthread is done +// +// In the last two cases we also need to call `mi_process_init` +// to set up the thread local keys. 
+// -------------------------------------------------------- + +// Set up handlers so `mi_thread_done` is called automatically +static void mi_process_setup_auto_thread_done(void) { + static bool tls_initialized = false; // fine if it races + if (tls_initialized) return; + tls_initialized = true; + _mi_prim_thread_init_auto_done(); + _mi_heap_set_default_direct(&_mi_heap_main); +} + + +bool _mi_is_main_thread(void) { + return (_mi_heap_main.thread_id==0 || _mi_heap_main.thread_id == _mi_thread_id()); +} + +static _Atomic(size_t) thread_count = MI_ATOMIC_VAR_INIT(1); + +size_t _mi_current_thread_count(void) { + return mi_atomic_load_relaxed(&thread_count); +} + +// This is called from the `mi_malloc_generic` +void mi_thread_init(void) mi_attr_noexcept +{ + // ensure our process has started already + mi_process_init(); + + // initialize the thread local default heap + // (this will call `_mi_heap_set_default_direct` and thus set the + // fiber/pthread key to a non-zero value, ensuring `_mi_thread_done` is called) + if (_mi_thread_heap_init()) return; // returns true if already initialized + + _mi_stat_increase(&_mi_stats_main.threads, 1); + mi_atomic_increment_relaxed(&thread_count); + //_mi_verbose_message("thread init: 0x%zx\n", _mi_thread_id()); +} + +void mi_thread_done(void) mi_attr_noexcept { + _mi_thread_done(NULL); +} + +void _mi_thread_done(mi_heap_t* heap) +{ + // calling with NULL implies using the default heap + if (heap == NULL) { + heap = mi_prim_get_default_heap(); + if (heap == NULL) return; + } + + // prevent re-entrancy through heap_done/heap_set_default_direct (issue #699) + if (!mi_heap_is_initialized(heap)) { + return; + } + + // adjust stats + mi_atomic_decrement_relaxed(&thread_count); + _mi_stat_decrease(&_mi_stats_main.threads, 1); + + // check thread-id as on Windows shutdown with FLS the main (exit) thread may call this on thread-local heaps... + if (heap->thread_id != _mi_thread_id()) return; + + // abandon the thread local heap + if (_mi_thread_heap_done(heap)) return; // returns true if already ran +} + +void _mi_heap_set_default_direct(mi_heap_t* heap) { + mi_assert_internal(heap != NULL); + #if defined(MI_TLS_SLOT) + mi_prim_tls_slot_set(MI_TLS_SLOT,heap); + #elif defined(MI_TLS_PTHREAD_SLOT_OFS) + *mi_prim_tls_pthread_heap_slot() = heap; + #elif defined(MI_TLS_PTHREAD) + // we use _mi_heap_default_key + #else + _mi_heap_default = heap; + #endif + + // ensure the default heap is passed to `_mi_thread_done` + // setting to a non-NULL value also ensures `mi_thread_done` is called. + _mi_prim_thread_associate_default_heap(heap); +} + + +// -------------------------------------------------------- +// Run functions on process init/done, and thread init/done +// -------------------------------------------------------- +static void mi_cdecl mi_process_done(void); + +static bool os_preloading = true; // true until this module is initialized +static bool mi_redirected = false; // true if malloc redirects to mi_malloc + +// Returns true if this module has not been initialized; Don't use C runtime routines until it returns false. 
+bool mi_decl_noinline _mi_preloading(void) { + return os_preloading; +} + +mi_decl_nodiscard bool mi_is_redirected(void) mi_attr_noexcept { + return mi_redirected; +} + +// Communicate with the redirection module on Windows +#if defined(_WIN32) && defined(MI_SHARED_LIB) && !defined(MI_WIN_NOREDIRECT) +#ifdef __cplusplus +extern "C" { +#endif +mi_decl_export void _mi_redirect_entry(DWORD reason) { + // called on redirection; careful as this may be called before DllMain + if (reason == DLL_PROCESS_ATTACH) { + mi_redirected = true; + } + else if (reason == DLL_PROCESS_DETACH) { + mi_redirected = false; + } + else if (reason == DLL_THREAD_DETACH) { + mi_thread_done(); + } +} +__declspec(dllimport) bool mi_cdecl mi_allocator_init(const char** message); +__declspec(dllimport) void mi_cdecl mi_allocator_done(void); +#ifdef __cplusplus +} +#endif +#else +static bool mi_allocator_init(const char** message) { + if (message != NULL) *message = NULL; + return true; +} +static void mi_allocator_done(void) { + // nothing to do +} +#endif + +// Called once by the process loader +static void mi_process_load(void) { + mi_heap_main_init(); + #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD) + volatile mi_heap_t* dummy = _mi_heap_default; // access TLS to allocate it before setting tls_initialized to true; + if (dummy == NULL) return; // use dummy or otherwise the access may get optimized away (issue #697) + #endif + os_preloading = false; + mi_assert_internal(_mi_is_main_thread()); + #if !(defined(_WIN32) && defined(MI_SHARED_LIB)) // use Dll process detach (see below) instead of atexit (issue #521) + atexit(&mi_process_done); + #endif + _mi_options_init(); + mi_process_setup_auto_thread_done(); + mi_process_init(); + if (mi_redirected) _mi_verbose_message("malloc is redirected.\n"); + + // show message from the redirector (if present) + const char* msg = NULL; + mi_allocator_init(&msg); + if (msg != NULL && (mi_option_is_enabled(mi_option_verbose) || mi_option_is_enabled(mi_option_show_errors))) { + _mi_fputs(NULL,NULL,NULL,msg); + } + + // reseed random + _mi_random_reinit_if_weak(&_mi_heap_main.random); +} + +#if defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64)) +#include +mi_decl_cache_align bool _mi_cpu_has_fsrm = false; + +static void mi_detect_cpu_features(void) { + // FSRM for fast rep movsb support (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017)) + int32_t cpu_info[4]; + __cpuid(cpu_info, 7); + _mi_cpu_has_fsrm = ((cpu_info[3] & (1 << 4)) != 0); // bit 4 of EDX : see +} +#else +static void mi_detect_cpu_features(void) { + // nothing +} +#endif + +// Initialize the process; called by thread_init or the process loader +void mi_process_init(void) mi_attr_noexcept { + // ensure we are called once + static mi_atomic_once_t process_init; + #if _MSC_VER < 1920 + mi_heap_main_init(); // vs2017 can dynamically re-initialize _mi_heap_main + #endif + if (!mi_atomic_once(&process_init)) return; + _mi_process_is_initialized = true; + _mi_verbose_message("process init: 0x%zx\n", _mi_thread_id()); + mi_process_setup_auto_thread_done(); + + mi_detect_cpu_features(); + _mi_os_init(); + mi_heap_main_init(); + #if MI_DEBUG + _mi_verbose_message("debug level : %d\n", MI_DEBUG); + #endif + _mi_verbose_message("secure level: %d\n", MI_SECURE); + _mi_verbose_message("mem tracking: %s\n", MI_TRACK_TOOL); + #if MI_TSAN + _mi_verbose_message("thread santizer enabled\n"); + #endif + mi_thread_init(); + + #if defined(_WIN32) + // On windows, when building as a static lib the FLS cleanup happens to early for the 
main thread. + // To avoid this, set the FLS value for the main thread to NULL so the fls cleanup + // will not call _mi_thread_done on the (still executing) main thread. See issue #508. + _mi_prim_thread_associate_default_heap(NULL); + #endif + + mi_stats_reset(); // only call stat reset *after* thread init (or the heap tld == NULL) + mi_track_init(); + + if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) { + size_t pages = mi_option_get_clamp(mi_option_reserve_huge_os_pages, 0, 128*1024); + long reserve_at = mi_option_get(mi_option_reserve_huge_os_pages_at); + if (reserve_at != -1) { + mi_reserve_huge_os_pages_at(pages, reserve_at, pages*500); + } else { + mi_reserve_huge_os_pages_interleave(pages, 0, pages*500); + } + } + if (mi_option_is_enabled(mi_option_reserve_os_memory)) { + long ksize = mi_option_get(mi_option_reserve_os_memory); + if (ksize > 0) { + mi_reserve_os_memory((size_t)ksize*MI_KiB, true /* commit? */, true /* allow large pages? */); + } + } +} + +// Called when the process is done (through `at_exit`) +static void mi_cdecl mi_process_done(void) { + // only shutdown if we were initialized + if (!_mi_process_is_initialized) return; + // ensure we are called once + static bool process_done = false; + if (process_done) return; + process_done = true; + + // release any thread specific resources and ensure _mi_thread_done is called on all but the main thread + _mi_prim_thread_done_auto_done(); + + #ifndef MI_SKIP_COLLECT_ON_EXIT + #if (MI_DEBUG || !defined(MI_SHARED_LIB)) + // free all memory if possible on process exit. This is not needed for a stand-alone process + // but should be done if mimalloc is statically linked into another shared library which + // is repeatedly loaded/unloaded, see issue #281. + mi_collect(true /* force */ ); + #endif + #endif + + // Forcefully release all retained memory; this can be dangerous in general if overriding regular malloc/free + // since after process_done there might still be other code running that calls `free` (like at_exit routines, + // or C-runtime termination code. + if (mi_option_is_enabled(mi_option_destroy_on_exit)) { + mi_collect(true /* force */); + _mi_heap_unsafe_destroy_all(); // forcefully release all memory held by all heaps (of this thread only!) 
+ _mi_arena_unsafe_destroy_all(& _mi_heap_main_get()->tld->stats); + } + + if (mi_option_is_enabled(mi_option_show_stats) || mi_option_is_enabled(mi_option_verbose)) { + mi_stats_print(NULL); + } + mi_allocator_done(); + _mi_verbose_message("process done: 0x%zx\n", _mi_heap_main.thread_id); + os_preloading = true; // don't call the C runtime anymore +} + + + +#if defined(_WIN32) && defined(MI_SHARED_LIB) + // Windows DLL: easy to hook into process_init and thread_done + __declspec(dllexport) BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved) { + MI_UNUSED(reserved); + MI_UNUSED(inst); + if (reason==DLL_PROCESS_ATTACH) { + mi_process_load(); + } + else if (reason==DLL_PROCESS_DETACH) { + mi_process_done(); + } + else if (reason==DLL_THREAD_DETACH) { + if (!mi_is_redirected()) { + mi_thread_done(); + } + } + return TRUE; + } + +#elif defined(_MSC_VER) + // MSVC: use data section magic for static libraries + // See + static int _mi_process_init(void) { + mi_process_load(); + return 0; + } + typedef int(*_mi_crt_callback_t)(void); + #if defined(_M_X64) || defined(_M_ARM64) + __pragma(comment(linker, "/include:" "_mi_msvc_initu")) + #pragma section(".CRT$XIU", long, read) + #else + __pragma(comment(linker, "/include:" "__mi_msvc_initu")) + #endif + #pragma data_seg(".CRT$XIU") + mi_decl_externc _mi_crt_callback_t _mi_msvc_initu[] = { &_mi_process_init }; + #pragma data_seg() + +#elif defined(__cplusplus) + // C++: use static initialization to detect process start + static bool _mi_process_init(void) { + mi_process_load(); + return (_mi_heap_main.thread_id != 0); + } + static bool mi_initialized = _mi_process_init(); + +#elif defined(__GNUC__) || defined(__clang__) + // GCC,Clang: use the constructor attribute + static void __attribute__((constructor)) _mi_process_init(void) { + mi_process_load(); + } + +#else +#pragma message("define a way to call mi_process_load on your platform") +#endif diff --git a/ww/managers/mimalloc/src/libc.c b/ww/managers/mimalloc/src/libc.c new file mode 100644 index 00000000..dd6b4007 --- /dev/null +++ b/ww/managers/mimalloc/src/libc.c @@ -0,0 +1,273 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// -------------------------------------------------------- +// This module defines various std libc functions to reduce +// the dependency on libc, and also prevent errors caused +// by some libc implementations when called before `main` +// executes (due to malloc redirection) +// -------------------------------------------------------- + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/prim.h" // mi_prim_getenv + +char _mi_toupper(char c) { + if (c >= 'a' && c <= 'z') return (c - 'a' + 'A'); + else return c; +} + +int _mi_strnicmp(const char* s, const char* t, size_t n) { + if (n == 0) return 0; + for (; *s != 0 && *t != 0 && n > 0; s++, t++, n--) { + if (_mi_toupper(*s) != _mi_toupper(*t)) break; + } + return (n == 0 ? 
0 : *s - *t); +} + +void _mi_strlcpy(char* dest, const char* src, size_t dest_size) { + if (dest==NULL || src==NULL || dest_size == 0) return; + // copy until end of src, or when dest is (almost) full + while (*src != 0 && dest_size > 1) { + *dest++ = *src++; + dest_size--; + } + // always zero terminate + *dest = 0; +} + +void _mi_strlcat(char* dest, const char* src, size_t dest_size) { + if (dest==NULL || src==NULL || dest_size == 0) return; + // find end of string in the dest buffer + while (*dest != 0 && dest_size > 1) { + dest++; + dest_size--; + } + // and catenate + _mi_strlcpy(dest, src, dest_size); +} + +size_t _mi_strlen(const char* s) { + if (s==NULL) return 0; + size_t len = 0; + while(s[len] != 0) { len++; } + return len; +} + +size_t _mi_strnlen(const char* s, size_t max_len) { + if (s==NULL) return 0; + size_t len = 0; + while(s[len] != 0 && len < max_len) { len++; } + return len; +} + +#ifdef MI_NO_GETENV +bool _mi_getenv(const char* name, char* result, size_t result_size) { + MI_UNUSED(name); + MI_UNUSED(result); + MI_UNUSED(result_size); + return false; +} +#else +bool _mi_getenv(const char* name, char* result, size_t result_size) { + if (name==NULL || result == NULL || result_size < 64) return false; + return _mi_prim_getenv(name,result,result_size); +} +#endif + +// -------------------------------------------------------- +// Define our own limited `_mi_vsnprintf` and `_mi_snprintf` +// This is mostly to avoid calling these when libc is not yet +// initialized (and to reduce dependencies) +// +// format: d i, p x u, s +// prec: z l ll L +// width: 10 +// align-left: - +// fill: 0 +// plus: + +// -------------------------------------------------------- + +static void mi_outc(char c, char** out, char* end) { + char* p = *out; + if (p >= end) return; + *p = c; + *out = p + 1; +} + +static void mi_outs(const char* s, char** out, char* end) { + if (s == NULL) return; + char* p = *out; + while (*s != 0 && p < end) { + *p++ = *s++; + } + *out = p; +} + +static void mi_out_fill(char fill, size_t len, char** out, char* end) { + char* p = *out; + for (size_t i = 0; i < len && p < end; i++) { + *p++ = fill; + } + *out = p; +} + +static void mi_out_alignright(char fill, char* start, size_t len, size_t extra, char* end) { + if (len == 0 || extra == 0) return; + if (start + len + extra >= end) return; + // move `len` characters to the right (in reverse since it can overlap) + for (size_t i = 1; i <= len; i++) { + start[len + extra - i] = start[len - i]; + } + // and fill the start + for (size_t i = 0; i < extra; i++) { + start[i] = fill; + } +} + + +static void mi_out_num(uintptr_t x, size_t base, char prefix, char** out, char* end) +{ + if (x == 0 || base == 0 || base > 16) { + if (prefix != 0) { mi_outc(prefix, out, end); } + mi_outc('0',out,end); + } + else { + // output digits in reverse + char* start = *out; + while (x > 0) { + char digit = (char)(x % base); + mi_outc((digit <= 9 ? 
'0' + digit : 'A' + digit - 10),out,end); + x = x / base; + } + if (prefix != 0) { + mi_outc(prefix, out, end); + } + size_t len = *out - start; + // and reverse in-place + for (size_t i = 0; i < (len / 2); i++) { + char c = start[len - i - 1]; + start[len - i - 1] = start[i]; + start[i] = c; + } + } +} + + +#define MI_NEXTC() c = *in; if (c==0) break; in++; + +void _mi_vsnprintf(char* buf, size_t bufsize, const char* fmt, va_list args) { + if (buf == NULL || bufsize == 0 || fmt == NULL) return; + buf[bufsize - 1] = 0; + char* const end = buf + (bufsize - 1); + const char* in = fmt; + char* out = buf; + while (true) { + if (out >= end) break; + char c; + MI_NEXTC(); + if (c != '%') { + if ((c >= ' ' && c <= '~') || c=='\n' || c=='\r' || c=='\t') { // output visible ascii or standard control only + mi_outc(c, &out, end); + } + } + else { + MI_NEXTC(); + char fill = ' '; + size_t width = 0; + char numtype = 'd'; + char numplus = 0; + bool alignright = true; + if (c == '+' || c == ' ') { numplus = c; MI_NEXTC(); } + if (c == '-') { alignright = false; MI_NEXTC(); } + if (c == '0') { fill = '0'; MI_NEXTC(); } + if (c >= '1' && c <= '9') { + width = (c - '0'); MI_NEXTC(); + while (c >= '0' && c <= '9') { + width = (10 * width) + (c - '0'); MI_NEXTC(); + } + if (c == 0) break; // extra check due to while + } + if (c == 'z' || c == 't' || c == 'L') { numtype = c; MI_NEXTC(); } + else if (c == 'l') { + numtype = c; MI_NEXTC(); + if (c == 'l') { numtype = 'L'; MI_NEXTC(); } + } + + char* start = out; + if (c == 's') { + // string + const char* s = va_arg(args, const char*); + mi_outs(s, &out, end); + } + else if (c == 'p' || c == 'x' || c == 'u') { + // unsigned + uintptr_t x = 0; + if (c == 'x' || c == 'u') { + if (numtype == 'z') x = va_arg(args, size_t); + else if (numtype == 't') x = va_arg(args, uintptr_t); // unsigned ptrdiff_t + else if (numtype == 'L') x = (uintptr_t)va_arg(args, unsigned long long); + else x = va_arg(args, unsigned long); + } + else if (c == 'p') { + x = va_arg(args, uintptr_t); + mi_outs("0x", &out, end); + start = out; + width = (width >= 2 ? width - 2 : 0); + } + if (width == 0 && (c == 'x' || c == 'p')) { + if (c == 'p') { width = 2 * (x <= UINT32_MAX ? 4 : ((x >> 16) <= UINT32_MAX ? 6 : sizeof(void*))); } + if (width == 0) { width = 2; } + fill = '0'; + } + mi_out_num(x, (c == 'x' || c == 'p' ? 16 : 10), numplus, &out, end); + } + else if (c == 'i' || c == 'd') { + // signed + intptr_t x = 0; + if (numtype == 'z') x = va_arg(args, intptr_t ); + else if (numtype == 't') x = va_arg(args, ptrdiff_t); + else if (numtype == 'L') x = (intptr_t)va_arg(args, long long); + else x = va_arg(args, long); + char pre = 0; + if (x < 0) { + pre = '-'; + if (x > INTPTR_MIN) { x = -x; } + } + else if (numplus != 0) { + pre = numplus; + } + mi_out_num((uintptr_t)x, 10, pre, &out, end); + } + else if (c >= ' ' && c <= '~') { + // unknown format + mi_outc('%', &out, end); + mi_outc(c, &out, end); + } + + // fill & align + mi_assert_internal(out <= end); + mi_assert_internal(out >= start); + const size_t len = out - start; + if (len < width) { + mi_out_fill(fill, width - len, &out, end); + if (alignright && out <= end) { + mi_out_alignright(fill, start, len, width - len, end); + } + } + } + } + mi_assert_internal(out <= end); + *out = 0; +} + +void _mi_snprintf(char* buf, size_t buflen, const char* fmt, ...) 
{ + va_list args; + va_start(args, fmt); + _mi_vsnprintf(buf, buflen, fmt, args); + va_end(args); +} diff --git a/ww/managers/mimalloc/src/options.c b/ww/managers/mimalloc/src/options.c new file mode 100644 index 00000000..a62727dd --- /dev/null +++ b/ww/managers/mimalloc/src/options.c @@ -0,0 +1,526 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2021, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" // mi_prim_out_stderr + +#include // stdin/stdout +#include // abort + + + +static long mi_max_error_count = 16; // stop outputting errors after this (use < 0 for no limit) +static long mi_max_warning_count = 16; // stop outputting warnings after this (use < 0 for no limit) + +static void mi_add_stderr_output(void); + +int mi_version(void) mi_attr_noexcept { + return MI_MALLOC_VERSION; +} + + +// -------------------------------------------------------- +// Options +// These can be accessed by multiple threads and may be +// concurrently initialized, but an initializing data race +// is ok since they resolve to the same value. +// -------------------------------------------------------- +typedef enum mi_init_e { + UNINIT, // not yet initialized + DEFAULTED, // not found in the environment, use default value + INITIALIZED // found in environment or set explicitly +} mi_init_t; + +typedef struct mi_option_desc_s { + long value; // the value + mi_init_t init; // is it initialized yet? (from the environment) + mi_option_t option; // for debugging: the option index should match the option + const char* name; // option name without `mimalloc_` prefix + const char* legacy_name; // potential legacy option name +} mi_option_desc_t; + +#define MI_OPTION(opt) mi_option_##opt, #opt, NULL +#define MI_OPTION_LEGACY(opt,legacy) mi_option_##opt, #opt, #legacy + +static mi_option_desc_t options[_mi_option_last] = +{ + // stable options + #if MI_DEBUG || defined(MI_SHOW_ERRORS) + { 1, UNINIT, MI_OPTION(show_errors) }, + #else + { 0, UNINIT, MI_OPTION(show_errors) }, + #endif + { 0, UNINIT, MI_OPTION(show_stats) }, + { 0, UNINIT, MI_OPTION(verbose) }, + + // the following options are experimental and not all combinations make sense. + { 1, UNINIT, MI_OPTION(eager_commit) }, // commit per segment directly (4MiB) (but see also `eager_commit_delay`) + { 2, UNINIT, MI_OPTION_LEGACY(arena_eager_commit,eager_region_commit) }, // eager commit arena's? 2 is used to enable this only on an OS that has overcommit (i.e. 
linux) + { 1, UNINIT, MI_OPTION_LEGACY(purge_decommits,reset_decommits) }, // purge decommits memory (instead of reset) (note: on linux this uses MADV_DONTNEED for decommit) + { 0, UNINIT, MI_OPTION_LEGACY(allow_large_os_pages,large_os_pages) }, // use large OS pages, use only with eager commit to prevent fragmentation of VMA's + { 0, UNINIT, MI_OPTION(reserve_huge_os_pages) }, // per 1GiB huge pages + {-1, UNINIT, MI_OPTION(reserve_huge_os_pages_at) }, // reserve huge pages at node N + { 0, UNINIT, MI_OPTION(reserve_os_memory) }, // reserve N KiB OS memory in advance (use `option_get_size`) + { 0, UNINIT, MI_OPTION(deprecated_segment_cache) }, // cache N segments per thread + { 0, UNINIT, MI_OPTION(deprecated_page_reset) }, // reset page memory on free + { 0, UNINIT, MI_OPTION_LEGACY(abandoned_page_purge,abandoned_page_reset) }, // reset free page memory when a thread terminates + { 0, UNINIT, MI_OPTION(deprecated_segment_reset) }, // reset segment memory on free (needs eager commit) +#if defined(__NetBSD__) + { 0, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed +#else + { 1, UNINIT, MI_OPTION(eager_commit_delay) }, // the first N segments per thread are not eagerly committed (but per page in the segment on demand) +#endif + { 10, UNINIT, MI_OPTION_LEGACY(purge_delay,reset_delay) }, // purge delay in milli-seconds + { 0, UNINIT, MI_OPTION(use_numa_nodes) }, // 0 = use available numa nodes, otherwise use at most N nodes. + { 0, UNINIT, MI_OPTION_LEGACY(disallow_os_alloc,limit_os_alloc) }, // 1 = do not use OS memory for allocation (but only reserved arenas) + { 100, UNINIT, MI_OPTION(os_tag) }, // only apple specific for now but might serve more or less related purpose + { 32, UNINIT, MI_OPTION(max_errors) }, // maximum errors that are output + { 32, UNINIT, MI_OPTION(max_warnings) }, // maximum warnings that are output + { 10, UNINIT, MI_OPTION(max_segment_reclaim)}, // max. percentage of the abandoned segments to be reclaimed per try. + { 0, UNINIT, MI_OPTION(destroy_on_exit)}, // release all OS memory on process exit; careful with dangling pointer or after-exit frees! + #if (MI_INTPTR_SIZE>4) + { 1024L*1024L, UNINIT, MI_OPTION(arena_reserve) }, // reserve memory N KiB at a time (=1GiB) (use `option_get_size`) + #else + { 128L*1024L, UNINIT, MI_OPTION(arena_reserve) }, // =128MiB on 32-bit + #endif + { 10, UNINIT, MI_OPTION(arena_purge_mult) }, // purge delay multiplier for arena's + { 1, UNINIT, MI_OPTION_LEGACY(purge_extend_delay, decommit_extend_delay) }, + { 1, UNINIT, MI_OPTION(abandoned_reclaim_on_free) },// reclaim an abandoned segment on a free + { 0, UNINIT, MI_OPTION(disallow_arena_alloc) }, // 1 = do not use arena's for allocation (except if using specific arena id's) + { 400, UNINIT, MI_OPTION(retry_on_oom) }, // windows only: retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. +}; + +static void mi_option_init(mi_option_desc_t* desc); + +static bool mi_option_has_size_in_kib(mi_option_t option) { + return (option == mi_option_reserve_os_memory || option == mi_option_arena_reserve); +} + +void _mi_options_init(void) { + // called on process load; should not be called before the CRT is initialized! + // (e.g. 
do not call this from process_init as that may run before CRT initialization) + mi_add_stderr_output(); // now it safe to use stderr for output + for(int i = 0; i < _mi_option_last; i++ ) { + mi_option_t option = (mi_option_t)i; + long l = mi_option_get(option); MI_UNUSED(l); // initialize + // if (option != mi_option_verbose) + { + mi_option_desc_t* desc = &options[option]; + _mi_verbose_message("option '%s': %ld %s\n", desc->name, desc->value, (mi_option_has_size_in_kib(option) ? "KiB" : "")); + } + } + mi_max_error_count = mi_option_get(mi_option_max_errors); + mi_max_warning_count = mi_option_get(mi_option_max_warnings); +} + +mi_decl_nodiscard long mi_option_get(mi_option_t option) { + mi_assert(option >= 0 && option < _mi_option_last); + if (option < 0 || option >= _mi_option_last) return 0; + mi_option_desc_t* desc = &options[option]; + mi_assert(desc->option == option); // index should match the option + if mi_unlikely(desc->init == UNINIT) { + mi_option_init(desc); + } + return desc->value; +} + +mi_decl_nodiscard long mi_option_get_clamp(mi_option_t option, long min, long max) { + long x = mi_option_get(option); + return (x < min ? min : (x > max ? max : x)); +} + +mi_decl_nodiscard size_t mi_option_get_size(mi_option_t option) { + mi_assert_internal(mi_option_has_size_in_kib(option)); + const long x = mi_option_get(option); + size_t size = (x < 0 ? 0 : (size_t)x); + if (mi_option_has_size_in_kib(option)) { + size *= MI_KiB; + } + return size; +} + +void mi_option_set(mi_option_t option, long value) { + mi_assert(option >= 0 && option < _mi_option_last); + if (option < 0 || option >= _mi_option_last) return; + mi_option_desc_t* desc = &options[option]; + mi_assert(desc->option == option); // index should match the option + desc->value = value; + desc->init = INITIALIZED; +} + +void mi_option_set_default(mi_option_t option, long value) { + mi_assert(option >= 0 && option < _mi_option_last); + if (option < 0 || option >= _mi_option_last) return; + mi_option_desc_t* desc = &options[option]; + if (desc->init != INITIALIZED) { + desc->value = value; + } +} + +mi_decl_nodiscard bool mi_option_is_enabled(mi_option_t option) { + return (mi_option_get(option) != 0); +} + +void mi_option_set_enabled(mi_option_t option, bool enable) { + mi_option_set(option, (enable ? 1 : 0)); +} + +void mi_option_set_enabled_default(mi_option_t option, bool enable) { + mi_option_set_default(option, (enable ? 1 : 0)); +} + +void mi_option_enable(mi_option_t option) { + mi_option_set_enabled(option,true); +} + +void mi_option_disable(mi_option_t option) { + mi_option_set_enabled(option,false); +} + +static void mi_cdecl mi_out_stderr(const char* msg, void* arg) { + MI_UNUSED(arg); + if (msg != NULL && msg[0] != 0) { + _mi_prim_out_stderr(msg); + } +} + +// Since an output function can be registered earliest in the `main` +// function we also buffer output that happens earlier. When +// an output function is registered it is called immediately with +// the output up to that point. 
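+// Illustrative application-side sketch (not part of the mimalloc sources): once an
+// output function is registered, the delayed buffer below is flushed into it, so
+// even messages emitted before registration reach the handler.
+#if 0
+static void my_output(const char* msg, void* arg) {
+  (void)arg;
+  fputs(msg, stderr);                    // or forward to the application's logger
+}
+// somewhere in application start-up:
+mi_register_output(&my_output, NULL);    // replays any buffered output into `my_output`
+#endif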
+#ifndef MI_MAX_DELAY_OUTPUT +#define MI_MAX_DELAY_OUTPUT ((size_t)(32*1024)) +#endif +static char out_buf[MI_MAX_DELAY_OUTPUT+1]; +static _Atomic(size_t) out_len; + +static void mi_cdecl mi_out_buf(const char* msg, void* arg) { + MI_UNUSED(arg); + if (msg==NULL) return; + if (mi_atomic_load_relaxed(&out_len)>=MI_MAX_DELAY_OUTPUT) return; + size_t n = _mi_strlen(msg); + if (n==0) return; + // claim space + size_t start = mi_atomic_add_acq_rel(&out_len, n); + if (start >= MI_MAX_DELAY_OUTPUT) return; + // check bound + if (start+n >= MI_MAX_DELAY_OUTPUT) { + n = MI_MAX_DELAY_OUTPUT-start-1; + } + _mi_memcpy(&out_buf[start], msg, n); +} + +static void mi_out_buf_flush(mi_output_fun* out, bool no_more_buf, void* arg) { + if (out==NULL) return; + // claim (if `no_more_buf == true`, no more output will be added after this point) + size_t count = mi_atomic_add_acq_rel(&out_len, (no_more_buf ? MI_MAX_DELAY_OUTPUT : 1)); + // and output the current contents + if (count>MI_MAX_DELAY_OUTPUT) count = MI_MAX_DELAY_OUTPUT; + out_buf[count] = 0; + out(out_buf,arg); + if (!no_more_buf) { + out_buf[count] = '\n'; // if continue with the buffer, insert a newline + } +} + + +// Once this module is loaded, switch to this routine +// which outputs to stderr and the delayed output buffer. +static void mi_cdecl mi_out_buf_stderr(const char* msg, void* arg) { + mi_out_stderr(msg,arg); + mi_out_buf(msg,arg); +} + + + +// -------------------------------------------------------- +// Default output handler +// -------------------------------------------------------- + +// Should be atomic but gives errors on many platforms as generally we cannot cast a function pointer to a uintptr_t. +// For now, don't register output from multiple threads. +static mi_output_fun* volatile mi_out_default; // = NULL +static _Atomic(void*) mi_out_arg; // = NULL + +static mi_output_fun* mi_out_get_default(void** parg) { + if (parg != NULL) { *parg = mi_atomic_load_ptr_acquire(void,&mi_out_arg); } + mi_output_fun* out = mi_out_default; + return (out == NULL ? &mi_out_buf : out); +} + +void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept { + mi_out_default = (out == NULL ? &mi_out_stderr : out); // stop using the delayed output buffer + mi_atomic_store_ptr_release(void,&mi_out_arg, arg); + if (out!=NULL) mi_out_buf_flush(out,true,arg); // output all the delayed output now +} + +// add stderr to the delayed output after the module is loaded +static void mi_add_stderr_output(void) { + mi_assert_internal(mi_out_default == NULL); + mi_out_buf_flush(&mi_out_stderr, false, NULL); // flush current contents to stderr + mi_out_default = &mi_out_buf_stderr; // and add stderr to the delayed output +} + +// -------------------------------------------------------- +// Messages, all end up calling `_mi_fputs`. +// -------------------------------------------------------- +static _Atomic(size_t) error_count; // = 0; // when >= max_error_count stop emitting errors +static _Atomic(size_t) warning_count; // = 0; // when >= max_warning_count stop emitting warnings + +// When overriding malloc, we may recurse into mi_vfprintf if an allocation +// inside the C runtime causes another message. +// In some cases (like on macOS) the loader already allocates which +// calls into mimalloc; if we then access thread locals (like `recurse`) +// this may crash as the access may call _tlv_bootstrap that tries to +// (recursively) invoke malloc again to allocate space for the thread local +// variables on demand. 
This is why we use a _mi_preloading test on such +// platforms. However, C code generator may move the initial thread local address +// load before the `if` and we therefore split it out in a separate funcion. +static mi_decl_thread bool recurse = false; + +static mi_decl_noinline bool mi_recurse_enter_prim(void) { + if (recurse) return false; + recurse = true; + return true; +} + +static mi_decl_noinline void mi_recurse_exit_prim(void) { + recurse = false; +} + +static bool mi_recurse_enter(void) { + #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD) + if (_mi_preloading()) return false; + #endif + return mi_recurse_enter_prim(); +} + +static void mi_recurse_exit(void) { + #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD) + if (_mi_preloading()) return; + #endif + mi_recurse_exit_prim(); +} + +void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message) { + if (out==NULL || (void*)out==(void*)stdout || (void*)out==(void*)stderr) { // TODO: use mi_out_stderr for stderr? + if (!mi_recurse_enter()) return; + out = mi_out_get_default(&arg); + if (prefix != NULL) out(prefix, arg); + out(message, arg); + mi_recurse_exit(); + } + else { + if (prefix != NULL) out(prefix, arg); + out(message, arg); + } +} + +// Define our own limited `fprintf` that avoids memory allocation. +// We do this using `_mi_vsnprintf` with a limited buffer. +static void mi_vfprintf( mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args ) { + char buf[512]; + if (fmt==NULL) return; + if (!mi_recurse_enter()) return; + _mi_vsnprintf(buf, sizeof(buf)-1, fmt, args); + mi_recurse_exit(); + _mi_fputs(out,arg,prefix,buf); +} + +void _mi_fprintf( mi_output_fun* out, void* arg, const char* fmt, ... ) { + va_list args; + va_start(args,fmt); + mi_vfprintf(out,arg,NULL,fmt,args); + va_end(args); +} + +static void mi_vfprintf_thread(mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args) { + if (prefix != NULL && _mi_strnlen(prefix,33) <= 32 && !_mi_is_main_thread()) { + char tprefix[64]; + _mi_snprintf(tprefix, sizeof(tprefix), "%sthread 0x%tx: ", prefix, (uintptr_t)_mi_thread_id()); + mi_vfprintf(out, arg, tprefix, fmt, args); + } + else { + mi_vfprintf(out, arg, prefix, fmt, args); + } +} + +void _mi_trace_message(const char* fmt, ...) { + if (mi_option_get(mi_option_verbose) <= 1) return; // only with verbose level 2 or higher + va_list args; + va_start(args, fmt); + mi_vfprintf_thread(NULL, NULL, "mimalloc: ", fmt, args); + va_end(args); +} + +void _mi_verbose_message(const char* fmt, ...) { + if (!mi_option_is_enabled(mi_option_verbose)) return; + va_list args; + va_start(args,fmt); + mi_vfprintf(NULL, NULL, "mimalloc: ", fmt, args); + va_end(args); +} + +static void mi_show_error_message(const char* fmt, va_list args) { + if (!mi_option_is_enabled(mi_option_verbose)) { + if (!mi_option_is_enabled(mi_option_show_errors)) return; + if (mi_max_error_count >= 0 && (long)mi_atomic_increment_acq_rel(&error_count) > mi_max_error_count) return; + } + mi_vfprintf_thread(NULL, NULL, "mimalloc: error: ", fmt, args); +} + +void _mi_warning_message(const char* fmt, ...) 
{ + if (!mi_option_is_enabled(mi_option_verbose)) { + if (!mi_option_is_enabled(mi_option_show_errors)) return; + if (mi_max_warning_count >= 0 && (long)mi_atomic_increment_acq_rel(&warning_count) > mi_max_warning_count) return; + } + va_list args; + va_start(args,fmt); + mi_vfprintf_thread(NULL, NULL, "mimalloc: warning: ", fmt, args); + va_end(args); +} + + +#if MI_DEBUG +void _mi_assert_fail(const char* assertion, const char* fname, unsigned line, const char* func ) { + _mi_fprintf(NULL, NULL, "mimalloc: assertion failed: at \"%s\":%u, %s\n assertion: \"%s\"\n", fname, line, (func==NULL?"":func), assertion); + abort(); +} +#endif + +// -------------------------------------------------------- +// Errors +// -------------------------------------------------------- + +static mi_error_fun* volatile mi_error_handler; // = NULL +static _Atomic(void*) mi_error_arg; // = NULL + +static void mi_error_default(int err) { + MI_UNUSED(err); +#if (MI_DEBUG>0) + if (err==EFAULT) { + #ifdef _MSC_VER + __debugbreak(); + #endif + abort(); + } +#endif +#if (MI_SECURE>0) + if (err==EFAULT) { // abort on serious errors in secure mode (corrupted meta-data) + abort(); + } +#endif +#if defined(MI_XMALLOC) + if (err==ENOMEM || err==EOVERFLOW) { // abort on memory allocation fails in xmalloc mode + abort(); + } +#endif +} + +void mi_register_error(mi_error_fun* fun, void* arg) { + mi_error_handler = fun; // can be NULL + mi_atomic_store_ptr_release(void,&mi_error_arg, arg); +} + +void _mi_error_message(int err, const char* fmt, ...) { + // show detailed error message + va_list args; + va_start(args, fmt); + mi_show_error_message(fmt, args); + va_end(args); + // and call the error handler which may abort (or return normally) + if (mi_error_handler != NULL) { + mi_error_handler(err, mi_atomic_load_ptr_acquire(void,&mi_error_arg)); + } + else { + mi_error_default(err); + } +} + +// -------------------------------------------------------- +// Initialize options by checking the environment +// -------------------------------------------------------- + +// TODO: implement ourselves to reduce dependencies on the C runtime +#include // strtol +#include // strstr + + +static void mi_option_init(mi_option_desc_t* desc) { + // Read option value from the environment + char s[64 + 1]; + char buf[64+1]; + _mi_strlcpy(buf, "mimalloc_", sizeof(buf)); + _mi_strlcat(buf, desc->name, sizeof(buf)); + bool found = _mi_getenv(buf, s, sizeof(s)); + if (!found && desc->legacy_name != NULL) { + _mi_strlcpy(buf, "mimalloc_", sizeof(buf)); + _mi_strlcat(buf, desc->legacy_name, sizeof(buf)); + found = _mi_getenv(buf, s, sizeof(s)); + if (found) { + _mi_warning_message("environment option \"mimalloc_%s\" is deprecated -- use \"mimalloc_%s\" instead.\n", desc->legacy_name, desc->name); + } + } + + if (found) { + size_t len = _mi_strnlen(s, sizeof(buf) - 1); + for (size_t i = 0; i < len; i++) { + buf[i] = _mi_toupper(s[i]); + } + buf[len] = 0; + if (buf[0] == 0 || strstr("1;TRUE;YES;ON", buf) != NULL) { + desc->value = 1; + desc->init = INITIALIZED; + } + else if (strstr("0;FALSE;NO;OFF", buf) != NULL) { + desc->value = 0; + desc->init = INITIALIZED; + } + else { + char* end = buf; + long value = strtol(buf, &end, 10); + if (mi_option_has_size_in_kib(desc->option)) { + // this option is interpreted in KiB to prevent overflow of `long` for large allocations + // (long is 32-bit on 64-bit windows, which allows for 4TiB max.) + size_t size = (value < 0 ? 
0 : (size_t)value); + bool overflow = false; + if (*end == 'K') { end++; } + else if (*end == 'M') { overflow = mi_mul_overflow(size,MI_KiB,&size); end++; } + else if (*end == 'G') { overflow = mi_mul_overflow(size,MI_MiB,&size); end++; } + else if (*end == 'T') { overflow = mi_mul_overflow(size,MI_GiB,&size); end++; } + else { size = (size + MI_KiB - 1) / MI_KiB; } + if (end[0] == 'I' && end[1] == 'B') { end += 2; } // KiB, MiB, GiB, TiB + else if (*end == 'B') { end++; } // Kb, Mb, Gb, Tb + if (overflow || size > MI_MAX_ALLOC_SIZE) { size = (MI_MAX_ALLOC_SIZE / MI_KiB); } + value = (size > LONG_MAX ? LONG_MAX : (long)size); + } + if (*end == 0) { + desc->value = value; + desc->init = INITIALIZED; + } + else { + // set `init` first to avoid recursion through _mi_warning_message on mimalloc_verbose. + desc->init = DEFAULTED; + if (desc->option == mi_option_verbose && desc->value == 0) { + // if the 'mimalloc_verbose' env var has a bogus value we'd never know + // (since the value defaults to 'off') so in that case briefly enable verbose + desc->value = 1; + _mi_warning_message("environment option mimalloc_%s has an invalid value.\n", desc->name); + desc->value = 0; + } + else { + _mi_warning_message("environment option mimalloc_%s has an invalid value.\n", desc->name); + } + } + } + mi_assert_internal(desc->init != UNINIT); + } + else if (!_mi_preloading()) { + desc->init = DEFAULTED; + } +} diff --git a/ww/managers/mimalloc/src/os.c b/ww/managers/mimalloc/src/os.c new file mode 100644 index 00000000..ce104273 --- /dev/null +++ b/ww/managers/mimalloc/src/os.c @@ -0,0 +1,678 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" + + +/* ----------------------------------------------------------- + Initialization. +----------------------------------------------------------- */ + +static mi_os_mem_config_t mi_os_mem_config = { + 4096, // page size + 0, // large page size (usually 2MiB) + 4096, // allocation granularity + true, // has overcommit? (if true we use MAP_NORESERVE on mmap systems) + false, // can we partially free allocated blocks? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span) + true // has virtual reserve? (if true we can reserve virtual address space without using commit or physical memory) +}; + +bool _mi_os_has_overcommit(void) { + return mi_os_mem_config.has_overcommit; +} + +bool _mi_os_has_virtual_reserve(void) { + return mi_os_mem_config.has_virtual_reserve; +} + + +// OS (small) page size +size_t _mi_os_page_size(void) { + return mi_os_mem_config.page_size; +} + +// if large OS pages are supported (2 or 4MiB), then return the size, otherwise return the small page size (4KiB) +size_t _mi_os_large_page_size(void) { + return (mi_os_mem_config.large_page_size != 0 ? 
mi_os_mem_config.large_page_size : _mi_os_page_size()); +} + +bool _mi_os_use_large_page(size_t size, size_t alignment) { + // if we have access, check the size and alignment requirements + if (mi_os_mem_config.large_page_size == 0 || !mi_option_is_enabled(mi_option_allow_large_os_pages)) return false; + return ((size % mi_os_mem_config.large_page_size) == 0 && (alignment % mi_os_mem_config.large_page_size) == 0); +} + +// round to a good OS allocation size (bounded by max 12.5% waste) +size_t _mi_os_good_alloc_size(size_t size) { + size_t align_size; + if (size < 512*MI_KiB) align_size = _mi_os_page_size(); + else if (size < 2*MI_MiB) align_size = 64*MI_KiB; + else if (size < 8*MI_MiB) align_size = 256*MI_KiB; + else if (size < 32*MI_MiB) align_size = 1*MI_MiB; + else align_size = 4*MI_MiB; + if mi_unlikely(size >= (SIZE_MAX - align_size)) return size; // possible overflow? + return _mi_align_up(size, align_size); +} + +void _mi_os_init(void) { + _mi_prim_mem_init(&mi_os_mem_config); +} + + +/* ----------------------------------------------------------- + Util +-------------------------------------------------------------- */ +bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats); +bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats); + + +/* ----------------------------------------------------------- + aligned hinting +-------------------------------------------------------------- */ + +// On 64-bit systems, we can do efficient aligned allocation by using +// the 2TiB to 30TiB area to allocate those. +#if (MI_INTPTR_SIZE >= 8) +static mi_decl_cache_align _Atomic(uintptr_t)aligned_base; + +// Return a MI_SEGMENT_SIZE aligned address that is probably available. +// If this returns NULL, the OS will determine the address but on some OS's that may not be +// properly aligned which can be more costly as it needs to be adjusted afterwards. +// For a size > 1GiB this always returns NULL in order to guarantee good ASLR randomization; +// (otherwise an initial large allocation of say 2TiB has a 50% chance to include (known) addresses +// in the middle of the 2TiB - 6TiB address range (see issue #372)) + +#define MI_HINT_BASE ((uintptr_t)2 << 40) // 2TiB start +#define MI_HINT_AREA ((uintptr_t)4 << 40) // upto 6TiB (since before win8 there is "only" 8TiB available to processes) +#define MI_HINT_MAX ((uintptr_t)30 << 40) // wrap after 30TiB (area after 32TiB is used for huge OS pages) + +void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) +{ + if (try_alignment <= 1 || try_alignment > MI_SEGMENT_SIZE) return NULL; + size = _mi_align_up(size, MI_SEGMENT_SIZE); + if (size > 1*MI_GiB) return NULL; // guarantee the chance of fixed valid address is at most 1/(MI_HINT_AREA / 1<<30) = 1/4096. + #if (MI_SECURE>0) + size += MI_SEGMENT_SIZE; // put in `MI_SEGMENT_SIZE` virtual gaps between hinted blocks; this splits VLA's but increases guarded areas. 
+ #endif + + uintptr_t hint = mi_atomic_add_acq_rel(&aligned_base, size); + if (hint == 0 || hint > MI_HINT_MAX) { // wrap or initialize + uintptr_t init = MI_HINT_BASE; + #if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of aligned allocations unless in debug mode + uintptr_t r = _mi_heap_random_next(mi_prim_get_default_heap()); + init = init + ((MI_SEGMENT_SIZE * ((r>>17) & 0xFFFFF)) % MI_HINT_AREA); // (randomly 20 bits)*4MiB == 0 to 4TiB + #endif + uintptr_t expected = hint + size; + mi_atomic_cas_strong_acq_rel(&aligned_base, &expected, init); + hint = mi_atomic_add_acq_rel(&aligned_base, size); // this may still give 0 or > MI_HINT_MAX but that is ok, it is a hint after all + } + if (hint%try_alignment != 0) return NULL; + return (void*)hint; +} +#else +void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) { + MI_UNUSED(try_alignment); MI_UNUSED(size); + return NULL; +} +#endif + + +/* ----------------------------------------------------------- + Free memory +-------------------------------------------------------------- */ + +static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats); + +static void mi_os_prim_free(void* addr, size_t size, bool still_committed, mi_stats_t* tld_stats) { + MI_UNUSED(tld_stats); + mi_stats_t* stats = &_mi_stats_main; + mi_assert_internal((size % _mi_os_page_size()) == 0); + if (addr == NULL || size == 0) return; // || _mi_os_is_huge_reserved(addr) + int err = _mi_prim_free(addr, size); + if (err != 0) { + _mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr); + } + if (still_committed) { _mi_stat_decrease(&stats->committed, size); } + _mi_stat_decrease(&stats->reserved, size); +} + +void _mi_os_free_ex(void* addr, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* tld_stats) { + if (mi_memkind_is_os(memid.memkind)) { + size_t csize = _mi_os_good_alloc_size(size); + void* base = addr; + // different base? (due to alignment) + if (memid.mem.os.base != NULL) { + mi_assert(memid.mem.os.base <= addr); + mi_assert((uint8_t*)memid.mem.os.base + memid.mem.os.alignment >= (uint8_t*)addr); + base = memid.mem.os.base; + csize += ((uint8_t*)addr - (uint8_t*)memid.mem.os.base); + } + // free it + if (memid.memkind == MI_MEM_OS_HUGE) { + mi_assert(memid.is_pinned); + mi_os_free_huge_os_pages(base, csize, tld_stats); + } + else { + mi_os_prim_free(base, csize, still_committed, tld_stats); + } + } + else { + // nothing to do + mi_assert(memid.memkind < MI_MEM_OS); + } +} + +void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* tld_stats) { + _mi_os_free_ex(p, size, true, memid, tld_stats); +} + + +/* ----------------------------------------------------------- + Primitive allocation from the OS. +-------------------------------------------------------------- */ + +// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. 
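+// Rough sketch of the over-allocate-and-trim fallback used by
+// `mi_os_prim_alloc_aligned` below (illustrative only; it assumes a power-of-two
+// `alignment` and ignores the page rounding the real code performs):
+#if 0
+uint8_t* raw     = /* os allocation of `size + alignment` bytes */ NULL;
+uint8_t* aligned = (uint8_t*)(((uintptr_t)raw + alignment - 1) & ~((uintptr_t)alignment - 1));
+size_t   pre     = (size_t)(aligned - raw);   // unused head, freed on mmap-like systems
+size_t   post    = alignment - pre;           // unused tail, freed likewise
+// `aligned .. aligned + size` is the usable, correctly aligned range.
+#endif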
+static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, mi_stats_t* tld_stats) { + mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); + mi_assert_internal(is_zero != NULL); + mi_assert_internal(is_large != NULL); + if (size == 0) return NULL; + if (!commit) { allow_large = false; } + if (try_alignment == 0) { try_alignment = 1; } // avoid 0 to ensure there will be no divide by zero when aligning + *is_zero = false; + void* p = NULL; + int err = _mi_prim_alloc(size, try_alignment, commit, allow_large, is_large, is_zero, &p); + if (err != 0) { + _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, size, try_alignment, commit, allow_large); + } + + MI_UNUSED(tld_stats); + mi_stats_t* stats = &_mi_stats_main; + mi_stat_counter_increase(stats->mmap_calls, 1); + if (p != NULL) { + _mi_stat_increase(&stats->reserved, size); + if (commit) { + _mi_stat_increase(&stats->committed, size); + // seems needed for asan (or `mimalloc-test-api` fails) + #ifdef MI_TRACK_ASAN + if (*is_zero) { mi_track_mem_defined(p,size); } + else { mi_track_mem_undefined(p,size); } + #endif + } + } + return p; +} + + +// Primitive aligned allocation from the OS. +// This function guarantees the allocated memory is aligned. +static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** base, mi_stats_t* stats) { + mi_assert_internal(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0)); + mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); + mi_assert_internal(is_large != NULL); + mi_assert_internal(is_zero != NULL); + mi_assert_internal(base != NULL); + if (!commit) allow_large = false; + if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL; + size = _mi_align_up(size, _mi_os_page_size()); + + // try first with a hint (this will be aligned directly on Win 10+ or BSD) + void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero, stats); + if (p == NULL) return NULL; + + // aligned already? + if (((uintptr_t)p % alignment) == 0) { + *base = p; + } + else { + // if not aligned, free it, overallocate, and unmap around it + _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit); + mi_os_prim_free(p, size, commit, stats); + if (size >= (SIZE_MAX - alignment)) return NULL; // overflow + const size_t over_size = size + alignment; + + if (!mi_os_mem_config.has_partial_free) { // win32 virtualAlloc cannot free parts of an allocated block + // over-allocate uncommitted (virtual) memory + p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero, stats); + if (p == NULL) return NULL; + + // set p to the aligned part in the full region + // note: this is dangerous on Windows as VirtualFree needs the actual base pointer + // this is handled though by having the `base` field in the memid's + *base = p; // remember the base + p = mi_align_up_ptr(p, alignment); + + // explicitly commit only the aligned part + if (commit) { + _mi_os_commit(p, size, NULL, stats); + } + } + else { // mmap can free inside an allocation + // overallocate... 
+ p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero, stats); + if (p == NULL) return NULL; + + // and selectively unmap parts around the over-allocated area. + void* aligned_p = mi_align_up_ptr(p, alignment); + size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p; + size_t mid_size = _mi_align_up(size, _mi_os_page_size()); + size_t post_size = over_size - pre_size - mid_size; + mi_assert_internal(pre_size < over_size&& post_size < over_size&& mid_size >= size); + if (pre_size > 0) { mi_os_prim_free(p, pre_size, commit, stats); } + if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats); } + // we can return the aligned pointer on `mmap` systems + p = aligned_p; + *base = aligned_p; // since we freed the pre part, `*base == p`. + } + } + + mi_assert_internal(p == NULL || (p != NULL && *base != NULL && ((uintptr_t)p % alignment) == 0)); + return p; +} + + +/* ----------------------------------------------------------- + OS API: alloc and alloc_aligned +----------------------------------------------------------- */ + +void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats) { + *memid = _mi_memid_none(); + if (size == 0) return NULL; + size = _mi_os_good_alloc_size(size); + bool os_is_large = false; + bool os_is_zero = false; + void* p = mi_os_prim_alloc(size, 0, true, false, &os_is_large, &os_is_zero, stats); + if (p != NULL) { + *memid = _mi_memid_create_os(true, os_is_zero, os_is_large); + } + return p; +} + +void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats) +{ + MI_UNUSED(&_mi_os_get_aligned_hint); // suppress unused warnings + *memid = _mi_memid_none(); + if (size == 0) return NULL; + size = _mi_os_good_alloc_size(size); + alignment = _mi_align_up(alignment, _mi_os_page_size()); + + bool os_is_large = false; + bool os_is_zero = false; + void* os_base = NULL; + void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base, stats ); + if (p != NULL) { + *memid = _mi_memid_create_os(commit, os_is_zero, os_is_large); + memid->mem.os.base = os_base; + memid->mem.os.alignment = alignment; + } + return p; +} + +/* ----------------------------------------------------------- + OS aligned allocation with an offset. This is used + for large alignments > MI_BLOCK_ALIGNMENT_MAX. We use a large mimalloc + page where the object can be aligned at an offset from the start of the segment. + As we may need to overallocate, we need to free such pointers using `mi_free_aligned` + to use the actual start of the memory region. 
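+  For example (illustrative numbers): with `alignment = 64KiB` and `offset = 16KiB`
+  we get `extra = align_up(offset, alignment) - offset = 48KiB`, allocate
+  `size + 48KiB` aligned to 64KiB, and return `p = start + 48KiB`; the object at
+  `p + offset` then falls exactly on the 64KiB boundary.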
+----------------------------------------------------------- */ + +void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats) { + mi_assert(offset <= MI_SEGMENT_SIZE); + mi_assert(offset <= size); + mi_assert((alignment % _mi_os_page_size()) == 0); + *memid = _mi_memid_none(); + if (offset > MI_SEGMENT_SIZE) return NULL; + if (offset == 0) { + // regular aligned allocation + return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid, stats); + } + else { + // overallocate to align at an offset + const size_t extra = _mi_align_up(offset, alignment) - offset; + const size_t oversize = size + extra; + void* const start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, memid, stats); + if (start == NULL) return NULL; + + void* const p = (uint8_t*)start + extra; + mi_assert(_mi_is_aligned((uint8_t*)p + offset, alignment)); + // decommit the overallocation at the start + if (commit && extra > _mi_os_page_size()) { + _mi_os_decommit(start, extra, stats); + } + return p; + } +} + +/* ----------------------------------------------------------- + OS memory API: reset, commit, decommit, protect, unprotect. +----------------------------------------------------------- */ + +// OS page align within a given area, either conservative (pages inside the area only), +// or not (straddling pages outside the area is possible) +static void* mi_os_page_align_areax(bool conservative, void* addr, size_t size, size_t* newsize) { + mi_assert(addr != NULL && size > 0); + if (newsize != NULL) *newsize = 0; + if (size == 0 || addr == NULL) return NULL; + + // page align conservatively within the range + void* start = (conservative ? mi_align_up_ptr(addr, _mi_os_page_size()) + : mi_align_down_ptr(addr, _mi_os_page_size())); + void* end = (conservative ? mi_align_down_ptr((uint8_t*)addr + size, _mi_os_page_size()) + : mi_align_up_ptr((uint8_t*)addr + size, _mi_os_page_size())); + ptrdiff_t diff = (uint8_t*)end - (uint8_t*)start; + if (diff <= 0) return NULL; + + mi_assert_internal((conservative && (size_t)diff <= size) || (!conservative && (size_t)diff >= size)); + if (newsize != NULL) *newsize = (size_t)diff; + return start; +} + +static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t* newsize) { + return mi_os_page_align_areax(true, addr, size, newsize); +} + +bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats) { + MI_UNUSED(tld_stats); + mi_stats_t* stats = &_mi_stats_main; + if (is_zero != NULL) { *is_zero = false; } + _mi_stat_increase(&stats->committed, size); // use size for precise commit vs. decommit + _mi_stat_counter_increase(&stats->commit_calls, 1); + + // page align range + size_t csize; + void* start = mi_os_page_align_areax(false /* conservative? 
*/, addr, size, &csize); + if (csize == 0) return true; + + // commit + bool os_is_zero = false; + int err = _mi_prim_commit(start, csize, &os_is_zero); + if (err != 0) { + _mi_warning_message("cannot commit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize); + return false; + } + if (os_is_zero && is_zero != NULL) { + *is_zero = true; + mi_assert_expensive(mi_mem_is_zero(start, csize)); + } + // note: the following seems required for asan (otherwise `mimalloc-test-stress` fails) + #ifdef MI_TRACK_ASAN + if (os_is_zero) { mi_track_mem_defined(start,csize); } + else { mi_track_mem_undefined(start,csize); } + #endif + return true; +} + +static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, mi_stats_t* tld_stats) { + MI_UNUSED(tld_stats); + mi_stats_t* stats = &_mi_stats_main; + mi_assert_internal(needs_recommit!=NULL); + _mi_stat_decrease(&stats->committed, size); + + // page align + size_t csize; + void* start = mi_os_page_align_area_conservative(addr, size, &csize); + if (csize == 0) return true; + + // decommit + *needs_recommit = true; + int err = _mi_prim_decommit(start,csize,needs_recommit); + if (err != 0) { + _mi_warning_message("cannot decommit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize); + } + mi_assert_internal(err == 0); + return (err == 0); +} + +bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* tld_stats) { + bool needs_recommit; + return mi_os_decommit_ex(addr, size, &needs_recommit, tld_stats); +} + + +// Signal to the OS that the address range is no longer in use +// but may be used later again. This will release physical memory +// pages and reduce swapping while keeping the memory committed. +// We page align to a conservative area inside the range to reset. +bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) { + // page align conservatively within the range + size_t csize; + void* start = mi_os_page_align_area_conservative(addr, size, &csize); + if (csize == 0) return true; // || _mi_os_is_huge_reserved(addr) + _mi_stat_increase(&stats->reset, csize); + _mi_stat_counter_increase(&stats->reset_calls, 1); + + #if (MI_DEBUG>1) && !MI_SECURE && !MI_TRACK_ENABLED // && !MI_TSAN + memset(start, 0, csize); // pretend it is eagerly reset + #endif + + int err = _mi_prim_reset(start, csize); + if (err != 0) { + _mi_warning_message("cannot reset OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize); + } + return (err == 0); +} + + +// either resets or decommits memory, returns true if the memory needs +// to be recommitted if it is to be re-used later on. +bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats) +{ + if (mi_option_get(mi_option_purge_delay) < 0) return false; // is purging allowed? + _mi_stat_counter_increase(&stats->purge_calls, 1); + _mi_stat_increase(&stats->purged, size); + + if (mi_option_is_enabled(mi_option_purge_decommits) && // should decommit? + !_mi_preloading()) // don't decommit during preloading (unsafe) + { + bool needs_recommit = true; + mi_os_decommit_ex(p, size, &needs_recommit, stats); + return needs_recommit; + } + else { + if (allow_reset) { // this can sometimes be not allowed if the range is not fully committed + _mi_os_reset(p, size, stats); + } + return false; // needs no recommit + } +} + +// either resets or decommits memory, returns true if the memory needs +// to be recommitted if it is to be re-used later on. 
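+// Sketch of how a caller might use the result (illustrative, not from the sources):
+#if 0
+bool needs_recommit = _mi_os_purge(p, size, stats);
+if (needs_recommit) {
+  _mi_os_commit(p, size, NULL, stats);   // purging decommitted the range; commit before reuse
+}
+#endif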
+bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats) {
+  return _mi_os_purge_ex(p, size, true, stats);
+}
+
+// Protect a region in memory so that it is not accessible.
+static bool mi_os_protectx(void* addr, size_t size, bool protect) {
+  // page align conservatively within the range
+  size_t csize = 0;
+  void* start = mi_os_page_align_area_conservative(addr, size, &csize);
+  if (csize == 0) return false;
+  /*
+  if (_mi_os_is_huge_reserved(addr)) {
+    _mi_warning_message("cannot mprotect memory allocated in huge OS pages\n");
+  }
+  */
+  int err = _mi_prim_protect(start,csize,protect);
+  if (err != 0) {
+    _mi_warning_message("cannot %s OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", (protect ? "protect" : "unprotect"), err, err, start, csize);
+  }
+  return (err == 0);
+}
+
+bool _mi_os_protect(void* addr, size_t size) {
+  return mi_os_protectx(addr, size, true);
+}
+
+bool _mi_os_unprotect(void* addr, size_t size) {
+  return mi_os_protectx(addr, size, false);
+}
+
+
+
+/* ----------------------------------------------------------------------------
+Support for allocating huge OS pages (1GiB) that are reserved up-front
+and possibly associated with a specific NUMA node. (use `numa_node>=0`)
+-----------------------------------------------------------------------------*/
+#define MI_HUGE_OS_PAGE_SIZE (MI_GiB)
+
+
+#if (MI_INTPTR_SIZE >= 8)
+// To ensure proper alignment, use our own area for huge OS pages
+static mi_decl_cache_align _Atomic(uintptr_t) mi_huge_start; // = 0
+
+// Claim an aligned address range for huge pages
+static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
+  if (total_size != NULL) *total_size = 0;
+  const size_t size = pages * MI_HUGE_OS_PAGE_SIZE;
+
+  uintptr_t start = 0;
+  uintptr_t end = 0;
+  uintptr_t huge_start = mi_atomic_load_relaxed(&mi_huge_start);
+  do {
+    start = huge_start;
+    if (start == 0) {
+      // Initialize the start address after the 32TiB area
+      start = ((uintptr_t)32 << 40); // 32TiB virtual start address
+      #if (MI_SECURE>0 || MI_DEBUG==0)  // security: randomize start of huge pages unless in debug mode
+      uintptr_t r = _mi_heap_random_next(mi_prim_get_default_heap());
+      start = start + ((uintptr_t)MI_HUGE_OS_PAGE_SIZE * ((r>>17) & 0x0FFF));  // (randomly 12bits)*1GiB == between 0 and 4TiB
+      #endif
+    }
+    end = start + size;
+    mi_assert_internal(end % MI_SEGMENT_SIZE == 0);
+  } while (!mi_atomic_cas_strong_acq_rel(&mi_huge_start, &huge_start, end));
+
+  if (total_size != NULL) *total_size = size;
+  return (uint8_t*)start;
+}
+#else
+static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
+  MI_UNUSED(pages);
+  if (total_size != NULL) *total_size = 0;
+  return NULL;
+}
+#endif
+
+// Allocate MI_SEGMENT_SIZE aligned huge pages
+void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_msecs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid) {
+  *memid = _mi_memid_none();
+  if (psize != NULL) *psize = 0;
+  if (pages_reserved != NULL) *pages_reserved = 0;
+  size_t size = 0;
+  uint8_t* start = mi_os_claim_huge_pages(pages, &size);
+  if (start == NULL) return NULL; // e.g. on 32-bit systems
+
+  // Allocate one page at a time, but try to place them contiguously.
+  // We allocate one page at a time to be able to abort if it takes too long,
+  // or to at least allocate as many as are available on the system.
+ mi_msecs_t start_t = _mi_clock_start(); + size_t page = 0; + bool all_zero = true; + while (page < pages) { + // allocate a page + bool is_zero = false; + void* addr = start + (page * MI_HUGE_OS_PAGE_SIZE); + void* p = NULL; + int err = _mi_prim_alloc_huge_os_pages(addr, MI_HUGE_OS_PAGE_SIZE, numa_node, &is_zero, &p); + if (!is_zero) { all_zero = false; } + if (err != 0) { + _mi_warning_message("unable to allocate huge OS page (error: %d (0x%x), address: %p, size: %zx bytes)\n", err, err, addr, MI_HUGE_OS_PAGE_SIZE); + break; + } + + // Did we succeed at a contiguous address? + if (p != addr) { + // no success, issue a warning and break + if (p != NULL) { + _mi_warning_message("could not allocate contiguous huge OS page %zu at %p\n", page, addr); + mi_os_prim_free(p, MI_HUGE_OS_PAGE_SIZE, true, &_mi_stats_main); + } + break; + } + + // success, record it + page++; // increase before timeout check (see issue #711) + _mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE); + _mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE); + + // check for timeout + if (max_msecs > 0) { + mi_msecs_t elapsed = _mi_clock_end(start_t); + if (page >= 1) { + mi_msecs_t estimate = ((elapsed / (page+1)) * pages); + if (estimate > 2*max_msecs) { // seems like we are going to timeout, break + elapsed = max_msecs + 1; + } + } + if (elapsed > max_msecs) { + _mi_warning_message("huge OS page allocation timed out (after allocating %zu page(s))\n", page); + break; + } + } + } + mi_assert_internal(page*MI_HUGE_OS_PAGE_SIZE <= size); + if (pages_reserved != NULL) { *pages_reserved = page; } + if (psize != NULL) { *psize = page * MI_HUGE_OS_PAGE_SIZE; } + if (page != 0) { + mi_assert(start != NULL); + *memid = _mi_memid_create_os(true /* is committed */, all_zero, true /* is_large */); + memid->memkind = MI_MEM_OS_HUGE; + mi_assert(memid->is_pinned); + #ifdef MI_TRACK_ASAN + if (all_zero) { mi_track_mem_defined(start,size); } + #endif + } + return (page == 0 ? NULL : start); +} + +// free every huge page in a range individually (as we allocated per page) +// note: needed with VirtualAlloc but could potentially be done in one go on mmap'd systems. +static void mi_os_free_huge_os_pages(void* p, size_t size, mi_stats_t* stats) { + if (p==NULL || size==0) return; + uint8_t* base = (uint8_t*)p; + while (size >= MI_HUGE_OS_PAGE_SIZE) { + mi_os_prim_free(base, MI_HUGE_OS_PAGE_SIZE, true, stats); + size -= MI_HUGE_OS_PAGE_SIZE; + base += MI_HUGE_OS_PAGE_SIZE; + } +} + +/* ---------------------------------------------------------------------------- +Support NUMA aware allocation +-----------------------------------------------------------------------------*/ + +_Atomic(size_t) _mi_numa_node_count; // = 0 // cache the node count + +size_t _mi_os_numa_node_count_get(void) { + size_t count = mi_atomic_load_acquire(&_mi_numa_node_count); + if (count <= 0) { + long ncount = mi_option_get(mi_option_use_numa_nodes); // given explicitly? 
+ if (ncount > 0) { + count = (size_t)ncount; + } + else { + count = _mi_prim_numa_node_count(); // or detect dynamically + if (count == 0) count = 1; + } + mi_atomic_store_release(&_mi_numa_node_count, count); // save it + _mi_verbose_message("using %zd numa regions\n", count); + } + return count; +} + +int _mi_os_numa_node_get(mi_os_tld_t* tld) { + MI_UNUSED(tld); + size_t numa_count = _mi_os_numa_node_count(); + if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0 + // never more than the node count and >= 0 + size_t numa_node = _mi_prim_numa_node(); + if (numa_node >= numa_count) { numa_node = numa_node % numa_count; } + return (int)numa_node; +} diff --git a/ww/managers/mimalloc/src/page-queue.c b/ww/managers/mimalloc/src/page-queue.c new file mode 100644 index 00000000..ceea91ee --- /dev/null +++ b/ww/managers/mimalloc/src/page-queue.c @@ -0,0 +1,343 @@ +/*---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +/* ----------------------------------------------------------- + Definition of page queues for each block size +----------------------------------------------------------- */ + +#ifndef MI_IN_PAGE_C +#error "this file should be included from 'page.c'" +// include to help an IDE +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#endif + +/* ----------------------------------------------------------- + Minimal alignment in machine words (i.e. `sizeof(void*)`) +----------------------------------------------------------- */ + +#if (MI_MAX_ALIGN_SIZE > 4*MI_INTPTR_SIZE) + #error "define alignment for more than 4x word size for this platform" +#elif (MI_MAX_ALIGN_SIZE > 2*MI_INTPTR_SIZE) + #define MI_ALIGN4W // 4 machine words minimal alignment +#elif (MI_MAX_ALIGN_SIZE > MI_INTPTR_SIZE) + #define MI_ALIGN2W // 2 machine words minimal alignment +#else + // ok, default alignment is 1 word +#endif + + +/* ----------------------------------------------------------- + Queue query +----------------------------------------------------------- */ + + +static inline bool mi_page_queue_is_huge(const mi_page_queue_t* pq) { + return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+sizeof(uintptr_t))); +} + +static inline bool mi_page_queue_is_full(const mi_page_queue_t* pq) { + return (pq->block_size == (MI_MEDIUM_OBJ_SIZE_MAX+(2*sizeof(uintptr_t)))); +} + +static inline bool mi_page_queue_is_special(const mi_page_queue_t* pq) { + return (pq->block_size > MI_MEDIUM_OBJ_SIZE_MAX); +} + +/* ----------------------------------------------------------- + Bins +----------------------------------------------------------- */ + +// Return the bin for a given field size. +// Returns MI_BIN_HUGE if the size is too large. +// We use `wsize` for the size in "machine word sizes", +// i.e. byte size == `wsize*sizeof(void*)`. 
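+// A rough worked example, assuming a 64-bit platform with 1-word minimal
+// alignment (neither MI_ALIGN2W nor MI_ALIGN4W defined): sizes 1..8 bytes give
+// wsize 1 and bin 1; a 64-byte request gives wsize 8 and bin 8; beyond 8 words
+// the bin is derived from the top bits of wsize (roughly geometric spacing,
+// ~12.5% worst-case internal fragmentation), and anything above
+// MI_MEDIUM_OBJ_WSIZE_MAX falls into MI_BIN_HUGE.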
+static inline uint8_t mi_bin(size_t size) { + size_t wsize = _mi_wsize_from_size(size); + uint8_t bin; + if (wsize <= 1) { + bin = 1; + } + #if defined(MI_ALIGN4W) + else if (wsize <= 4) { + bin = (uint8_t)((wsize+1)&~1); // round to double word sizes + } + #elif defined(MI_ALIGN2W) + else if (wsize <= 8) { + bin = (uint8_t)((wsize+1)&~1); // round to double word sizes + } + #else + else if (wsize <= 8) { + bin = (uint8_t)wsize; + } + #endif + else if (wsize > MI_MEDIUM_OBJ_WSIZE_MAX) { + bin = MI_BIN_HUGE; + } + else { + #if defined(MI_ALIGN4W) + if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes + #endif + wsize--; + // find the highest bit + uint8_t b = (uint8_t)mi_bsr(wsize); // note: wsize != 0 + // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation). + // - adjust with 3 because we use do not round the first 8 sizes + // which each get an exact bin + bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3; + mi_assert_internal(bin < MI_BIN_HUGE); + } + mi_assert_internal(bin > 0 && bin <= MI_BIN_HUGE); + return bin; +} + + + +/* ----------------------------------------------------------- + Queue of pages with free blocks +----------------------------------------------------------- */ + +uint8_t _mi_bin(size_t size) { + return mi_bin(size); +} + +size_t _mi_bin_size(uint8_t bin) { + return _mi_heap_empty.pages[bin].block_size; +} + +// Good size for allocation +size_t mi_good_size(size_t size) mi_attr_noexcept { + if (size <= MI_MEDIUM_OBJ_SIZE_MAX) { + return _mi_bin_size(mi_bin(size + MI_PADDING_SIZE)); + } + else { + return _mi_align_up(size + MI_PADDING_SIZE,_mi_os_page_size()); + } +} + +#if (MI_DEBUG>1) +static bool mi_page_queue_contains(mi_page_queue_t* queue, const mi_page_t* page) { + mi_assert_internal(page != NULL); + mi_page_t* list = queue->first; + while (list != NULL) { + mi_assert_internal(list->next == NULL || list->next->prev == list); + mi_assert_internal(list->prev == NULL || list->prev->next == list); + if (list == page) break; + list = list->next; + } + return (list == page); +} + +#endif + +#if (MI_DEBUG>1) +static bool mi_heap_contains_queue(const mi_heap_t* heap, const mi_page_queue_t* pq) { + return (pq >= &heap->pages[0] && pq <= &heap->pages[MI_BIN_FULL]); +} +#endif + +static inline bool mi_page_is_large_or_huge(const mi_page_t* page) { + return (mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_huge(page)); +} + +static mi_page_queue_t* mi_heap_page_queue_of(mi_heap_t* heap, const mi_page_t* page) { + mi_assert_internal(heap!=NULL); + uint8_t bin = (mi_page_is_in_full(page) ? MI_BIN_FULL : (mi_page_is_huge(page) ? MI_BIN_HUGE : mi_bin(mi_page_block_size(page)))); + mi_assert_internal(bin <= MI_BIN_FULL); + mi_page_queue_t* pq = &heap->pages[bin]; + mi_assert_internal((mi_page_block_size(page) == pq->block_size) || + (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(pq)) || + (mi_page_is_in_full(page) && mi_page_queue_is_full(pq))); + return pq; +} + +static mi_page_queue_t* mi_page_queue_of(const mi_page_t* page) { + mi_heap_t* heap = mi_page_heap(page); + mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page); + mi_assert_expensive(mi_page_queue_contains(pq, page)); + return pq; +} + +// The current small page array is for efficiency and for each +// small size (up to 256) it points directly to the page for that +// size without having to compute the bin. 
This means when the +// current free page queue is updated for a small bin, we need to update a +// range of entries in `_mi_page_small_free`. +static inline void mi_heap_queue_first_update(mi_heap_t* heap, const mi_page_queue_t* pq) { + mi_assert_internal(mi_heap_contains_queue(heap,pq)); + size_t size = pq->block_size; + if (size > MI_SMALL_SIZE_MAX) return; + + mi_page_t* page = pq->first; + if (pq->first == NULL) page = (mi_page_t*)&_mi_page_empty; + + // find index in the right direct page array + size_t start; + size_t idx = _mi_wsize_from_size(size); + mi_page_t** pages_free = heap->pages_free_direct; + + if (pages_free[idx] == page) return; // already set + + // find start slot + if (idx<=1) { + start = 0; + } + else { + // find previous size; due to minimal alignment upto 3 previous bins may need to be skipped + uint8_t bin = mi_bin(size); + const mi_page_queue_t* prev = pq - 1; + while( bin == mi_bin(prev->block_size) && prev > &heap->pages[0]) { + prev--; + } + start = 1 + _mi_wsize_from_size(prev->block_size); + if (start > idx) start = idx; + } + + // set size range to the right page + mi_assert(start <= idx); + for (size_t sz = start; sz <= idx; sz++) { + pages_free[sz] = page; + } +} + +/* +static bool mi_page_queue_is_empty(mi_page_queue_t* queue) { + return (queue->first == NULL); +} +*/ + +static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) { + mi_assert_internal(page != NULL); + mi_assert_expensive(mi_page_queue_contains(queue, page)); + mi_assert_internal(mi_page_block_size(page) == queue->block_size || + (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) || + (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))); + mi_heap_t* heap = mi_page_heap(page); + + if (page->prev != NULL) page->prev->next = page->next; + if (page->next != NULL) page->next->prev = page->prev; + if (page == queue->last) queue->last = page->prev; + if (page == queue->first) { + queue->first = page->next; + // update first + mi_assert_internal(mi_heap_contains_queue(heap, queue)); + mi_heap_queue_first_update(heap,queue); + } + heap->page_count--; + page->next = NULL; + page->prev = NULL; + // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), NULL); + mi_page_set_in_full(page,false); +} + + +static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) { + mi_assert_internal(mi_page_heap(page) == heap); + mi_assert_internal(!mi_page_queue_contains(queue, page)); + #if MI_HUGE_PAGE_ABANDON + mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE); + #endif + mi_assert_internal(mi_page_block_size(page) == queue->block_size || + (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(queue)) || + (mi_page_is_in_full(page) && mi_page_queue_is_full(queue))); + + mi_page_set_in_full(page, mi_page_queue_is_full(queue)); + // mi_atomic_store_ptr_release(mi_atomic_cast(void*, &page->heap), heap); + page->next = queue->first; + page->prev = NULL; + if (queue->first != NULL) { + mi_assert_internal(queue->first->prev == NULL); + queue->first->prev = page; + queue->first = page; + } + else { + queue->first = queue->last = page; + } + + // update direct + mi_heap_queue_first_update(heap, queue); + heap->page_count++; +} + + +static void mi_page_queue_enqueue_from(mi_page_queue_t* to, mi_page_queue_t* from, mi_page_t* page) { + mi_assert_internal(page != NULL); + mi_assert_expensive(mi_page_queue_contains(from, page)); + mi_assert_expensive(!mi_page_queue_contains(to, page)); + const size_t bsize = 
mi_page_block_size(page); + MI_UNUSED(bsize); + mi_assert_internal((bsize == to->block_size && bsize == from->block_size) || + (bsize == to->block_size && mi_page_queue_is_full(from)) || + (bsize == from->block_size && mi_page_queue_is_full(to)) || + (mi_page_is_large_or_huge(page) && mi_page_queue_is_huge(to)) || + (mi_page_is_large_or_huge(page) && mi_page_queue_is_full(to))); + + mi_heap_t* heap = mi_page_heap(page); + if (page->prev != NULL) page->prev->next = page->next; + if (page->next != NULL) page->next->prev = page->prev; + if (page == from->last) from->last = page->prev; + if (page == from->first) { + from->first = page->next; + // update first + mi_assert_internal(mi_heap_contains_queue(heap, from)); + mi_heap_queue_first_update(heap, from); + } + + page->prev = to->last; + page->next = NULL; + if (to->last != NULL) { + mi_assert_internal(heap == mi_page_heap(to->last)); + to->last->next = page; + to->last = page; + } + else { + to->first = page; + to->last = page; + mi_heap_queue_first_update(heap, to); + } + + mi_page_set_in_full(page, mi_page_queue_is_full(to)); +} + +// Only called from `mi_heap_absorb`. +size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append) { + mi_assert_internal(mi_heap_contains_queue(heap,pq)); + mi_assert_internal(pq->block_size == append->block_size); + + if (append->first==NULL) return 0; + + // set append pages to new heap and count + size_t count = 0; + for (mi_page_t* page = append->first; page != NULL; page = page->next) { + // inline `mi_page_set_heap` to avoid wrong assertion during absorption; + // in this case it is ok to be delayed freeing since both "to" and "from" heap are still alive. + mi_atomic_store_release(&page->xheap, (uintptr_t)heap); + // set the flag to delayed free (not overriding NEVER_DELAYED_FREE) which has as a + // side effect that it spins until any DELAYED_FREEING is finished. This ensures + // that after appending only the new heap will be used for delayed free operations. + _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, false); + count++; + } + + if (pq->last==NULL) { + // take over afresh + mi_assert_internal(pq->first==NULL); + pq->first = append->first; + pq->last = append->last; + mi_heap_queue_first_update(heap, pq); + } + else { + // append to end + mi_assert_internal(pq->last!=NULL); + mi_assert_internal(append->first!=NULL); + pq->last->next = append->first; + append->first->prev = pq->last; + pq->last = append->last; + } + return count; +} diff --git a/ww/managers/mimalloc/src/page.c b/ww/managers/mimalloc/src/page.c new file mode 100644 index 00000000..871ed215 --- /dev/null +++ b/ww/managers/mimalloc/src/page.c @@ -0,0 +1,943 @@ +/*---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +/* ----------------------------------------------------------- + The core of the allocator. Every segment contains + pages of a certain block size. The main function + exported is `mi_malloc_generic`. 
+----------------------------------------------------------- */ + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" + +/* ----------------------------------------------------------- + Definition of page queues for each block size +----------------------------------------------------------- */ + +#define MI_IN_PAGE_C +#include "page-queue.c" +#undef MI_IN_PAGE_C + + +/* ----------------------------------------------------------- + Page helpers +----------------------------------------------------------- */ + +// Index a block in a page +static inline mi_block_t* mi_page_block_at(const mi_page_t* page, void* page_start, size_t block_size, size_t i) { + MI_UNUSED(page); + mi_assert_internal(page != NULL); + mi_assert_internal(i <= page->reserved); + return (mi_block_t*)((uint8_t*)page_start + (i * block_size)); +} + +static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t size, mi_tld_t* tld); +static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld); + +#if (MI_DEBUG>=3) +static size_t mi_page_list_count(mi_page_t* page, mi_block_t* head) { + size_t count = 0; + while (head != NULL) { + mi_assert_internal(page == _mi_ptr_page(head)); + count++; + head = mi_block_next(page, head); + } + return count; +} + +/* +// Start of the page available memory +static inline uint8_t* mi_page_area(const mi_page_t* page) { + return _mi_page_start(_mi_page_segment(page), page, NULL); +} +*/ + +static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) { + size_t psize; + uint8_t* page_area = _mi_segment_page_start(_mi_page_segment(page), page, &psize); + mi_block_t* start = (mi_block_t*)page_area; + mi_block_t* end = (mi_block_t*)(page_area + psize); + while(p != NULL) { + if (p < start || p >= end) return false; + p = mi_block_next(page, p); + } +#if MI_DEBUG>3 // generally too expensive to check this + if (page->free_is_zero) { + const size_t ubsize = mi_page_usable_block_size(page); + for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page, block)) { + mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t))); + } + } +#endif + return true; +} + +static bool mi_page_is_valid_init(mi_page_t* page) { + mi_assert_internal(mi_page_block_size(page) > 0); + mi_assert_internal(page->used <= page->capacity); + mi_assert_internal(page->capacity <= page->reserved); + + uint8_t* start = mi_page_start(page); + mi_assert_internal(start == _mi_segment_page_start(_mi_page_segment(page), page, NULL)); + mi_assert_internal(page->is_huge == (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE)); + //mi_assert_internal(start + page->capacity*page->block_size == page->top); + + mi_assert_internal(mi_page_list_is_valid(page,page->free)); + mi_assert_internal(mi_page_list_is_valid(page,page->local_free)); + + #if MI_DEBUG>3 // generally too expensive to check this + if (page->free_is_zero) { + const size_t ubsize = mi_page_usable_block_size(page); + for(mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) { + mi_assert_expensive(mi_mem_is_zero(block + 1, ubsize - sizeof(mi_block_t))); + } + } + #endif + + #if !MI_TRACK_ENABLED && !MI_TSAN + mi_block_t* tfree = mi_page_thread_free(page); + mi_assert_internal(mi_page_list_is_valid(page, tfree)); + //size_t tfree_count = mi_page_list_count(page, tfree); + //mi_assert_internal(tfree_count <= page->thread_freed + 1); + #endif + + size_t free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free); + 
mi_assert_internal(page->used + free_count == page->capacity); + + return true; +} + +extern bool _mi_process_is_initialized; // has mi_process_init been called? + +bool _mi_page_is_valid(mi_page_t* page) { + mi_assert_internal(mi_page_is_valid_init(page)); + #if MI_SECURE + mi_assert_internal(page->keys[0] != 0); + #endif + if (mi_page_heap(page)!=NULL) { + mi_segment_t* segment = _mi_page_segment(page); + + mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id); + #if MI_HUGE_PAGE_ABANDON + if (segment->kind != MI_SEGMENT_HUGE) + #endif + { + mi_page_queue_t* pq = mi_page_queue_of(page); + mi_assert_internal(mi_page_queue_contains(pq, page)); + mi_assert_internal(pq->block_size==mi_page_block_size(page) || mi_page_block_size(page) > MI_MEDIUM_OBJ_SIZE_MAX || mi_page_is_in_full(page)); + mi_assert_internal(mi_heap_contains_queue(mi_page_heap(page),pq)); + } + } + return true; +} +#endif + +void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) { + while (!_mi_page_try_use_delayed_free(page, delay, override_never)) { + mi_atomic_yield(); + } +} + +bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never) { + mi_thread_free_t tfreex; + mi_delayed_t old_delay; + mi_thread_free_t tfree; + size_t yield_count = 0; + do { + tfree = mi_atomic_load_acquire(&page->xthread_free); // note: must acquire as we can break/repeat this loop and not do a CAS; + tfreex = mi_tf_set_delayed(tfree, delay); + old_delay = mi_tf_delayed(tfree); + if mi_unlikely(old_delay == MI_DELAYED_FREEING) { + if (yield_count >= 4) return false; // give up after 4 tries + yield_count++; + mi_atomic_yield(); // delay until outstanding MI_DELAYED_FREEING are done. + // tfree = mi_tf_set_delayed(tfree, MI_NO_DELAYED_FREE); // will cause CAS to busy fail + } + else if (delay == old_delay) { + break; // avoid atomic operation if already equal + } + else if (!override_never && old_delay == MI_NEVER_DELAYED_FREE) { + break; // leave never-delayed flag set + } + } while ((old_delay == MI_DELAYED_FREEING) || + !mi_atomic_cas_weak_release(&page->xthread_free, &tfree, tfreex)); + + return true; // success +} + +/* ----------------------------------------------------------- + Page collect the `local_free` and `thread_free` lists +----------------------------------------------------------- */ + +// Collect the local `thread_free` list using an atomic exchange. +// Note: The exchange must be done atomically as this is used right after +// moving to the full list in `mi_page_collect_ex` and we need to +// ensure that there was no race where the page became unfull just before the move. 
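+// (Put differently: a plain load followed by a store could lose blocks pushed
+// concurrently by other threads; the CAS loop below detaches the whole
+// `xthread_free` list in one atomic step and leaves an empty list behind.)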
+static void _mi_page_thread_free_collect(mi_page_t* page) +{ + mi_block_t* head; + mi_thread_free_t tfreex; + mi_thread_free_t tfree = mi_atomic_load_relaxed(&page->xthread_free); + do { + head = mi_tf_block(tfree); + tfreex = mi_tf_set_block(tfree,NULL); + } while (!mi_atomic_cas_weak_acq_rel(&page->xthread_free, &tfree, tfreex)); + + // return if the list is empty + if (head == NULL) return; + + // find the tail -- also to get a proper count (without data races) + size_t max_count = page->capacity; // cannot collect more than capacity + size_t count = 1; + mi_block_t* tail = head; + mi_block_t* next; + while ((next = mi_block_next(page,tail)) != NULL && count <= max_count) { + count++; + tail = next; + } + // if `count > max_count` there was a memory corruption (possibly infinite list due to double multi-threaded free) + if (count > max_count) { + _mi_error_message(EFAULT, "corrupted thread-free list\n"); + return; // the thread-free items cannot be freed + } + + // and append the current local free list + mi_block_set_next(page,tail, page->local_free); + page->local_free = head; + + // update counts now + page->used -= (uint16_t)count; +} + +void _mi_page_free_collect(mi_page_t* page, bool force) { + mi_assert_internal(page!=NULL); + + // collect the thread free list + if (force || mi_page_thread_free(page) != NULL) { // quick test to avoid an atomic operation + _mi_page_thread_free_collect(page); + } + + // and the local free list + if (page->local_free != NULL) { + if mi_likely(page->free == NULL) { + // usual case + page->free = page->local_free; + page->local_free = NULL; + page->free_is_zero = false; + } + else if (force) { + // append -- only on shutdown (force) as this is a linear operation + mi_block_t* tail = page->local_free; + mi_block_t* next; + while ((next = mi_block_next(page, tail)) != NULL) { + tail = next; + } + mi_block_set_next(page, tail, page->free); + page->free = page->local_free; + page->local_free = NULL; + page->free_is_zero = false; + } + } + + mi_assert_internal(!force || page->local_free == NULL); +} + + + +/* ----------------------------------------------------------- + Page fresh and retire +----------------------------------------------------------- */ + +// called from segments when reclaiming abandoned pages +void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) { + mi_assert_expensive(mi_page_is_valid_init(page)); + + mi_assert_internal(mi_page_heap(page) == heap); + mi_assert_internal(mi_page_thread_free_flag(page) != MI_NEVER_DELAYED_FREE); + #if MI_HUGE_PAGE_ABANDON + mi_assert_internal(_mi_page_segment(page)->kind != MI_SEGMENT_HUGE); + #endif + + // TODO: push on full queue immediately if it is full? 
+ mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page)); + mi_page_queue_push(heap, pq, page); + mi_assert_expensive(_mi_page_is_valid(page)); +} + +// allocate a fresh page from a segment +static mi_page_t* mi_page_fresh_alloc(mi_heap_t* heap, mi_page_queue_t* pq, size_t block_size, size_t page_alignment) { + #if !MI_HUGE_PAGE_ABANDON + mi_assert_internal(pq != NULL); + mi_assert_internal(mi_heap_contains_queue(heap, pq)); + mi_assert_internal(page_alignment > 0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || block_size == pq->block_size); + #endif + mi_page_t* page = _mi_segment_page_alloc(heap, block_size, page_alignment, &heap->tld->segments, &heap->tld->os); + if (page == NULL) { + // this may be out-of-memory, or an abandoned page was reclaimed (and in our queue) + return NULL; + } + #if MI_HUGE_PAGE_ABANDON + mi_assert_internal(pq==NULL || _mi_page_segment(page)->page_kind != MI_PAGE_HUGE); + #endif + mi_assert_internal(page_alignment >0 || block_size > MI_MEDIUM_OBJ_SIZE_MAX || _mi_page_segment(page)->kind != MI_SEGMENT_HUGE); + mi_assert_internal(pq!=NULL || mi_page_block_size(page) >= block_size); + // a fresh page was found, initialize it + const size_t full_block_size = (pq == NULL || mi_page_is_huge(page) ? mi_page_block_size(page) : block_size); // see also: mi_segment_huge_page_alloc + mi_assert_internal(full_block_size >= block_size); + mi_page_init(heap, page, full_block_size, heap->tld); + mi_heap_stat_increase(heap, pages, 1); + if (pq != NULL) { mi_page_queue_push(heap, pq, page); } + mi_assert_expensive(_mi_page_is_valid(page)); + return page; +} + +// Get a fresh page to use +static mi_page_t* mi_page_fresh(mi_heap_t* heap, mi_page_queue_t* pq) { + mi_assert_internal(mi_heap_contains_queue(heap, pq)); + mi_page_t* page = mi_page_fresh_alloc(heap, pq, pq->block_size, 0); + if (page==NULL) return NULL; + mi_assert_internal(pq->block_size==mi_page_block_size(page)); + mi_assert_internal(pq==mi_page_queue(heap, mi_page_block_size(page))); + return page; +} + +/* ----------------------------------------------------------- + Do any delayed frees + (put there by other threads if they deallocated in a full page) +----------------------------------------------------------- */ +void _mi_heap_delayed_free_all(mi_heap_t* heap) { + while (!_mi_heap_delayed_free_partial(heap)) { + mi_atomic_yield(); + } +} + +// returns true if all delayed frees were processed +bool _mi_heap_delayed_free_partial(mi_heap_t* heap) { + // take over the list (note: no atomic exchange since it is often NULL) + mi_block_t* block = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free); + while (block != NULL && !mi_atomic_cas_ptr_weak_acq_rel(mi_block_t, &heap->thread_delayed_free, &block, NULL)) { /* nothing */ }; + bool all_freed = true; + + // and free them all + while(block != NULL) { + mi_block_t* next = mi_block_nextx(heap,block, heap->keys); + // use internal free instead of regular one to keep stats etc correct + if (!_mi_free_delayed_block(block)) { + // we might already start delayed freeing while another thread has not yet + // reset the delayed_freeing flag; in that case delay it further by reinserting the current block + // into the delayed free list + all_freed = false; + mi_block_t* dfree = mi_atomic_load_ptr_relaxed(mi_block_t, &heap->thread_delayed_free); + do { + mi_block_set_nextx(heap, block, dfree, heap->keys); + } while (!mi_atomic_cas_ptr_weak_release(mi_block_t,&heap->thread_delayed_free, &dfree, block)); + } + block = next; + } + return all_freed; +} + +/* 
----------------------------------------------------------- + Unfull, abandon, free and retire +----------------------------------------------------------- */ + +// Move a page from the full list back to a regular list +void _mi_page_unfull(mi_page_t* page) { + mi_assert_internal(page != NULL); + mi_assert_expensive(_mi_page_is_valid(page)); + mi_assert_internal(mi_page_is_in_full(page)); + if (!mi_page_is_in_full(page)) return; + + mi_heap_t* heap = mi_page_heap(page); + mi_page_queue_t* pqfull = &heap->pages[MI_BIN_FULL]; + mi_page_set_in_full(page, false); // to get the right queue + mi_page_queue_t* pq = mi_heap_page_queue_of(heap, page); + mi_page_set_in_full(page, true); + mi_page_queue_enqueue_from(pq, pqfull, page); +} + +static void mi_page_to_full(mi_page_t* page, mi_page_queue_t* pq) { + mi_assert_internal(pq == mi_page_queue_of(page)); + mi_assert_internal(!mi_page_immediate_available(page)); + mi_assert_internal(!mi_page_is_in_full(page)); + + if (mi_page_is_in_full(page)) return; + mi_page_queue_enqueue_from(&mi_page_heap(page)->pages[MI_BIN_FULL], pq, page); + _mi_page_free_collect(page,false); // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set +} + + +// Abandon a page with used blocks at the end of a thread. +// Note: only call if it is ensured that no references exist from +// the `page->heap->thread_delayed_free` into this page. +// Currently only called through `mi_heap_collect_ex` which ensures this. +void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) { + mi_assert_internal(page != NULL); + mi_assert_expensive(_mi_page_is_valid(page)); + mi_assert_internal(pq == mi_page_queue_of(page)); + mi_assert_internal(mi_page_heap(page) != NULL); + + mi_heap_t* pheap = mi_page_heap(page); + + // remove from our page list + mi_segments_tld_t* segments_tld = &pheap->tld->segments; + mi_page_queue_remove(pq, page); + + // page is no longer associated with our heap + mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE); + mi_page_set_heap(page, NULL); + +#if (MI_DEBUG>1) && !MI_TRACK_ENABLED + // check there are no references left.. + for (mi_block_t* block = (mi_block_t*)pheap->thread_delayed_free; block != NULL; block = mi_block_nextx(pheap, block, pheap->keys)) { + mi_assert_internal(_mi_ptr_page(block) != page); + } +#endif + + // and abandon it + mi_assert_internal(mi_page_heap(page) == NULL); + _mi_segment_page_abandon(page,segments_tld); +} + + +// Free a page with no more free blocks +void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) { + mi_assert_internal(page != NULL); + mi_assert_expensive(_mi_page_is_valid(page)); + mi_assert_internal(pq == mi_page_queue_of(page)); + mi_assert_internal(mi_page_all_free(page)); + mi_assert_internal(mi_page_thread_free_flag(page)!=MI_DELAYED_FREEING); + + // no more aligned blocks in here + mi_page_set_has_aligned(page, false); + + mi_heap_t* heap = mi_page_heap(page); + + // remove from the page list + // (no need to do _mi_heap_delayed_free first as all blocks are already free) + mi_segments_tld_t* segments_tld = &heap->tld->segments; + mi_page_queue_remove(pq, page); + + // and free it + mi_page_set_heap(page,NULL); + _mi_segment_page_free(page, force, segments_tld); +} + +#define MI_MAX_RETIRE_SIZE MI_MEDIUM_OBJ_SIZE_MAX // should be less than size for MI_BIN_HUGE +#define MI_RETIRE_CYCLES (16) + +// Retire a page with no more used blocks +// Important to not retire too quickly though as new +// allocations might coming. 
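+// (Concretely, when the page is the only one in its queue the code below only
+// sets a small `retire_expire` countdown instead of freeing it; the page is
+// freed later by `_mi_heap_collect_retired` once that countdown reaches zero.)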
+// Note: called from `mi_free` and benchmarks often +// trigger this due to freeing everything and then +// allocating again so careful when changing this. +void _mi_page_retire(mi_page_t* page) mi_attr_noexcept { + mi_assert_internal(page != NULL); + mi_assert_expensive(_mi_page_is_valid(page)); + mi_assert_internal(mi_page_all_free(page)); + + mi_page_set_has_aligned(page, false); + + // don't retire too often.. + // (or we end up retiring and re-allocating most of the time) + // NOTE: refine this more: we should not retire if this + // is the only page left with free blocks. It is not clear + // how to check this efficiently though... + // for now, we don't retire if it is the only page left of this size class. + mi_page_queue_t* pq = mi_page_queue_of(page); + const size_t bsize = mi_page_block_size(page); + if mi_likely( /* bsize < MI_MAX_RETIRE_SIZE && */ !mi_page_queue_is_special(pq)) { // not full or huge queue? + if (pq->last==page && pq->first==page) { // the only page in the queue? + mi_stat_counter_increase(_mi_stats_main.page_no_retire,1); + page->retire_expire = (bsize <= MI_SMALL_OBJ_SIZE_MAX ? MI_RETIRE_CYCLES : MI_RETIRE_CYCLES/4); + mi_heap_t* heap = mi_page_heap(page); + mi_assert_internal(pq >= heap->pages); + const size_t index = pq - heap->pages; + mi_assert_internal(index < MI_BIN_FULL && index < MI_BIN_HUGE); + if (index < heap->page_retired_min) heap->page_retired_min = index; + if (index > heap->page_retired_max) heap->page_retired_max = index; + mi_assert_internal(mi_page_all_free(page)); + return; // don't free after all + } + } + _mi_page_free(page, pq, false); +} + +// free retired pages: we don't need to look at the entire queues +// since we only retire pages that are at the head position in a queue. +void _mi_heap_collect_retired(mi_heap_t* heap, bool force) { + size_t min = MI_BIN_FULL; + size_t max = 0; + for(size_t bin = heap->page_retired_min; bin <= heap->page_retired_max; bin++) { + mi_page_queue_t* pq = &heap->pages[bin]; + mi_page_t* page = pq->first; + if (page != NULL && page->retire_expire != 0) { + if (mi_page_all_free(page)) { + page->retire_expire--; + if (force || page->retire_expire == 0) { + _mi_page_free(pq->first, pq, force); + } + else { + // keep retired, update min/max + if (bin < min) min = bin; + if (bin > max) max = bin; + } + } + else { + page->retire_expire = 0; + } + } + } + heap->page_retired_min = min; + heap->page_retired_max = max; +} + + +/* ----------------------------------------------------------- + Initialize the initial free list in a page. + In secure mode we initialize a randomized list by + alternating between slices. 
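+  For intuition: with e.g. `extend == 256` blocks, mi_page_free_list_extend_secure
+  below uses 64 slices of 4 blocks each and threads the free list by repeatedly
+  hopping to a pseudo-randomly chosen slice, so consecutive list entries are
+  rarely adjacent in memory (the numbers here are illustrative).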
+----------------------------------------------------------- */ + +#define MI_MAX_SLICE_SHIFT (6) // at most 64 slices +#define MI_MAX_SLICES (1UL << MI_MAX_SLICE_SHIFT) +#define MI_MIN_SLICES (2) + +static void mi_page_free_list_extend_secure(mi_heap_t* const heap, mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) { + MI_UNUSED(stats); + #if (MI_SECURE<=2) + mi_assert_internal(page->free == NULL); + mi_assert_internal(page->local_free == NULL); + #endif + mi_assert_internal(page->capacity + extend <= page->reserved); + mi_assert_internal(bsize == mi_page_block_size(page)); + void* const page_area = mi_page_start(page); + + // initialize a randomized free list + // set up `slice_count` slices to alternate between + size_t shift = MI_MAX_SLICE_SHIFT; + while ((extend >> shift) == 0) { + shift--; + } + const size_t slice_count = (size_t)1U << shift; + const size_t slice_extend = extend / slice_count; + mi_assert_internal(slice_extend >= 1); + mi_block_t* blocks[MI_MAX_SLICES]; // current start of the slice + size_t counts[MI_MAX_SLICES]; // available objects in the slice + for (size_t i = 0; i < slice_count; i++) { + blocks[i] = mi_page_block_at(page, page_area, bsize, page->capacity + i*slice_extend); + counts[i] = slice_extend; + } + counts[slice_count-1] += (extend % slice_count); // final slice holds the modulus too (todo: distribute evenly?) + + // and initialize the free list by randomly threading through them + // set up first element + const uintptr_t r = _mi_heap_random_next(heap); + size_t current = r % slice_count; + counts[current]--; + mi_block_t* const free_start = blocks[current]; + // and iterate through the rest; use `random_shuffle` for performance + uintptr_t rnd = _mi_random_shuffle(r|1); // ensure not 0 + for (size_t i = 1; i < extend; i++) { + // call random_shuffle only every INTPTR_SIZE rounds + const size_t round = i%MI_INTPTR_SIZE; + if (round == 0) rnd = _mi_random_shuffle(rnd); + // select a random next slice index + size_t next = ((rnd >> 8*round) & (slice_count-1)); + while (counts[next]==0) { // ensure it still has space + next++; + if (next==slice_count) next = 0; + } + // and link the current block to it + counts[next]--; + mi_block_t* const block = blocks[current]; + blocks[current] = (mi_block_t*)((uint8_t*)block + bsize); // bump to the following block + mi_block_set_next(page, block, blocks[next]); // and set next; note: we may have `current == next` + current = next; + } + // prepend to the free list (usually NULL) + mi_block_set_next(page, blocks[current], page->free); // end of the list + page->free = free_start; +} + +static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) +{ + MI_UNUSED(stats); + #if (MI_SECURE <= 2) + mi_assert_internal(page->free == NULL); + mi_assert_internal(page->local_free == NULL); + #endif + mi_assert_internal(page->capacity + extend <= page->reserved); + mi_assert_internal(bsize == mi_page_block_size(page)); + void* const page_area = mi_page_start(page); + + mi_block_t* const start = mi_page_block_at(page, page_area, bsize, page->capacity); + + // initialize a sequential free list + mi_block_t* const last = mi_page_block_at(page, page_area, bsize, page->capacity + extend - 1); + mi_block_t* block = start; + while(block <= last) { + mi_block_t* next = (mi_block_t*)((uint8_t*)block + bsize); + mi_block_set_next(page,block,next); + block = next; + } + // prepend to free list (usually `NULL`) + 
mi_block_set_next(page, last, page->free); + page->free = start; +} + +/* ----------------------------------------------------------- + Page initialize and extend the capacity +----------------------------------------------------------- */ + +#define MI_MAX_EXTEND_SIZE (4*1024) // heuristic, one OS page seems to work well. +#if (MI_SECURE>0) +#define MI_MIN_EXTEND (8*MI_SECURE) // extend at least by this many +#else +#define MI_MIN_EXTEND (4) +#endif + +// Extend the capacity (up to reserved) by initializing a free list +// We do at most `MI_MAX_EXTEND` to avoid touching too much memory +// Note: we also experimented with "bump" allocation on the first +// allocations but this did not speed up any benchmark (due to an +// extra test in malloc? or cache effects?) +static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) { + MI_UNUSED(tld); + mi_assert_expensive(mi_page_is_valid_init(page)); + #if (MI_SECURE<=2) + mi_assert(page->free == NULL); + mi_assert(page->local_free == NULL); + if (page->free != NULL) return; + #endif + if (page->capacity >= page->reserved) return; + + mi_stat_counter_increase(tld->stats.pages_extended, 1); + + // calculate the extend count + const size_t bsize = mi_page_block_size(page); + size_t extend = page->reserved - page->capacity; + mi_assert_internal(extend > 0); + + size_t max_extend = (bsize >= MI_MAX_EXTEND_SIZE ? MI_MIN_EXTEND : MI_MAX_EXTEND_SIZE/bsize); + if (max_extend < MI_MIN_EXTEND) { max_extend = MI_MIN_EXTEND; } + mi_assert_internal(max_extend > 0); + + if (extend > max_extend) { + // ensure we don't touch memory beyond the page to reduce page commit. + // the `lean` benchmark tests this. Going from 1 to 8 increases rss by 50%. + extend = max_extend; + } + + mi_assert_internal(extend > 0 && extend + page->capacity <= page->reserved); + mi_assert_internal(extend < (1UL<<16)); + + // and append the extend the free list + if (extend < MI_MIN_SLICES || MI_SECURE==0) { //!mi_option_is_enabled(mi_option_secure)) { + mi_page_free_list_extend(page, bsize, extend, &tld->stats ); + } + else { + mi_page_free_list_extend_secure(heap, page, bsize, extend, &tld->stats); + } + // enable the new free list + page->capacity += (uint16_t)extend; + mi_stat_increase(tld->stats.page_committed, extend * bsize); + mi_assert_expensive(mi_page_is_valid_init(page)); +} + +// Initialize a fresh page +static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi_tld_t* tld) { + mi_assert(page != NULL); + mi_segment_t* segment = _mi_page_segment(page); + mi_assert(segment != NULL); + mi_assert_internal(block_size > 0); + // set fields + mi_page_set_heap(page, heap); + page->block_size = block_size; + size_t page_size; + page->page_start = _mi_segment_page_start(segment, page, &page_size); + mi_track_mem_noaccess(page->page_start,page_size); + mi_assert_internal(mi_page_block_size(page) <= page_size); + mi_assert_internal(page_size <= page->slice_count*MI_SEGMENT_SLICE_SIZE); + mi_assert_internal(page_size / block_size < (1L<<16)); + page->reserved = (uint16_t)(page_size / block_size); + mi_assert_internal(page->reserved > 0); + #if (MI_PADDING || MI_ENCODE_FREELIST) + page->keys[0] = _mi_heap_random_next(heap); + page->keys[1] = _mi_heap_random_next(heap); + #endif + page->free_is_zero = page->is_zero_init; + #if MI_DEBUG>2 + if (page->is_zero_init) { + mi_track_mem_defined(page->page_start, page_size); + mi_assert_expensive(mi_mem_is_zero(page->page_start, page_size)); + } + #endif + mi_assert_internal(page->is_committed); + if 
(block_size > 0 && _mi_is_power_of_two(block_size)) { + page->block_size_shift = (uint8_t)(mi_ctz((uintptr_t)block_size)); + } + else { + page->block_size_shift = 0; + } + + mi_assert_internal(page->capacity == 0); + mi_assert_internal(page->free == NULL); + mi_assert_internal(page->used == 0); + mi_assert_internal(page->xthread_free == 0); + mi_assert_internal(page->next == NULL); + mi_assert_internal(page->prev == NULL); + mi_assert_internal(page->retire_expire == 0); + mi_assert_internal(!mi_page_has_aligned(page)); + #if (MI_PADDING || MI_ENCODE_FREELIST) + mi_assert_internal(page->keys[0] != 0); + mi_assert_internal(page->keys[1] != 0); + #endif + mi_assert_internal(page->block_size_shift == 0 || (block_size == ((size_t)1 << page->block_size_shift))); + mi_assert_expensive(mi_page_is_valid_init(page)); + + // initialize an initial free list + mi_page_extend_free(heap,page,tld); + mi_assert(mi_page_immediate_available(page)); +} + + +/* ----------------------------------------------------------- + Find pages with free blocks +-------------------------------------------------------------*/ + +// Find a page with free blocks of `page->block_size`. +static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try) +{ + // search through the pages in "next fit" order + #if MI_STAT + size_t count = 0; + #endif + mi_page_t* page = pq->first; + while (page != NULL) + { + mi_page_t* next = page->next; // remember next + #if MI_STAT + count++; + #endif + + // 0. collect freed blocks by us and other threads + _mi_page_free_collect(page, false); + + // 1. if the page contains free blocks, we are done + if (mi_page_immediate_available(page)) { + break; // pick this one + } + + // 2. Try to extend + if (page->capacity < page->reserved) { + mi_page_extend_free(heap, page, heap->tld); + mi_assert_internal(mi_page_immediate_available(page)); + break; + } + + // 3. If the page is completely full, move it to the `mi_pages_full` + // queue so we don't visit long-lived pages too often. + mi_assert_internal(!mi_page_is_in_full(page) && !mi_page_immediate_available(page)); + mi_page_to_full(page, pq); + + page = next; + } // for each page + + mi_heap_stat_counter_increase(heap, searches, count); + + if (page == NULL) { + _mi_heap_collect_retired(heap, false); // perhaps make a page available? + page = mi_page_fresh(heap, pq); + if (page == NULL && first_try) { + // out-of-memory _or_ an abandoned page with free blocks was reclaimed, try once again + page = mi_page_queue_find_free_ex(heap, pq, false); + } + } + else { + mi_assert(pq->first == page); + page->retire_expire = 0; + } + mi_assert_internal(page == NULL || mi_page_immediate_available(page)); + return page; +} + + + +// Find a page with free blocks of `size`. 
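+// (Fast path: the first page in the matching size queue usually still has an
+// immediately available block after collecting its free lists; only when it
+// does not do we fall back to the full search in `mi_page_queue_find_free_ex`.)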
+static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) { + mi_page_queue_t* pq = mi_page_queue(heap,size); + mi_page_t* page = pq->first; + if (page != NULL) { + #if (MI_SECURE>=3) // in secure mode, we extend half the time to increase randomness + if (page->capacity < page->reserved && ((_mi_heap_random_next(heap) & 1) == 1)) { + mi_page_extend_free(heap, page, heap->tld); + mi_assert_internal(mi_page_immediate_available(page)); + } + else + #endif + { + _mi_page_free_collect(page,false); + } + + if (mi_page_immediate_available(page)) { + page->retire_expire = 0; + return page; // fast path + } + } + return mi_page_queue_find_free_ex(heap, pq, true); +} + + +/* ----------------------------------------------------------- + Users can register a deferred free function called + when the `free` list is empty. Since the `local_free` + is separate this is deterministically called after + a certain number of allocations. +----------------------------------------------------------- */ + +static mi_deferred_free_fun* volatile deferred_free = NULL; +static _Atomic(void*) deferred_arg; // = NULL + +void _mi_deferred_free(mi_heap_t* heap, bool force) { + heap->tld->heartbeat++; + if (deferred_free != NULL && !heap->tld->recurse) { + heap->tld->recurse = true; + deferred_free(force, heap->tld->heartbeat, mi_atomic_load_ptr_relaxed(void,&deferred_arg)); + heap->tld->recurse = false; + } +} + +void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noexcept { + deferred_free = fn; + mi_atomic_store_ptr_release(void,&deferred_arg, arg); +} + + +/* ----------------------------------------------------------- + General allocation +----------------------------------------------------------- */ + +// Large and huge page allocation. +// Huge pages contain just one block, and the segment contains just that page (as `MI_SEGMENT_HUGE`). +// Huge pages are also use if the requested alignment is very large (> MI_BLOCK_ALIGNMENT_MAX) +// so their size is not always `> MI_LARGE_OBJ_SIZE_MAX`. +static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size, size_t page_alignment) { + size_t block_size = _mi_os_good_alloc_size(size); + mi_assert_internal(mi_bin(block_size) == MI_BIN_HUGE || page_alignment > 0); + bool is_huge = (block_size > MI_LARGE_OBJ_SIZE_MAX || page_alignment > 0); + #if MI_HUGE_PAGE_ABANDON + mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size)); + #else + mi_page_queue_t* pq = mi_page_queue(heap, is_huge ? 
MI_LARGE_OBJ_SIZE_MAX+1 : block_size); + mi_assert_internal(!is_huge || mi_page_queue_is_huge(pq)); + #endif + mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size, page_alignment); + if (page != NULL) { + mi_assert_internal(mi_page_immediate_available(page)); + + if (is_huge) { + mi_assert_internal(mi_page_is_huge(page)); + mi_assert_internal(_mi_page_segment(page)->kind == MI_SEGMENT_HUGE); + mi_assert_internal(_mi_page_segment(page)->used==1); + #if MI_HUGE_PAGE_ABANDON + mi_assert_internal(_mi_page_segment(page)->thread_id==0); // abandoned, not in the huge queue + mi_page_set_heap(page, NULL); + #endif + } + else { + mi_assert_internal(!mi_page_is_huge(page)); + } + + const size_t bsize = mi_page_usable_block_size(page); // note: not `mi_page_block_size` to account for padding + if (bsize <= MI_LARGE_OBJ_SIZE_MAX) { + mi_heap_stat_increase(heap, large, bsize); + mi_heap_stat_counter_increase(heap, large_count, 1); + } + else { + mi_heap_stat_increase(heap, huge, bsize); + mi_heap_stat_counter_increase(heap, huge_count, 1); + } + } + return page; +} + + +// Allocate a page +// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed. +static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignment) mi_attr_noexcept { + // huge allocation? + const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` + if mi_unlikely(req_size > (MI_MEDIUM_OBJ_SIZE_MAX - MI_PADDING_SIZE) || huge_alignment > 0) { + if mi_unlikely(req_size > MI_MAX_ALLOC_SIZE) { + _mi_error_message(EOVERFLOW, "allocation request is too large (%zu bytes)\n", req_size); + return NULL; + } + else { + return mi_large_huge_page_alloc(heap,size,huge_alignment); + } + } + else { + // otherwise find a page with free blocks in our size segregated queues + #if MI_PADDING + mi_assert_internal(size >= MI_PADDING_SIZE); + #endif + return mi_find_free_page(heap, size); + } +} + +// Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed. +// Note: in debug mode the size includes MI_PADDING_SIZE and might have overflowed. +// The `huge_alignment` is normally 0 but is set to a multiple of MI_SEGMENT_SIZE for +// very large requested alignments in which case we use a huge segment. 
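+// A rough sketch of the overall call chain, assuming the default configuration:
+//   mi_malloc(size)
+//     -> fast path in `alloc.c:mi_page_malloc` (pop a block from `page->free`)
+//     -> `_mi_malloc_generic` below when that free list is empty:
+//        run deferred/delayed frees, find or allocate a suitable page,
+//        then retry the page-level allocation.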
+void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept +{ + mi_assert_internal(heap != NULL); + + // initialize if necessary + if mi_unlikely(!mi_heap_is_initialized(heap)) { + heap = mi_heap_get_default(); // calls mi_thread_init + if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; } + } + mi_assert_internal(mi_heap_is_initialized(heap)); + + // call potential deferred free routines + _mi_deferred_free(heap, false); + + // free delayed frees from other threads (but skip contended ones) + _mi_heap_delayed_free_partial(heap); + + // find (or allocate) a page of the right size + mi_page_t* page = mi_find_page(heap, size, huge_alignment); + if mi_unlikely(page == NULL) { // first time out of memory, try to collect and retry the allocation once more + mi_heap_collect(heap, true /* force */); + page = mi_find_page(heap, size, huge_alignment); + } + + if mi_unlikely(page == NULL) { // out of memory + const size_t req_size = size - MI_PADDING_SIZE; // correct for padding_size in case of an overflow on `size` + _mi_error_message(ENOMEM, "unable to allocate memory (%zu bytes)\n", req_size); + return NULL; + } + + mi_assert_internal(mi_page_immediate_available(page)); + mi_assert_internal(mi_page_block_size(page) >= size); + + // and try again, this time succeeding! (i.e. this should never recurse through _mi_page_malloc) + if mi_unlikely(zero && page->block_size == 0) { + // note: we cannot call _mi_page_malloc with zeroing for huge blocks; we zero it afterwards in that case. + void* p = _mi_page_malloc(heap, page, size); + mi_assert_internal(p != NULL); + _mi_memzero_aligned(p, mi_page_usable_block_size(page)); + return p; + } + else { + return _mi_page_malloc_zero(heap, page, size, zero); + } +} diff --git a/ww/managers/mimalloc/src/prim/emscripten/prim.c b/ww/managers/mimalloc/src/prim/emscripten/prim.c new file mode 100644 index 00000000..f3797c9e --- /dev/null +++ b/ww/managers/mimalloc/src/prim/emscripten/prim.c @@ -0,0 +1,244 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen, Alon Zakai +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// This file is included in `src/prim/prim.c` + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" + +// Design +// ====== +// +// mimalloc is built on top of emmalloc. emmalloc is a minimal allocator on top +// of sbrk. The reason for having three layers here is that we want mimalloc to +// be able to allocate and release system memory properly, the same way it would +// when using VirtualAlloc on Windows or mmap on POSIX, and sbrk is too limited. +// Specifically, sbrk can only go up and down, and not "skip" over regions, and +// so we end up either never freeing memory to the system, or we can get stuck +// with holes. +// +// Atm wasm generally does *not* free memory back the system: once grown, we do +// not shrink back down (https://github.com/WebAssembly/design/issues/1397). +// However, that is expected to improve +// (https://github.com/WebAssembly/memory-control/blob/main/proposals/memory-control/Overview.md) +// and so we do not want to bake those limitations in here. 
+// +// Even without that issue, we want our system allocator to handle holes, that +// is, it should merge freed regions and allow allocating new content there of +// the full size, etc., so that we do not waste space. That means that the +// system allocator really does need to handle the general problem of allocating +// and freeing variable-sized chunks of memory in a random order, like malloc/ +// free do. And so it makes sense to layer mimalloc on top of such an +// implementation. +// +// emmalloc makes sense for the lower level because it is small and simple while +// still fully handling merging of holes etc. It is not the most efficient +// allocator, but our assumption is that mimalloc needs to be fast while the +// system allocator underneath it is called much less frequently. +// + +//--------------------------------------------- +// init +//--------------------------------------------- + +void _mi_prim_mem_init( mi_os_mem_config_t* config) { + config->page_size = 64*MI_KiB; // WebAssembly has a fixed page size: 64KiB + config->alloc_granularity = 16; + config->has_overcommit = false; + config->has_partial_free = false; + config->has_virtual_reserve = false; +} + +extern void emmalloc_free(void*); + +int _mi_prim_free(void* addr, size_t size) { + MI_UNUSED(size); + emmalloc_free(addr); + return 0; +} + + +//--------------------------------------------- +// Allocation +//--------------------------------------------- + +extern void* emmalloc_memalign(size_t alignment, size_t size); + +// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. +int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { + MI_UNUSED(try_alignment); MI_UNUSED(allow_large); MI_UNUSED(commit); + *is_large = false; + // TODO: Track the highest address ever seen; first uses of it are zeroes. + // That assumes no one else uses sbrk but us (they could go up, + // scribble, and then down), but we could assert on that perhaps. + *is_zero = false; + // emmalloc has a minimum alignment size. + #define MIN_EMMALLOC_ALIGN 8 + if (try_alignment < MIN_EMMALLOC_ALIGN) { + try_alignment = MIN_EMMALLOC_ALIGN; + } + void* p = emmalloc_memalign(try_alignment, size); + *addr = p; + if (p == 0) { + return ENOMEM; + } + return 0; +} + + +//--------------------------------------------- +// Commit/Reset +//--------------------------------------------- + +int _mi_prim_commit(void* addr, size_t size, bool* is_zero) { + MI_UNUSED(addr); MI_UNUSED(size); + // See TODO above. 
+ *is_zero = false; + return 0; +} + +int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) { + MI_UNUSED(addr); MI_UNUSED(size); + *needs_recommit = false; + return 0; +} + +int _mi_prim_reset(void* addr, size_t size) { + MI_UNUSED(addr); MI_UNUSED(size); + return 0; +} + +int _mi_prim_protect(void* addr, size_t size, bool protect) { + MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(protect); + return 0; +} + + +//--------------------------------------------- +// Huge pages and NUMA nodes +//--------------------------------------------- + +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) { + MI_UNUSED(hint_addr); MI_UNUSED(size); MI_UNUSED(numa_node); + *is_zero = true; + *addr = NULL; + return ENOSYS; +} + +size_t _mi_prim_numa_node(void) { + return 0; +} + +size_t _mi_prim_numa_node_count(void) { + return 1; +} + + +//---------------------------------------------------------------- +// Clock +//---------------------------------------------------------------- + +#include + +mi_msecs_t _mi_prim_clock_now(void) { + return emscripten_date_now(); +} + + +//---------------------------------------------------------------- +// Process info +//---------------------------------------------------------------- + +void _mi_prim_process_info(mi_process_info_t* pinfo) +{ + // use defaults + MI_UNUSED(pinfo); +} + + +//---------------------------------------------------------------- +// Output +//---------------------------------------------------------------- + +#include + +void _mi_prim_out_stderr( const char* msg) { + emscripten_console_error(msg); +} + + +//---------------------------------------------------------------- +// Environment +//---------------------------------------------------------------- + +bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { + // For code size reasons, do not support environ customization for now. 
+ MI_UNUSED(name); + MI_UNUSED(result); + MI_UNUSED(result_size); + return false; +} + + +//---------------------------------------------------------------- +// Random +//---------------------------------------------------------------- + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + int err = getentropy(buf, buf_len); + return !err; +} + + +//---------------------------------------------------------------- +// Thread init/done +//---------------------------------------------------------------- + +#ifdef __EMSCRIPTEN_SHARED_MEMORY__ + +// use pthread local storage keys to detect thread ending +// (and used with MI_TLS_PTHREADS for the default heap) +pthread_key_t _mi_heap_default_key = (pthread_key_t)(-1); + +static void mi_pthread_done(void* value) { + if (value!=NULL) { + _mi_thread_done((mi_heap_t*)value); + } +} + +void _mi_prim_thread_init_auto_done(void) { + mi_assert_internal(_mi_heap_default_key == (pthread_key_t)(-1)); + pthread_key_create(&_mi_heap_default_key, &mi_pthread_done); +} + +void _mi_prim_thread_done_auto_done(void) { + // nothing to do +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + if (_mi_heap_default_key != (pthread_key_t)(-1)) { // can happen during recursive invocation on freeBSD + pthread_setspecific(_mi_heap_default_key, heap); + } +} + +#else + +void _mi_prim_thread_init_auto_done(void) { + // nothing +} + +void _mi_prim_thread_done_auto_done(void) { + // nothing +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + MI_UNUSED(heap); + +} +#endif diff --git a/ww/managers/mimalloc/src/prim/osx/alloc-override-zone.c b/ww/managers/mimalloc/src/prim/osx/alloc-override-zone.c new file mode 100644 index 00000000..1515b886 --- /dev/null +++ b/ww/managers/mimalloc/src/prim/osx/alloc-override-zone.c @@ -0,0 +1,461 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2022, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +#include "mimalloc.h" +#include "mimalloc/internal.h" + +#if defined(MI_MALLOC_OVERRIDE) + +#if !defined(__APPLE__) +#error "this file should only be included on macOS" +#endif + +/* ------------------------------------------------------ + Override system malloc on macOS + This is done through the malloc zone interface. + It seems to be most robust in combination with interposing + though or otherwise we may get zone errors as there are could + be allocations done by the time we take over the + zone. 
+------------------------------------------------------ */ + +#include +#include +#include // memset +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6) +// only available from OSX 10.6 +extern malloc_zone_t* malloc_default_purgeable_zone(void) __attribute__((weak_import)); +#endif + +/* ------------------------------------------------------ + malloc zone members +------------------------------------------------------ */ + +static size_t zone_size(malloc_zone_t* zone, const void* p) { + MI_UNUSED(zone); + if (!mi_is_in_heap_region(p)){ return 0; } // not our pointer, bail out + return mi_usable_size(p); +} + +static void* zone_malloc(malloc_zone_t* zone, size_t size) { + MI_UNUSED(zone); + return mi_malloc(size); +} + +static void* zone_calloc(malloc_zone_t* zone, size_t count, size_t size) { + MI_UNUSED(zone); + return mi_calloc(count, size); +} + +static void* zone_valloc(malloc_zone_t* zone, size_t size) { + MI_UNUSED(zone); + return mi_malloc_aligned(size, _mi_os_page_size()); +} + +static void zone_free(malloc_zone_t* zone, void* p) { + MI_UNUSED(zone); + mi_cfree(p); +} + +static void* zone_realloc(malloc_zone_t* zone, void* p, size_t newsize) { + MI_UNUSED(zone); + return mi_realloc(p, newsize); +} + +static void* zone_memalign(malloc_zone_t* zone, size_t alignment, size_t size) { + MI_UNUSED(zone); + return mi_malloc_aligned(size,alignment); +} + +static void zone_destroy(malloc_zone_t* zone) { + MI_UNUSED(zone); + // todo: ignore for now? +} + +static unsigned zone_batch_malloc(malloc_zone_t* zone, size_t size, void** ps, unsigned count) { + size_t i; + for (i = 0; i < count; i++) { + ps[i] = zone_malloc(zone, size); + if (ps[i] == NULL) break; + } + return i; +} + +static void zone_batch_free(malloc_zone_t* zone, void** ps, unsigned count) { + for(size_t i = 0; i < count; i++) { + zone_free(zone, ps[i]); + ps[i] = NULL; + } +} + +static size_t zone_pressure_relief(malloc_zone_t* zone, size_t size) { + MI_UNUSED(zone); MI_UNUSED(size); + mi_collect(false); + return 0; +} + +static void zone_free_definite_size(malloc_zone_t* zone, void* p, size_t size) { + MI_UNUSED(size); + zone_free(zone,p); +} + +static boolean_t zone_claimed_address(malloc_zone_t* zone, void* p) { + MI_UNUSED(zone); + return mi_is_in_heap_region(p); +} + + +/* ------------------------------------------------------ + Introspection members +------------------------------------------------------ */ + +static kern_return_t intro_enumerator(task_t task, void* p, + unsigned type_mask, vm_address_t zone_address, + memory_reader_t reader, + vm_range_recorder_t recorder) +{ + // todo: enumerate all memory + MI_UNUSED(task); MI_UNUSED(p); MI_UNUSED(type_mask); MI_UNUSED(zone_address); + MI_UNUSED(reader); MI_UNUSED(recorder); + return KERN_SUCCESS; +} + +static size_t intro_good_size(malloc_zone_t* zone, size_t size) { + MI_UNUSED(zone); + return mi_good_size(size); +} + +static boolean_t intro_check(malloc_zone_t* zone) { + MI_UNUSED(zone); + return true; +} + +static void intro_print(malloc_zone_t* zone, boolean_t verbose) { + MI_UNUSED(zone); MI_UNUSED(verbose); + mi_stats_print(NULL); +} + +static void intro_log(malloc_zone_t* zone, void* p) { + MI_UNUSED(zone); MI_UNUSED(p); + // todo? +} + +static void intro_force_lock(malloc_zone_t* zone) { + MI_UNUSED(zone); + // todo? +} + +static void intro_force_unlock(malloc_zone_t* zone) { + MI_UNUSED(zone); + // todo? 
+} + +static void intro_statistics(malloc_zone_t* zone, malloc_statistics_t* stats) { + MI_UNUSED(zone); + // todo... + stats->blocks_in_use = 0; + stats->size_in_use = 0; + stats->max_size_in_use = 0; + stats->size_allocated = 0; +} + +static boolean_t intro_zone_locked(malloc_zone_t* zone) { + MI_UNUSED(zone); + return false; +} + + +/* ------------------------------------------------------ + At process start, override the default allocator +------------------------------------------------------ */ + +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic ignored "-Wmissing-field-initializers" +#endif + +#if defined(__clang__) +#pragma clang diagnostic ignored "-Wc99-extensions" +#endif + +static malloc_introspection_t mi_introspect = { + .enumerator = &intro_enumerator, + .good_size = &intro_good_size, + .check = &intro_check, + .print = &intro_print, + .log = &intro_log, + .force_lock = &intro_force_lock, + .force_unlock = &intro_force_unlock, +#if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6) && !defined(__ppc__) + .statistics = &intro_statistics, + .zone_locked = &intro_zone_locked, +#endif +}; + +static malloc_zone_t mi_malloc_zone = { + // note: even with designators, the order is important for C++ compilation + //.reserved1 = NULL, + //.reserved2 = NULL, + .size = &zone_size, + .malloc = &zone_malloc, + .calloc = &zone_calloc, + .valloc = &zone_valloc, + .free = &zone_free, + .realloc = &zone_realloc, + .destroy = &zone_destroy, + .zone_name = "mimalloc", + .batch_malloc = &zone_batch_malloc, + .batch_free = &zone_batch_free, + .introspect = &mi_introspect, +#if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6) && !defined(__ppc__) + #if defined(MAC_OS_X_VERSION_10_14) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_14) + .version = 10, + #else + .version = 9, + #endif + // switch to version 9+ on OSX 10.6 to support memalign. + .memalign = &zone_memalign, + .free_definite_size = &zone_free_definite_size, + #if defined(MAC_OS_X_VERSION_10_7) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_7) + .pressure_relief = &zone_pressure_relief, + #endif + #if defined(MAC_OS_X_VERSION_10_14) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_14) + .claimed_address = &zone_claimed_address, + #endif +#else + .version = 4, +#endif +}; + +#ifdef __cplusplus +} +#endif + + +#if defined(MI_OSX_INTERPOSE) && defined(MI_SHARED_LIB_EXPORT) + +// ------------------------------------------------------ +// Override malloc_xxx and malloc_zone_xxx api's to use only +// our mimalloc zone. Since even the loader uses malloc +// on macOS, this ensures that all allocations go through +// mimalloc (as all calls are interposed). +// The main `malloc`, `free`, etc calls are interposed in `alloc-override.c`, +// Here, we also override macOS specific API's like +// `malloc_zone_calloc` etc. 
see +// ------------------------------------------------------ + +static inline malloc_zone_t* mi_get_default_zone(void) +{ + static bool init; + if mi_unlikely(!init) { + init = true; + malloc_zone_register(&mi_malloc_zone); // by calling register we avoid a zone error on free (see ) + } + return &mi_malloc_zone; +} + +mi_decl_externc int malloc_jumpstart(uintptr_t cookie); +mi_decl_externc void _malloc_fork_prepare(void); +mi_decl_externc void _malloc_fork_parent(void); +mi_decl_externc void _malloc_fork_child(void); + + +static malloc_zone_t* mi_malloc_create_zone(vm_size_t size, unsigned flags) { + MI_UNUSED(size); MI_UNUSED(flags); + return mi_get_default_zone(); +} + +static malloc_zone_t* mi_malloc_default_zone (void) { + return mi_get_default_zone(); +} + +static malloc_zone_t* mi_malloc_default_purgeable_zone(void) { + return mi_get_default_zone(); +} + +static void mi_malloc_destroy_zone(malloc_zone_t* zone) { + MI_UNUSED(zone); + // nothing. +} + +static kern_return_t mi_malloc_get_all_zones (task_t task, memory_reader_t mr, vm_address_t** addresses, unsigned* count) { + MI_UNUSED(task); MI_UNUSED(mr); + if (addresses != NULL) *addresses = NULL; + if (count != NULL) *count = 0; + return KERN_SUCCESS; +} + +static const char* mi_malloc_get_zone_name(malloc_zone_t* zone) { + return (zone == NULL ? mi_malloc_zone.zone_name : zone->zone_name); +} + +static void mi_malloc_set_zone_name(malloc_zone_t* zone, const char* name) { + MI_UNUSED(zone); MI_UNUSED(name); +} + +static int mi_malloc_jumpstart(uintptr_t cookie) { + MI_UNUSED(cookie); + return 1; // or 0 for no error? +} + +static void mi__malloc_fork_prepare(void) { + // nothing +} +static void mi__malloc_fork_parent(void) { + // nothing +} +static void mi__malloc_fork_child(void) { + // nothing +} + +static void mi_malloc_printf(const char* fmt, ...) 
{ + MI_UNUSED(fmt); +} + +static bool zone_check(malloc_zone_t* zone) { + MI_UNUSED(zone); + return true; +} + +static malloc_zone_t* zone_from_ptr(const void* p) { + MI_UNUSED(p); + return mi_get_default_zone(); +} + +static void zone_log(malloc_zone_t* zone, void* p) { + MI_UNUSED(zone); MI_UNUSED(p); +} + +static void zone_print(malloc_zone_t* zone, bool b) { + MI_UNUSED(zone); MI_UNUSED(b); +} + +static void zone_print_ptr_info(void* p) { + MI_UNUSED(p); +} + +static void zone_register(malloc_zone_t* zone) { + MI_UNUSED(zone); +} + +static void zone_unregister(malloc_zone_t* zone) { + MI_UNUSED(zone); +} + +// use interposing so `DYLD_INSERT_LIBRARIES` works without `DYLD_FORCE_FLAT_NAMESPACE=1` +// See: +struct mi_interpose_s { + const void* replacement; + const void* target; +}; +#define MI_INTERPOSE_FUN(oldfun,newfun) { (const void*)&newfun, (const void*)&oldfun } +#define MI_INTERPOSE_MI(fun) MI_INTERPOSE_FUN(fun,mi_##fun) +#define MI_INTERPOSE_ZONE(fun) MI_INTERPOSE_FUN(malloc_##fun,fun) +__attribute__((used)) static const struct mi_interpose_s _mi_zone_interposes[] __attribute__((section("__DATA, __interpose"))) = +{ + + MI_INTERPOSE_MI(malloc_create_zone), + MI_INTERPOSE_MI(malloc_default_purgeable_zone), + MI_INTERPOSE_MI(malloc_default_zone), + MI_INTERPOSE_MI(malloc_destroy_zone), + MI_INTERPOSE_MI(malloc_get_all_zones), + MI_INTERPOSE_MI(malloc_get_zone_name), + MI_INTERPOSE_MI(malloc_jumpstart), + MI_INTERPOSE_MI(malloc_printf), + MI_INTERPOSE_MI(malloc_set_zone_name), + MI_INTERPOSE_MI(_malloc_fork_child), + MI_INTERPOSE_MI(_malloc_fork_parent), + MI_INTERPOSE_MI(_malloc_fork_prepare), + + MI_INTERPOSE_ZONE(zone_batch_free), + MI_INTERPOSE_ZONE(zone_batch_malloc), + MI_INTERPOSE_ZONE(zone_calloc), + MI_INTERPOSE_ZONE(zone_check), + MI_INTERPOSE_ZONE(zone_free), + MI_INTERPOSE_ZONE(zone_from_ptr), + MI_INTERPOSE_ZONE(zone_log), + MI_INTERPOSE_ZONE(zone_malloc), + MI_INTERPOSE_ZONE(zone_memalign), + MI_INTERPOSE_ZONE(zone_print), + MI_INTERPOSE_ZONE(zone_print_ptr_info), + MI_INTERPOSE_ZONE(zone_realloc), + MI_INTERPOSE_ZONE(zone_register), + MI_INTERPOSE_ZONE(zone_unregister), + MI_INTERPOSE_ZONE(zone_valloc) +}; + + +#else + +// ------------------------------------------------------ +// hook into the zone api's without interposing +// This is the official way of adding an allocator but +// it seems less robust than using interpose. +// ------------------------------------------------------ + +static inline malloc_zone_t* mi_get_default_zone(void) +{ + // The first returned zone is the real default + malloc_zone_t** zones = NULL; + unsigned count = 0; + kern_return_t ret = malloc_get_all_zones(0, NULL, (vm_address_t**)&zones, &count); + if (ret == KERN_SUCCESS && count > 0) { + return zones[0]; + } + else { + // fallback + return malloc_default_zone(); + } +} + +#if defined(__clang__) +__attribute__((constructor(0))) +#else +__attribute__((constructor)) // seems not supported by g++-11 on the M1 +#endif +__attribute__((used)) +static void _mi_macos_override_malloc(void) { + malloc_zone_t* purgeable_zone = NULL; + + #if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6) + // force the purgeable zone to exist to avoid strange bugs + if (malloc_default_purgeable_zone) { + purgeable_zone = malloc_default_purgeable_zone(); + } + #endif + + // Register our zone. + // thomcc: I think this is still needed to put us in the zone list. 
+ malloc_zone_register(&mi_malloc_zone); + // Unregister the default zone, this makes our zone the new default + // as that was the last registered. + malloc_zone_t *default_zone = mi_get_default_zone(); + // thomcc: Unsure if the next test is *always* false or just false in the + // cases I've tried. I'm also unsure if the code inside is needed. at all + if (default_zone != &mi_malloc_zone) { + malloc_zone_unregister(default_zone); + + // Reregister the default zone so free and realloc in that zone keep working. + malloc_zone_register(default_zone); + } + + // Unregister, and re-register the purgeable_zone to avoid bugs if it occurs + // earlier than the default zone. + if (purgeable_zone != NULL) { + malloc_zone_unregister(purgeable_zone); + malloc_zone_register(purgeable_zone); + } + +} +#endif // MI_OSX_INTERPOSE + +#endif // MI_MALLOC_OVERRIDE diff --git a/ww/managers/mimalloc/src/prim/osx/prim.c b/ww/managers/mimalloc/src/prim/osx/prim.c new file mode 100644 index 00000000..8a2f4e8a --- /dev/null +++ b/ww/managers/mimalloc/src/prim/osx/prim.c @@ -0,0 +1,9 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// We use the unix/prim.c with the mmap API on macOSX +#include "../unix/prim.c" diff --git a/ww/managers/mimalloc/src/prim/prim.c b/ww/managers/mimalloc/src/prim/prim.c new file mode 100644 index 00000000..3b7d3736 --- /dev/null +++ b/ww/managers/mimalloc/src/prim/prim.c @@ -0,0 +1,27 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// Select the implementation of the primitives +// depending on the OS. + +#if defined(_WIN32) +#include "windows/prim.c" // VirtualAlloc (Windows) + +#elif defined(__APPLE__) +#include "osx/prim.c" // macOSX (actually defers to mmap in unix/prim.c) + +#elif defined(__wasi__) +#define MI_USE_SBRK +#include "wasi/prim.c" // memory-grow or sbrk (Wasm) + +#elif defined(__EMSCRIPTEN__) +#include "emscripten/prim.c" // emmalloc_*, + pthread support + +#else +#include "unix/prim.c" // mmap() (Linux, macOSX, BSD, Illumnos, Haiku, DragonFly, etc.) + +#endif diff --git a/ww/managers/mimalloc/src/prim/readme.md b/ww/managers/mimalloc/src/prim/readme.md new file mode 100644 index 00000000..380dd3a7 --- /dev/null +++ b/ww/managers/mimalloc/src/prim/readme.md @@ -0,0 +1,9 @@ +## Portability Primitives + +This is the portability layer where all primitives needed from the OS are defined. + +- `include/mimalloc/prim.h`: primitive portability API definition. +- `prim.c`: Selects one of `unix/prim.c`, `wasi/prim.c`, or `windows/prim.c` depending on the host platform + (and on macOS, `osx/prim.c` defers to `unix/prim.c`). + +Note: still work in progress, there may still be places in the sources that still depend on OS ifdef's. 
\ No newline at end of file
diff --git a/ww/managers/mimalloc/src/prim/unix/prim.c b/ww/managers/mimalloc/src/prim/unix/prim.c
new file mode 100644
index 00000000..dd665d3d
--- /dev/null
+++ b/ww/managers/mimalloc/src/prim/unix/prim.c
@@ -0,0 +1,878 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+// This file is included in `src/prim/prim.c`
+
+#ifndef _DEFAULT_SOURCE
+#define _DEFAULT_SOURCE   // ensure mmap flags and syscall are defined
+#endif
+
+#if defined(__sun)
+// illumos provides the new mman.h api when any of these are defined,
+// otherwise the old api based on caddr_t which predates the void-pointer one.
+// stock solaris provides only the former, so we chose to discard those
+// flags only here rather than project wide, though.
+#undef _XOPEN_SOURCE
+#undef _POSIX_C_SOURCE
+#endif
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h"
+
+#include <sys/mman.h>  // mmap
+#include <unistd.h>    // sysconf
+#include <fcntl.h>     // open, close, read, access
+
+#if defined(__linux__)
+  #include <features.h>
+  #if defined(MI_NO_THP)
+  #include <sys/prctl.h>
+  #endif
+  #if defined(__GLIBC__)
+  #include <linux/mman.h> // linux mmap flags
+  #else
+  #include <sys/mman.h>
+  #endif
+#elif defined(__APPLE__)
+  #include <AvailabilityMacros.h>
+  #include <TargetConditionals.h>
+  #if !defined(TARGET_OS_OSX) || TARGET_OS_OSX  // see issue #879, used to be (!TARGET_IOS_IPHONE && !TARGET_IOS_SIMULATOR)
+  #include <mach/vm_statistics.h>    // VM_MAKE_TAG, VM_FLAGS_SUPERPAGE_SIZE_2MB, etc.
+  #endif
+  #if !defined(MAC_OS_X_VERSION_10_7)
+  #define MAC_OS_X_VERSION_10_7   1070
+  #endif
+#elif defined(__FreeBSD__) || defined(__DragonFly__)
+  #include <sys/param.h>
+  #if __FreeBSD_version >= 1200000
+  #include <sys/cpuset.h>
+  #include <sys/domainset.h>
+  #endif
+  #include <sys/sysctl.h>
+#endif
+
+#if defined(__linux__) || defined(__FreeBSD__)
+  #define MI_HAS_SYSCALL_H
+  #include <sys/syscall.h>
+#endif
+
+
+//------------------------------------------------------------------------------------
+// Use syscalls for some primitives to allow for libraries that override open/read/close etc.
+// and do allocation themselves; using syscalls prevents recursion when mimalloc is
+// still initializing (issue #713)
+// Declare inline to avoid unused function warnings.
+//------------------------------------------------------------------------------------ + +#if defined(MI_HAS_SYSCALL_H) && defined(SYS_open) && defined(SYS_close) && defined(SYS_read) && defined(SYS_access) + +static inline int mi_prim_open(const char* fpath, int open_flags) { + return syscall(SYS_open,fpath,open_flags,0); +} +static inline ssize_t mi_prim_read(int fd, void* buf, size_t bufsize) { + return syscall(SYS_read,fd,buf,bufsize); +} +static inline int mi_prim_close(int fd) { + return syscall(SYS_close,fd); +} +static inline int mi_prim_access(const char *fpath, int mode) { + return syscall(SYS_access,fpath,mode); +} + +#else + +static inline int mi_prim_open(const char* fpath, int open_flags) { + return open(fpath,open_flags); +} +static inline ssize_t mi_prim_read(int fd, void* buf, size_t bufsize) { + return read(fd,buf,bufsize); +} +static inline int mi_prim_close(int fd) { + return close(fd); +} +static inline int mi_prim_access(const char *fpath, int mode) { + return access(fpath,mode); +} + +#endif + + + +//--------------------------------------------- +// init +//--------------------------------------------- + +static bool unix_detect_overcommit(void) { + bool os_overcommit = true; +#if defined(__linux__) + int fd = mi_prim_open("/proc/sys/vm/overcommit_memory", O_RDONLY); + if (fd >= 0) { + char buf[32]; + ssize_t nread = mi_prim_read(fd, &buf, sizeof(buf)); + mi_prim_close(fd); + // + // 0: heuristic overcommit, 1: always overcommit, 2: never overcommit (ignore NORESERVE) + if (nread >= 1) { + os_overcommit = (buf[0] == '0' || buf[0] == '1'); + } + } +#elif defined(__FreeBSD__) + int val = 0; + size_t olen = sizeof(val); + if (sysctlbyname("vm.overcommit", &val, &olen, NULL, 0) == 0) { + os_overcommit = (val != 0); + } +#else + // default: overcommit is true +#endif + return os_overcommit; +} + +void _mi_prim_mem_init( mi_os_mem_config_t* config ) +{ + long psize = sysconf(_SC_PAGESIZE); + if (psize > 0) { + config->page_size = (size_t)psize; + config->alloc_granularity = (size_t)psize; + } + config->large_page_size = 2*MI_MiB; // TODO: can we query the OS for this? + config->has_overcommit = unix_detect_overcommit(); + config->has_partial_free = true; // mmap can free in parts + config->has_virtual_reserve = true; // todo: check if this true for NetBSD? (for anonymous mmap with PROT_NONE) + + // disable transparent huge pages for this process? + #if (defined(__linux__) || defined(__ANDROID__)) && defined(PR_GET_THP_DISABLE) + #if defined(MI_NO_THP) + if (true) + #else + if (!mi_option_is_enabled(mi_option_allow_large_os_pages)) // disable THP also if large OS pages are not allowed in the options + #endif + { + int val = 0; + if (prctl(PR_GET_THP_DISABLE, &val, 0, 0, 0) != 0) { + // Most likely since distros often come with always/madvise settings. + val = 1; + // Disabling only for mimalloc process rather than touching system wide settings + (void)prctl(PR_SET_THP_DISABLE, &val, 0, 0, 0); + } + } + #endif +} + + +//--------------------------------------------- +// free +//--------------------------------------------- + +int _mi_prim_free(void* addr, size_t size ) { + bool err = (munmap(addr, size) == -1); + return (err ? 
errno : 0); +} + + +//--------------------------------------------- +// mmap +//--------------------------------------------- + +static int unix_madvise(void* addr, size_t size, int advice) { + #if defined(__sun) + return madvise((caddr_t)addr, size, advice); // Solaris needs cast (issue #520) + #else + return madvise(addr, size, advice); + #endif +} + +static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) { + MI_UNUSED(try_alignment); + void* p = NULL; + #if defined(MAP_ALIGNED) // BSD + if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) { + size_t n = mi_bsr(try_alignment); + if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB + p = mmap(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd, 0); + if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) { + int err = errno; + _mi_trace_message("unable to directly request aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, addr); + } + if (p!=MAP_FAILED) return p; + // fall back to regular mmap + } + } + #elif defined(MAP_ALIGN) // Solaris + if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) { + p = mmap((void*)try_alignment, size, protect_flags, flags | MAP_ALIGN, fd, 0); // addr parameter is the required alignment + if (p!=MAP_FAILED) return p; + // fall back to regular mmap + } + #endif + #if (MI_INTPTR_SIZE >= 8) && !defined(MAP_ALIGNED) + // on 64-bit systems, use the virtual address area after 2TiB for 4MiB aligned allocations + if (addr == NULL) { + void* hint = _mi_os_get_aligned_hint(try_alignment, size); + if (hint != NULL) { + p = mmap(hint, size, protect_flags, flags, fd, 0); + if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) { + #if MI_TRACK_ENABLED // asan sometimes does not instrument errno correctly? + int err = 0; + #else + int err = errno; + #endif + _mi_trace_message("unable to directly request hinted aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, hint); + } + if (p!=MAP_FAILED) return p; + // fall back to regular mmap + } + } + #endif + // regular mmap + p = mmap(addr, size, protect_flags, flags, fd, 0); + if (p!=MAP_FAILED) return p; + // failed to allocate + return NULL; +} + +static int unix_mmap_fd(void) { + #if defined(VM_MAKE_TAG) + // macOS: tracking anonymous page with a specific ID. 
(All up to 98 are taken officially but LLVM sanitizers had taken 99) + int os_tag = (int)mi_option_get(mi_option_os_tag); + if (os_tag < 100 || os_tag > 255) { os_tag = 100; } + return VM_MAKE_TAG(os_tag); + #else + return -1; + #endif +} + +static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only, bool allow_large, bool* is_large) { + #if !defined(MAP_ANONYMOUS) + #define MAP_ANONYMOUS MAP_ANON + #endif + #if !defined(MAP_NORESERVE) + #define MAP_NORESERVE 0 + #endif + void* p = NULL; + const int fd = unix_mmap_fd(); + int flags = MAP_PRIVATE | MAP_ANONYMOUS; + if (_mi_os_has_overcommit()) { + flags |= MAP_NORESERVE; + } + #if defined(PROT_MAX) + protect_flags |= PROT_MAX(PROT_READ | PROT_WRITE); // BSD + #endif + // huge page allocation + if ((large_only || _mi_os_use_large_page(size, try_alignment)) && allow_large) { + static _Atomic(size_t) large_page_try_ok; // = 0; + size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok); + if (!large_only && try_ok > 0) { + // If the OS is not configured for large OS pages, or the user does not have + // enough permission, the `mmap` will always fail (but it might also fail for other reasons). + // Therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times + // to avoid too many failing calls to mmap. + mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1); + } + else { + int lflags = flags & ~MAP_NORESERVE; // using NORESERVE on huge pages seems to fail on Linux + int lfd = fd; + #ifdef MAP_ALIGNED_SUPER + lflags |= MAP_ALIGNED_SUPER; + #endif + #ifdef MAP_HUGETLB + lflags |= MAP_HUGETLB; + #endif + #ifdef MAP_HUGE_1GB + static bool mi_huge_pages_available = true; + if ((size % MI_GiB) == 0 && mi_huge_pages_available) { + lflags |= MAP_HUGE_1GB; + } + else + #endif + { + #ifdef MAP_HUGE_2MB + lflags |= MAP_HUGE_2MB; + #endif + } + #ifdef VM_FLAGS_SUPERPAGE_SIZE_2MB + lfd |= VM_FLAGS_SUPERPAGE_SIZE_2MB; + #endif + if (large_only || lflags != flags) { + // try large OS page allocation + *is_large = true; + p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd); + #ifdef MAP_HUGE_1GB + if (p == NULL && (lflags & MAP_HUGE_1GB) == MAP_HUGE_1GB) { + mi_huge_pages_available = false; // don't try huge 1GiB pages again + _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (errno: %i)\n", errno); + lflags = ((lflags & ~MAP_HUGE_1GB) | MAP_HUGE_2MB); + p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd); + } + #endif + if (large_only) return p; + if (p == NULL) { + mi_atomic_store_release(&large_page_try_ok, (size_t)8); // on error, don't try again for the next N allocations + } + } + } + } + // regular allocation + if (p == NULL) { + *is_large = false; + p = unix_mmap_prim(addr, size, try_alignment, protect_flags, flags, fd); + if (p != NULL) { + #if defined(MADV_HUGEPAGE) + // Many Linux systems don't allow MAP_HUGETLB but they support instead + // transparent huge pages (THP). Generally, it is not required to call `madvise` with MADV_HUGE + // though since properly aligned allocations will already use large pages if available + // in that case -- in particular for our large regions (in `memory.c`). + // However, some systems only allow THP if called with explicit `madvise`, so + // when large OS pages are enabled for mimalloc, we call `madvise` anyways. 
+ if (allow_large && _mi_os_use_large_page(size, try_alignment)) { + if (unix_madvise(p, size, MADV_HUGEPAGE) == 0) { + *is_large = true; // possibly + }; + } + #elif defined(__sun) + if (allow_large && _mi_os_use_large_page(size, try_alignment)) { + struct memcntl_mha cmd = {0}; + cmd.mha_pagesize = _mi_os_large_page_size(); + cmd.mha_cmd = MHA_MAPSIZE_VA; + if (memcntl((caddr_t)p, size, MC_HAT_ADVISE, (caddr_t)&cmd, 0, 0) == 0) { + *is_large = true; + } + } + #endif + } + } + return p; +} + +// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. +int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { + mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); + mi_assert_internal(commit || !allow_large); + mi_assert_internal(try_alignment > 0); + + *is_zero = true; + int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE); + *addr = unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large); + return (*addr != NULL ? 0 : errno); +} + + +//--------------------------------------------- +// Commit/Reset +//--------------------------------------------- + +static void unix_mprotect_hint(int err) { + #if defined(__linux__) && (MI_SECURE>=2) // guard page around every mimalloc page + if (err == ENOMEM) { + _mi_warning_message("The next warning may be caused by a low memory map limit.\n" + " On Linux this is controlled by the vm.max_map_count -- maybe increase it?\n" + " For example: sudo sysctl -w vm.max_map_count=262144\n"); + } + #else + MI_UNUSED(err); + #endif +} + +int _mi_prim_commit(void* start, size_t size, bool* is_zero) { + // commit: ensure we can access the area + // note: we may think that *is_zero can be true since the memory + // was either from mmap PROT_NONE, or from decommit MADV_DONTNEED, but + // we sometimes call commit on a range with still partially committed + // memory and `mprotect` does not zero the range. + *is_zero = false; + int err = mprotect(start, size, (PROT_READ | PROT_WRITE)); + if (err != 0) { + err = errno; + unix_mprotect_hint(err); + } + return err; +} + +int _mi_prim_decommit(void* start, size_t size, bool* needs_recommit) { + int err = 0; + // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE) + err = unix_madvise(start, size, MADV_DONTNEED); + #if !MI_DEBUG && !MI_SECURE + *needs_recommit = false; + #else + *needs_recommit = true; + mprotect(start, size, PROT_NONE); + #endif + /* + // decommit: use mmap with MAP_FIXED and PROT_NONE to discard the existing memory (and reduce rss) + *needs_recommit = true; + const int fd = unix_mmap_fd(); + void* p = mmap(start, size, PROT_NONE, (MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE), fd, 0); + if (p != start) { err = errno; } + */ + return err; +} + +int _mi_prim_reset(void* start, size_t size) { + // We try to use `MADV_FREE` as that is the fastest. A drawback though is that it + // will not reduce the `rss` stats in tools like `top` even though the memory is available + // to other processes. With the default `MIMALLOC_PURGE_DECOMMITS=1` we ensure that by + // default `MADV_DONTNEED` is used though. 
+ #if defined(MADV_FREE) + static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE); + int oadvice = (int)mi_atomic_load_relaxed(&advice); + int err; + while ((err = unix_madvise(start, size, oadvice)) != 0 && errno == EAGAIN) { errno = 0; }; + if (err != 0 && errno == EINVAL && oadvice == MADV_FREE) { + // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on + mi_atomic_store_release(&advice, (size_t)MADV_DONTNEED); + err = unix_madvise(start, size, MADV_DONTNEED); + } + #else + int err = unix_madvise(start, size, MADV_DONTNEED); + #endif + return err; +} + +int _mi_prim_protect(void* start, size_t size, bool protect) { + int err = mprotect(start, size, protect ? PROT_NONE : (PROT_READ | PROT_WRITE)); + if (err != 0) { err = errno; } + unix_mprotect_hint(err); + return err; +} + + + +//--------------------------------------------- +// Huge page allocation +//--------------------------------------------- + +#if (MI_INTPTR_SIZE >= 8) && !defined(__HAIKU__) && !defined(__CYGWIN__) + +#ifndef MPOL_PREFERRED +#define MPOL_PREFERRED 1 +#endif + +#if defined(MI_HAS_SYSCALL_H) && defined(SYS_mbind) +static long mi_prim_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) { + return syscall(SYS_mbind, start, len, mode, nmask, maxnode, flags); +} +#else +static long mi_prim_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) { + MI_UNUSED(start); MI_UNUSED(len); MI_UNUSED(mode); MI_UNUSED(nmask); MI_UNUSED(maxnode); MI_UNUSED(flags); + return 0; +} +#endif + +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) { + bool is_large = true; + *is_zero = true; + *addr = unix_mmap(hint_addr, size, MI_SEGMENT_SIZE, PROT_READ | PROT_WRITE, true, true, &is_large); + if (*addr != NULL && numa_node >= 0 && numa_node < 8*MI_INTPTR_SIZE) { // at most 64 nodes + unsigned long numa_mask = (1UL << numa_node); + // TODO: does `mbind` work correctly for huge OS pages? should we + // use `set_mempolicy` before calling mmap instead? + // see: + long err = mi_prim_mbind(*addr, size, MPOL_PREFERRED, &numa_mask, 8*MI_INTPTR_SIZE, 0); + if (err != 0) { + err = errno; + _mi_warning_message("failed to bind huge (1GiB) pages to numa node %d (error: %d (0x%x))\n", numa_node, err, err); + } + } + return (*addr != NULL ? 0 : errno); +} + +#else + +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) { + MI_UNUSED(hint_addr); MI_UNUSED(size); MI_UNUSED(numa_node); + *is_zero = false; + *addr = NULL; + return ENOMEM; +} + +#endif + +//--------------------------------------------- +// NUMA nodes +//--------------------------------------------- + +#if defined(__linux__) + +size_t _mi_prim_numa_node(void) { + #if defined(MI_HAS_SYSCALL_H) && defined(SYS_getcpu) + unsigned long node = 0; + unsigned long ncpu = 0; + long err = syscall(SYS_getcpu, &ncpu, &node, NULL); + if (err != 0) return 0; + return node; + #else + return 0; + #endif +} + +size_t _mi_prim_numa_node_count(void) { + char buf[128]; + unsigned node = 0; + for(node = 0; node < 256; node++) { + // enumerate node entries -- todo: it there a more efficient way to do this? 
(but ensure there is no allocation) + _mi_snprintf(buf, 127, "/sys/devices/system/node/node%u", node + 1); + if (mi_prim_access(buf,R_OK) != 0) break; + } + return (node+1); +} + +#elif defined(__FreeBSD__) && __FreeBSD_version >= 1200000 + +size_t _mi_prim_numa_node(void) { + domainset_t dom; + size_t node; + int policy; + if (cpuset_getdomain(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, sizeof(dom), &dom, &policy) == -1) return 0ul; + for (node = 0; node < MAXMEMDOM; node++) { + if (DOMAINSET_ISSET(node, &dom)) return node; + } + return 0ul; +} + +size_t _mi_prim_numa_node_count(void) { + size_t ndomains = 0; + size_t len = sizeof(ndomains); + if (sysctlbyname("vm.ndomains", &ndomains, &len, NULL, 0) == -1) return 0ul; + return ndomains; +} + +#elif defined(__DragonFly__) + +size_t _mi_prim_numa_node(void) { + // TODO: DragonFly does not seem to provide any userland means to get this information. + return 0ul; +} + +size_t _mi_prim_numa_node_count(void) { + size_t ncpus = 0, nvirtcoresperphys = 0; + size_t len = sizeof(size_t); + if (sysctlbyname("hw.ncpu", &ncpus, &len, NULL, 0) == -1) return 0ul; + if (sysctlbyname("hw.cpu_topology_ht_ids", &nvirtcoresperphys, &len, NULL, 0) == -1) return 0ul; + return nvirtcoresperphys * ncpus; +} + +#else + +size_t _mi_prim_numa_node(void) { + return 0; +} + +size_t _mi_prim_numa_node_count(void) { + return 1; +} + +#endif + +// ---------------------------------------------------------------- +// Clock +// ---------------------------------------------------------------- + +#include + +#if defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC) + +mi_msecs_t _mi_prim_clock_now(void) { + struct timespec t; + #ifdef CLOCK_MONOTONIC + clock_gettime(CLOCK_MONOTONIC, &t); + #else + clock_gettime(CLOCK_REALTIME, &t); + #endif + return ((mi_msecs_t)t.tv_sec * 1000) + ((mi_msecs_t)t.tv_nsec / 1000000); +} + +#else + +// low resolution timer +mi_msecs_t _mi_prim_clock_now(void) { + #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0) + return (mi_msecs_t)clock(); + #elif (CLOCKS_PER_SEC < 1000) + return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC); + #else + return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000); + #endif +} + +#endif + + + + +//---------------------------------------------------------------- +// Process info +//---------------------------------------------------------------- + +#if defined(__unix__) || defined(__unix) || defined(unix) || defined(__APPLE__) || defined(__HAIKU__) +#include +#include +#include + +#if defined(__APPLE__) +#include +#endif + +#if defined(__HAIKU__) +#include +#endif + +static mi_msecs_t timeval_secs(const struct timeval* tv) { + return ((mi_msecs_t)tv->tv_sec * 1000L) + ((mi_msecs_t)tv->tv_usec / 1000L); +} + +void _mi_prim_process_info(mi_process_info_t* pinfo) +{ + struct rusage rusage; + getrusage(RUSAGE_SELF, &rusage); + pinfo->utime = timeval_secs(&rusage.ru_utime); + pinfo->stime = timeval_secs(&rusage.ru_stime); +#if !defined(__HAIKU__) + pinfo->page_faults = rusage.ru_majflt; +#endif +#if defined(__HAIKU__) + // Haiku does not have (yet?) 
a way to + // get these stats per process + thread_info tid; + area_info mem; + ssize_t c; + get_thread_info(find_thread(0), &tid); + while (get_next_area_info(tid.team, &c, &mem) == B_OK) { + pinfo->peak_rss += mem.ram_size; + } + pinfo->page_faults = 0; +#elif defined(__APPLE__) + pinfo->peak_rss = rusage.ru_maxrss; // macos reports in bytes + #ifdef MACH_TASK_BASIC_INFO + struct mach_task_basic_info info; + mach_msg_type_number_t infoCount = MACH_TASK_BASIC_INFO_COUNT; + if (task_info(mach_task_self(), MACH_TASK_BASIC_INFO, (task_info_t)&info, &infoCount) == KERN_SUCCESS) { + pinfo->current_rss = (size_t)info.resident_size; + } + #else + struct task_basic_info info; + mach_msg_type_number_t infoCount = TASK_BASIC_INFO_COUNT; + if (task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&info, &infoCount) == KERN_SUCCESS) { + pinfo->current_rss = (size_t)info.resident_size; + } + #endif +#else + pinfo->peak_rss = rusage.ru_maxrss * 1024; // Linux/BSD report in KiB +#endif + // use defaults for commit +} + +#else + +#ifndef __wasi__ +// WebAssembly instances are not processes +#pragma message("define a way to get process info") +#endif + +void _mi_prim_process_info(mi_process_info_t* pinfo) +{ + // use defaults + MI_UNUSED(pinfo); +} + +#endif + + +//---------------------------------------------------------------- +// Output +//---------------------------------------------------------------- + +void _mi_prim_out_stderr( const char* msg ) { + fputs(msg,stderr); +} + + +//---------------------------------------------------------------- +// Environment +//---------------------------------------------------------------- + +#if !defined(MI_USE_ENVIRON) || (MI_USE_ENVIRON!=0) +// On Posix systemsr use `environ` to access environment variables +// even before the C runtime is initialized. +#if defined(__APPLE__) && defined(__has_include) && __has_include() +#include +static char** mi_get_environ(void) { + return (*_NSGetEnviron()); +} +#else +extern char** environ; +static char** mi_get_environ(void) { + return environ; +} +#endif +bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { + if (name==NULL) return false; + const size_t len = _mi_strlen(name); + if (len == 0) return false; + char** env = mi_get_environ(); + if (env == NULL) return false; + // compare up to 10000 entries + for (int i = 0; i < 10000 && env[i] != NULL; i++) { + const char* s = env[i]; + if (_mi_strnicmp(name, s, len) == 0 && s[len] == '=') { // case insensitive + // found it + _mi_strlcpy(result, s + len + 1, result_size); + return true; + } + } + return false; +} +#else +// fallback: use standard C `getenv` but this cannot be used while initializing the C runtime +bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { + // cannot call getenv() when still initializing the C runtime. + if (_mi_preloading()) return false; + const char* s = getenv(name); + if (s == NULL) { + // we check the upper case name too. 
+ char buf[64+1]; + size_t len = _mi_strnlen(name,sizeof(buf)-1); + for (size_t i = 0; i < len; i++) { + buf[i] = _mi_toupper(name[i]); + } + buf[len] = 0; + s = getenv(buf); + } + if (s == NULL || _mi_strnlen(s,result_size) >= result_size) return false; + _mi_strlcpy(result, s, result_size); + return true; +} +#endif // !MI_USE_ENVIRON + + +//---------------------------------------------------------------- +// Random +//---------------------------------------------------------------- + +#if defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_15) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_15) +#include +#include + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + // We prefere CCRandomGenerateBytes as it returns an error code while arc4random_buf + // may fail silently on macOS. See PR #390, and + return (CCRandomGenerateBytes(buf, buf_len) == kCCSuccess); +} + +#elif defined(__ANDROID__) || defined(__DragonFly__) || \ + defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \ + defined(__sun) || \ + (defined(__APPLE__) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_7)) + +#include +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + arc4random_buf(buf, buf_len); + return true; +} + +#elif defined(__APPLE__) || defined(__linux__) || defined(__HAIKU__) // also for old apple versions < 10.7 (issue #829) + +#include +#include +#include + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + // Modern Linux provides `getrandom` but different distributions either use `sys/random.h` or `linux/random.h` + // and for the latter the actual `getrandom` call is not always defined. + // (see ) + // We therefore use a syscall directly and fall back dynamically to /dev/urandom when needed. + #if defined(MI_HAS_SYSCALL_H) && defined(SYS_getrandom) + #ifndef GRND_NONBLOCK + #define GRND_NONBLOCK (1) + #endif + static _Atomic(uintptr_t) no_getrandom; // = 0 + if (mi_atomic_load_acquire(&no_getrandom)==0) { + ssize_t ret = syscall(SYS_getrandom, buf, buf_len, GRND_NONBLOCK); + if (ret >= 0) return (buf_len == (size_t)ret); + if (errno != ENOSYS) return false; + mi_atomic_store_release(&no_getrandom, (uintptr_t)1); // don't call again, and fall back to /dev/urandom + } + #endif + int flags = O_RDONLY; + #if defined(O_CLOEXEC) + flags |= O_CLOEXEC; + #endif + int fd = mi_prim_open("/dev/urandom", flags); + if (fd < 0) return false; + size_t count = 0; + while(count < buf_len) { + ssize_t ret = mi_prim_read(fd, (char*)buf + count, buf_len - count); + if (ret<=0) { + if (errno!=EAGAIN && errno!=EINTR) break; + } + else { + count += ret; + } + } + mi_prim_close(fd); + return (count==buf_len); +} + +#else + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + return false; +} + +#endif + + +//---------------------------------------------------------------- +// Thread init/done +//---------------------------------------------------------------- + +#if defined(MI_USE_PTHREADS) + +// use pthread local storage keys to detect thread ending +// (and used with MI_TLS_PTHREADS for the default heap) +pthread_key_t _mi_heap_default_key = (pthread_key_t)(-1); + +static void mi_pthread_done(void* value) { + if (value!=NULL) { + _mi_thread_done((mi_heap_t*)value); + } +} + +void _mi_prim_thread_init_auto_done(void) { + mi_assert_internal(_mi_heap_default_key == (pthread_key_t)(-1)); + pthread_key_create(&_mi_heap_default_key, &mi_pthread_done); +} + +void _mi_prim_thread_done_auto_done(void) { + if (_mi_heap_default_key != (pthread_key_t)(-1)) { // do not leak the key, 
see issue #809 + pthread_key_delete(_mi_heap_default_key); + } +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + if (_mi_heap_default_key != (pthread_key_t)(-1)) { // can happen during recursive invocation on freeBSD + pthread_setspecific(_mi_heap_default_key, heap); + } +} + +#else + +void _mi_prim_thread_init_auto_done(void) { + // nothing +} + +void _mi_prim_thread_done_auto_done(void) { + // nothing +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + MI_UNUSED(heap); +} + +#endif diff --git a/ww/managers/mimalloc/src/prim/wasi/prim.c b/ww/managers/mimalloc/src/prim/wasi/prim.c new file mode 100644 index 00000000..e95f67f5 --- /dev/null +++ b/ww/managers/mimalloc/src/prim/wasi/prim.c @@ -0,0 +1,280 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +// This file is included in `src/prim/prim.c` + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" + +#include // fputs +#include // getenv + +//--------------------------------------------- +// Initialize +//--------------------------------------------- + +void _mi_prim_mem_init( mi_os_mem_config_t* config ) { + config->page_size = 64*MI_KiB; // WebAssembly has a fixed page size: 64KiB + config->alloc_granularity = 16; + config->has_overcommit = false; + config->has_partial_free = false; + config->has_virtual_reserve = false; +} + +//--------------------------------------------- +// Free +//--------------------------------------------- + +int _mi_prim_free(void* addr, size_t size ) { + MI_UNUSED(addr); MI_UNUSED(size); + // wasi heap cannot be shrunk + return 0; +} + + +//--------------------------------------------- +// Allocation: sbrk or memory_grow +//--------------------------------------------- + +#if defined(MI_USE_SBRK) + #include // for sbrk + + static void* mi_memory_grow( size_t size ) { + void* p = sbrk(size); + if (p == (void*)(-1)) return NULL; + #if !defined(__wasi__) // on wasi this is always zero initialized already (?) + memset(p,0,size); + #endif + return p; + } +#elif defined(__wasi__) + static void* mi_memory_grow( size_t size ) { + size_t base = (size > 0 ? 
__builtin_wasm_memory_grow(0,_mi_divide_up(size, _mi_os_page_size())) + : __builtin_wasm_memory_size(0)); + if (base == SIZE_MAX) return NULL; + return (void*)(base * _mi_os_page_size()); + } +#endif + +#if defined(MI_USE_PTHREADS) +static pthread_mutex_t mi_heap_grow_mutex = PTHREAD_MUTEX_INITIALIZER; +#endif + +static void* mi_prim_mem_grow(size_t size, size_t try_alignment) { + void* p = NULL; + if (try_alignment <= 1) { + // `sbrk` is not thread safe in general so try to protect it (we could skip this on WASM but leave it in for now) + #if defined(MI_USE_PTHREADS) + pthread_mutex_lock(&mi_heap_grow_mutex); + #endif + p = mi_memory_grow(size); + #if defined(MI_USE_PTHREADS) + pthread_mutex_unlock(&mi_heap_grow_mutex); + #endif + } + else { + void* base = NULL; + size_t alloc_size = 0; + // to allocate aligned use a lock to try to avoid thread interaction + // between getting the current size and actual allocation + // (also, `sbrk` is not thread safe in general) + #if defined(MI_USE_PTHREADS) + pthread_mutex_lock(&mi_heap_grow_mutex); + #endif + { + void* current = mi_memory_grow(0); // get current size + if (current != NULL) { + void* aligned_current = mi_align_up_ptr(current, try_alignment); // and align from there to minimize wasted space + alloc_size = _mi_align_up( ((uint8_t*)aligned_current - (uint8_t*)current) + size, _mi_os_page_size()); + base = mi_memory_grow(alloc_size); + } + } + #if defined(MI_USE_PTHREADS) + pthread_mutex_unlock(&mi_heap_grow_mutex); + #endif + if (base != NULL) { + p = mi_align_up_ptr(base, try_alignment); + if ((uint8_t*)p + size > (uint8_t*)base + alloc_size) { + // another thread used wasm_memory_grow/sbrk in-between and we do not have enough + // space after alignment. Give up (and waste the space as we cannot shrink :-( ) + // (in `mi_os_mem_alloc_aligned` this will fall back to overallocation to align) + p = NULL; + } + } + } + /* + if (p == NULL) { + _mi_warning_message("unable to allocate sbrk/wasm_memory_grow OS memory (%zu bytes, %zu alignment)\n", size, try_alignment); + errno = ENOMEM; + return NULL; + } + */ + mi_assert_internal( p == NULL || try_alignment == 0 || (uintptr_t)p % try_alignment == 0 ); + return p; +} + +// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned. +int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { + MI_UNUSED(allow_large); MI_UNUSED(commit); + *is_large = false; + *is_zero = false; + *addr = mi_prim_mem_grow(size, try_alignment); + return (*addr != NULL ? 
0 : ENOMEM); +} + + +//--------------------------------------------- +// Commit/Reset/Protect +//--------------------------------------------- + +int _mi_prim_commit(void* addr, size_t size, bool* is_zero) { + MI_UNUSED(addr); MI_UNUSED(size); + *is_zero = false; + return 0; +} + +int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) { + MI_UNUSED(addr); MI_UNUSED(size); + *needs_recommit = false; + return 0; +} + +int _mi_prim_reset(void* addr, size_t size) { + MI_UNUSED(addr); MI_UNUSED(size); + return 0; +} + +int _mi_prim_protect(void* addr, size_t size, bool protect) { + MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(protect); + return 0; +} + + +//--------------------------------------------- +// Huge pages and NUMA nodes +//--------------------------------------------- + +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) { + MI_UNUSED(hint_addr); MI_UNUSED(size); MI_UNUSED(numa_node); + *is_zero = true; + *addr = NULL; + return ENOSYS; +} + +size_t _mi_prim_numa_node(void) { + return 0; +} + +size_t _mi_prim_numa_node_count(void) { + return 1; +} + + +//---------------------------------------------------------------- +// Clock +//---------------------------------------------------------------- + +#include + +#if defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC) + +mi_msecs_t _mi_prim_clock_now(void) { + struct timespec t; + #ifdef CLOCK_MONOTONIC + clock_gettime(CLOCK_MONOTONIC, &t); + #else + clock_gettime(CLOCK_REALTIME, &t); + #endif + return ((mi_msecs_t)t.tv_sec * 1000) + ((mi_msecs_t)t.tv_nsec / 1000000); +} + +#else + +// low resolution timer +mi_msecs_t _mi_prim_clock_now(void) { + #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0) + return (mi_msecs_t)clock(); + #elif (CLOCKS_PER_SEC < 1000) + return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC); + #else + return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000); + #endif +} + +#endif + + +//---------------------------------------------------------------- +// Process info +//---------------------------------------------------------------- + +void _mi_prim_process_info(mi_process_info_t* pinfo) +{ + // use defaults + MI_UNUSED(pinfo); +} + + +//---------------------------------------------------------------- +// Output +//---------------------------------------------------------------- + +void _mi_prim_out_stderr( const char* msg ) { + fputs(msg,stderr); +} + + +//---------------------------------------------------------------- +// Environment +//---------------------------------------------------------------- + +bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { + // cannot call getenv() when still initializing the C runtime. + if (_mi_preloading()) return false; + const char* s = getenv(name); + if (s == NULL) { + // we check the upper case name too. 
+ char buf[64+1]; + size_t len = _mi_strnlen(name,sizeof(buf)-1); + for (size_t i = 0; i < len; i++) { + buf[i] = _mi_toupper(name[i]); + } + buf[len] = 0; + s = getenv(buf); + } + if (s == NULL || _mi_strnlen(s,result_size) >= result_size) return false; + _mi_strlcpy(result, s, result_size); + return true; +} + + +//---------------------------------------------------------------- +// Random +//---------------------------------------------------------------- + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + return false; +} + + +//---------------------------------------------------------------- +// Thread init/done +//---------------------------------------------------------------- + +void _mi_prim_thread_init_auto_done(void) { + // nothing +} + +void _mi_prim_thread_done_auto_done(void) { + // nothing +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + MI_UNUSED(heap); +} diff --git a/ww/managers/mimalloc/src/prim/windows/etw-mimalloc.wprp b/ww/managers/mimalloc/src/prim/windows/etw-mimalloc.wprp new file mode 100644 index 00000000..b00cd7ad --- /dev/null +++ b/ww/managers/mimalloc/src/prim/windows/etw-mimalloc.wprp @@ -0,0 +1,61 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ww/managers/mimalloc/src/prim/windows/etw.h b/ww/managers/mimalloc/src/prim/windows/etw.h new file mode 100644 index 00000000..4e0a092a --- /dev/null +++ b/ww/managers/mimalloc/src/prim/windows/etw.h @@ -0,0 +1,905 @@ +//**********************************************************************` +//* This is an include file generated by Message Compiler. *` +//* *` +//* Copyright (c) Microsoft Corporation. All Rights Reserved. *` +//**********************************************************************` +#pragma once + +//***************************************************************************** +// +// Notes on the ETW event code generated by MC: +// +// - Structures and arrays of structures are treated as an opaque binary blob. +// The caller is responsible for packing the data for the structure into a +// single region of memory, with no padding between values. The macro will +// have an extra parameter for the length of the blob. +// - Arrays of nul-terminated strings must be packed by the caller into a +// single binary blob containing the correct number of strings, with a nul +// after each string. The size of the blob is specified in characters, and +// includes the final nul. +// - Arrays of SID are treated as a single binary blob. The caller is +// responsible for packing the SID values into a single region of memory with +// no padding. +// - The length attribute on the data element in the manifest is significant +// for values with intype win:UnicodeString, win:AnsiString, or win:Binary. +// The length attribute must be specified for win:Binary, and is optional for +// win:UnicodeString and win:AnsiString (if no length is given, the strings +// are assumed to be nul-terminated). For win:UnicodeString, the length is +// measured in characters, not bytes. +// - For an array of win:UnicodeString, win:AnsiString, or win:Binary, the +// length attribute applies to every value in the array, so every value in +// the array must have the same length. The values in the array are provided +// to the macro via a single pointer -- the caller is responsible for packing +// all of the values into a single region of memory with no padding between +// values. 
+// - Values of type win:CountedUnicodeString, win:CountedAnsiString, and +// win:CountedBinary can be generated and collected on Vista or later. +// However, they may not decode properly without the Windows 10 2018 Fall +// Update. +// - Arrays of type win:CountedUnicodeString, win:CountedAnsiString, and +// win:CountedBinary must be packed by the caller into a single region of +// memory. The format for each item is a UINT16 byte-count followed by that +// many bytes of data. When providing the array to the generated macro, you +// must provide the total size of the packed array data, including the UINT16 +// sizes for each item. In the case of win:CountedUnicodeString, the data +// size is specified in WCHAR (16-bit) units. In the case of +// win:CountedAnsiString and win:CountedBinary, the data size is specified in +// bytes. +// +//***************************************************************************** + +#include +#include +#include + +#ifndef ETW_INLINE + #ifdef _ETW_KM_ + // In kernel mode, save stack space by never inlining templates. + #define ETW_INLINE DECLSPEC_NOINLINE __inline + #else + // In user mode, save code size by inlining templates as appropriate. + #define ETW_INLINE __inline + #endif +#endif // ETW_INLINE + +#if defined(__cplusplus) +extern "C" { +#endif + +// +// MCGEN_DISABLE_PROVIDER_CODE_GENERATION macro: +// Define this macro to have the compiler skip the generated functions in this +// header. +// +#ifndef MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +// +// MCGEN_USE_KERNEL_MODE_APIS macro: +// Controls whether the generated code uses kernel-mode or user-mode APIs. +// - Set to 0 to use Windows user-mode APIs such as EventRegister. +// - Set to 1 to use Windows kernel-mode APIs such as EtwRegister. +// Default is based on whether the _ETW_KM_ macro is defined (i.e. by wdm.h). +// Note that the APIs can also be overridden directly, e.g. by setting the +// MCGEN_EVENTWRITETRANSFER or MCGEN_EVENTREGISTER macros. +// +#ifndef MCGEN_USE_KERNEL_MODE_APIS + #ifdef _ETW_KM_ + #define MCGEN_USE_KERNEL_MODE_APIS 1 + #else + #define MCGEN_USE_KERNEL_MODE_APIS 0 + #endif +#endif // MCGEN_USE_KERNEL_MODE_APIS + +// +// MCGEN_HAVE_EVENTSETINFORMATION macro: +// Controls how McGenEventSetInformation uses the EventSetInformation API. +// - Set to 0 to disable the use of EventSetInformation +// (McGenEventSetInformation will always return an error). +// - Set to 1 to directly invoke MCGEN_EVENTSETINFORMATION. +// - Set to 2 to to locate EventSetInformation at runtime via GetProcAddress +// (user-mode) or MmGetSystemRoutineAddress (kernel-mode). +// Default is determined as follows: +// - If MCGEN_EVENTSETINFORMATION has been customized, set to 1 +// (i.e. use MCGEN_EVENTSETINFORMATION). +// - Else if the target OS version has EventSetInformation, set to 1 +// (i.e. use MCGEN_EVENTSETINFORMATION). +// - Else set to 2 (i.e. try to dynamically locate EventSetInformation). +// Note that an McGenEventSetInformation function will only be generated if one +// or more provider in a manifest has provider traits. +// +#ifndef MCGEN_HAVE_EVENTSETINFORMATION + #ifdef MCGEN_EVENTSETINFORMATION // if MCGEN_EVENTSETINFORMATION has been customized, + #define MCGEN_HAVE_EVENTSETINFORMATION 1 // directly invoke MCGEN_EVENTSETINFORMATION(...). + #elif MCGEN_USE_KERNEL_MODE_APIS // else if using kernel-mode APIs, + #if NTDDI_VERSION >= 0x06040000 // if target OS is Windows 10 or later, + #define MCGEN_HAVE_EVENTSETINFORMATION 1 // directly invoke MCGEN_EVENTSETINFORMATION(...). 
+ #else // else + #define MCGEN_HAVE_EVENTSETINFORMATION 2 // find "EtwSetInformation" via MmGetSystemRoutineAddress. + #endif // else (using user-mode APIs) + #else // if target OS and SDK is Windows 8 or later, + #if WINVER >= 0x0602 && defined(EVENT_FILTER_TYPE_SCHEMATIZED) + #define MCGEN_HAVE_EVENTSETINFORMATION 1 // directly invoke MCGEN_EVENTSETINFORMATION(...). + #else // else + #define MCGEN_HAVE_EVENTSETINFORMATION 2 // find "EventSetInformation" via GetModuleHandleExW/GetProcAddress. + #endif + #endif +#endif // MCGEN_HAVE_EVENTSETINFORMATION + +// +// MCGEN Override Macros +// +// The following override macros may be defined before including this header +// to control the APIs used by this header: +// +// - MCGEN_EVENTREGISTER +// - MCGEN_EVENTUNREGISTER +// - MCGEN_EVENTSETINFORMATION +// - MCGEN_EVENTWRITETRANSFER +// +// If the the macro is undefined, the MC implementation will default to the +// corresponding ETW APIs. For example, if the MCGEN_EVENTREGISTER macro is +// undefined, the EventRegister[MyProviderName] macro will use EventRegister +// in user mode and will use EtwRegister in kernel mode. +// +// To prevent issues from conflicting definitions of these macros, the value +// of the override macro will be used as a suffix in certain internal function +// names. Because of this, the override macros must follow certain rules: +// +// - The macro must be defined before any MC-generated header is included and +// must not be undefined or redefined after any MC-generated header is +// included. Different translation units (i.e. different .c or .cpp files) +// may set the macros to different values, but within a translation unit +// (within a single .c or .cpp file), the macro must be set once and not +// changed. +// - The override must be an object-like macro, not a function-like macro +// (i.e. the override macro must not have a parameter list). +// - The override macro's value must be a simple identifier, i.e. must be +// something that starts with a letter or '_' and contains only letters, +// numbers, and '_' characters. +// - If the override macro's value is the name of a second object-like macro, +// the second object-like macro must follow the same rules. (The override +// macro's value can also be the name of a function-like macro, in which +// case the function-like macro does not need to follow the same rules.) +// +// For example, the following will cause compile errors: +// +// #define MCGEN_EVENTWRITETRANSFER MyNamespace::MyClass::MyFunction // Value has non-identifier characters (colon). +// #define MCGEN_EVENTWRITETRANSFER GetEventWriteFunctionPointer(7) // Value has non-identifier characters (parentheses). +// #define MCGEN_EVENTWRITETRANSFER(h,e,a,r,c,d) EventWrite(h,e,c,d) // Override is defined as a function-like macro. +// #define MY_OBJECT_LIKE_MACRO MyNamespace::MyClass::MyEventWriteFunction +// #define MCGEN_EVENTWRITETRANSFER MY_OBJECT_LIKE_MACRO // Evaluates to something with non-identifier characters (colon). +// +// The following would be ok: +// +// #define MCGEN_EVENTWRITETRANSFER MyEventWriteFunction1 // OK, suffix will be "MyEventWriteFunction1". +// #define MY_OBJECT_LIKE_MACRO MyEventWriteFunction2 +// #define MCGEN_EVENTWRITETRANSFER MY_OBJECT_LIKE_MACRO // OK, suffix will be "MyEventWriteFunction2". +// #define MY_FUNCTION_LIKE_MACRO(h,e,a,r,c,d) MyNamespace::MyClass::MyEventWriteFunction3(h,e,c,d) +// #define MCGEN_EVENTWRITETRANSFER MY_FUNCTION_LIKE_MACRO // OK, suffix will be "MY_FUNCTION_LIKE_MACRO". 
+// +#ifndef MCGEN_EVENTREGISTER + #if MCGEN_USE_KERNEL_MODE_APIS + #define MCGEN_EVENTREGISTER EtwRegister + #else + #define MCGEN_EVENTREGISTER EventRegister + #endif +#endif // MCGEN_EVENTREGISTER +#ifndef MCGEN_EVENTUNREGISTER + #if MCGEN_USE_KERNEL_MODE_APIS + #define MCGEN_EVENTUNREGISTER EtwUnregister + #else + #define MCGEN_EVENTUNREGISTER EventUnregister + #endif +#endif // MCGEN_EVENTUNREGISTER +#ifndef MCGEN_EVENTSETINFORMATION + #if MCGEN_USE_KERNEL_MODE_APIS + #define MCGEN_EVENTSETINFORMATION EtwSetInformation + #else + #define MCGEN_EVENTSETINFORMATION EventSetInformation + #endif +#endif // MCGEN_EVENTSETINFORMATION +#ifndef MCGEN_EVENTWRITETRANSFER + #if MCGEN_USE_KERNEL_MODE_APIS + #define MCGEN_EVENTWRITETRANSFER EtwWriteTransfer + #else + #define MCGEN_EVENTWRITETRANSFER EventWriteTransfer + #endif +#endif // MCGEN_EVENTWRITETRANSFER + +// +// MCGEN_EVENT_ENABLED macro: +// Override to control how the EventWrite[EventName] macros determine whether +// an event is enabled. The default behavior is for EventWrite[EventName] to +// use the EventEnabled[EventName] macros. +// +#ifndef MCGEN_EVENT_ENABLED +#define MCGEN_EVENT_ENABLED(EventName) EventEnabled##EventName() +#endif + +// +// MCGEN_EVENT_ENABLED_FORCONTEXT macro: +// Override to control how the EventWrite[EventName]_ForContext macros +// determine whether an event is enabled. The default behavior is for +// EventWrite[EventName]_ForContext to use the +// EventEnabled[EventName]_ForContext macros. +// +#ifndef MCGEN_EVENT_ENABLED_FORCONTEXT +#define MCGEN_EVENT_ENABLED_FORCONTEXT(pContext, EventName) EventEnabled##EventName##_ForContext(pContext) +#endif + +// +// MCGEN_ENABLE_CHECK macro: +// Determines whether the specified event would be considered as enabled +// based on the state of the specified context. Slightly faster than calling +// McGenEventEnabled directly. +// +#ifndef MCGEN_ENABLE_CHECK +#define MCGEN_ENABLE_CHECK(Context, Descriptor) (Context.IsEnabled && McGenEventEnabled(&Context, &Descriptor)) +#endif + +#if !defined(MCGEN_TRACE_CONTEXT_DEF) +#define MCGEN_TRACE_CONTEXT_DEF +// This structure is for use by MC-generated code and should not be used directly. +typedef struct _MCGEN_TRACE_CONTEXT +{ + TRACEHANDLE RegistrationHandle; + TRACEHANDLE Logger; // Used as pointer to provider traits. + ULONGLONG MatchAnyKeyword; + ULONGLONG MatchAllKeyword; + ULONG Flags; + ULONG IsEnabled; + UCHAR Level; + UCHAR Reserve; + USHORT EnableBitsCount; + PULONG EnableBitMask; + const ULONGLONG* EnableKeyWords; + const UCHAR* EnableLevel; +} MCGEN_TRACE_CONTEXT, *PMCGEN_TRACE_CONTEXT; +#endif // MCGEN_TRACE_CONTEXT_DEF + +#if !defined(MCGEN_LEVEL_KEYWORD_ENABLED_DEF) +#define MCGEN_LEVEL_KEYWORD_ENABLED_DEF +// +// Determines whether an event with a given Level and Keyword would be +// considered as enabled based on the state of the specified context. +// Note that you may want to use MCGEN_ENABLE_CHECK instead of calling this +// function directly. +// +FORCEINLINE +BOOLEAN +McGenLevelKeywordEnabled( + _In_ PMCGEN_TRACE_CONTEXT EnableInfo, + _In_ UCHAR Level, + _In_ ULONGLONG Keyword + ) +{ + // + // Check if the event Level is lower than the level at which + // the channel is enabled. + // If the event Level is 0 or the channel is enabled at level 0, + // all levels are enabled. + // + + if ((Level <= EnableInfo->Level) || // This also covers the case of Level == 0. 
+ (EnableInfo->Level == 0)) { + + // + // Check if Keyword is enabled + // + + if ((Keyword == (ULONGLONG)0) || + ((Keyword & EnableInfo->MatchAnyKeyword) && + ((Keyword & EnableInfo->MatchAllKeyword) == EnableInfo->MatchAllKeyword))) { + return TRUE; + } + } + + return FALSE; +} +#endif // MCGEN_LEVEL_KEYWORD_ENABLED_DEF + +#if !defined(MCGEN_EVENT_ENABLED_DEF) +#define MCGEN_EVENT_ENABLED_DEF +// +// Determines whether the specified event would be considered as enabled based +// on the state of the specified context. Note that you may want to use +// MCGEN_ENABLE_CHECK instead of calling this function directly. +// +FORCEINLINE +BOOLEAN +McGenEventEnabled( + _In_ PMCGEN_TRACE_CONTEXT EnableInfo, + _In_ PCEVENT_DESCRIPTOR EventDescriptor + ) +{ + return McGenLevelKeywordEnabled(EnableInfo, EventDescriptor->Level, EventDescriptor->Keyword); +} +#endif // MCGEN_EVENT_ENABLED_DEF + +#if !defined(MCGEN_CONTROL_CALLBACK) +#define MCGEN_CONTROL_CALLBACK + +// This function is for use by MC-generated code and should not be used directly. +DECLSPEC_NOINLINE __inline +VOID +__stdcall +McGenControlCallbackV2( + _In_ LPCGUID SourceId, + _In_ ULONG ControlCode, + _In_ UCHAR Level, + _In_ ULONGLONG MatchAnyKeyword, + _In_ ULONGLONG MatchAllKeyword, + _In_opt_ PEVENT_FILTER_DESCRIPTOR FilterData, + _Inout_opt_ PVOID CallbackContext + ) +/*++ + +Routine Description: + + This is the notification callback for Windows Vista and later. + +Arguments: + + SourceId - The GUID that identifies the session that enabled the provider. + + ControlCode - The parameter indicates whether the provider + is being enabled or disabled. + + Level - The level at which the event is enabled. + + MatchAnyKeyword - The bitmask of keywords that the provider uses to + determine the category of events that it writes. + + MatchAllKeyword - This bitmask additionally restricts the category + of events that the provider writes. + + FilterData - The provider-defined data. + + CallbackContext - The context of the callback that is defined when the provider + called EtwRegister to register itself. 
+ +Remarks: + + ETW calls this function to notify provider of enable/disable + +--*/ +{ + PMCGEN_TRACE_CONTEXT Ctx = (PMCGEN_TRACE_CONTEXT)CallbackContext; + ULONG Ix; +#ifndef MCGEN_PRIVATE_ENABLE_CALLBACK_V2 + UNREFERENCED_PARAMETER(SourceId); + UNREFERENCED_PARAMETER(FilterData); +#endif + + if (Ctx == NULL) { + return; + } + + switch (ControlCode) { + + case EVENT_CONTROL_CODE_ENABLE_PROVIDER: + Ctx->Level = Level; + Ctx->MatchAnyKeyword = MatchAnyKeyword; + Ctx->MatchAllKeyword = MatchAllKeyword; + Ctx->IsEnabled = EVENT_CONTROL_CODE_ENABLE_PROVIDER; + + for (Ix = 0; Ix < Ctx->EnableBitsCount; Ix += 1) { + if (McGenLevelKeywordEnabled(Ctx, Ctx->EnableLevel[Ix], Ctx->EnableKeyWords[Ix]) != FALSE) { + Ctx->EnableBitMask[Ix >> 5] |= (1 << (Ix % 32)); + } else { + Ctx->EnableBitMask[Ix >> 5] &= ~(1 << (Ix % 32)); + } + } + break; + + case EVENT_CONTROL_CODE_DISABLE_PROVIDER: + Ctx->IsEnabled = EVENT_CONTROL_CODE_DISABLE_PROVIDER; + Ctx->Level = 0; + Ctx->MatchAnyKeyword = 0; + Ctx->MatchAllKeyword = 0; + if (Ctx->EnableBitsCount > 0) { +#pragma warning(suppress: 26451) // Arithmetic overflow cannot occur, no matter the value of EnableBitCount + RtlZeroMemory(Ctx->EnableBitMask, (((Ctx->EnableBitsCount - 1) / 32) + 1) * sizeof(ULONG)); + } + break; + + default: + break; + } + +#ifdef MCGEN_PRIVATE_ENABLE_CALLBACK_V2 + // + // Call user defined callback + // + MCGEN_PRIVATE_ENABLE_CALLBACK_V2( + SourceId, + ControlCode, + Level, + MatchAnyKeyword, + MatchAllKeyword, + FilterData, + CallbackContext + ); +#endif // MCGEN_PRIVATE_ENABLE_CALLBACK_V2 + + return; +} + +#endif // MCGEN_CONTROL_CALLBACK + +#ifndef _mcgen_PENABLECALLBACK + #if MCGEN_USE_KERNEL_MODE_APIS + #define _mcgen_PENABLECALLBACK PETWENABLECALLBACK + #else + #define _mcgen_PENABLECALLBACK PENABLECALLBACK + #endif +#endif // _mcgen_PENABLECALLBACK + +#if !defined(_mcgen_PASTE2) +// This macro is for use by MC-generated code and should not be used directly. +#define _mcgen_PASTE2(a, b) _mcgen_PASTE2_imp(a, b) +#define _mcgen_PASTE2_imp(a, b) a##b +#endif // _mcgen_PASTE2 + +#if !defined(_mcgen_PASTE3) +// This macro is for use by MC-generated code and should not be used directly. 
+#define _mcgen_PASTE3(a, b, c) _mcgen_PASTE3_imp(a, b, c) +#define _mcgen_PASTE3_imp(a, b, c) a##b##_##c +#endif // _mcgen_PASTE3 + +// +// Macro validation +// + +// Validate MCGEN_EVENTREGISTER: + +// Trigger an error if MCGEN_EVENTREGISTER is not an unqualified (simple) identifier: +struct _mcgen_PASTE2(MCGEN_EVENTREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTREGISTER); + +// Trigger an error if MCGEN_EVENTREGISTER is redefined: +typedef struct _mcgen_PASTE2(MCGEN_EVENTREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTREGISTER) + MCGEN_EVENTREGISTER_must_not_be_redefined_between_headers; + +// Trigger an error if MCGEN_EVENTREGISTER is defined as a function-like macro: +typedef void MCGEN_EVENTREGISTER_must_not_be_a_functionLike_macro_MCGEN_EVENTREGISTER; +typedef int _mcgen_PASTE2(MCGEN_EVENTREGISTER_must_not_be_a_functionLike_macro_, MCGEN_EVENTREGISTER); + +// Validate MCGEN_EVENTUNREGISTER: + +// Trigger an error if MCGEN_EVENTUNREGISTER is not an unqualified (simple) identifier: +struct _mcgen_PASTE2(MCGEN_EVENTUNREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTUNREGISTER); + +// Trigger an error if MCGEN_EVENTUNREGISTER is redefined: +typedef struct _mcgen_PASTE2(MCGEN_EVENTUNREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTUNREGISTER) + MCGEN_EVENTUNREGISTER_must_not_be_redefined_between_headers; + +// Trigger an error if MCGEN_EVENTUNREGISTER is defined as a function-like macro: +typedef void MCGEN_EVENTUNREGISTER_must_not_be_a_functionLike_macro_MCGEN_EVENTUNREGISTER; +typedef int _mcgen_PASTE2(MCGEN_EVENTUNREGISTER_must_not_be_a_functionLike_macro_, MCGEN_EVENTUNREGISTER); + +// Validate MCGEN_EVENTSETINFORMATION: + +// Trigger an error if MCGEN_EVENTSETINFORMATION is not an unqualified (simple) identifier: +struct _mcgen_PASTE2(MCGEN_EVENTSETINFORMATION_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTSETINFORMATION); + +// Trigger an error if MCGEN_EVENTSETINFORMATION is redefined: +typedef struct _mcgen_PASTE2(MCGEN_EVENTSETINFORMATION_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTSETINFORMATION) + MCGEN_EVENTSETINFORMATION_must_not_be_redefined_between_headers; + +// Trigger an error if MCGEN_EVENTSETINFORMATION is defined as a function-like macro: +typedef void MCGEN_EVENTSETINFORMATION_must_not_be_a_functionLike_macro_MCGEN_EVENTSETINFORMATION; +typedef int _mcgen_PASTE2(MCGEN_EVENTSETINFORMATION_must_not_be_a_functionLike_macro_, MCGEN_EVENTSETINFORMATION); + +// Validate MCGEN_EVENTWRITETRANSFER: + +// Trigger an error if MCGEN_EVENTWRITETRANSFER is not an unqualified (simple) identifier: +struct _mcgen_PASTE2(MCGEN_EVENTWRITETRANSFER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTWRITETRANSFER); + +// Trigger an error if MCGEN_EVENTWRITETRANSFER is redefined: +typedef struct _mcgen_PASTE2(MCGEN_EVENTWRITETRANSFER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTWRITETRANSFER) + MCGEN_EVENTWRITETRANSFER_must_not_be_redefined_between_headers;; + +// Trigger an error if MCGEN_EVENTWRITETRANSFER is defined as a function-like macro: +typedef void MCGEN_EVENTWRITETRANSFER_must_not_be_a_functionLike_macro_MCGEN_EVENTWRITETRANSFER; +typedef int _mcgen_PASTE2(MCGEN_EVENTWRITETRANSFER_must_not_be_a_functionLike_macro_, MCGEN_EVENTWRITETRANSFER); + +#ifndef McGenEventWrite_def +#define McGenEventWrite_def + +// This macro is for use by MC-generated code and should not be used directly. 
+#define McGenEventWrite _mcgen_PASTE2(McGenEventWrite_, MCGEN_EVENTWRITETRANSFER) + +// This function is for use by MC-generated code and should not be used directly. +DECLSPEC_NOINLINE __inline +ULONG __stdcall +McGenEventWrite( + _In_ PMCGEN_TRACE_CONTEXT Context, + _In_ PCEVENT_DESCRIPTOR Descriptor, + _In_opt_ LPCGUID ActivityId, + _In_range_(1, 128) ULONG EventDataCount, + _Pre_cap_(EventDataCount) EVENT_DATA_DESCRIPTOR* EventData + ) +{ + const USHORT UNALIGNED* Traits; + + // Some customized MCGEN_EVENTWRITETRANSFER macros might ignore ActivityId. + UNREFERENCED_PARAMETER(ActivityId); + + Traits = (const USHORT UNALIGNED*)(UINT_PTR)Context->Logger; + + if (Traits == NULL) { + EventData[0].Ptr = 0; + EventData[0].Size = 0; + EventData[0].Reserved = 0; + } else { + EventData[0].Ptr = (ULONG_PTR)Traits; + EventData[0].Size = *Traits; + EventData[0].Reserved = 2; // EVENT_DATA_DESCRIPTOR_TYPE_PROVIDER_METADATA + } + + return MCGEN_EVENTWRITETRANSFER( + Context->RegistrationHandle, + Descriptor, + ActivityId, + NULL, + EventDataCount, + EventData); +} +#endif // McGenEventWrite_def + +#if !defined(McGenEventRegisterUnregister) +#define McGenEventRegisterUnregister + +// This macro is for use by MC-generated code and should not be used directly. +#define McGenEventRegister _mcgen_PASTE2(McGenEventRegister_, MCGEN_EVENTREGISTER) + +#pragma warning(push) +#pragma warning(disable:6103) +// This function is for use by MC-generated code and should not be used directly. +DECLSPEC_NOINLINE __inline +ULONG __stdcall +McGenEventRegister( + _In_ LPCGUID ProviderId, + _In_opt_ _mcgen_PENABLECALLBACK EnableCallback, + _In_opt_ PVOID CallbackContext, + _Inout_ PREGHANDLE RegHandle + ) +/*++ + +Routine Description: + + This function registers the provider with ETW. + +Arguments: + + ProviderId - Provider ID to register with ETW. + + EnableCallback - Callback to be used. + + CallbackContext - Context for the callback. + + RegHandle - Pointer to registration handle. + +Remarks: + + Should not be called if the provider is already registered (i.e. should not + be called if *RegHandle != 0). Repeatedly registering a provider is a bug + and may indicate a race condition. However, for compatibility with previous + behavior, this function will return SUCCESS in this case. + +--*/ +{ + ULONG Error; + + if (*RegHandle != 0) + { + Error = 0; // ERROR_SUCCESS + } + else + { + Error = MCGEN_EVENTREGISTER(ProviderId, EnableCallback, CallbackContext, RegHandle); + } + + return Error; +} +#pragma warning(pop) + +// This macro is for use by MC-generated code and should not be used directly. +#define McGenEventUnregister _mcgen_PASTE2(McGenEventUnregister_, MCGEN_EVENTUNREGISTER) + +// This function is for use by MC-generated code and should not be used directly. +DECLSPEC_NOINLINE __inline +ULONG __stdcall +McGenEventUnregister(_Inout_ PREGHANDLE RegHandle) +/*++ + +Routine Description: + + Unregister from ETW and set *RegHandle = 0. + +Arguments: + + RegHandle - the pointer to the provider registration handle + +Remarks: + + If provider has not been registered (i.e. if *RegHandle == 0), + return SUCCESS. It is safe to call McGenEventUnregister even if the + call to McGenEventRegister returned an error. 
+ +--*/ +{ + ULONG Error; + + if(*RegHandle == 0) + { + Error = 0; // ERROR_SUCCESS + } + else + { + Error = MCGEN_EVENTUNREGISTER(*RegHandle); + *RegHandle = (REGHANDLE)0; + } + + return Error; +} + +#endif // McGenEventRegisterUnregister + +#ifndef _mcgen_EVENT_BIT_SET + #if defined(_M_IX86) || defined(_M_X64) + // This macro is for use by MC-generated code and should not be used directly. + #define _mcgen_EVENT_BIT_SET(EnableBits, BitPosition) ((((const unsigned char*)EnableBits)[BitPosition >> 3] & (1u << (BitPosition & 7))) != 0) + #else // CPU type + // This macro is for use by MC-generated code and should not be used directly. + #define _mcgen_EVENT_BIT_SET(EnableBits, BitPosition) ((EnableBits[BitPosition >> 5] & (1u << (BitPosition & 31))) != 0) + #endif // CPU type +#endif // _mcgen_EVENT_BIT_SET + +#endif // MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +// Provider "microsoft-windows-mimalloc" event count 2 +//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +// Provider GUID = 138f4dbb-ee04-4899-aa0a-572ad4475779 +EXTERN_C __declspec(selectany) const GUID ETW_MI_Provider = {0x138f4dbb, 0xee04, 0x4899, {0xaa, 0x0a, 0x57, 0x2a, 0xd4, 0x47, 0x57, 0x79}}; + +#ifndef ETW_MI_Provider_Traits +#define ETW_MI_Provider_Traits NULL +#endif // ETW_MI_Provider_Traits + +// +// Event Descriptors +// +EXTERN_C __declspec(selectany) const EVENT_DESCRIPTOR ETW_MI_ALLOC = {0x64, 0x1, 0x0, 0x4, 0x0, 0x0, 0x0}; +#define ETW_MI_ALLOC_value 0x64 +EXTERN_C __declspec(selectany) const EVENT_DESCRIPTOR ETW_MI_FREE = {0x65, 0x1, 0x0, 0x4, 0x0, 0x0, 0x0}; +#define ETW_MI_FREE_value 0x65 + +// +// MCGEN_DISABLE_PROVIDER_CODE_GENERATION macro: +// Define this macro to have the compiler skip the generated functions in this +// header. +// +#ifndef MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +// +// Event Enablement Bits +// These variables are for use by MC-generated code and should not be used directly. +// +EXTERN_C __declspec(selectany) DECLSPEC_CACHEALIGN ULONG microsoft_windows_mimallocEnableBits[1]; +EXTERN_C __declspec(selectany) const ULONGLONG microsoft_windows_mimallocKeywords[1] = {0x0}; +EXTERN_C __declspec(selectany) const unsigned char microsoft_windows_mimallocLevels[1] = {4}; + +// +// Provider context +// +EXTERN_C __declspec(selectany) MCGEN_TRACE_CONTEXT ETW_MI_Provider_Context = {0, (ULONG_PTR)ETW_MI_Provider_Traits, 0, 0, 0, 0, 0, 0, 1, microsoft_windows_mimallocEnableBits, microsoft_windows_mimallocKeywords, microsoft_windows_mimallocLevels}; + +// +// Provider REGHANDLE +// +#define microsoft_windows_mimallocHandle (ETW_MI_Provider_Context.RegistrationHandle) + +// +// This macro is set to 0, indicating that the EventWrite[Name] macros do not +// have an Activity parameter. This is controlled by the -km and -um options. +// +#define ETW_MI_Provider_EventWriteActivity 0 + +// +// Register with ETW using the control GUID specified in the manifest. +// Invoke this macro during module initialization (i.e. program startup, +// DLL process attach, or driver load) to initialize the provider. +// Note that if this function returns an error, the error means that +// will not work, but no action needs to be taken -- even if EventRegister +// returns an error, it is generally safe to use EventWrite and +// EventUnregister macros (they will be no-ops if EventRegister failed). 
//
+#ifndef EventRegistermicrosoft_windows_mimalloc
+#define EventRegistermicrosoft_windows_mimalloc() McGenEventRegister(&ETW_MI_Provider, McGenControlCallbackV2, &ETW_MI_Provider_Context, &microsoft_windows_mimallocHandle)
+#endif
+
+//
+// Register with ETW using a specific control GUID (i.e. a GUID other than what
+// is specified in the manifest). Advanced scenarios only.
+//
+#ifndef EventRegisterByGuidmicrosoft_windows_mimalloc
+#define EventRegisterByGuidmicrosoft_windows_mimalloc(Guid) McGenEventRegister(&(Guid), McGenControlCallbackV2, &ETW_MI_Provider_Context, &microsoft_windows_mimallocHandle)
+#endif
+
+//
+// Unregister with ETW and close the provider.
+// Invoke this macro during module shutdown (i.e. program exit, DLL process
+// detach, or driver unload) to unregister the provider.
+// Note that you MUST call EventUnregister before DLL or driver unload
+// (not optional): failure to unregister a provider before DLL or driver unload
+// will result in crashes.
+//
+#ifndef EventUnregistermicrosoft_windows_mimalloc
+#define EventUnregistermicrosoft_windows_mimalloc() McGenEventUnregister(&microsoft_windows_mimallocHandle)
+#endif
+
+//
+// MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION macro:
+// Define this macro to enable support for caller-allocated provider context.
+//
+#ifdef MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION
+
+//
+// Advanced scenarios: Caller-allocated provider context.
+// Use when multiple differently-configured provider handles are needed,
+// e.g. for container-aware drivers, one context per container.
+//
+// Usage:
+//
+// - Caller enables the feature before including this header, e.g.
+//   #define MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION 1
+// - Caller allocates memory, e.g. pContext = malloc(sizeof(McGenContext_microsoft_windows_mimalloc));
+// - Caller registers the provider, e.g. EventRegistermicrosoft_windows_mimalloc_ForContext(pContext);
+// - Caller writes events, e.g. EventWriteMyEvent_ForContext(pContext, ...);
+// - Caller unregisters, e.g. EventUnregistermicrosoft_windows_mimalloc_ForContext(pContext);
+// - Caller frees memory, e.g. free(pContext);
+//
+
+typedef struct tagMcGenContext_microsoft_windows_mimalloc {
+  // The fields of this structure are subject to change and should
+  // not be accessed directly. To access the provider's REGHANDLE,
+  // use microsoft_windows_mimallocHandle_ForContext(pContext).
+  MCGEN_TRACE_CONTEXT Context;
+  ULONG EnableBits[1];
+} McGenContext_microsoft_windows_mimalloc;
+
+#define EventRegistermicrosoft_windows_mimalloc_ForContext(pContext) _mcgen_PASTE2(_mcgen_RegisterForContext_microsoft_windows_mimalloc_, MCGEN_EVENTREGISTER)(&ETW_MI_Provider, pContext)
+#define EventRegisterByGuidmicrosoft_windows_mimalloc_ForContext(Guid, pContext) _mcgen_PASTE2(_mcgen_RegisterForContext_microsoft_windows_mimalloc_, MCGEN_EVENTREGISTER)(&(Guid), pContext)
+#define EventUnregistermicrosoft_windows_mimalloc_ForContext(pContext) McGenEventUnregister(&(pContext)->Context.RegistrationHandle)
+
+//
+// Provider REGHANDLE for caller-allocated context.
+//
+#define microsoft_windows_mimallocHandle_ForContext(pContext) ((pContext)->Context.RegistrationHandle)
+
+// This function is for use by MC-generated code and should not be used directly.
+// Initialize and register the caller-allocated context.
+__inline +ULONG __stdcall +_mcgen_PASTE2(_mcgen_RegisterForContext_microsoft_windows_mimalloc_, MCGEN_EVENTREGISTER)( + _In_ LPCGUID pProviderId, + _Out_ McGenContext_microsoft_windows_mimalloc* pContext) +{ + RtlZeroMemory(pContext, sizeof(*pContext)); + pContext->Context.Logger = (ULONG_PTR)ETW_MI_Provider_Traits; + pContext->Context.EnableBitsCount = 1; + pContext->Context.EnableBitMask = pContext->EnableBits; + pContext->Context.EnableKeyWords = microsoft_windows_mimallocKeywords; + pContext->Context.EnableLevel = microsoft_windows_mimallocLevels; + return McGenEventRegister( + pProviderId, + McGenControlCallbackV2, + &pContext->Context, + &pContext->Context.RegistrationHandle); +} + +// This function is for use by MC-generated code and should not be used directly. +// Trigger a compile error if called with the wrong parameter type. +FORCEINLINE +_Ret_ McGenContext_microsoft_windows_mimalloc* +_mcgen_CheckContextType_microsoft_windows_mimalloc(_In_ McGenContext_microsoft_windows_mimalloc* pContext) +{ + return pContext; +} + +#endif // MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION + +// +// Enablement check macro for event "ETW_MI_ALLOC" +// +#define EventEnabledETW_MI_ALLOC() _mcgen_EVENT_BIT_SET(microsoft_windows_mimallocEnableBits, 0) +#define EventEnabledETW_MI_ALLOC_ForContext(pContext) _mcgen_EVENT_BIT_SET(_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->EnableBits, 0) + +// +// Event write macros for event "ETW_MI_ALLOC" +// +#define EventWriteETW_MI_ALLOC(Address, Size) \ + MCGEN_EVENT_ENABLED(ETW_MI_ALLOC) \ + ? _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&ETW_MI_Provider_Context, &ETW_MI_ALLOC, Address, Size) : 0 +#define EventWriteETW_MI_ALLOC_AssumeEnabled(Address, Size) \ + _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&ETW_MI_Provider_Context, &ETW_MI_ALLOC, Address, Size) +#define EventWriteETW_MI_ALLOC_ForContext(pContext, Address, Size) \ + MCGEN_EVENT_ENABLED_FORCONTEXT(pContext, ETW_MI_ALLOC) \ + ? _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&(pContext)->Context, &ETW_MI_ALLOC, Address, Size) : 0 +#define EventWriteETW_MI_ALLOC_ForContextAssumeEnabled(pContext, Address, Size) \ + _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->Context, &ETW_MI_ALLOC, Address, Size) + +// This macro is for use by MC-generated code and should not be used directly. +#define _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC _mcgen_PASTE2(McTemplateU0xx_, MCGEN_EVENTWRITETRANSFER) + +// +// Enablement check macro for event "ETW_MI_FREE" +// +#define EventEnabledETW_MI_FREE() _mcgen_EVENT_BIT_SET(microsoft_windows_mimallocEnableBits, 0) +#define EventEnabledETW_MI_FREE_ForContext(pContext) _mcgen_EVENT_BIT_SET(_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->EnableBits, 0) + +// +// Event write macros for event "ETW_MI_FREE" +// +#define EventWriteETW_MI_FREE(Address, Size) \ + MCGEN_EVENT_ENABLED(ETW_MI_FREE) \ + ? _mcgen_TEMPLATE_FOR_ETW_MI_FREE(&ETW_MI_Provider_Context, &ETW_MI_FREE, Address, Size) : 0 +#define EventWriteETW_MI_FREE_AssumeEnabled(Address, Size) \ + _mcgen_TEMPLATE_FOR_ETW_MI_FREE(&ETW_MI_Provider_Context, &ETW_MI_FREE, Address, Size) +#define EventWriteETW_MI_FREE_ForContext(pContext, Address, Size) \ + MCGEN_EVENT_ENABLED_FORCONTEXT(pContext, ETW_MI_FREE) \ + ? 
_mcgen_TEMPLATE_FOR_ETW_MI_FREE(&(pContext)->Context, &ETW_MI_FREE, Address, Size) : 0 +#define EventWriteETW_MI_FREE_ForContextAssumeEnabled(pContext, Address, Size) \ + _mcgen_TEMPLATE_FOR_ETW_MI_FREE(&_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->Context, &ETW_MI_FREE, Address, Size) + +// This macro is for use by MC-generated code and should not be used directly. +#define _mcgen_TEMPLATE_FOR_ETW_MI_FREE _mcgen_PASTE2(McTemplateU0xx_, MCGEN_EVENTWRITETRANSFER) + +#endif // MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +// +// MCGEN_DISABLE_PROVIDER_CODE_GENERATION macro: +// Define this macro to have the compiler skip the generated functions in this +// header. +// +#ifndef MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +// +// Template Functions +// + +// +// Function for template "ETW_CUSTOM_HEAP_ALLOC_DATA" (and possibly others). +// This function is for use by MC-generated code and should not be used directly. +// +#ifndef McTemplateU0xx_def +#define McTemplateU0xx_def +ETW_INLINE +ULONG +_mcgen_PASTE2(McTemplateU0xx_, MCGEN_EVENTWRITETRANSFER)( + _In_ PMCGEN_TRACE_CONTEXT Context, + _In_ PCEVENT_DESCRIPTOR Descriptor, + _In_ const unsigned __int64 _Arg0, + _In_ const unsigned __int64 _Arg1 + ) +{ +#define McTemplateU0xx_ARGCOUNT 2 + + EVENT_DATA_DESCRIPTOR EventData[McTemplateU0xx_ARGCOUNT + 1]; + + EventDataDescCreate(&EventData[1],&_Arg0, sizeof(const unsigned __int64) ); + + EventDataDescCreate(&EventData[2],&_Arg1, sizeof(const unsigned __int64) ); + + return McGenEventWrite(Context, Descriptor, NULL, McTemplateU0xx_ARGCOUNT + 1, EventData); +} +#endif // McTemplateU0xx_def + +#endif // MCGEN_DISABLE_PROVIDER_CODE_GENERATION + +#if defined(__cplusplus) +} +#endif diff --git a/ww/managers/mimalloc/src/prim/windows/etw.man b/ww/managers/mimalloc/src/prim/windows/etw.man new file mode 100644 index 0000000000000000000000000000000000000000..cfd1f8a9eaacd50af63f1e28f9540aa88c20f90c GIT binary patch literal 3926 zcmeH~T~8B16o${WiT`2c+NGc<*i;EYh$d8xl<0*CS-Mb~vPP(R>hsQY*nU!q z$b})3?##}d?{nTW+uy%xwr*doYaNU1!Vc}sa%7F%g+hVAmL$hwL?4dodnmuAKhUXm0)X9|WHj>XRahh@~SWDIkbduX;B#u6^Q>@U= zDO8U+KXa0*th&%f*z^Udh4ol@uE=Q&`emUsh_CA`FOXgI{i-`XZ9C#bR1yBm=PJ*p z9kVN$J6O;h;8HY>p)RnhY8A#Hb?z)_!y(Iaen(I)@-9CrSSp(;_Jn9I*$S&ATjP1? zVxB>pV@LVsy;^jZr7r$HNAm06TcUiI`l@~F$Mt$E%Shfd3O+h1vFhR9a8yQZ@wpne zr3bI-p=VEdo{)#uWxSVJeYQF|-5tnq>~f+CP~A0&{v=(up=ngEDl>5!$EDwPRaNjW zXj|wbG$OwmwaW-hMvBK%pbm3wpic71O^dzbxT%*13+SP9h- zI~rA5hapTVnk|qmiIVYy{__+x9f7OV4j3`g4;{{8_SWnLBSu2PUc%~`t%Ae^>J`SS zdtZg-r<0xAH*_ALtK;Nv(d9nbKK1jK=Ld)I(jQrKhBjgToR#Wm8{0a}@6ZuEO(lvG=y=Jh{M!4!-$(E)!vYUrf47 znf4W$e&QFOovV_$>J%X*KN>oXI@jt%BJms>IU}I$<7Hr{PChchs%+mx{% zt(fa_PM4r63?1h#YOk~;byc5`>%q>sLHA1gohNq{qOREhxu<y~`}YVhGe0?R_ceQ8vt^BpSNp6kErj_0hUNFyWQ1IOZ|GEgWBPwYFLgFu Oo!*uqEBu%Ae18C_t1cS= literal 0 HcmV?d00001 diff --git a/ww/managers/mimalloc/src/prim/windows/prim.c b/ww/managers/mimalloc/src/prim/windows/prim.c new file mode 100644 index 00000000..5074ad4c --- /dev/null +++ b/ww/managers/mimalloc/src/prim/windows/prim.c @@ -0,0 +1,663 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. 
+-----------------------------------------------------------------------------*/ + +// This file is included in `src/prim/prim.c` + +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" +#include // fputs, stderr + + +//--------------------------------------------- +// Dynamically bind Windows API points for portability +//--------------------------------------------- + +// We use VirtualAlloc2 for aligned allocation, but it is only supported on Windows 10 and Windows Server 2016. +// So, we need to look it up dynamically to run on older systems. (use __stdcall for 32-bit compatibility) +// NtAllocateVirtualAllocEx is used for huge OS page allocation (1GiB) +// We define a minimal MEM_EXTENDED_PARAMETER ourselves in order to be able to compile with older SDK's. +typedef enum MI_MEM_EXTENDED_PARAMETER_TYPE_E { + MiMemExtendedParameterInvalidType = 0, + MiMemExtendedParameterAddressRequirements, + MiMemExtendedParameterNumaNode, + MiMemExtendedParameterPartitionHandle, + MiMemExtendedParameterUserPhysicalHandle, + MiMemExtendedParameterAttributeFlags, + MiMemExtendedParameterMax +} MI_MEM_EXTENDED_PARAMETER_TYPE; + +typedef struct DECLSPEC_ALIGN(8) MI_MEM_EXTENDED_PARAMETER_S { + struct { DWORD64 Type : 8; DWORD64 Reserved : 56; } Type; + union { DWORD64 ULong64; PVOID Pointer; SIZE_T Size; HANDLE Handle; DWORD ULong; } Arg; +} MI_MEM_EXTENDED_PARAMETER; + +typedef struct MI_MEM_ADDRESS_REQUIREMENTS_S { + PVOID LowestStartingAddress; + PVOID HighestEndingAddress; + SIZE_T Alignment; +} MI_MEM_ADDRESS_REQUIREMENTS; + +#define MI_MEM_EXTENDED_PARAMETER_NONPAGED_HUGE 0x00000010 + +#include +typedef PVOID (__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG); +typedef NTSTATUS (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG); +static PVirtualAlloc2 pVirtualAlloc2 = NULL; +static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL; + +// Similarly, GetNumaProcesorNodeEx is only supported since Windows 7 +typedef struct MI_PROCESSOR_NUMBER_S { WORD Group; BYTE Number; BYTE Reserved; } MI_PROCESSOR_NUMBER; + +typedef VOID (__stdcall *PGetCurrentProcessorNumberEx)(MI_PROCESSOR_NUMBER* ProcNumber); +typedef BOOL (__stdcall *PGetNumaProcessorNodeEx)(MI_PROCESSOR_NUMBER* Processor, PUSHORT NodeNumber); +typedef BOOL (__stdcall* PGetNumaNodeProcessorMaskEx)(USHORT Node, PGROUP_AFFINITY ProcessorMask); +typedef BOOL (__stdcall *PGetNumaProcessorNode)(UCHAR Processor, PUCHAR NodeNumber); +static PGetCurrentProcessorNumberEx pGetCurrentProcessorNumberEx = NULL; +static PGetNumaProcessorNodeEx pGetNumaProcessorNodeEx = NULL; +static PGetNumaNodeProcessorMaskEx pGetNumaNodeProcessorMaskEx = NULL; +static PGetNumaProcessorNode pGetNumaProcessorNode = NULL; + +//--------------------------------------------- +// Enable large page support dynamically (if possible) +//--------------------------------------------- + +static bool win_enable_large_os_pages(size_t* large_page_size) +{ + static bool large_initialized = false; + if (large_initialized) return (_mi_os_large_page_size() > 0); + large_initialized = true; + + // Try to see if large OS pages are supported + // To use large pages on Windows, we first need access permission + // Set "Lock pages in memory" permission in the group policy editor + // + unsigned long err = 0; + HANDLE token = NULL; + BOOL ok = OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, 
&token); + if (ok) { + TOKEN_PRIVILEGES tp; + ok = LookupPrivilegeValue(NULL, TEXT("SeLockMemoryPrivilege"), &tp.Privileges[0].Luid); + if (ok) { + tp.PrivilegeCount = 1; + tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; + ok = AdjustTokenPrivileges(token, FALSE, &tp, 0, (PTOKEN_PRIVILEGES)NULL, 0); + if (ok) { + err = GetLastError(); + ok = (err == ERROR_SUCCESS); + if (ok && large_page_size != NULL) { + *large_page_size = GetLargePageMinimum(); + } + } + } + CloseHandle(token); + } + if (!ok) { + if (err == 0) err = GetLastError(); + _mi_warning_message("cannot enable large OS page support, error %lu\n", err); + } + return (ok!=0); +} + + +//--------------------------------------------- +// Initialize +//--------------------------------------------- + +void _mi_prim_mem_init( mi_os_mem_config_t* config ) +{ + config->has_overcommit = false; + config->has_partial_free = false; + config->has_virtual_reserve = true; + // get the page size + SYSTEM_INFO si; + GetSystemInfo(&si); + if (si.dwPageSize > 0) { config->page_size = si.dwPageSize; } + if (si.dwAllocationGranularity > 0) { config->alloc_granularity = si.dwAllocationGranularity; } + // get the VirtualAlloc2 function + HINSTANCE hDll; + hDll = LoadLibrary(TEXT("kernelbase.dll")); + if (hDll != NULL) { + // use VirtualAlloc2FromApp if possible as it is available to Windows store apps + pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2FromApp"); + if (pVirtualAlloc2==NULL) pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2"); + FreeLibrary(hDll); + } + // NtAllocateVirtualMemoryEx is used for huge page allocation + hDll = LoadLibrary(TEXT("ntdll.dll")); + if (hDll != NULL) { + pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)(void (*)(void))GetProcAddress(hDll, "NtAllocateVirtualMemoryEx"); + FreeLibrary(hDll); + } + // Try to use Win7+ numa API + hDll = LoadLibrary(TEXT("kernel32.dll")); + if (hDll != NULL) { + pGetCurrentProcessorNumberEx = (PGetCurrentProcessorNumberEx)(void (*)(void))GetProcAddress(hDll, "GetCurrentProcessorNumberEx"); + pGetNumaProcessorNodeEx = (PGetNumaProcessorNodeEx)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNodeEx"); + pGetNumaNodeProcessorMaskEx = (PGetNumaNodeProcessorMaskEx)(void (*)(void))GetProcAddress(hDll, "GetNumaNodeProcessorMaskEx"); + pGetNumaProcessorNode = (PGetNumaProcessorNode)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNode"); + FreeLibrary(hDll); + } + if (mi_option_is_enabled(mi_option_allow_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) { + win_enable_large_os_pages(&config->large_page_size); + } +} + + +//--------------------------------------------- +// Free +//--------------------------------------------- + +int _mi_prim_free(void* addr, size_t size ) { + MI_UNUSED(size); + DWORD errcode = 0; + bool err = (VirtualFree(addr, 0, MEM_RELEASE) == 0); + if (err) { errcode = GetLastError(); } + if (errcode == ERROR_INVALID_ADDRESS) { + // In mi_os_mem_alloc_aligned the fallback path may have returned a pointer inside + // the memory region returned by VirtualAlloc; in that case we need to free using + // the start of the region. 
+    MEMORY_BASIC_INFORMATION info = { 0 };
+    VirtualQuery(addr, &info, sizeof(info));
+    if (info.AllocationBase < addr && ((uint8_t*)addr - (uint8_t*)info.AllocationBase) < (ptrdiff_t)MI_SEGMENT_SIZE) {
+      errcode = 0;
+      err = (VirtualFree(info.AllocationBase, 0, MEM_RELEASE) == 0);
+      if (err) { errcode = GetLastError(); }
+    }
+  }
+  return (int)errcode;
+}
+
+
+//---------------------------------------------
+// VirtualAlloc
+//---------------------------------------------
+
+static void* win_virtual_alloc_prim_once(void* addr, size_t size, size_t try_alignment, DWORD flags) {
+  #if (MI_INTPTR_SIZE >= 8)
+  // on 64-bit systems, try to use the virtual address area after 2TiB for 4MiB aligned allocations
+  if (addr == NULL) {
+    void* hint = _mi_os_get_aligned_hint(try_alignment,size);
+    if (hint != NULL) {
+      void* p = VirtualAlloc(hint, size, flags, PAGE_READWRITE);
+      if (p != NULL) return p;
+      _mi_verbose_message("warning: unable to allocate hinted aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x)\n", size, GetLastError(), hint, try_alignment, flags);
+      // fall through on error
+    }
+  }
+  #endif
+  // on modern Windows try use VirtualAlloc2 for aligned allocation
+  if (try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) {
+    MI_MEM_ADDRESS_REQUIREMENTS reqs = { 0, 0, 0 };
+    reqs.Alignment = try_alignment;
+    MI_MEM_EXTENDED_PARAMETER param = { {0, 0}, {0} };
+    param.Type.Type = MiMemExtendedParameterAddressRequirements;
+    param.Arg.Pointer = &reqs;
+    void* p = (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, &param, 1);
+    if (p != NULL) return p;
+    _mi_warning_message("unable to allocate aligned OS memory (0x%zx bytes, error code: 0x%x, address: %p, alignment: 0x%zx, flags: 0x%x)\n", size, GetLastError(), addr, try_alignment, flags);
+    // fall through on error
+  }
+  // last resort
+  return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
+}
+
+static bool win_is_out_of_memory_error(DWORD err) {
+  switch (err) {
+    case ERROR_COMMITMENT_MINIMUM:
+    case ERROR_COMMITMENT_LIMIT:
+    case ERROR_PAGEFILE_QUOTA:
+    case ERROR_NOT_ENOUGH_MEMORY:
+      return true;
+    default:
+      return false;
+  }
+}
+
+static void* win_virtual_alloc_prim(void* addr, size_t size, size_t try_alignment, DWORD flags) {
+  long max_retry_msecs = mi_option_get_clamp(mi_option_retry_on_oom, 0, 2000); // at most 2 seconds
+  if (max_retry_msecs == 1) { max_retry_msecs = 100; }  // if one sets the option to "true"
+  for (long tries = 1; tries <= 10; tries++) {  // try at most 10 times (=2200ms)
+    void* p = win_virtual_alloc_prim_once(addr, size, try_alignment, flags);
+    if (p != NULL) {
+      // success, return the address
+      return p;
+    }
+    else if (max_retry_msecs > 0 && (try_alignment <= 2*MI_SEGMENT_ALIGN) &&
+             (flags&MEM_COMMIT) != 0 && (flags&MEM_LARGE_PAGES) == 0 &&
+             win_is_out_of_memory_error(GetLastError())) {
+      // if committing regular memory and being out-of-memory,
+      // keep trying for a bit in case memory frees up after all. See issue #894
+      _mi_warning_message("out-of-memory on OS allocation, try again...
(attempt %lu, 0x%zx bytes, error code: 0x%x, address: %p, alignment: 0x%zx, flags: 0x%x)\n", tries, size, GetLastError(), addr, try_alignment, flags); + long sleep_msecs = tries*40; // increasing waits + if (sleep_msecs > max_retry_msecs) { sleep_msecs = max_retry_msecs; } + max_retry_msecs -= sleep_msecs; + Sleep(sleep_msecs); + } + else { + // otherwise return with an error + break; + } + } + return NULL; +} + +static void* win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large) { + mi_assert_internal(!(large_only && !allow_large)); + static _Atomic(size_t) large_page_try_ok; // = 0; + void* p = NULL; + // Try to allocate large OS pages (2MiB) if allowed or required. + if ((large_only || _mi_os_use_large_page(size, try_alignment)) + && allow_large && (flags&MEM_COMMIT)!=0 && (flags&MEM_RESERVE)!=0) { + size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok); + if (!large_only && try_ok > 0) { + // if a large page allocation fails, it seems the calls to VirtualAlloc get very expensive. + // therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times. + mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1); + } + else { + // large OS pages must always reserve and commit. + *is_large = true; + p = win_virtual_alloc_prim(addr, size, try_alignment, flags | MEM_LARGE_PAGES); + if (large_only) return p; + // fall back to non-large page allocation on error (`p == NULL`). + if (p == NULL) { + mi_atomic_store_release(&large_page_try_ok,10UL); // on error, don't try again for the next N allocations + } + } + } + // Fall back to regular page allocation + if (p == NULL) { + *is_large = ((flags&MEM_LARGE_PAGES) != 0); + p = win_virtual_alloc_prim(addr, size, try_alignment, flags); + } + //if (p == NULL) { _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x, large only: %d, allow large: %d)\n", size, GetLastError(), addr, try_alignment, flags, large_only, allow_large); } + return p; +} + +int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr) { + mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); + mi_assert_internal(commit || !allow_large); + mi_assert_internal(try_alignment > 0); + *is_zero = true; + int flags = MEM_RESERVE; + if (commit) { flags |= MEM_COMMIT; } + *addr = win_virtual_alloc(NULL, size, try_alignment, flags, false, allow_large, is_large); + return (*addr != NULL ? 0 : (int)GetLastError()); +} + + +//--------------------------------------------- +// Commit/Reset/Protect +//--------------------------------------------- +#ifdef _MSC_VER +#pragma warning(disable:6250) // suppress warning calling VirtualFree without MEM_RELEASE (for decommit) +#endif + +int _mi_prim_commit(void* addr, size_t size, bool* is_zero) { + *is_zero = false; + /* + // zero'ing only happens on an initial commit... but checking upfront seems expensive.. 
+ _MEMORY_BASIC_INFORMATION meminfo; _mi_memzero_var(meminfo); + if (VirtualQuery(addr, &meminfo, size) > 0) { + if ((meminfo.State & MEM_COMMIT) == 0) { + *is_zero = true; + } + } + */ + // commit + void* p = VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE); + if (p == NULL) return (int)GetLastError(); + return 0; +} + +int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit) { + BOOL ok = VirtualFree(addr, size, MEM_DECOMMIT); + *needs_recommit = true; // for safety, assume always decommitted even in the case of an error. + return (ok ? 0 : (int)GetLastError()); +} + +int _mi_prim_reset(void* addr, size_t size) { + void* p = VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); + mi_assert_internal(p == addr); + #if 0 + if (p != NULL) { + VirtualUnlock(addr,size); // VirtualUnlock after MEM_RESET removes the memory directly from the working set + } + #endif + return (p != NULL ? 0 : (int)GetLastError()); +} + +int _mi_prim_protect(void* addr, size_t size, bool protect) { + DWORD oldprotect = 0; + BOOL ok = VirtualProtect(addr, size, protect ? PAGE_NOACCESS : PAGE_READWRITE, &oldprotect); + return (ok ? 0 : (int)GetLastError()); +} + + +//--------------------------------------------- +// Huge page allocation +//--------------------------------------------- + +static void* _mi_prim_alloc_huge_os_pagesx(void* hint_addr, size_t size, int numa_node) +{ + const DWORD flags = MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE; + + win_enable_large_os_pages(NULL); + + MI_MEM_EXTENDED_PARAMETER params[3] = { {{0,0},{0}},{{0,0},{0}},{{0,0},{0}} }; + // on modern Windows try use NtAllocateVirtualMemoryEx for 1GiB huge pages + static bool mi_huge_pages_available = true; + if (pNtAllocateVirtualMemoryEx != NULL && mi_huge_pages_available) { + params[0].Type.Type = MiMemExtendedParameterAttributeFlags; + params[0].Arg.ULong64 = MI_MEM_EXTENDED_PARAMETER_NONPAGED_HUGE; + ULONG param_count = 1; + if (numa_node >= 0) { + param_count++; + params[1].Type.Type = MiMemExtendedParameterNumaNode; + params[1].Arg.ULong = (unsigned)numa_node; + } + SIZE_T psize = size; + void* base = hint_addr; + NTSTATUS err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags, PAGE_READWRITE, params, param_count); + if (err == 0 && base != NULL) { + return base; + } + else { + // fall back to regular large pages + mi_huge_pages_available = false; // don't try further huge pages + _mi_warning_message("unable to allocate using huge (1GiB) pages, trying large (2MiB) pages instead (status 0x%lx)\n", err); + } + } + // on modern Windows try use VirtualAlloc2 for numa aware large OS page allocation + if (pVirtualAlloc2 != NULL && numa_node >= 0) { + params[0].Type.Type = MiMemExtendedParameterNumaNode; + params[0].Arg.ULong = (unsigned)numa_node; + return (*pVirtualAlloc2)(GetCurrentProcess(), hint_addr, size, flags, PAGE_READWRITE, params, 1); + } + + // otherwise use regular virtual alloc on older windows + return VirtualAlloc(hint_addr, size, flags, PAGE_READWRITE); +} + +int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr) { + *is_zero = true; + *addr = _mi_prim_alloc_huge_os_pagesx(hint_addr,size,numa_node); + return (*addr != NULL ? 
0 : (int)GetLastError()); +} + + +//--------------------------------------------- +// Numa nodes +//--------------------------------------------- + +size_t _mi_prim_numa_node(void) { + USHORT numa_node = 0; + if (pGetCurrentProcessorNumberEx != NULL && pGetNumaProcessorNodeEx != NULL) { + // Extended API is supported + MI_PROCESSOR_NUMBER pnum; + (*pGetCurrentProcessorNumberEx)(&pnum); + USHORT nnode = 0; + BOOL ok = (*pGetNumaProcessorNodeEx)(&pnum, &nnode); + if (ok) { numa_node = nnode; } + } + else if (pGetNumaProcessorNode != NULL) { + // Vista or earlier, use older API that is limited to 64 processors. Issue #277 + DWORD pnum = GetCurrentProcessorNumber(); + UCHAR nnode = 0; + BOOL ok = pGetNumaProcessorNode((UCHAR)pnum, &nnode); + if (ok) { numa_node = nnode; } + } + return numa_node; +} + +size_t _mi_prim_numa_node_count(void) { + ULONG numa_max = 0; + GetNumaHighestNodeNumber(&numa_max); + // find the highest node number that has actual processors assigned to it. Issue #282 + while(numa_max > 0) { + if (pGetNumaNodeProcessorMaskEx != NULL) { + // Extended API is supported + GROUP_AFFINITY affinity; + if ((*pGetNumaNodeProcessorMaskEx)((USHORT)numa_max, &affinity)) { + if (affinity.Mask != 0) break; // found the maximum non-empty node + } + } + else { + // Vista or earlier, use older API that is limited to 64 processors. + ULONGLONG mask; + if (GetNumaNodeProcessorMask((UCHAR)numa_max, &mask)) { + if (mask != 0) break; // found the maximum non-empty node + }; + } + // max node was invalid or had no processor assigned, try again + numa_max--; + } + return ((size_t)numa_max + 1); +} + + +//---------------------------------------------------------------- +// Clock +//---------------------------------------------------------------- + +static mi_msecs_t mi_to_msecs(LARGE_INTEGER t) { + static LARGE_INTEGER mfreq; // = 0 + if (mfreq.QuadPart == 0LL) { + LARGE_INTEGER f; + QueryPerformanceFrequency(&f); + mfreq.QuadPart = f.QuadPart/1000LL; + if (mfreq.QuadPart == 0) mfreq.QuadPart = 1; + } + return (mi_msecs_t)(t.QuadPart / mfreq.QuadPart); +} + +mi_msecs_t _mi_prim_clock_now(void) { + LARGE_INTEGER t; + QueryPerformanceCounter(&t); + return mi_to_msecs(t); +} + + +//---------------------------------------------------------------- +// Process Info +//---------------------------------------------------------------- + +#include +#include + +static mi_msecs_t filetime_msecs(const FILETIME* ftime) { + ULARGE_INTEGER i; + i.LowPart = ftime->dwLowDateTime; + i.HighPart = ftime->dwHighDateTime; + mi_msecs_t msecs = (i.QuadPart / 10000); // FILETIME is in 100 nano seconds + return msecs; +} + +typedef BOOL (WINAPI *PGetProcessMemoryInfo)(HANDLE, PPROCESS_MEMORY_COUNTERS, DWORD); +static PGetProcessMemoryInfo pGetProcessMemoryInfo = NULL; + +void _mi_prim_process_info(mi_process_info_t* pinfo) +{ + FILETIME ct; + FILETIME ut; + FILETIME st; + FILETIME et; + GetProcessTimes(GetCurrentProcess(), &ct, &et, &st, &ut); + pinfo->utime = filetime_msecs(&ut); + pinfo->stime = filetime_msecs(&st); + + // load psapi on demand + if (pGetProcessMemoryInfo == NULL) { + HINSTANCE hDll = LoadLibrary(TEXT("psapi.dll")); + if (hDll != NULL) { + pGetProcessMemoryInfo = (PGetProcessMemoryInfo)(void (*)(void))GetProcAddress(hDll, "GetProcessMemoryInfo"); + } + } + + // get process info + PROCESS_MEMORY_COUNTERS info; + memset(&info, 0, sizeof(info)); + if (pGetProcessMemoryInfo != NULL) { + pGetProcessMemoryInfo(GetCurrentProcess(), &info, sizeof(info)); + } + pinfo->current_rss = (size_t)info.WorkingSetSize; + 
pinfo->peak_rss = (size_t)info.PeakWorkingSetSize; + pinfo->current_commit = (size_t)info.PagefileUsage; + pinfo->peak_commit = (size_t)info.PeakPagefileUsage; + pinfo->page_faults = (size_t)info.PageFaultCount; +} + +//---------------------------------------------------------------- +// Output +//---------------------------------------------------------------- + +void _mi_prim_out_stderr( const char* msg ) +{ + // on windows with redirection, the C runtime cannot handle locale dependent output + // after the main thread closes so we use direct console output. + if (!_mi_preloading()) { + // _cputs(msg); // _cputs cannot be used as it aborts when failing to lock the console + static HANDLE hcon = INVALID_HANDLE_VALUE; + static bool hconIsConsole; + if (hcon == INVALID_HANDLE_VALUE) { + CONSOLE_SCREEN_BUFFER_INFO sbi; + hcon = GetStdHandle(STD_ERROR_HANDLE); + hconIsConsole = ((hcon != INVALID_HANDLE_VALUE) && GetConsoleScreenBufferInfo(hcon, &sbi)); + } + const size_t len = _mi_strlen(msg); + if (len > 0 && len < UINT32_MAX) { + DWORD written = 0; + if (hconIsConsole) { + WriteConsoleA(hcon, msg, (DWORD)len, &written, NULL); + } + else if (hcon != INVALID_HANDLE_VALUE) { + // use direct write if stderr was redirected + WriteFile(hcon, msg, (DWORD)len, &written, NULL); + } + else { + // finally fall back to fputs after all + fputs(msg, stderr); + } + } + } +} + + +//---------------------------------------------------------------- +// Environment +//---------------------------------------------------------------- + +// On Windows use GetEnvironmentVariable instead of getenv to work +// reliably even when this is invoked before the C runtime is initialized. +// i.e. when `_mi_preloading() == true`. +// Note: on windows, environment names are not case sensitive. +bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { + result[0] = 0; + size_t len = GetEnvironmentVariableA(name, result, (DWORD)result_size); + return (len > 0 && len < result_size); +} + + + +//---------------------------------------------------------------- +// Random +//---------------------------------------------------------------- + +#if defined(MI_USE_RTLGENRANDOM) // || defined(__cplusplus) +// We prefer to use BCryptGenRandom instead of (the unofficial) RtlGenRandom but when using +// dynamic overriding, we observed it can raise an exception when compiled with C++, and +// sometimes deadlocks when also running under the VS debugger. +// In contrast, issue #623 implies that on Windows Server 2019 we need to use BCryptGenRandom. +// To be continued.. 
+#pragma comment (lib,"advapi32.lib") +#define RtlGenRandom SystemFunction036 +mi_decl_externc BOOLEAN NTAPI RtlGenRandom(PVOID RandomBuffer, ULONG RandomBufferLength); + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + return (RtlGenRandom(buf, (ULONG)buf_len) != 0); +} + +#else + +#ifndef BCRYPT_USE_SYSTEM_PREFERRED_RNG +#define BCRYPT_USE_SYSTEM_PREFERRED_RNG 0x00000002 +#endif + +typedef LONG (NTAPI *PBCryptGenRandom)(HANDLE, PUCHAR, ULONG, ULONG); +static PBCryptGenRandom pBCryptGenRandom = NULL; + +bool _mi_prim_random_buf(void* buf, size_t buf_len) { + if (pBCryptGenRandom == NULL) { + HINSTANCE hDll = LoadLibrary(TEXT("bcrypt.dll")); + if (hDll != NULL) { + pBCryptGenRandom = (PBCryptGenRandom)(void (*)(void))GetProcAddress(hDll, "BCryptGenRandom"); + } + if (pBCryptGenRandom == NULL) return false; + } + return (pBCryptGenRandom(NULL, (PUCHAR)buf, (ULONG)buf_len, BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0); +} + +#endif // MI_USE_RTLGENRANDOM + +//---------------------------------------------------------------- +// Thread init/done +//---------------------------------------------------------------- + +#if !defined(MI_SHARED_LIB) + +// use thread local storage keys to detect thread ending +// note: another design could be to use special linker sections (see issue #869) +#include +#if (_WIN32_WINNT < 0x600) // before Windows Vista +WINBASEAPI DWORD WINAPI FlsAlloc( _In_opt_ PFLS_CALLBACK_FUNCTION lpCallback ); +WINBASEAPI PVOID WINAPI FlsGetValue( _In_ DWORD dwFlsIndex ); +WINBASEAPI BOOL WINAPI FlsSetValue( _In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData ); +WINBASEAPI BOOL WINAPI FlsFree(_In_ DWORD dwFlsIndex); +#endif + +static DWORD mi_fls_key = (DWORD)(-1); + +static void NTAPI mi_fls_done(PVOID value) { + mi_heap_t* heap = (mi_heap_t*)value; + if (heap != NULL) { + _mi_thread_done(heap); + FlsSetValue(mi_fls_key, NULL); // prevent recursion as _mi_thread_done may set it back to the main heap, issue #672 + } +} + +void _mi_prim_thread_init_auto_done(void) { + mi_fls_key = FlsAlloc(&mi_fls_done); +} + +void _mi_prim_thread_done_auto_done(void) { + // call thread-done on all threads (except the main thread) to prevent + // dangling callback pointer if statically linked with a DLL; Issue #208 + FlsFree(mi_fls_key); +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + mi_assert_internal(mi_fls_key != (DWORD)(-1)); + FlsSetValue(mi_fls_key, heap); +} + +#else + +// Dll; nothing to do as in that case thread_done is handled through the DLL_THREAD_DETACH event. + +void _mi_prim_thread_init_auto_done(void) { +} + +void _mi_prim_thread_done_auto_done(void) { +} + +void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { + MI_UNUSED(heap); +} + +#endif diff --git a/ww/managers/mimalloc/src/prim/windows/readme.md b/ww/managers/mimalloc/src/prim/windows/readme.md new file mode 100644 index 00000000..217c3d17 --- /dev/null +++ b/ww/managers/mimalloc/src/prim/windows/readme.md @@ -0,0 +1,17 @@ +## Primitives: + +- `prim.c` contains Windows primitives for OS allocation. + +## Event Tracing for Windows (ETW) + +- `etw.h` is generated from `etw.man` which contains the manifest for mimalloc events. + (100 is an allocation, 101 is for a free) + +- `etw-mimalloc.wprp` is a profile for the Windows Performance Recorder (WPR). + In an admin prompt, you can use: + ``` + > wpr -start src\prim\windows\etw-mimalloc.wprp -filemode + > + > wpr -stop test.etl + ``` + and then open `test.etl` in the Windows Performance Analyzer (WPA). 
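A trivial driver such as the sketch below (hypothetical, not part of this patch; it assumes mimalloc was built with its ETW support enabled) can be run while the recorder is active, so that `test.etl` actually contains allocation (100) and free (101) events to inspect:

```c
// standalone sketch: generate some mimalloc allocation/free ETW events
#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // a burst of allocations and frees gives the ETW provider something to emit
  for (int i = 0; i < 1000; i++) {
    void* p = mi_malloc((size_t)(64 + (i % 256)));
    mi_free(p);
  }
  printf("allocations done\n");
  return 0;
}
```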
\ No newline at end of file
diff --git a/ww/managers/mimalloc/src/random.c b/ww/managers/mimalloc/src/random.c
new file mode 100644
index 00000000..4fc8b2f8
--- /dev/null
+++ b/ww/managers/mimalloc/src/random.c
@@ -0,0 +1,254 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2019-2021, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/prim.h"    // _mi_prim_random_buf
+#include <string.h>           // memset
+
+/* ----------------------------------------------------------------------------
+We use our own PRNG to keep the performance of random number generation
+predictable and to avoid implementations that use a lock. We only use the
+OS-provided random source to initialize the seeds. Since we do not need
+ultimate performance but we do rely on the security (for secret cookies in
+secure mode) we use a cryptographically secure generator (chacha20).
+-----------------------------------------------------------------------------*/
+
+#define MI_CHACHA_ROUNDS (20)   // perhaps use 12 for better performance?
+
+
+/* ----------------------------------------------------------------------------
+Chacha20 implementation of the original algorithm with a 64-bit nonce
+and counter: https://en.wikipedia.org/wiki/Salsa20
+The input matrix has sixteen 32-bit values:
+Position  0 to  3: the constant ("expand 32-byte k")
+Position  4 to 11: the key
+Position 12 to 13: the counter.
+Position 14 to 15: the nonce.
+
+The implementation uses regular C code which compiles very well on modern compilers.
+(gcc x64 has no register spills, and clang 6+ uses SSE instructions) +-----------------------------------------------------------------------------*/ + +static inline uint32_t rotl(uint32_t x, uint32_t shift) { + return (x << shift) | (x >> (32 - shift)); +} + +static inline void qround(uint32_t x[16], size_t a, size_t b, size_t c, size_t d) { + x[a] += x[b]; x[d] = rotl(x[d] ^ x[a], 16); + x[c] += x[d]; x[b] = rotl(x[b] ^ x[c], 12); + x[a] += x[b]; x[d] = rotl(x[d] ^ x[a], 8); + x[c] += x[d]; x[b] = rotl(x[b] ^ x[c], 7); +} + +static void chacha_block(mi_random_ctx_t* ctx) +{ + // scramble into `x` + uint32_t x[16]; + for (size_t i = 0; i < 16; i++) { + x[i] = ctx->input[i]; + } + for (size_t i = 0; i < MI_CHACHA_ROUNDS; i += 2) { + qround(x, 0, 4, 8, 12); + qround(x, 1, 5, 9, 13); + qround(x, 2, 6, 10, 14); + qround(x, 3, 7, 11, 15); + qround(x, 0, 5, 10, 15); + qround(x, 1, 6, 11, 12); + qround(x, 2, 7, 8, 13); + qround(x, 3, 4, 9, 14); + } + + // add scrambled data to the initial state + for (size_t i = 0; i < 16; i++) { + ctx->output[i] = x[i] + ctx->input[i]; + } + ctx->output_available = 16; + + // increment the counter for the next round + ctx->input[12] += 1; + if (ctx->input[12] == 0) { + ctx->input[13] += 1; + if (ctx->input[13] == 0) { // and keep increasing into the nonce + ctx->input[14] += 1; + } + } +} + +static uint32_t chacha_next32(mi_random_ctx_t* ctx) { + if (ctx->output_available <= 0) { + chacha_block(ctx); + ctx->output_available = 16; // (assign again to suppress static analysis warning) + } + const uint32_t x = ctx->output[16 - ctx->output_available]; + ctx->output[16 - ctx->output_available] = 0; // reset once the data is handed out + ctx->output_available--; + return x; +} + +static inline uint32_t read32(const uint8_t* p, size_t idx32) { + const size_t i = 4*idx32; + return ((uint32_t)p[i+0] | (uint32_t)p[i+1] << 8 | (uint32_t)p[i+2] << 16 | (uint32_t)p[i+3] << 24); +} + +static void chacha_init(mi_random_ctx_t* ctx, const uint8_t key[32], uint64_t nonce) +{ + // since we only use chacha for randomness (and not encryption) we + // do not _need_ to read 32-bit values as little endian but we do anyways + // just for being compatible :-) + memset(ctx, 0, sizeof(*ctx)); + for (size_t i = 0; i < 4; i++) { + const uint8_t* sigma = (uint8_t*)"expand 32-byte k"; + ctx->input[i] = read32(sigma,i); + } + for (size_t i = 0; i < 8; i++) { + ctx->input[i + 4] = read32(key,i); + } + ctx->input[12] = 0; + ctx->input[13] = 0; + ctx->input[14] = (uint32_t)nonce; + ctx->input[15] = (uint32_t)(nonce >> 32); +} + +static void chacha_split(mi_random_ctx_t* ctx, uint64_t nonce, mi_random_ctx_t* ctx_new) { + memset(ctx_new, 0, sizeof(*ctx_new)); + _mi_memcpy(ctx_new->input, ctx->input, sizeof(ctx_new->input)); + ctx_new->input[12] = 0; + ctx_new->input[13] = 0; + ctx_new->input[14] = (uint32_t)nonce; + ctx_new->input[15] = (uint32_t)(nonce >> 32); + mi_assert_internal(ctx->input[14] != ctx_new->input[14] || ctx->input[15] != ctx_new->input[15]); // do not reuse nonces! 
+ chacha_block(ctx_new); +} + + +/* ---------------------------------------------------------------------------- +Random interface +-----------------------------------------------------------------------------*/ + +#if MI_DEBUG>1 +static bool mi_random_is_initialized(mi_random_ctx_t* ctx) { + return (ctx != NULL && ctx->input[0] != 0); +} +#endif + +void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* ctx_new) { + mi_assert_internal(mi_random_is_initialized(ctx)); + mi_assert_internal(ctx != ctx_new); + chacha_split(ctx, (uintptr_t)ctx_new /*nonce*/, ctx_new); +} + +uintptr_t _mi_random_next(mi_random_ctx_t* ctx) { + mi_assert_internal(mi_random_is_initialized(ctx)); + #if MI_INTPTR_SIZE <= 4 + return chacha_next32(ctx); + #elif MI_INTPTR_SIZE == 8 + return (((uintptr_t)chacha_next32(ctx) << 32) | chacha_next32(ctx)); + #else + # error "define mi_random_next for this platform" + #endif +} + + +/* ---------------------------------------------------------------------------- +To initialize a fresh random context. +If we cannot get good randomness, we fall back to weak randomness based on a timer and ASLR. +-----------------------------------------------------------------------------*/ + +uintptr_t _mi_os_random_weak(uintptr_t extra_seed) { + uintptr_t x = (uintptr_t)&_mi_os_random_weak ^ extra_seed; // ASLR makes the address random + x ^= _mi_prim_clock_now(); + // and do a few randomization steps + uintptr_t max = ((x ^ (x >> 17)) & 0x0F) + 1; + for (uintptr_t i = 0; i < max; i++) { + x = _mi_random_shuffle(x); + } + mi_assert_internal(x != 0); + return x; +} + +static void mi_random_init_ex(mi_random_ctx_t* ctx, bool use_weak) { + uint8_t key[32]; + if (use_weak || !_mi_prim_random_buf(key, sizeof(key))) { + // if we fail to get random data from the OS, we fall back to a + // weak random source based on the current time + #if !defined(__wasi__) + if (!use_weak) { _mi_warning_message("unable to use secure randomness\n"); } + #endif + uintptr_t x = _mi_os_random_weak(0); + for (size_t i = 0; i < 8; i++) { // key is eight 32-bit words. 
+ x = _mi_random_shuffle(x); + ((uint32_t*)key)[i] = (uint32_t)x; + } + ctx->weak = true; + } + else { + ctx->weak = false; + } + chacha_init(ctx, key, (uintptr_t)ctx /*nonce*/ ); +} + +void _mi_random_init(mi_random_ctx_t* ctx) { + mi_random_init_ex(ctx, false); +} + +void _mi_random_init_weak(mi_random_ctx_t * ctx) { + mi_random_init_ex(ctx, true); +} + +void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx) { + if (ctx->weak) { + _mi_random_init(ctx); + } +} + +/* -------------------------------------------------------- +test vectors from +----------------------------------------------------------- */ +/* +static bool array_equals(uint32_t* x, uint32_t* y, size_t n) { + for (size_t i = 0; i < n; i++) { + if (x[i] != y[i]) return false; + } + return true; +} +static void chacha_test(void) +{ + uint32_t x[4] = { 0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567 }; + uint32_t x_out[4] = { 0xea2a92f4, 0xcb1cf8ce, 0x4581472e, 0x5881c4bb }; + qround(x, 0, 1, 2, 3); + mi_assert_internal(array_equals(x, x_out, 4)); + + uint32_t y[16] = { + 0x879531e0, 0xc5ecf37d, 0x516461b1, 0xc9a62f8a, + 0x44c20ef3, 0x3390af7f, 0xd9fc690b, 0x2a5f714c, + 0x53372767, 0xb00a5631, 0x974c541a, 0x359e9963, + 0x5c971061, 0x3d631689, 0x2098d9d6, 0x91dbd320 }; + uint32_t y_out[16] = { + 0x879531e0, 0xc5ecf37d, 0xbdb886dc, 0xc9a62f8a, + 0x44c20ef3, 0x3390af7f, 0xd9fc690b, 0xcfacafd2, + 0xe46bea80, 0xb00a5631, 0x974c541a, 0x359e9963, + 0x5c971061, 0xccc07c79, 0x2098d9d6, 0x91dbd320 }; + qround(y, 2, 7, 8, 13); + mi_assert_internal(array_equals(y, y_out, 16)); + + mi_random_ctx_t r = { + { 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574, + 0x03020100, 0x07060504, 0x0b0a0908, 0x0f0e0d0c, + 0x13121110, 0x17161514, 0x1b1a1918, 0x1f1e1d1c, + 0x00000001, 0x09000000, 0x4a000000, 0x00000000 }, + {0}, + 0 + }; + uint32_t r_out[16] = { + 0xe4e7f110, 0x15593bd1, 0x1fdd0f50, 0xc47120a3, + 0xc7f4d1c7, 0x0368c033, 0x9aaa2204, 0x4e6cd4c3, + 0x466482d2, 0x09aa9f07, 0x05d7c214, 0xa2028bd9, + 0xd19c12b5, 0xb94e16de, 0xe883d0cb, 0x4e3c50a2 }; + chacha_block(&r); + mi_assert_internal(array_equals(r.output, r_out, 16)); +} +*/ diff --git a/ww/managers/mimalloc/src/segment-map.c b/ww/managers/mimalloc/src/segment-map.c new file mode 100644 index 00000000..1efb1e23 --- /dev/null +++ b/ww/managers/mimalloc/src/segment-map.c @@ -0,0 +1,155 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2019-2023, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +/* ----------------------------------------------------------- + The following functions are to reliably find the segment or + block that encompasses any pointer p (or NULL if it is not + in any of our segments). + We maintain a bitmap of all memory with 1 bit per MI_SEGMENT_SIZE (64MiB) + set to 1 if it contains the segment meta data. 
+----------------------------------------------------------- */ +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" + +#if (MI_INTPTR_SIZE>=8) && MI_TRACK_ASAN +#define MI_MAX_ADDRESS ((size_t)140 << 40) // 140TB (see issue #881) +#elif (MI_INTPTR_SIZE >= 8) +#define MI_MAX_ADDRESS ((size_t)40 << 40) // 40TB (to include huge page areas) +#else +#define MI_MAX_ADDRESS ((size_t)2 << 30) // 2Gb +#endif + +#define MI_SEGMENT_MAP_BITS (MI_MAX_ADDRESS / MI_SEGMENT_SIZE) +#define MI_SEGMENT_MAP_SIZE (MI_SEGMENT_MAP_BITS / 8) +#define MI_SEGMENT_MAP_WSIZE (MI_SEGMENT_MAP_SIZE / MI_INTPTR_SIZE) + +static _Atomic(uintptr_t) mi_segment_map[MI_SEGMENT_MAP_WSIZE + 1]; // 2KiB per TB with 64MiB segments + +static size_t mi_segment_map_index_of(const mi_segment_t* segment, size_t* bitidx) { + // note: segment can be invalid or NULL. + mi_assert_internal(_mi_ptr_segment(segment + 1) == segment); // is it aligned on MI_SEGMENT_SIZE? + if ((uintptr_t)segment >= MI_MAX_ADDRESS) { + *bitidx = 0; + return MI_SEGMENT_MAP_WSIZE; + } + else { + const uintptr_t segindex = ((uintptr_t)segment) / MI_SEGMENT_SIZE; + *bitidx = segindex % MI_INTPTR_BITS; + const size_t mapindex = segindex / MI_INTPTR_BITS; + mi_assert_internal(mapindex < MI_SEGMENT_MAP_WSIZE); + return mapindex; + } +} + +void _mi_segment_map_allocated_at(const mi_segment_t* segment) { + size_t bitidx; + size_t index = mi_segment_map_index_of(segment, &bitidx); + mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE); + if (index==MI_SEGMENT_MAP_WSIZE) return; + uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); + uintptr_t newmask; + do { + newmask = (mask | ((uintptr_t)1 << bitidx)); + } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask)); +} + +void _mi_segment_map_freed_at(const mi_segment_t* segment) { + size_t bitidx; + size_t index = mi_segment_map_index_of(segment, &bitidx); + mi_assert_internal(index <= MI_SEGMENT_MAP_WSIZE); + if (index == MI_SEGMENT_MAP_WSIZE) return; + uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); + uintptr_t newmask; + do { + newmask = (mask & ~((uintptr_t)1 << bitidx)); + } while (!mi_atomic_cas_weak_release(&mi_segment_map[index], &mask, newmask)); +} + +// Determine the segment belonging to a pointer or NULL if it is not in a valid segment. +static mi_segment_t* _mi_segment_of(const void* p) { + if (p == NULL) return NULL; + mi_segment_t* segment = _mi_ptr_segment(p); // segment can be NULL + size_t bitidx; + size_t index = mi_segment_map_index_of(segment, &bitidx); + // fast path: for any pointer to valid small/medium/large object or first MI_SEGMENT_SIZE in huge + const uintptr_t mask = mi_atomic_load_relaxed(&mi_segment_map[index]); + if mi_likely((mask & ((uintptr_t)1 << bitidx)) != 0) { + return segment; // yes, allocated by us + } + if (index==MI_SEGMENT_MAP_WSIZE) return NULL; + + // TODO: maintain max/min allocated range for efficiency for more efficient rejection of invalid pointers? + + // search downwards for the first segment in case it is an interior pointer + // could be slow but searches in MI_INTPTR_SIZE * MI_SEGMENT_SIZE (512MiB) steps trough + // valid huge objects + // note: we could maintain a lowest index to speed up the path for invalid pointers? 
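  // Worked example (editorial sketch, not upstream code): each 64-bit word of mi_segment_map
  // covers MI_INTPTR_BITS * MI_SEGMENT_SIZE = 64 * 64MiB = 4GiB of address space, and
  // 1TiB / 64MiB = 16384 segments = 16384 bits = 2KiB of map, which is the "2KiB per TB"
  // figure noted at the map's declaration above.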
+ size_t lobitidx; + size_t loindex; + uintptr_t lobits = mask & (((uintptr_t)1 << bitidx) - 1); + if (lobits != 0) { + loindex = index; + lobitidx = mi_bsr(lobits); // lobits != 0 + } + else if (index == 0) { + return NULL; + } + else { + mi_assert_internal(index > 0); + uintptr_t lomask = mask; + loindex = index; + do { + loindex--; + lomask = mi_atomic_load_relaxed(&mi_segment_map[loindex]); + } while (lomask != 0 && loindex > 0); + if (lomask == 0) return NULL; + lobitidx = mi_bsr(lomask); // lomask != 0 + } + mi_assert_internal(loindex < MI_SEGMENT_MAP_WSIZE); + // take difference as the addresses could be larger than the MAX_ADDRESS space. + size_t diff = (((index - loindex) * (8*MI_INTPTR_SIZE)) + bitidx - lobitidx) * MI_SEGMENT_SIZE; + segment = (mi_segment_t*)((uint8_t*)segment - diff); + + if (segment == NULL) return NULL; + mi_assert_internal((void*)segment < p); + bool cookie_ok = (_mi_ptr_cookie(segment) == segment->cookie); + mi_assert_internal(cookie_ok); + if mi_unlikely(!cookie_ok) return NULL; + if (((uint8_t*)segment + mi_segment_size(segment)) <= (uint8_t*)p) return NULL; // outside the range + mi_assert_internal(p >= (void*)segment && (uint8_t*)p < (uint8_t*)segment + mi_segment_size(segment)); + return segment; +} + +// Is this a valid pointer in our heap? +static bool mi_is_valid_pointer(const void* p) { + return ((_mi_segment_of(p) != NULL) || (_mi_arena_contains(p))); +} + +mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept { + return mi_is_valid_pointer(p); +} + +/* +// Return the full segment range belonging to a pointer +static void* mi_segment_range_of(const void* p, size_t* size) { + mi_segment_t* segment = _mi_segment_of(p); + if (segment == NULL) { + if (size != NULL) *size = 0; + return NULL; + } + else { + if (size != NULL) *size = segment->segment_size; + return segment; + } + mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); + mi_assert_internal(page == NULL || (mi_segment_page_size(_mi_page_segment(page)) - (MI_SECURE == 0 ? 0 : _mi_os_page_size())) >= block_size); + mi_reset_delayed(tld); + mi_assert_internal(page == NULL || mi_page_not_in_queue(page, tld)); + return page; +} +*/ diff --git a/ww/managers/mimalloc/src/segment.c b/ww/managers/mimalloc/src/segment.c new file mode 100644 index 00000000..4e4dcb80 --- /dev/null +++ b/ww/managers/mimalloc/src/segment.c @@ -0,0 +1,1524 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2024, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" + +#include // memset +#include + +// ------------------------------------------------------------------- +// Segments +// mimalloc pages reside in segments. See `mi_segment_valid` for invariants. 
+// ------------------------------------------------------------------- + + +static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* stats); + + +// ------------------------------------------------------------------- +// commit mask +// ------------------------------------------------------------------- + +static bool mi_commit_mask_all_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + if ((commit->mask[i] & cm->mask[i]) != cm->mask[i]) return false; + } + return true; +} + +static bool mi_commit_mask_any_set(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + if ((commit->mask[i] & cm->mask[i]) != 0) return true; + } + return false; +} + +static void mi_commit_mask_create_intersect(const mi_commit_mask_t* commit, const mi_commit_mask_t* cm, mi_commit_mask_t* res) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + res->mask[i] = (commit->mask[i] & cm->mask[i]); + } +} + +static void mi_commit_mask_clear(mi_commit_mask_t* res, const mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + res->mask[i] &= ~(cm->mask[i]); + } +} + +static void mi_commit_mask_set(mi_commit_mask_t* res, const mi_commit_mask_t* cm) { + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + res->mask[i] |= cm->mask[i]; + } +} + +static void mi_commit_mask_create(size_t bitidx, size_t bitcount, mi_commit_mask_t* cm) { + mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS); + mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS); + if (bitcount == MI_COMMIT_MASK_BITS) { + mi_assert_internal(bitidx==0); + mi_commit_mask_create_full(cm); + } + else if (bitcount == 0) { + mi_commit_mask_create_empty(cm); + } + else { + mi_commit_mask_create_empty(cm); + size_t i = bitidx / MI_COMMIT_MASK_FIELD_BITS; + size_t ofs = bitidx % MI_COMMIT_MASK_FIELD_BITS; + while (bitcount > 0) { + mi_assert_internal(i < MI_COMMIT_MASK_FIELD_COUNT); + size_t avail = MI_COMMIT_MASK_FIELD_BITS - ofs; + size_t count = (bitcount > avail ? avail : bitcount); + size_t mask = (count >= MI_COMMIT_MASK_FIELD_BITS ? 
~((size_t)0) : (((size_t)1 << count) - 1) << ofs); + cm->mask[i] = mask; + bitcount -= count; + ofs = 0; + i++; + } + } +} + +size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total) { + mi_assert_internal((total%MI_COMMIT_MASK_BITS)==0); + size_t count = 0; + for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) { + size_t mask = cm->mask[i]; + if (~mask == 0) { + count += MI_COMMIT_MASK_FIELD_BITS; + } + else { + for (; mask != 0; mask >>= 1) { // todo: use popcount + if ((mask&1)!=0) count++; + } + } + } + // we use total since for huge segments each commit bit may represent a larger size + return ((total / MI_COMMIT_MASK_BITS) * count); +} + + +size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx) { + size_t i = (*idx) / MI_COMMIT_MASK_FIELD_BITS; + size_t ofs = (*idx) % MI_COMMIT_MASK_FIELD_BITS; + size_t mask = 0; + // find first ones + while (i < MI_COMMIT_MASK_FIELD_COUNT) { + mask = cm->mask[i]; + mask >>= ofs; + if (mask != 0) { + while ((mask&1) == 0) { + mask >>= 1; + ofs++; + } + break; + } + i++; + ofs = 0; + } + if (i >= MI_COMMIT_MASK_FIELD_COUNT) { + // not found + *idx = MI_COMMIT_MASK_BITS; + return 0; + } + else { + // found, count ones + size_t count = 0; + *idx = (i*MI_COMMIT_MASK_FIELD_BITS) + ofs; + do { + mi_assert_internal(ofs < MI_COMMIT_MASK_FIELD_BITS && (mask&1) == 1); + do { + count++; + mask >>= 1; + } while ((mask&1) == 1); + if ((((*idx + count) % MI_COMMIT_MASK_FIELD_BITS) == 0)) { + i++; + if (i >= MI_COMMIT_MASK_FIELD_COUNT) break; + mask = cm->mask[i]; + ofs = 0; + } + } while ((mask&1) == 1); + mi_assert_internal(count > 0); + return count; + } +} + + +/* -------------------------------------------------------------------------------- + Segment allocation +-------------------------------------------------------------------------------- */ + + +/* ----------------------------------------------------------- + Slices +----------------------------------------------------------- */ + + +static const mi_slice_t* mi_segment_slices_end(const mi_segment_t* segment) { + return &segment->slices[segment->slice_entries]; +} + +static uint8_t* mi_slice_start(const mi_slice_t* slice) { + mi_segment_t* segment = _mi_ptr_segment(slice); + mi_assert_internal(slice >= segment->slices && slice < mi_segment_slices_end(segment)); + return ((uint8_t*)segment + ((slice - segment->slices)*MI_SEGMENT_SLICE_SIZE)); +} + + +/* ----------------------------------------------------------- + Bins +----------------------------------------------------------- */ +// Use bit scan forward to quickly find the first zero bit if it is available + +static inline size_t mi_slice_bin8(size_t slice_count) { + if (slice_count<=1) return slice_count; + mi_assert_internal(slice_count <= MI_SLICES_PER_SEGMENT); + slice_count--; + size_t s = mi_bsr(slice_count); // slice_count > 1 + if (s <= 2) return slice_count + 1; + size_t bin = ((s << 2) | ((slice_count >> (s - 2))&0x03)) - 4; + return bin; +} + +static inline size_t mi_slice_bin(size_t slice_count) { + mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_SEGMENT_SIZE); + mi_assert_internal(mi_slice_bin8(MI_SLICES_PER_SEGMENT) <= MI_SEGMENT_BIN_MAX); + size_t bin = mi_slice_bin8(slice_count); + mi_assert_internal(bin <= MI_SEGMENT_BIN_MAX); + return bin; +} + +static inline size_t mi_slice_index(const mi_slice_t* slice) { + mi_segment_t* segment = _mi_ptr_segment(slice); + ptrdiff_t index = slice - segment->slices; + mi_assert_internal(index >= 0 && index < (ptrdiff_t)segment->slice_entries); + 
return index; +} + + +/* ----------------------------------------------------------- + Slice span queues +----------------------------------------------------------- */ + +static void mi_span_queue_push(mi_span_queue_t* sq, mi_slice_t* slice) { + // todo: or push to the end? + mi_assert_internal(slice->prev == NULL && slice->next==NULL); + slice->prev = NULL; // paranoia + slice->next = sq->first; + sq->first = slice; + if (slice->next != NULL) slice->next->prev = slice; + else sq->last = slice; + slice->block_size = 0; // free +} + +static mi_span_queue_t* mi_span_queue_for(size_t slice_count, mi_segments_tld_t* tld) { + size_t bin = mi_slice_bin(slice_count); + mi_span_queue_t* sq = &tld->spans[bin]; + mi_assert_internal(sq->slice_count >= slice_count); + return sq; +} + +static void mi_span_queue_delete(mi_span_queue_t* sq, mi_slice_t* slice) { + mi_assert_internal(slice->block_size==0 && slice->slice_count>0 && slice->slice_offset==0); + // should work too if the queue does not contain slice (which can happen during reclaim) + if (slice->prev != NULL) slice->prev->next = slice->next; + if (slice == sq->first) sq->first = slice->next; + if (slice->next != NULL) slice->next->prev = slice->prev; + if (slice == sq->last) sq->last = slice->prev; + slice->prev = NULL; + slice->next = NULL; + slice->block_size = 1; // no more free +} + + +/* ----------------------------------------------------------- + Invariant checking +----------------------------------------------------------- */ + +static bool mi_slice_is_used(const mi_slice_t* slice) { + return (slice->block_size > 0); +} + + +#if (MI_DEBUG>=3) +static bool mi_span_queue_contains(mi_span_queue_t* sq, mi_slice_t* slice) { + for (mi_slice_t* s = sq->first; s != NULL; s = s->next) { + if (s==slice) return true; + } + return false; +} + +static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) { + mi_assert_internal(segment != NULL); + mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie); + mi_assert_internal(segment->abandoned <= segment->used); + mi_assert_internal(segment->thread_id == 0 || segment->thread_id == _mi_thread_id()); + mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); // can only decommit committed blocks + //mi_assert_internal(segment->segment_info_size % MI_SEGMENT_SLICE_SIZE == 0); + mi_slice_t* slice = &segment->slices[0]; + const mi_slice_t* end = mi_segment_slices_end(segment); + size_t used_count = 0; + mi_span_queue_t* sq; + while(slice < end) { + mi_assert_internal(slice->slice_count > 0); + mi_assert_internal(slice->slice_offset == 0); + size_t index = mi_slice_index(slice); + size_t maxindex = (index + slice->slice_count >= segment->slice_entries ? 
segment->slice_entries : index + slice->slice_count) - 1; + if (mi_slice_is_used(slice)) { // a page in use, we need at least MAX_SLICE_OFFSET_COUNT valid back offsets + used_count++; + mi_assert_internal(slice->is_huge == (segment->kind == MI_SEGMENT_HUGE)); + for (size_t i = 0; i <= MI_MAX_SLICE_OFFSET_COUNT && index + i <= maxindex; i++) { + mi_assert_internal(segment->slices[index + i].slice_offset == i*sizeof(mi_slice_t)); + mi_assert_internal(i==0 || segment->slices[index + i].slice_count == 0); + mi_assert_internal(i==0 || segment->slices[index + i].block_size == 1); + } + // and the last entry as well (for coalescing) + const mi_slice_t* last = slice + slice->slice_count - 1; + if (last > slice && last < mi_segment_slices_end(segment)) { + mi_assert_internal(last->slice_offset == (slice->slice_count-1)*sizeof(mi_slice_t)); + mi_assert_internal(last->slice_count == 0); + mi_assert_internal(last->block_size == 1); + } + } + else { // free range of slices; only last slice needs a valid back offset + mi_slice_t* last = &segment->slices[maxindex]; + if (segment->kind != MI_SEGMENT_HUGE || slice->slice_count <= (segment->slice_entries - segment->segment_info_slices)) { + mi_assert_internal((uint8_t*)slice == (uint8_t*)last - last->slice_offset); + } + mi_assert_internal(slice == last || last->slice_count == 0 ); + mi_assert_internal(last->block_size == 0 || (segment->kind==MI_SEGMENT_HUGE && last->block_size==1)); + if (segment->kind != MI_SEGMENT_HUGE && segment->thread_id != 0) { // segment is not huge or abandoned + sq = mi_span_queue_for(slice->slice_count,tld); + mi_assert_internal(mi_span_queue_contains(sq,slice)); + } + } + slice = &segment->slices[maxindex+1]; + } + mi_assert_internal(slice == end); + mi_assert_internal(used_count == segment->used + 1); + return true; +} +#endif + +/* ----------------------------------------------------------- + Segment size calculations +----------------------------------------------------------- */ + +static size_t mi_segment_info_size(mi_segment_t* segment) { + return segment->segment_info_slices * MI_SEGMENT_SLICE_SIZE; +} + +static uint8_t* _mi_segment_page_start_from_slice(const mi_segment_t* segment, const mi_slice_t* slice, size_t block_size, size_t* page_size) +{ + const ptrdiff_t idx = slice - segment->slices; + const size_t psize = (size_t)slice->slice_count * MI_SEGMENT_SLICE_SIZE; + uint8_t* const pstart = (uint8_t*)segment + (idx*MI_SEGMENT_SLICE_SIZE); + // make the start not OS page aligned for smaller blocks to avoid page/cache effects + // note: the offset must always be a block_size multiple since we assume small allocations + // are aligned (see `mi_heap_malloc_aligned`). 
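  // Illustration (editorial, with example numbers): for 64-byte blocks the offsets computed
  // below add 3*64 = 192 bytes to the page start, so pages of small blocks do not all begin
  // at the same OS-page/cache alignment, while every block still starts at a multiple of its
  // block size as the comment above requires.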
+ size_t start_offset = 0; + if (block_size > 0 && block_size <= MI_MAX_ALIGN_GUARANTEE) { + // for small objects, ensure the page start is aligned with the block size (PR#66 by kickunderscore) + const size_t adjust = block_size - ((uintptr_t)pstart % block_size); + if (adjust < block_size && psize >= block_size + adjust) { + start_offset += adjust; + } + } + if (block_size >= MI_INTPTR_SIZE) { + if (block_size <= 64) { start_offset += 3*block_size; } + else if (block_size <= 512) { start_offset += block_size; } + } + if (page_size != NULL) { *page_size = psize - start_offset; } + return (pstart + start_offset); +} + +// Start of the page available memory; can be used on uninitialized pages +uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) +{ + const mi_slice_t* slice = mi_page_to_slice((mi_page_t*)page); + uint8_t* p = _mi_segment_page_start_from_slice(segment, slice, mi_page_block_size(page), page_size); + mi_assert_internal(mi_page_block_size(page) > 0 || _mi_ptr_page(p) == page); + mi_assert_internal(_mi_ptr_segment(p) == segment); + return p; +} + + +static size_t mi_segment_calculate_slices(size_t required, size_t* info_slices) { + size_t page_size = _mi_os_page_size(); + size_t isize = _mi_align_up(sizeof(mi_segment_t), page_size); + size_t guardsize = 0; + + if (MI_SECURE>0) { + // in secure mode, we set up a protected page in between the segment info + // and the page data (and one at the end of the segment) + guardsize = page_size; + if (required > 0) { + required = _mi_align_up(required, MI_SEGMENT_SLICE_SIZE) + page_size; + } + } + + isize = _mi_align_up(isize + guardsize, MI_SEGMENT_SLICE_SIZE); + if (info_slices != NULL) *info_slices = isize / MI_SEGMENT_SLICE_SIZE; + size_t segment_size = (required==0 ? MI_SEGMENT_SIZE : _mi_align_up( required + isize + guardsize, MI_SEGMENT_SLICE_SIZE) ); + mi_assert_internal(segment_size % MI_SEGMENT_SLICE_SIZE == 0); + return (segment_size / MI_SEGMENT_SLICE_SIZE); +} + + +/* ---------------------------------------------------------------------------- +Segment caches +We keep a small segment cache per thread to increase local +reuse and avoid setting/clearing guard pages in secure mode. +------------------------------------------------------------------------------- */ + +static void mi_segments_track_size(long segment_size, mi_segments_tld_t* tld) { + if (segment_size>=0) _mi_stat_increase(&tld->stats->segments,1); + else _mi_stat_decrease(&tld->stats->segments,1); + tld->count += (segment_size >= 0 ? 
1 : -1); + if (tld->count > tld->peak_count) tld->peak_count = tld->count; + tld->current_size += segment_size; + if (tld->current_size > tld->peak_size) tld->peak_size = tld->current_size; +} + +static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) { + segment->thread_id = 0; + _mi_segment_map_freed_at(segment); + mi_segments_track_size(-((long)mi_segment_size(segment)),tld); + if (segment->was_reclaimed) { + tld->reclaim_count--; + segment->was_reclaimed = false; + } + if (MI_SECURE>0) { + // _mi_os_unprotect(segment, mi_segment_size(segment)); // ensure no more guard pages are set + // unprotect the guard pages; we cannot just unprotect the whole segment size as part may be decommitted + size_t os_pagesize = _mi_os_page_size(); + _mi_os_unprotect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize); + uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize; + _mi_os_unprotect(end, os_pagesize); + } + + // purge delayed decommits now? (no, leave it to the arena) + // mi_segment_try_purge(segment,true,tld->stats); + + const size_t size = mi_segment_size(segment); + const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size); + + _mi_abandoned_await_readers(); // wait until safe to free + _mi_arena_free(segment, mi_segment_size(segment), csize, segment->memid, tld->stats); +} + +/* ----------------------------------------------------------- + Commit/Decommit ranges +----------------------------------------------------------- */ + +static void mi_segment_commit_mask(mi_segment_t* segment, bool conservative, uint8_t* p, size_t size, uint8_t** start_p, size_t* full_size, mi_commit_mask_t* cm) { + mi_assert_internal(_mi_ptr_segment(p + 1) == segment); + mi_assert_internal(segment->kind != MI_SEGMENT_HUGE); + mi_commit_mask_create_empty(cm); + if (size == 0 || size > MI_SEGMENT_SIZE || segment->kind == MI_SEGMENT_HUGE) return; + const size_t segstart = mi_segment_info_size(segment); + const size_t segsize = mi_segment_size(segment); + if (p >= (uint8_t*)segment + segsize) return; + + size_t pstart = (p - (uint8_t*)segment); + mi_assert_internal(pstart + size <= segsize); + + size_t start; + size_t end; + if (conservative) { + // decommit conservative + start = _mi_align_up(pstart, MI_COMMIT_SIZE); + end = _mi_align_down(pstart + size, MI_COMMIT_SIZE); + mi_assert_internal(start >= segstart); + mi_assert_internal(end <= segsize); + } + else { + // commit liberal + start = _mi_align_down(pstart, MI_MINIMAL_COMMIT_SIZE); + end = _mi_align_up(pstart + size, MI_MINIMAL_COMMIT_SIZE); + } + if (pstart >= segstart && start < segstart) { // note: the mask is also calculated for an initial commit of the info area + start = segstart; + } + if (end > segsize) { + end = segsize; + } + + mi_assert_internal(start <= pstart && (pstart + size) <= end); + mi_assert_internal(start % MI_COMMIT_SIZE==0 && end % MI_COMMIT_SIZE == 0); + *start_p = (uint8_t*)segment + start; + *full_size = (end > start ? 
end - start : 0); + if (*full_size == 0) return; + + size_t bitidx = start / MI_COMMIT_SIZE; + mi_assert_internal(bitidx < MI_COMMIT_MASK_BITS); + + size_t bitcount = *full_size / MI_COMMIT_SIZE; // can be 0 + if (bitidx + bitcount > MI_COMMIT_MASK_BITS) { + _mi_warning_message("commit mask overflow: idx=%zu count=%zu start=%zx end=%zx p=0x%p size=%zu fullsize=%zu\n", bitidx, bitcount, start, end, p, size, *full_size); + } + mi_assert_internal((bitidx + bitcount) <= MI_COMMIT_MASK_BITS); + mi_commit_mask_create(bitidx, bitcount, cm); +} + +static bool mi_segment_commit(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { + mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); + + // commit liberal + uint8_t* start = NULL; + size_t full_size = 0; + mi_commit_mask_t mask; + mi_segment_commit_mask(segment, false /* conservative? */, p, size, &start, &full_size, &mask); + if (mi_commit_mask_is_empty(&mask) || full_size == 0) return true; + + if (!mi_commit_mask_all_set(&segment->commit_mask, &mask)) { + // committing + bool is_zero = false; + mi_commit_mask_t cmask; + mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); + _mi_stat_decrease(&_mi_stats_main.committed, _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for overlap + if (!_mi_os_commit(start, full_size, &is_zero, stats)) return false; + mi_commit_mask_set(&segment->commit_mask, &mask); + } + + // increase purge expiration when using part of delayed purges -- we assume more allocations are coming soon. + if (mi_commit_mask_any_set(&segment->purge_mask, &mask)) { + segment->purge_expire = _mi_clock_now() + mi_option_get(mi_option_purge_delay); + } + + // always clear any delayed purges in our range (as they are either committed now) + mi_commit_mask_clear(&segment->purge_mask, &mask); + return true; +} + +static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { + mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); + // note: assumes commit_mask is always full for huge segments as otherwise the commit mask bits can overflow + if (mi_commit_mask_is_full(&segment->commit_mask) && mi_commit_mask_is_empty(&segment->purge_mask)) return true; // fully committed + mi_assert_internal(segment->kind != MI_SEGMENT_HUGE); + return mi_segment_commit(segment, p, size, stats); +} + +static bool mi_segment_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { + mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); + if (!segment->allow_purge) return true; + + // purge conservative + uint8_t* start = NULL; + size_t full_size = 0; + mi_commit_mask_t mask; + mi_segment_commit_mask(segment, true /* conservative? 
*/, p, size, &start, &full_size, &mask); + if (mi_commit_mask_is_empty(&mask) || full_size==0) return true; + + if (mi_commit_mask_any_set(&segment->commit_mask, &mask)) { + // purging + mi_assert_internal((void*)start != (void*)segment); + mi_assert_internal(segment->allow_decommit); + const bool decommitted = _mi_os_purge(start, full_size, stats); // reset or decommit + if (decommitted) { + mi_commit_mask_t cmask; + mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); + _mi_stat_increase(&_mi_stats_main.committed, full_size - _mi_commit_mask_committed_size(&cmask, MI_SEGMENT_SIZE)); // adjust for double counting + mi_commit_mask_clear(&segment->commit_mask, &mask); + } + } + + // always clear any scheduled purges in our range + mi_commit_mask_clear(&segment->purge_mask, &mask); + return true; +} + +static void mi_segment_schedule_purge(mi_segment_t* segment, uint8_t* p, size_t size, mi_stats_t* stats) { + if (!segment->allow_purge) return; + + if (mi_option_get(mi_option_purge_delay) == 0) { + mi_segment_purge(segment, p, size, stats); + } + else { + // register for future purge in the purge mask + uint8_t* start = NULL; + size_t full_size = 0; + mi_commit_mask_t mask; + mi_segment_commit_mask(segment, true /*conservative*/, p, size, &start, &full_size, &mask); + if (mi_commit_mask_is_empty(&mask) || full_size==0) return; + + // update delayed commit + mi_assert_internal(segment->purge_expire > 0 || mi_commit_mask_is_empty(&segment->purge_mask)); + mi_commit_mask_t cmask; + mi_commit_mask_create_intersect(&segment->commit_mask, &mask, &cmask); // only purge what is committed; span_free may try to decommit more + mi_commit_mask_set(&segment->purge_mask, &cmask); + mi_msecs_t now = _mi_clock_now(); + if (segment->purge_expire == 0) { + // no previous purgess, initialize now + segment->purge_expire = now + mi_option_get(mi_option_purge_delay); + } + else if (segment->purge_expire <= now) { + // previous purge mask already expired + if (segment->purge_expire + mi_option_get(mi_option_purge_extend_delay) <= now) { + mi_segment_try_purge(segment, true, stats); + } + else { + segment->purge_expire = now + mi_option_get(mi_option_purge_extend_delay); // (mi_option_get(mi_option_purge_delay) / 8); // wait a tiny bit longer in case there is a series of free's + } + } + else { + // previous purge mask is not yet expired, increase the expiration by a bit. 
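      // Illustration (editorial, made-up numbers): if purge_delay were 10ms and another free
      // arrives 3ms after this mask was scheduled, the expiration below is only pushed out by
      // the (much shorter) purge_extend_delay instead of being reset to a full purge_delay.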
+ segment->purge_expire += mi_option_get(mi_option_purge_extend_delay); + } + } +} + +static void mi_segment_try_purge(mi_segment_t* segment, bool force, mi_stats_t* stats) { + if (!segment->allow_purge || segment->purge_expire == 0 || mi_commit_mask_is_empty(&segment->purge_mask)) return; + mi_msecs_t now = _mi_clock_now(); + if (!force && now < segment->purge_expire) return; + + mi_commit_mask_t mask = segment->purge_mask; + segment->purge_expire = 0; + mi_commit_mask_create_empty(&segment->purge_mask); + + size_t idx; + size_t count; + mi_commit_mask_foreach(&mask, idx, count) { + // if found, decommit that sequence + if (count > 0) { + uint8_t* p = (uint8_t*)segment + (idx*MI_COMMIT_SIZE); + size_t size = count * MI_COMMIT_SIZE; + mi_segment_purge(segment, p, size, stats); + } + } + mi_commit_mask_foreach_end() + mi_assert_internal(mi_commit_mask_is_empty(&segment->purge_mask)); +} + +// called from `mi_heap_collect_ex` +// this can be called per-page so it is important that try_purge has fast exit path +void _mi_segment_collect(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) { + mi_segment_try_purge(segment, force, tld->stats); +} + +/* ----------------------------------------------------------- + Span free +----------------------------------------------------------- */ + +static bool mi_segment_is_abandoned(mi_segment_t* segment) { + return (mi_atomic_load_relaxed(&segment->thread_id) == 0); +} + +// note: can be called on abandoned segments +static void mi_segment_span_free(mi_segment_t* segment, size_t slice_index, size_t slice_count, bool allow_purge, mi_segments_tld_t* tld) { + mi_assert_internal(slice_index < segment->slice_entries); + mi_span_queue_t* sq = (segment->kind == MI_SEGMENT_HUGE || mi_segment_is_abandoned(segment) + ? NULL : mi_span_queue_for(slice_count,tld)); + if (slice_count==0) slice_count = 1; + mi_assert_internal(slice_index + slice_count - 1 < segment->slice_entries); + + // set first and last slice (the intermediates can be undetermined) + mi_slice_t* slice = &segment->slices[slice_index]; + slice->slice_count = (uint32_t)slice_count; + mi_assert_internal(slice->slice_count == slice_count); // no overflow? 
+ slice->slice_offset = 0; + if (slice_count > 1) { + mi_slice_t* last = slice + slice_count - 1; + mi_slice_t* end = (mi_slice_t*)mi_segment_slices_end(segment); + if (last > end) { last = end; } + last->slice_count = 0; + last->slice_offset = (uint32_t)(sizeof(mi_page_t)*(slice_count - 1)); + last->block_size = 0; + } + + // perhaps decommit + if (allow_purge) { + mi_segment_schedule_purge(segment, mi_slice_start(slice), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats); + } + + // and push it on the free page queue (if it was not a huge page) + if (sq != NULL) mi_span_queue_push( sq, slice ); + else slice->block_size = 0; // mark huge page as free anyways +} + +/* +// called from reclaim to add existing free spans +static void mi_segment_span_add_free(mi_slice_t* slice, mi_segments_tld_t* tld) { + mi_segment_t* segment = _mi_ptr_segment(slice); + mi_assert_internal(slice->xblock_size==0 && slice->slice_count>0 && slice->slice_offset==0); + size_t slice_index = mi_slice_index(slice); + mi_segment_span_free(segment,slice_index,slice->slice_count,tld); +} +*/ + +static void mi_segment_span_remove_from_queue(mi_slice_t* slice, mi_segments_tld_t* tld) { + mi_assert_internal(slice->slice_count > 0 && slice->slice_offset==0 && slice->block_size==0); + mi_assert_internal(_mi_ptr_segment(slice)->kind != MI_SEGMENT_HUGE); + mi_span_queue_t* sq = mi_span_queue_for(slice->slice_count, tld); + mi_span_queue_delete(sq, slice); +} + +// note: can be called on abandoned segments +static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_tld_t* tld) { + mi_assert_internal(slice != NULL && slice->slice_count > 0 && slice->slice_offset == 0); + mi_segment_t* const segment = _mi_ptr_segment(slice); + const bool is_abandoned = (segment->thread_id == 0); // mi_segment_is_abandoned(segment); + + // for huge pages, just mark as free but don't add to the queues + if (segment->kind == MI_SEGMENT_HUGE) { + // issue #691: segment->used can be 0 if the huge page block was freed while abandoned (reclaim will get here in that case) + mi_assert_internal((segment->used==0 && slice->block_size==0) || segment->used == 1); // decreased right after this call in `mi_segment_page_clear` + slice->block_size = 0; // mark as free anyways + // we should mark the last slice `xblock_size=0` now to maintain invariants but we skip it to + // avoid a possible cache miss (and the segment is about to be freed) + return slice; + } + + // otherwise coalesce the span and add to the free span queues + size_t slice_count = slice->slice_count; + mi_slice_t* next = slice + slice->slice_count; + mi_assert_internal(next <= mi_segment_slices_end(segment)); + if (next < mi_segment_slices_end(segment) && next->block_size==0) { + // free next block -- remove it from free and merge + mi_assert_internal(next->slice_count > 0 && next->slice_offset==0); + slice_count += next->slice_count; // extend + if (!is_abandoned) { mi_segment_span_remove_from_queue(next, tld); } + } + if (slice > segment->slices) { + mi_slice_t* prev = mi_slice_first(slice - 1); + mi_assert_internal(prev >= segment->slices); + if (prev->block_size==0) { + // free previous slice -- remove it from free and merge + mi_assert_internal(prev->slice_count > 0 && prev->slice_offset==0); + slice_count += prev->slice_count; + if (!is_abandoned) { mi_segment_span_remove_from_queue(prev, tld); } + slice = prev; + } + } + + // and add the new free page + mi_segment_span_free(segment, mi_slice_index(slice), slice_count, true, tld); + return slice; +} + + + +/* 
----------------------------------------------------------- + Page allocation +----------------------------------------------------------- */ + +// Note: may still return NULL if committing the memory failed +static mi_page_t* mi_segment_span_allocate(mi_segment_t* segment, size_t slice_index, size_t slice_count, mi_segments_tld_t* tld) { + mi_assert_internal(slice_index < segment->slice_entries); + mi_slice_t* const slice = &segment->slices[slice_index]; + mi_assert_internal(slice->block_size==0 || slice->block_size==1); + + // commit before changing the slice data + if (!mi_segment_ensure_committed(segment, _mi_segment_page_start_from_slice(segment, slice, 0, NULL), slice_count * MI_SEGMENT_SLICE_SIZE, tld->stats)) { + return NULL; // commit failed! + } + + // convert the slices to a page + slice->slice_offset = 0; + slice->slice_count = (uint32_t)slice_count; + mi_assert_internal(slice->slice_count == slice_count); + const size_t bsize = slice_count * MI_SEGMENT_SLICE_SIZE; + slice->block_size = bsize; + mi_page_t* page = mi_slice_to_page(slice); + mi_assert_internal(mi_page_block_size(page) == bsize); + + // set slice back pointers for the first MI_MAX_SLICE_OFFSET_COUNT entries + size_t extra = slice_count-1; + if (extra > MI_MAX_SLICE_OFFSET_COUNT) extra = MI_MAX_SLICE_OFFSET_COUNT; + if (slice_index + extra >= segment->slice_entries) extra = segment->slice_entries - slice_index - 1; // huge objects may have more slices than avaiable entries in the segment->slices + + mi_slice_t* slice_next = slice + 1; + for (size_t i = 1; i <= extra; i++, slice_next++) { + slice_next->slice_offset = (uint32_t)(sizeof(mi_slice_t)*i); + slice_next->slice_count = 0; + slice_next->block_size = 1; + } + + // and also for the last one (if not set already) (the last one is needed for coalescing and for large alignments) + // note: the cast is needed for ubsan since the index can be larger than MI_SLICES_PER_SEGMENT for huge allocations (see #543) + mi_slice_t* last = slice + slice_count - 1; + mi_slice_t* end = (mi_slice_t*)mi_segment_slices_end(segment); + if (last > end) last = end; + if (last > slice) { + last->slice_offset = (uint32_t)(sizeof(mi_slice_t) * (last - slice)); + last->slice_count = 0; + last->block_size = 1; + } + + // and initialize the page + page->is_committed = true; + page->is_huge = (segment->kind == MI_SEGMENT_HUGE); + segment->used++; + return page; +} + +static void mi_segment_slice_split(mi_segment_t* segment, mi_slice_t* slice, size_t slice_count, mi_segments_tld_t* tld) { + mi_assert_internal(_mi_ptr_segment(slice) == segment); + mi_assert_internal(slice->slice_count >= slice_count); + mi_assert_internal(slice->block_size > 0); // no more in free queue + if (slice->slice_count <= slice_count) return; + mi_assert_internal(segment->kind != MI_SEGMENT_HUGE); + size_t next_index = mi_slice_index(slice) + slice_count; + size_t next_count = slice->slice_count - slice_count; + mi_segment_span_free(segment, next_index, next_count, false /* don't purge left-over part */, tld); + slice->slice_count = (uint32_t)slice_count; +} + +static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld) { + mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_LARGE_OBJ_SIZE_MAX); + // search from best fit up + mi_span_queue_t* sq = mi_span_queue_for(slice_count, tld); + if (slice_count == 0) slice_count = 1; + while (sq <= &tld->spans[MI_SEGMENT_BIN_MAX]) { + for (mi_slice_t* slice = sq->first; slice != NULL; slice = slice->next) { 
+ if (slice->slice_count >= slice_count) { + // found one + mi_segment_t* segment = _mi_ptr_segment(slice); + if (_mi_arena_memid_is_suitable(segment->memid, req_arena_id)) { + // found a suitable page span + mi_span_queue_delete(sq, slice); + + if (slice->slice_count > slice_count) { + mi_segment_slice_split(segment, slice, slice_count, tld); + } + mi_assert_internal(slice != NULL && slice->slice_count == slice_count && slice->block_size > 0); + mi_page_t* page = mi_segment_span_allocate(segment, mi_slice_index(slice), slice->slice_count, tld); + if (page == NULL) { + // commit failed; return NULL but first restore the slice + mi_segment_span_free_coalesce(slice, tld); + return NULL; + } + return page; + } + } + } + sq++; + } + // could not find a page.. + return NULL; +} + + +/* ----------------------------------------------------------- + Segment allocation +----------------------------------------------------------- */ + +static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment, bool eager_delayed, mi_arena_id_t req_arena_id, + size_t* psegment_slices, size_t* pinfo_slices, + bool commit, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) + +{ + mi_memid_t memid; + bool allow_large = (!eager_delayed && (MI_SECURE == 0)); // only allow large OS pages once we are no longer lazy + size_t align_offset = 0; + size_t alignment = MI_SEGMENT_ALIGN; + + if (page_alignment > 0) { + // mi_assert_internal(huge_page != NULL); + mi_assert_internal(page_alignment >= MI_SEGMENT_ALIGN); + alignment = page_alignment; + const size_t info_size = (*pinfo_slices) * MI_SEGMENT_SLICE_SIZE; + align_offset = _mi_align_up( info_size, MI_SEGMENT_ALIGN ); + const size_t extra = align_offset - info_size; + // recalculate due to potential guard pages + *psegment_slices = mi_segment_calculate_slices(required + extra, pinfo_slices); + mi_assert_internal(*psegment_slices > 0 && *psegment_slices <= UINT32_MAX); + } + + const size_t segment_size = (*psegment_slices) * MI_SEGMENT_SLICE_SIZE; + mi_segment_t* segment = (mi_segment_t*)_mi_arena_alloc_aligned(segment_size, alignment, align_offset, commit, allow_large, req_arena_id, &memid, os_tld); + if (segment == NULL) { + return NULL; // failed to allocate + } + + // ensure metadata part of the segment is committed + mi_commit_mask_t commit_mask; + if (memid.initially_committed) { + mi_commit_mask_create_full(&commit_mask); + } + else { + // at least commit the info slices + const size_t commit_needed = _mi_divide_up((*pinfo_slices)*MI_SEGMENT_SLICE_SIZE, MI_COMMIT_SIZE); + mi_assert_internal(commit_needed>0); + mi_commit_mask_create(0, commit_needed, &commit_mask); + mi_assert_internal(commit_needed*MI_COMMIT_SIZE >= (*pinfo_slices)*MI_SEGMENT_SLICE_SIZE); + if (!_mi_os_commit(segment, commit_needed*MI_COMMIT_SIZE, NULL, tld->stats)) { + _mi_arena_free(segment,segment_size,0,memid,tld->stats); + return NULL; + } + } + mi_assert_internal(segment != NULL && (uintptr_t)segment % MI_SEGMENT_SIZE == 0); + + segment->memid = memid; + segment->allow_decommit = !memid.is_pinned; + segment->allow_purge = segment->allow_decommit && (mi_option_get(mi_option_purge_delay) >= 0); + segment->segment_size = segment_size; + segment->commit_mask = commit_mask; + segment->purge_expire = 0; + mi_commit_mask_create_empty(&segment->purge_mask); + + mi_segments_track_size((long)(segment_size), tld); + _mi_segment_map_allocated_at(segment); + return segment; +} + + +// Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE` . 
+static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) +{ + mi_assert_internal((required==0 && huge_page==NULL) || (required>0 && huge_page != NULL)); + + // calculate needed sizes first + size_t info_slices; + size_t segment_slices = mi_segment_calculate_slices(required, &info_slices); + mi_assert_internal(segment_slices > 0 && segment_slices <= UINT32_MAX); + + // Commit eagerly only if not the first N lazy segments (to reduce impact of many threads that allocate just a little) + const bool eager_delay = (// !_mi_os_has_overcommit() && // never delay on overcommit systems + _mi_current_thread_count() > 1 && // do not delay for the first N threads + tld->count < (size_t)mi_option_get(mi_option_eager_commit_delay)); + const bool eager = !eager_delay && mi_option_is_enabled(mi_option_eager_commit); + bool commit = eager || (required > 0); + + // Allocate the segment from the OS + mi_segment_t* segment = mi_segment_os_alloc(required, page_alignment, eager_delay, req_arena_id, + &segment_slices, &info_slices, commit, tld, os_tld); + if (segment == NULL) return NULL; + + // zero the segment info? -- not always needed as it may be zero initialized from the OS + if (!segment->memid.initially_zero) { + ptrdiff_t ofs = offsetof(mi_segment_t, next); + size_t prefix = offsetof(mi_segment_t, slices) - ofs; + size_t zsize = prefix + (sizeof(mi_slice_t) * (segment_slices + 1)); // one more + _mi_memzero((uint8_t*)segment + ofs, zsize); + } + + // initialize the rest of the segment info + const size_t slice_entries = (segment_slices > MI_SLICES_PER_SEGMENT ? MI_SLICES_PER_SEGMENT : segment_slices); + segment->segment_slices = segment_slices; + segment->segment_info_slices = info_slices; + segment->thread_id = _mi_thread_id(); + segment->cookie = _mi_ptr_cookie(segment); + segment->slice_entries = slice_entries; + segment->kind = (required == 0 ? MI_SEGMENT_NORMAL : MI_SEGMENT_HUGE); + + // _mi_memzero(segment->slices, sizeof(mi_slice_t)*(info_slices+1)); + _mi_stat_increase(&tld->stats->page_committed, mi_segment_info_size(segment)); + + // set up guard pages + size_t guard_slices = 0; + if (MI_SECURE>0) { + // in secure mode, we set up a protected page in between the segment info + // and the page data, and at the end of the segment. 
+ size_t os_pagesize = _mi_os_page_size(); + _mi_os_protect((uint8_t*)segment + mi_segment_info_size(segment) - os_pagesize, os_pagesize); + uint8_t* end = (uint8_t*)segment + mi_segment_size(segment) - os_pagesize; + mi_segment_ensure_committed(segment, end, os_pagesize, tld->stats); + _mi_os_protect(end, os_pagesize); + if (slice_entries == segment_slices) segment->slice_entries--; // don't use the last slice :-( + guard_slices = 1; + } + + // reserve first slices for segment info + mi_page_t* page0 = mi_segment_span_allocate(segment, 0, info_slices, tld); + mi_assert_internal(page0!=NULL); if (page0==NULL) return NULL; // cannot fail as we always commit in advance + mi_assert_internal(segment->used == 1); + segment->used = 0; // don't count our internal slices towards usage + + // initialize initial free pages + if (segment->kind == MI_SEGMENT_NORMAL) { // not a huge page + mi_assert_internal(huge_page==NULL); + mi_segment_span_free(segment, info_slices, segment->slice_entries - info_slices, false /* don't purge */, tld); + } + else { + mi_assert_internal(huge_page!=NULL); + mi_assert_internal(mi_commit_mask_is_empty(&segment->purge_mask)); + mi_assert_internal(mi_commit_mask_is_full(&segment->commit_mask)); + *huge_page = mi_segment_span_allocate(segment, info_slices, segment_slices - info_slices - guard_slices, tld); + mi_assert_internal(*huge_page != NULL); // cannot fail as we commit in advance + } + + mi_assert_expensive(mi_segment_is_valid(segment,tld)); + return segment; +} + + +static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t* tld) { + MI_UNUSED(force); + mi_assert_internal(segment != NULL); + mi_assert_internal(segment->next == NULL); + mi_assert_internal(segment->used == 0); + + // Remove the free pages + mi_slice_t* slice = &segment->slices[0]; + const mi_slice_t* end = mi_segment_slices_end(segment); + #if MI_DEBUG>1 + size_t page_count = 0; + #endif + while (slice < end) { + mi_assert_internal(slice->slice_count > 0); + mi_assert_internal(slice->slice_offset == 0); + mi_assert_internal(mi_slice_index(slice)==0 || slice->block_size == 0); // no more used pages .. + if (slice->block_size == 0 && segment->kind != MI_SEGMENT_HUGE) { + mi_segment_span_remove_from_queue(slice, tld); + } + #if MI_DEBUG>1 + page_count++; + #endif + slice = slice + slice->slice_count; + } + mi_assert_internal(page_count == 2); // first page is allocated by the segment itself + + // stats + _mi_stat_decrease(&tld->stats->page_committed, mi_segment_info_size(segment)); + + // return it to the OS + mi_segment_os_free(segment, tld); +} + + +/* ----------------------------------------------------------- + Page Free +----------------------------------------------------------- */ + +static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld); + +// note: can be called on abandoned pages +static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld) { + mi_assert_internal(page->block_size > 0); + mi_assert_internal(mi_page_all_free(page)); + mi_segment_t* segment = _mi_ptr_segment(page); + mi_assert_internal(segment->used > 0); + + size_t inuse = page->capacity * mi_page_block_size(page); + _mi_stat_decrease(&tld->stats->page_committed, inuse); + _mi_stat_decrease(&tld->stats->pages, 1); + + // reset the page memory to reduce memory pressure? 
+ if (segment->allow_decommit && mi_option_is_enabled(mi_option_deprecated_page_reset)) { + size_t psize; + uint8_t* start = _mi_segment_page_start(segment, page, &psize); + _mi_os_reset(start, psize, tld->stats); + } + + // zero the page data, but not the segment fields and heap tag + page->is_zero_init = false; + uint8_t heap_tag = page->heap_tag; + ptrdiff_t ofs = offsetof(mi_page_t, capacity); + _mi_memzero((uint8_t*)page + ofs, sizeof(*page) - ofs); + page->block_size = 1; + page->heap_tag = heap_tag; + + // and free it + mi_slice_t* slice = mi_segment_span_free_coalesce(mi_page_to_slice(page), tld); + segment->used--; + // cannot assert segment valid as it is called during reclaim + // mi_assert_expensive(mi_segment_is_valid(segment, tld)); + return slice; +} + +void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld) +{ + mi_assert(page != NULL); + + mi_segment_t* segment = _mi_page_segment(page); + mi_assert_expensive(mi_segment_is_valid(segment,tld)); + + // mark it as free now + mi_segment_page_clear(page, tld); + mi_assert_expensive(mi_segment_is_valid(segment, tld)); + + if (segment->used == 0) { + // no more used pages; remove from the free list and free the segment + mi_segment_free(segment, force, tld); + } + else if (segment->used == segment->abandoned) { + // only abandoned pages; remove from free list and abandon + mi_segment_abandon(segment,tld); + } + else { + // perform delayed purges + mi_segment_try_purge(segment, false /* force? */, tld->stats); + } +} + + +/* ----------------------------------------------------------- +Abandonment + +When threads terminate, they can leave segments with +live blocks (reachable through other threads). Such segments +are "abandoned" and will be reclaimed by other threads to +reuse their pages and/or free them eventually. The +`thread_id` of such segments is 0. + +When a block is freed in an abandoned segment, the segment +is reclaimed into that thread. + +Moreover, if threads are looking for a fresh segment, they +will first consider abondoned segments -- these can be found +by scanning the arena memory +(segments outside arena memoryare only reclaimed by a free). +----------------------------------------------------------- */ + +// legacy: Wait until there are no more pending reads on segments that used to be in the abandoned list +void _mi_abandoned_await_readers(void) { + // nothing needed +} + +/* ----------------------------------------------------------- + Abandon segment/page +----------------------------------------------------------- */ + +static void mi_segment_abandon(mi_segment_t* segment, mi_segments_tld_t* tld) { + mi_assert_internal(segment->used == segment->abandoned); + mi_assert_internal(segment->used > 0); + mi_assert_internal(segment->abandoned_visits == 0); + mi_assert_expensive(mi_segment_is_valid(segment,tld)); + + // remove the free pages from the free page queues + mi_slice_t* slice = &segment->slices[0]; + const mi_slice_t* end = mi_segment_slices_end(segment); + while (slice < end) { + mi_assert_internal(slice->slice_count > 0); + mi_assert_internal(slice->slice_offset == 0); + if (slice->block_size == 0) { // a free page + mi_segment_span_remove_from_queue(slice,tld); + slice->block_size = 0; // but keep it free + } + slice = slice + slice->slice_count; + } + + // perform delayed decommits (forcing is much slower on mstress) + // Only abandoned segments in arena memory can be reclaimed without a free + // so if a segment is not from an arena we force purge here to be conservative. 
+ const bool force_purge = (segment->memid.memkind != MI_MEM_ARENA) || mi_option_is_enabled(mi_option_abandoned_page_purge); + mi_segment_try_purge(segment, force_purge, tld->stats); + + // all pages in the segment are abandoned; add it to the abandoned list + _mi_stat_increase(&tld->stats->segments_abandoned, 1); + mi_segments_track_size(-((long)mi_segment_size(segment)), tld); + segment->thread_id = 0; + segment->abandoned_visits = 1; // from 0 to 1 to signify it is abandoned + if (segment->was_reclaimed) { + tld->reclaim_count--; + segment->was_reclaimed = false; + } + _mi_arena_segment_mark_abandoned(segment); +} + +void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld) { + mi_assert(page != NULL); + mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE); + mi_assert_internal(mi_page_heap(page) == NULL); + mi_segment_t* segment = _mi_page_segment(page); + + mi_assert_expensive(mi_segment_is_valid(segment,tld)); + segment->abandoned++; + + _mi_stat_increase(&tld->stats->pages_abandoned, 1); + mi_assert_internal(segment->abandoned <= segment->used); + if (segment->used == segment->abandoned) { + // all pages are abandoned, abandon the entire segment + mi_segment_abandon(segment, tld); + } +} + +/* ----------------------------------------------------------- + Reclaim abandoned pages +----------------------------------------------------------- */ + +static mi_slice_t* mi_slices_start_iterate(mi_segment_t* segment, const mi_slice_t** end) { + mi_slice_t* slice = &segment->slices[0]; + *end = mi_segment_slices_end(segment); + mi_assert_internal(slice->slice_count>0 && slice->block_size>0); // segment allocated page + slice = slice + slice->slice_count; // skip the first segment allocated page + return slice; +} + +// Possibly free pages and check if free space is available +static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, size_t block_size, mi_segments_tld_t* tld) +{ + mi_assert_internal(mi_segment_is_abandoned(segment)); + bool has_page = false; + + // for all slices + const mi_slice_t* end; + mi_slice_t* slice = mi_slices_start_iterate(segment, &end); + while (slice < end) { + mi_assert_internal(slice->slice_count > 0); + mi_assert_internal(slice->slice_offset == 0); + if (mi_slice_is_used(slice)) { // used page + // ensure used count is up to date and collect potential concurrent frees + mi_page_t* const page = mi_slice_to_page(slice); + _mi_page_free_collect(page, false); + if (mi_page_all_free(page)) { + // if this page is all free now, free it without adding to any queues (yet) + mi_assert_internal(page->next == NULL && page->prev==NULL); + _mi_stat_decrease(&tld->stats->pages_abandoned, 1); + segment->abandoned--; + slice = mi_segment_page_clear(page, tld); // re-assign slice due to coalesce! + mi_assert_internal(!mi_slice_is_used(slice)); + if (slice->slice_count >= slices_needed) { + has_page = true; + } + } + else if (mi_page_block_size(page) == block_size && mi_page_has_any_available(page)) { + // a page has available free blocks of the right size + has_page = true; + } + } + else { + // empty span + if (slice->slice_count >= slices_needed) { + has_page = true; + } + } + slice = slice + slice->slice_count; + } + return has_page; +} + +// Reclaim an abandoned segment; returns NULL if the segment was freed +// set `right_page_reclaimed` to `true` if it reclaimed a page of the right `block_size` that was not full. 
+static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap, size_t requested_block_size, bool* right_page_reclaimed, mi_segments_tld_t* tld) { + if (right_page_reclaimed != NULL) { *right_page_reclaimed = false; } + // can be 0 still with abandoned_next, or already a thread id for segments outside an arena that are reclaimed on a free. + mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id) == 0 || mi_atomic_load_relaxed(&segment->thread_id) == _mi_thread_id()); + mi_atomic_store_release(&segment->thread_id, _mi_thread_id()); + segment->abandoned_visits = 0; + segment->was_reclaimed = true; + tld->reclaim_count++; + mi_segments_track_size((long)mi_segment_size(segment), tld); + mi_assert_internal(segment->next == NULL); + _mi_stat_decrease(&tld->stats->segments_abandoned, 1); + + // for all slices + const mi_slice_t* end; + mi_slice_t* slice = mi_slices_start_iterate(segment, &end); + while (slice < end) { + mi_assert_internal(slice->slice_count > 0); + mi_assert_internal(slice->slice_offset == 0); + if (mi_slice_is_used(slice)) { + // in use: reclaim the page in our heap + mi_page_t* page = mi_slice_to_page(slice); + mi_assert_internal(page->is_committed); + mi_assert_internal(mi_page_thread_free_flag(page)==MI_NEVER_DELAYED_FREE); + mi_assert_internal(mi_page_heap(page) == NULL); + mi_assert_internal(page->next == NULL && page->prev==NULL); + _mi_stat_decrease(&tld->stats->pages_abandoned, 1); + segment->abandoned--; + // set the heap again and allow heap thread delayed free again. + mi_heap_t* target_heap = _mi_heap_by_tag(heap, page->heap_tag); // allow custom heaps to separate objects + if (target_heap == NULL) { + target_heap = heap; + _mi_error_message(EINVAL, "page with tag %u cannot be reclaimed by a heap with the same tag (using %u instead)\n", page->heap_tag, heap->tag ); + } + mi_page_set_heap(page, target_heap); + _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set) + _mi_page_free_collect(page, false); // ensure used count is up to date + if (mi_page_all_free(page)) { + // if everything free by now, free the page + slice = mi_segment_page_clear(page, tld); // set slice again due to coalesceing + } + else { + // otherwise reclaim it into the heap + _mi_page_reclaim(target_heap, page); + if (requested_block_size == mi_page_block_size(page) && mi_page_has_any_available(page) && heap == target_heap) { + if (right_page_reclaimed != NULL) { *right_page_reclaimed = true; } + } + } + } + else { + // the span is free, add it to our page queues + slice = mi_segment_span_free_coalesce(slice, tld); // set slice again due to coalesceing + } + mi_assert_internal(slice->slice_count>0 && slice->slice_offset==0); + slice = slice + slice->slice_count; + } + + mi_assert(segment->abandoned == 0); + mi_assert_expensive(mi_segment_is_valid(segment, tld)); + if (segment->used == 0) { // due to page_clear + mi_assert_internal(right_page_reclaimed == NULL || !(*right_page_reclaimed)); + mi_segment_free(segment, false, tld); + return NULL; + } + else { + return segment; + } +} + +// attempt to reclaim a particular segment (called from multi threaded free `alloc.c:mi_free_block_mt`) +bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment) { + if (mi_atomic_load_relaxed(&segment->thread_id) != 0) return false; // it is not abandoned + // don't reclaim more from a free than half the current segments + // this is to prevent a pure free-ing thread to start owning too many segments + if 
(heap->tld->segments.reclaim_count * 2 > heap->tld->segments.count) return false;
+  if (_mi_arena_segment_clear_abandoned(segment)) {  // atomically unabandon
+    mi_segment_t* res = mi_segment_reclaim(segment, heap, 0, NULL, &heap->tld->segments);
+    mi_assert_internal(res == segment);
+    return (res != NULL);
+  }
+  return false;
+}
+
+void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld) {
+  mi_segment_t* segment;
+  mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, &current);
+  while ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL) {
+    mi_segment_reclaim(segment, heap, 0, NULL, tld);
+  }
+}
+
+static long mi_segment_get_reclaim_tries(void) {
+  // limit the tries to 10% (default) of the abandoned segments with at least 8 and at most 1024 tries.
+  const size_t perc = (size_t)mi_option_get_clamp(mi_option_max_segment_reclaim, 0, 100);
+  if (perc <= 0) return 0;
+  const size_t total_count = _mi_arena_segment_abandoned_count();
+  if (total_count == 0) return 0;
+  const size_t relative_count = (total_count > 10000 ? (total_count / 100) * perc : (total_count * perc) / 100); // avoid overflow
+  long max_tries = (long)(relative_count <= 1 ? 1 : (relative_count > 1024 ? 1024 : relative_count));
+  if (max_tries < 8 && total_count > 8) { max_tries = 8; }
+  return max_tries;
+}
+
+static mi_segment_t* mi_segment_try_reclaim(mi_heap_t* heap, size_t needed_slices, size_t block_size, bool* reclaimed, mi_segments_tld_t* tld)
+{
+  *reclaimed = false;
+  long max_tries = mi_segment_get_reclaim_tries();
+  if (max_tries <= 0) return NULL;
+
+  mi_segment_t* segment;
+  mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, &current);
+  while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL))
+  {
+    segment->abandoned_visits++;
+    // todo: should we respect numa affinity for abandoned reclaim? perhaps only for the first visit?
+    // todo: an arena exclusive heap will potentially visit many abandoned unsuitable segments and use many tries
+    // Perhaps we can skip non-suitable ones in a better way?
+    bool is_suitable = _mi_heap_memid_is_suitable(heap, segment->memid);
+    bool has_page = mi_segment_check_free(segment,needed_slices,block_size,tld); // try to free up pages (due to concurrent frees)
+    if (segment->used == 0) {
+      // free the segment (by forced reclaim) to make it available to other threads.
+      // note1: we prefer to free a segment as that might lead to reclaiming another
+      // segment that is still partially used.
+      // note2: we could in principle optimize this by skipping the reclaim and directly
+      // freeing, but that would violate some invariants temporarily.
+      mi_segment_reclaim(segment, heap, 0, NULL, tld);
+    }
+    else if (has_page && is_suitable) {
+      // found a large enough free span, or a page of the right block_size with free space
+      // we return the result of reclaim (which is usually `segment`) as it might free
+      // the segment due to concurrent frees (in which case `NULL` is returned).
+      return mi_segment_reclaim(segment, heap, block_size, reclaimed, tld);
+    }
+    else if (segment->abandoned_visits > 3 && is_suitable) {
+      // always reclaim on the 3rd visit to limit the abandoned queue length.
+      mi_segment_reclaim(segment, heap, 0, NULL, tld);
+    }
+    else {
+      // otherwise, push on the visited list so it does not get looked at again too quickly
+      mi_segment_try_purge(segment, false /* true force? */, tld->stats); // force purge if needed as we may not visit it again soon
+      _mi_arena_segment_mark_abandoned(segment);
+    }
+  }
+  return NULL;
+}
+
+
+void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld)
+{
+  mi_segment_t* segment;
+  mi_arena_field_cursor_t current; _mi_arena_field_cursor_init(heap, &current);
+  long max_tries = (force ? (long)_mi_arena_segment_abandoned_count() : 1024); // limit latency
+  while ((max_tries-- > 0) && ((segment = _mi_arena_segment_clear_abandoned_next(&current)) != NULL)) {
+    mi_segment_check_free(segment,0,0,tld); // try to free up pages (due to concurrent frees)
+    if (segment->used == 0) {
+      // free the segment (by forced reclaim) to make it available to other threads.
+      // note: we could in principle optimize this by skipping the reclaim and directly
+      // freeing, but that would violate some invariants temporarily.
+      mi_segment_reclaim(segment, heap, 0, NULL, tld);
+    }
+    else {
+      // otherwise, purge if needed and push on the visited list
+      // note: forced purge can be expensive if many threads are destroyed/created as in mstress.
+      mi_segment_try_purge(segment, force, tld->stats);
+      _mi_arena_segment_mark_abandoned(segment);
+    }
+  }
+}
+
+/* -----------------------------------------------------------
+   Reclaim or allocate
+----------------------------------------------------------- */
+
+static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+{
+  mi_assert_internal(block_size <= MI_LARGE_OBJ_SIZE_MAX);
+
+  // 1. try to reclaim an abandoned segment
+  bool reclaimed;
+  mi_segment_t* segment = mi_segment_try_reclaim(heap, needed_slices, block_size, &reclaimed, tld);
+  if (reclaimed) {
+    // reclaimed the right page right into the heap
+    mi_assert_internal(segment != NULL);
+    return NULL; // pretend out-of-memory as the page will be in the page queue of the heap with available blocks
+  }
+  else if (segment != NULL) {
+    // reclaimed a segment with a large enough empty span in it
+    return segment;
+  }
+  // 2. otherwise allocate a fresh segment
+  return mi_segment_alloc(0, 0, heap->arena_id, tld, os_tld, NULL);
+}
+
+
+/* -----------------------------------------------------------
+   Page allocation
+----------------------------------------------------------- */
+
+static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_kind, size_t required, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld)
+{
+  mi_assert_internal(required <= MI_LARGE_OBJ_SIZE_MAX && page_kind <= MI_PAGE_LARGE);
+
+  // find a free page
+  size_t page_size = _mi_align_up(required, (required > MI_MEDIUM_PAGE_SIZE ? MI_MEDIUM_PAGE_SIZE : MI_SEGMENT_SLICE_SIZE));
+  size_t slices_needed = page_size / MI_SEGMENT_SLICE_SIZE;
+  mi_assert_internal(slices_needed * MI_SEGMENT_SLICE_SIZE == page_size);
+  mi_page_t* page = mi_segments_page_find_and_allocate(slices_needed, heap->arena_id, tld); //(required <= MI_SMALL_SIZE_MAX ?
0 : slices_needed), tld); + if (page==NULL) { + // no free page, allocate a new segment and try again + if (mi_segment_reclaim_or_alloc(heap, slices_needed, block_size, tld, os_tld) == NULL) { + // OOM or reclaimed a good page in the heap + return NULL; + } + else { + // otherwise try again + return mi_segments_page_alloc(heap, page_kind, required, block_size, tld, os_tld); + } + } + mi_assert_internal(page != NULL && page->slice_count*MI_SEGMENT_SLICE_SIZE == page_size); + mi_assert_internal(_mi_ptr_segment(page)->thread_id == _mi_thread_id()); + mi_segment_try_purge(_mi_ptr_segment(page), false, tld->stats); + return page; +} + + + +/* ----------------------------------------------------------- + Huge page allocation +----------------------------------------------------------- */ + +static mi_page_t* mi_segment_huge_page_alloc(size_t size, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) +{ + mi_page_t* page = NULL; + mi_segment_t* segment = mi_segment_alloc(size,page_alignment,req_arena_id,tld,os_tld,&page); + if (segment == NULL || page==NULL) return NULL; + mi_assert_internal(segment->used==1); + mi_assert_internal(mi_page_block_size(page) >= size); + #if MI_HUGE_PAGE_ABANDON + segment->thread_id = 0; // huge segments are immediately abandoned + #endif + + // for huge pages we initialize the block_size as we may + // overallocate to accommodate large alignments. + size_t psize; + uint8_t* start = _mi_segment_page_start(segment, page, &psize); + page->block_size = psize; + mi_assert_internal(page->is_huge); + + // decommit the part of the prefix of a page that will not be used; this can be quite large (close to MI_SEGMENT_SIZE) + if (page_alignment > 0 && segment->allow_decommit) { + uint8_t* aligned_p = (uint8_t*)_mi_align_up((uintptr_t)start, page_alignment); + mi_assert_internal(_mi_is_aligned(aligned_p, page_alignment)); + mi_assert_internal(psize - (aligned_p - start) >= size); + uint8_t* decommit_start = start + sizeof(mi_block_t); // for the free list + ptrdiff_t decommit_size = aligned_p - decommit_start; + _mi_os_reset(decommit_start, decommit_size, &_mi_stats_main); // note: cannot use segment_decommit on huge segments + } + + return page; +} + +#if MI_HUGE_PAGE_ABANDON +// free huge block from another thread +void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) { + // huge page segments are always abandoned and can be freed immediately by any thread + mi_assert_internal(segment->kind==MI_SEGMENT_HUGE); + mi_assert_internal(segment == _mi_page_segment(page)); + mi_assert_internal(mi_atomic_load_relaxed(&segment->thread_id)==0); + + // claim it and free + mi_heap_t* heap = mi_heap_get_default(); // issue #221; don't use the internal get_default_heap as we need to ensure the thread is initialized. 
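+  // note: `thread_id` doubles as an ownership flag for huge segments: it stays 0 while
+  // the segment is abandoned, and the compare-and-swap below (from 0 to our own thread id)
+  // ensures exactly one thread claims the segment before freeing the page.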
+ // paranoia: if this it the last reference, the cas should always succeed + size_t expected_tid = 0; + if (mi_atomic_cas_strong_acq_rel(&segment->thread_id, &expected_tid, heap->thread_id)) { + mi_block_set_next(page, block, page->free); + page->free = block; + page->used--; + page->is_zero_init = false; + mi_assert(page->used == 0); + mi_tld_t* tld = heap->tld; + _mi_segment_page_free(page, true, &tld->segments); + } +#if (MI_DEBUG!=0) + else { + mi_assert_internal(false); + } +#endif +} + +#else +// reset memory of a huge block from another thread +void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block) { + MI_UNUSED(page); + mi_assert_internal(segment->kind == MI_SEGMENT_HUGE); + mi_assert_internal(segment == _mi_page_segment(page)); + mi_assert_internal(page->used == 1); // this is called just before the free + mi_assert_internal(page->free == NULL); + if (segment->allow_decommit) { + size_t csize = mi_usable_size(block); + if (csize > sizeof(mi_block_t)) { + csize = csize - sizeof(mi_block_t); + uint8_t* p = (uint8_t*)block + sizeof(mi_block_t); + _mi_os_reset(p, csize, &_mi_stats_main); // note: cannot use segment_decommit on huge segments + } + } +} +#endif + +/* ----------------------------------------------------------- + Page allocation and free +----------------------------------------------------------- */ +mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { + mi_page_t* page; + if mi_unlikely(page_alignment > MI_BLOCK_ALIGNMENT_MAX) { + mi_assert_internal(_mi_is_power_of_two(page_alignment)); + mi_assert_internal(page_alignment >= MI_SEGMENT_SIZE); + if (page_alignment < MI_SEGMENT_SIZE) { page_alignment = MI_SEGMENT_SIZE; } + page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld,os_tld); + } + else if (block_size <= MI_SMALL_OBJ_SIZE_MAX) { + page = mi_segments_page_alloc(heap,MI_PAGE_SMALL,block_size,block_size,tld,os_tld); + } + else if (block_size <= MI_MEDIUM_OBJ_SIZE_MAX) { + page = mi_segments_page_alloc(heap,MI_PAGE_MEDIUM,MI_MEDIUM_PAGE_SIZE,block_size,tld, os_tld); + } + else if (block_size <= MI_LARGE_OBJ_SIZE_MAX) { + page = mi_segments_page_alloc(heap,MI_PAGE_LARGE,block_size,block_size,tld, os_tld); + } + else { + page = mi_segment_huge_page_alloc(block_size,page_alignment,heap->arena_id,tld,os_tld); + } + mi_assert_internal(page == NULL || _mi_heap_memid_is_suitable(heap, _mi_page_segment(page)->memid)); + mi_assert_expensive(page == NULL || mi_segment_is_valid(_mi_page_segment(page),tld)); + return page; +} + + diff --git a/ww/managers/mimalloc/src/static.c b/ww/managers/mimalloc/src/static.c new file mode 100644 index 00000000..bf025eb7 --- /dev/null +++ b/ww/managers/mimalloc/src/static.c @@ -0,0 +1,41 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2020, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#ifndef _DEFAULT_SOURCE +#define _DEFAULT_SOURCE +#endif +#if defined(__sun) +// same remarks as os.c for the static's context. 
+#undef _XOPEN_SOURCE +#undef _POSIX_C_SOURCE +#endif + +#include "mimalloc.h" +#include "mimalloc/internal.h" + +// For a static override we create a single object file +// containing the whole library. If it is linked first +// it will override all the standard library allocation +// functions (on Unix's). +#include "alloc.c" // includes alloc-override.c +#include "alloc-aligned.c" +#include "alloc-posix.c" +#include "arena.c" +#include "bitmap.c" +#include "heap.c" +#include "init.c" +#include "libc.c" +#include "options.c" +#include "os.c" +#include "page.c" // includes page-queue.c +#include "random.c" +#include "segment.c" +#include "segment-map.c" +#include "stats.c" +#include "prim/prim.c" +#if MI_OSX_ZONE +#include "prim/osx/alloc-override-zone.c" +#endif diff --git a/ww/managers/mimalloc/src/stats.c b/ww/managers/mimalloc/src/stats.c new file mode 100644 index 00000000..a9364027 --- /dev/null +++ b/ww/managers/mimalloc/src/stats.c @@ -0,0 +1,467 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2021, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#include "mimalloc.h" +#include "mimalloc/internal.h" +#include "mimalloc/atomic.h" +#include "mimalloc/prim.h" + +#include // memset + +#if defined(_MSC_VER) && (_MSC_VER < 1920) +#pragma warning(disable:4204) // non-constant aggregate initializer +#endif + +/* ----------------------------------------------------------- + Statistics operations +----------------------------------------------------------- */ + +static bool mi_is_in_main(void* stat) { + return ((uint8_t*)stat >= (uint8_t*)&_mi_stats_main + && (uint8_t*)stat < ((uint8_t*)&_mi_stats_main + sizeof(mi_stats_t))); +} + +static void mi_stat_update(mi_stat_count_t* stat, int64_t amount) { + if (amount == 0) return; + if (mi_is_in_main(stat)) + { + // add atomically (for abandoned pages) + int64_t current = mi_atomic_addi64_relaxed(&stat->current, amount); + mi_atomic_maxi64_relaxed(&stat->peak, current + amount); + if (amount > 0) { + mi_atomic_addi64_relaxed(&stat->allocated,amount); + } + else { + mi_atomic_addi64_relaxed(&stat->freed, -amount); + } + } + else { + // add thread local + stat->current += amount; + if (stat->current > stat->peak) stat->peak = stat->current; + if (amount > 0) { + stat->allocated += amount; + } + else { + stat->freed += -amount; + } + } +} + +void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount) { + if (mi_is_in_main(stat)) { + mi_atomic_addi64_relaxed( &stat->count, 1 ); + mi_atomic_addi64_relaxed( &stat->total, (int64_t)amount ); + } + else { + stat->count++; + stat->total += amount; + } +} + +void _mi_stat_increase(mi_stat_count_t* stat, size_t amount) { + mi_stat_update(stat, (int64_t)amount); +} + +void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount) { + mi_stat_update(stat, -((int64_t)amount)); +} + +// must be thread safe as it is called from stats_merge +static void mi_stat_add(mi_stat_count_t* stat, const mi_stat_count_t* src, int64_t unit) { + if (stat==src) return; + if (src->allocated==0 && src->freed==0) return; + mi_atomic_addi64_relaxed( &stat->allocated, src->allocated * unit); + mi_atomic_addi64_relaxed( &stat->current, src->current * unit); + mi_atomic_addi64_relaxed( &stat->freed, src->freed * 
unit); + // peak scores do not work across threads.. + mi_atomic_addi64_relaxed( &stat->peak, src->peak * unit); +} + +static void mi_stat_counter_add(mi_stat_counter_t* stat, const mi_stat_counter_t* src, int64_t unit) { + if (stat==src) return; + mi_atomic_addi64_relaxed( &stat->total, src->total * unit); + mi_atomic_addi64_relaxed( &stat->count, src->count * unit); +} + +// must be thread safe as it is called from stats_merge +static void mi_stats_add(mi_stats_t* stats, const mi_stats_t* src) { + if (stats==src) return; + mi_stat_add(&stats->segments, &src->segments,1); + mi_stat_add(&stats->pages, &src->pages,1); + mi_stat_add(&stats->reserved, &src->reserved, 1); + mi_stat_add(&stats->committed, &src->committed, 1); + mi_stat_add(&stats->reset, &src->reset, 1); + mi_stat_add(&stats->purged, &src->purged, 1); + mi_stat_add(&stats->page_committed, &src->page_committed, 1); + + mi_stat_add(&stats->pages_abandoned, &src->pages_abandoned, 1); + mi_stat_add(&stats->segments_abandoned, &src->segments_abandoned, 1); + mi_stat_add(&stats->threads, &src->threads, 1); + + mi_stat_add(&stats->malloc, &src->malloc, 1); + mi_stat_add(&stats->segments_cache, &src->segments_cache, 1); + mi_stat_add(&stats->normal, &src->normal, 1); + mi_stat_add(&stats->huge, &src->huge, 1); + mi_stat_add(&stats->large, &src->large, 1); + + mi_stat_counter_add(&stats->pages_extended, &src->pages_extended, 1); + mi_stat_counter_add(&stats->mmap_calls, &src->mmap_calls, 1); + mi_stat_counter_add(&stats->commit_calls, &src->commit_calls, 1); + mi_stat_counter_add(&stats->reset_calls, &src->reset_calls, 1); + mi_stat_counter_add(&stats->purge_calls, &src->purge_calls, 1); + + mi_stat_counter_add(&stats->page_no_retire, &src->page_no_retire, 1); + mi_stat_counter_add(&stats->searches, &src->searches, 1); + mi_stat_counter_add(&stats->normal_count, &src->normal_count, 1); + mi_stat_counter_add(&stats->huge_count, &src->huge_count, 1); + mi_stat_counter_add(&stats->large_count, &src->large_count, 1); +#if MI_STAT>1 + for (size_t i = 0; i <= MI_BIN_HUGE; i++) { + if (src->normal_bins[i].allocated > 0 || src->normal_bins[i].freed > 0) { + mi_stat_add(&stats->normal_bins[i], &src->normal_bins[i], 1); + } + } +#endif +} + +/* ----------------------------------------------------------- + Display statistics +----------------------------------------------------------- */ + +// unit > 0 : size in binary bytes +// unit == 0: count as decimal +// unit < 0 : count in binary +static void mi_printf_amount(int64_t n, int64_t unit, mi_output_fun* out, void* arg, const char* fmt) { + char buf[32]; buf[0] = 0; + int len = 32; + const char* suffix = (unit <= 0 ? " " : "B"); + const int64_t base = (unit == 0 ? 1000 : 1024); + if (unit>0) n *= unit; + + const int64_t pos = (n < 0 ? -n : n); + if (pos < base) { + if (n!=1 || suffix[0] != 'B') { // skip printing 1 B for the unit column + _mi_snprintf(buf, len, "%lld %-3s", (long long)n, (n==0 ? "" : suffix)); + } + } + else { + int64_t divider = base; + const char* magnitude = "K"; + if (pos >= divider*base) { divider *= base; magnitude = "M"; } + if (pos >= divider*base) { divider *= base; magnitude = "G"; } + const int64_t tens = (n / (divider/10)); + const long whole = (long)(tens/10); + const long frac1 = (long)(tens%10); + char unitdesc[8]; + _mi_snprintf(unitdesc, 8, "%s%s%s", magnitude, (base==1024 ? "i" : ""), suffix); + _mi_snprintf(buf, len, "%ld.%ld %-3s", whole, (frac1 < 0 ? -frac1 : frac1), unitdesc); + } + _mi_fprintf(out, arg, (fmt==NULL ? 
"%12s" : fmt), buf); +} + + +static void mi_print_amount(int64_t n, int64_t unit, mi_output_fun* out, void* arg) { + mi_printf_amount(n,unit,out,arg,NULL); +} + +static void mi_print_count(int64_t n, int64_t unit, mi_output_fun* out, void* arg) { + if (unit==1) _mi_fprintf(out, arg, "%12s"," "); + else mi_print_amount(n,0,out,arg); +} + +static void mi_stat_print_ex(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg, const char* notok ) { + _mi_fprintf(out, arg,"%10s:", msg); + if (unit != 0) { + if (unit > 0) { + mi_print_amount(stat->peak, unit, out, arg); + mi_print_amount(stat->allocated, unit, out, arg); + mi_print_amount(stat->freed, unit, out, arg); + mi_print_amount(stat->current, unit, out, arg); + mi_print_amount(unit, 1, out, arg); + mi_print_count(stat->allocated, unit, out, arg); + } + else { + mi_print_amount(stat->peak, -1, out, arg); + mi_print_amount(stat->allocated, -1, out, arg); + mi_print_amount(stat->freed, -1, out, arg); + mi_print_amount(stat->current, -1, out, arg); + if (unit == -1) { + _mi_fprintf(out, arg, "%24s", ""); + } + else { + mi_print_amount(-unit, 1, out, arg); + mi_print_count((stat->allocated / -unit), 0, out, arg); + } + } + if (stat->allocated > stat->freed) { + _mi_fprintf(out, arg, " "); + _mi_fprintf(out, arg, (notok == NULL ? "not all freed" : notok)); + _mi_fprintf(out, arg, "\n"); + } + else { + _mi_fprintf(out, arg, " ok\n"); + } + } + else { + mi_print_amount(stat->peak, 1, out, arg); + mi_print_amount(stat->allocated, 1, out, arg); + _mi_fprintf(out, arg, "%11s", " "); // no freed + mi_print_amount(stat->current, 1, out, arg); + _mi_fprintf(out, arg, "\n"); + } +} + +static void mi_stat_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg) { + mi_stat_print_ex(stat, msg, unit, out, arg, NULL); +} + +static void mi_stat_peak_print(const mi_stat_count_t* stat, const char* msg, int64_t unit, mi_output_fun* out, void* arg) { + _mi_fprintf(out, arg, "%10s:", msg); + mi_print_amount(stat->peak, unit, out, arg); + _mi_fprintf(out, arg, "\n"); +} + +static void mi_stat_counter_print(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg ) { + _mi_fprintf(out, arg, "%10s:", msg); + mi_print_amount(stat->total, -1, out, arg); + _mi_fprintf(out, arg, "\n"); +} + + +static void mi_stat_counter_print_avg(const mi_stat_counter_t* stat, const char* msg, mi_output_fun* out, void* arg) { + const int64_t avg_tens = (stat->count == 0 ? 
0 : (stat->total*10 / stat->count)); + const long avg_whole = (long)(avg_tens/10); + const long avg_frac1 = (long)(avg_tens%10); + _mi_fprintf(out, arg, "%10s: %5ld.%ld avg\n", msg, avg_whole, avg_frac1); +} + + +static void mi_print_header(mi_output_fun* out, void* arg ) { + _mi_fprintf(out, arg, "%10s: %11s %11s %11s %11s %11s %11s\n", "heap stats", "peak ", "total ", "freed ", "current ", "unit ", "count "); +} + +#if MI_STAT>1 +static void mi_stats_print_bins(const mi_stat_count_t* bins, size_t max, const char* fmt, mi_output_fun* out, void* arg) { + bool found = false; + char buf[64]; + for (size_t i = 0; i <= max; i++) { + if (bins[i].allocated > 0) { + found = true; + int64_t unit = _mi_bin_size((uint8_t)i); + _mi_snprintf(buf, 64, "%s %3lu", fmt, (long)i); + mi_stat_print(&bins[i], buf, unit, out, arg); + } + } + if (found) { + _mi_fprintf(out, arg, "\n"); + mi_print_header(out, arg); + } +} +#endif + + + +//------------------------------------------------------------ +// Use an output wrapper for line-buffered output +// (which is nice when using loggers etc.) +//------------------------------------------------------------ +typedef struct buffered_s { + mi_output_fun* out; // original output function + void* arg; // and state + char* buf; // local buffer of at least size `count+1` + size_t used; // currently used chars `used <= count` + size_t count; // total chars available for output +} buffered_t; + +static void mi_buffered_flush(buffered_t* buf) { + buf->buf[buf->used] = 0; + _mi_fputs(buf->out, buf->arg, NULL, buf->buf); + buf->used = 0; +} + +static void mi_cdecl mi_buffered_out(const char* msg, void* arg) { + buffered_t* buf = (buffered_t*)arg; + if (msg==NULL || buf==NULL) return; + for (const char* src = msg; *src != 0; src++) { + char c = *src; + if (buf->used >= buf->count) mi_buffered_flush(buf); + mi_assert_internal(buf->used < buf->count); + buf->buf[buf->used++] = c; + if (c == '\n') mi_buffered_flush(buf); + } +} + +//------------------------------------------------------------ +// Print statistics +//------------------------------------------------------------ + +static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0) mi_attr_noexcept { + // wrap the output function to be line buffered + char buf[256]; + buffered_t buffer = { out0, arg0, NULL, 0, 255 }; + buffer.buf = buf; + mi_output_fun* out = &mi_buffered_out; + void* arg = &buffer; + + // and print using that + mi_print_header(out,arg); + #if MI_STAT>1 + mi_stats_print_bins(stats->normal_bins, MI_BIN_HUGE, "normal",out,arg); + #endif + #if MI_STAT + mi_stat_print(&stats->normal, "normal", (stats->normal_count.count == 0 ? 1 : -(stats->normal.allocated / stats->normal_count.count)), out, arg); + mi_stat_print(&stats->large, "large", (stats->large_count.count == 0 ? 1 : -(stats->large.allocated / stats->large_count.count)), out, arg); + mi_stat_print(&stats->huge, "huge", (stats->huge_count.count == 0 ? 
1 : -(stats->huge.allocated / stats->huge_count.count)), out, arg);
+  mi_stat_count_t total = { 0,0,0,0 };
+  mi_stat_add(&total, &stats->normal, 1);
+  mi_stat_add(&total, &stats->large, 1);
+  mi_stat_add(&total, &stats->huge, 1);
+  mi_stat_print(&total, "total", 1, out, arg);
+  #endif
+  #if MI_STAT>1
+  mi_stat_print(&stats->malloc, "malloc req", 1, out, arg);
+  _mi_fprintf(out, arg, "\n");
+  #endif
+  mi_stat_print_ex(&stats->reserved, "reserved", 1, out, arg, "");
+  mi_stat_print_ex(&stats->committed, "committed", 1, out, arg, "");
+  mi_stat_peak_print(&stats->reset, "reset", 1, out, arg );
+  mi_stat_peak_print(&stats->purged, "purged", 1, out, arg );
+  mi_stat_print(&stats->page_committed, "touched", 1, out, arg);
+  mi_stat_print(&stats->segments, "segments", -1, out, arg);
+  mi_stat_print(&stats->segments_abandoned, "-abandoned", -1, out, arg);
+  mi_stat_print(&stats->segments_cache, "-cached", -1, out, arg);
+  mi_stat_print(&stats->pages, "pages", -1, out, arg);
+  mi_stat_print(&stats->pages_abandoned, "-abandoned", -1, out, arg);
+  mi_stat_counter_print(&stats->pages_extended, "-extended", out, arg);
+  mi_stat_counter_print(&stats->page_no_retire, "-noretire", out, arg);
+  mi_stat_counter_print(&stats->arena_count, "arenas", out, arg);
+  mi_stat_counter_print(&stats->arena_crossover_count, "-crossover", out, arg);
+  mi_stat_counter_print(&stats->arena_rollback_count, "-rollback", out, arg);
+  mi_stat_counter_print(&stats->mmap_calls, "mmaps", out, arg);
+  mi_stat_counter_print(&stats->commit_calls, "commits", out, arg);
+  mi_stat_counter_print(&stats->reset_calls, "resets", out, arg);
+  mi_stat_counter_print(&stats->purge_calls, "purges", out, arg);
+  mi_stat_print(&stats->threads, "threads", -1, out, arg);
+  mi_stat_counter_print_avg(&stats->searches, "searches", out, arg);
+  _mi_fprintf(out, arg, "%10s: %5zu\n", "numa nodes", _mi_os_numa_node_count());
+
+  size_t elapsed;
+  size_t user_time;
+  size_t sys_time;
+  size_t current_rss;
+  size_t peak_rss;
+  size_t current_commit;
+  size_t peak_commit;
+  size_t page_faults;
+  mi_process_info(&elapsed, &user_time, &sys_time, &current_rss, &peak_rss, &current_commit, &peak_commit, &page_faults);
+  _mi_fprintf(out, arg, "%10s: %5ld.%03ld s\n", "elapsed", elapsed/1000, elapsed%1000);
+  _mi_fprintf(out, arg, "%10s: user: %ld.%03ld s, system: %ld.%03ld s, faults: %lu, rss: ", "process",
+              user_time/1000, user_time%1000, sys_time/1000, sys_time%1000, (unsigned long)page_faults );
+  mi_printf_amount((int64_t)peak_rss, 1, out, arg, "%s");
+  if (peak_commit > 0) {
+    _mi_fprintf(out, arg, ", commit: ");
+    mi_printf_amount((int64_t)peak_commit, 1, out, arg, "%s");
+  }
+  _mi_fprintf(out, arg, "\n");
+}
+
+static mi_msecs_t mi_process_start; // = 0
+
+static mi_stats_t* mi_stats_get_default(void) {
+  mi_heap_t* heap = mi_heap_get_default();
+  return &heap->tld->stats;
+}
+
+static void mi_stats_merge_from(mi_stats_t* stats) {
+  if (stats != &_mi_stats_main) {
+    mi_stats_add(&_mi_stats_main, stats);
+    memset(stats, 0, sizeof(mi_stats_t));
+  }
+}
+
+void mi_stats_reset(void) mi_attr_noexcept {
+  mi_stats_t* stats = mi_stats_get_default();
+  if (stats != &_mi_stats_main) { memset(stats, 0, sizeof(mi_stats_t)); }
+  memset(&_mi_stats_main, 0, sizeof(mi_stats_t));
+  if (mi_process_start == 0) { mi_process_start = _mi_clock_start(); };
+}
+
+void mi_stats_merge(void) mi_attr_noexcept {
+  mi_stats_merge_from( mi_stats_get_default() );
+}
+
+void _mi_stats_done(mi_stats_t* stats) {  // called from `mi_thread_done`
+  mi_stats_merge_from(stats);
+}
+
+void
mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept { + mi_stats_merge_from(mi_stats_get_default()); + _mi_stats_print(&_mi_stats_main, out, arg); +} + +void mi_stats_print(void* out) mi_attr_noexcept { + // for compatibility there is an `out` parameter (which can be `stdout` or `stderr`) + mi_stats_print_out((mi_output_fun*)out, NULL); +} + +void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept { + _mi_stats_print(mi_stats_get_default(), out, arg); +} + + +// ---------------------------------------------------------------- +// Basic timer for convenience; use milli-seconds to avoid doubles +// ---------------------------------------------------------------- + +static mi_msecs_t mi_clock_diff; + +mi_msecs_t _mi_clock_now(void) { + return _mi_prim_clock_now(); +} + +mi_msecs_t _mi_clock_start(void) { + if (mi_clock_diff == 0.0) { + mi_msecs_t t0 = _mi_clock_now(); + mi_clock_diff = _mi_clock_now() - t0; + } + return _mi_clock_now(); +} + +mi_msecs_t _mi_clock_end(mi_msecs_t start) { + mi_msecs_t end = _mi_clock_now(); + return (end - start - mi_clock_diff); +} + + +// -------------------------------------------------------- +// Basic process statistics +// -------------------------------------------------------- + +mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept +{ + mi_process_info_t pinfo; + _mi_memzero_var(pinfo); + pinfo.elapsed = _mi_clock_end(mi_process_start); + pinfo.current_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.current)); + pinfo.peak_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.peak)); + pinfo.current_rss = pinfo.current_commit; + pinfo.peak_rss = pinfo.peak_commit; + pinfo.utime = 0; + pinfo.stime = 0; + pinfo.page_faults = 0; + + _mi_prim_process_info(&pinfo); + + if (elapsed_msecs!=NULL) *elapsed_msecs = (pinfo.elapsed < 0 ? 0 : (pinfo.elapsed < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.elapsed : PTRDIFF_MAX)); + if (user_msecs!=NULL) *user_msecs = (pinfo.utime < 0 ? 0 : (pinfo.utime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.utime : PTRDIFF_MAX)); + if (system_msecs!=NULL) *system_msecs = (pinfo.stime < 0 ? 0 : (pinfo.stime < (mi_msecs_t)PTRDIFF_MAX ? 
(size_t)pinfo.stime : PTRDIFF_MAX)); + if (current_rss!=NULL) *current_rss = pinfo.current_rss; + if (peak_rss!=NULL) *peak_rss = pinfo.peak_rss; + if (current_commit!=NULL) *current_commit = pinfo.current_commit; + if (peak_commit!=NULL) *peak_commit = pinfo.peak_commit; + if (page_faults!=NULL) *page_faults = pinfo.page_faults; +} diff --git a/ww/managers/mimalloc/test/CMakeLists.txt b/ww/managers/mimalloc/test/CMakeLists.txt new file mode 100644 index 00000000..e76ffa64 --- /dev/null +++ b/ww/managers/mimalloc/test/CMakeLists.txt @@ -0,0 +1,54 @@ +cmake_minimum_required(VERSION 3.0) +project(mimalloc-test C CXX) + +set(CMAKE_C_STANDARD 11) +set(CMAKE_CXX_STANDARD 17) + +# Set default build type +if (NOT CMAKE_BUILD_TYPE) + if ("${CMAKE_BINARY_DIR}" MATCHES ".*(D|d)ebug$") + message(STATUS "No build type selected, default to *** Debug ***") + set(CMAKE_BUILD_TYPE "Debug") + else() + message(STATUS "No build type selected, default to *** Release ***") + set(CMAKE_BUILD_TYPE "Release") + endif() +endif() + +# Import mimalloc (if installed) +find_package(mimalloc 2.0 REQUIRED NO_SYSTEM_ENVIRONMENT_PATH) +message(STATUS "Found mimalloc installed at: ${MIMALLOC_LIBRARY_DIR} (${MIMALLOC_VERSION_DIR})") + +# overriding with a dynamic library +add_executable(dynamic-override main-override.c) +target_link_libraries(dynamic-override PUBLIC mimalloc) + +add_executable(dynamic-override-cxx main-override.cpp) +target_link_libraries(dynamic-override-cxx PUBLIC mimalloc) + + +# overriding with a static object file works reliable as the symbols in the +# object file have priority over those in library files +add_executable(static-override-obj main-override.c ${MIMALLOC_OBJECT_DIR}/mimalloc.o) +target_include_directories(static-override-obj PUBLIC ${MIMALLOC_INCLUDE_DIR}) +target_link_libraries(static-override-obj PUBLIC pthread) + + +# overriding with a static library works too if using the `mimalloc-override.h` +# header to redefine malloc/free. (the library already overrides new/delete) +add_executable(static-override-static main-override-static.c) +target_link_libraries(static-override-static PUBLIC mimalloc-static) + + +# overriding with a static library: this may not work if the library is linked too late +# on the command line after the C runtime library; but we cannot control that well in CMake +add_executable(static-override main-override.c) +target_link_libraries(static-override PUBLIC mimalloc-static) + +add_executable(static-override-cxx main-override.cpp) +target_link_libraries(static-override-cxx PUBLIC mimalloc-static) + + +## test memory errors +add_executable(test-wrong test-wrong.c) +target_link_libraries(test-wrong PUBLIC mimalloc) diff --git a/ww/managers/mimalloc/test/main-override-static.c b/ww/managers/mimalloc/test/main-override-static.c new file mode 100644 index 00000000..e71be29e --- /dev/null +++ b/ww/managers/mimalloc/test/main-override-static.c @@ -0,0 +1,415 @@ +#include +#include +#include +#include +#include + +#include +#include // redefines malloc etc. 
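+
+// Note (illustrative, not part of the upstream test): with the `mimalloc-override.h`
+// header included above, the plain allocation calls in this file are redirected to
+// their mi_ counterparts, e.g. (assuming the macro-based override):
+//   void* q = malloc(32);   // becomes mi_malloc(32)
+//   q = realloc(q, 64);     // becomes mi_realloc(q, 64)
+//   free(q);                // becomes mi_free(q)
+// so the test exercises mimalloc through the standard C API.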
+ + +static void double_free1(); +static void double_free2(); +static void corrupt_free(); +static void block_overflow1(); +static void invalid_free(); +static void test_aslr(void); +static void test_process_info(void); +static void test_reserved(void); +static void negative_stat(void); +static void alloc_huge(void); +static void test_heap_walk(void); +static void test_heap_arena(void); +static void test_align(void); + +int main() { + mi_version(); + mi_stats_reset(); + // detect double frees and heap corruption + // double_free1(); + // double_free2(); + // corrupt_free(); + // block_overflow1(); + // test_aslr(); + // invalid_free(); + // test_reserved(); + // negative_stat(); + // test_heap_walk(); + // alloc_huge(); + // test_heap_walk(); + // test_heap_arena(); + // test_align(); + + void* p1 = malloc(78); + void* p2 = malloc(24); + free(p1); + p1 = mi_malloc(8); + char* s = strdup("hello\n"); + free(p2); + + mi_heap_t* h = mi_heap_new(); + mi_heap_set_default(h); + + p2 = malloc(16); + p1 = realloc(p1, 32); + free(p1); + free(p2); + free(s); + + /* now test if override worked by allocating/freeing across the api's*/ + //p1 = mi_malloc(32); + //free(p1); + //p2 = malloc(32); + //mi_free(p2); + + //mi_collect(true); + //mi_stats_print(NULL); + + // test_process_info(); + + return 0; +} + +static void test_align() { + void* p = mi_malloc_aligned(256, 256); + if (((uintptr_t)p % 256) != 0) { + fprintf(stderr, "%p is not 256 alignend!\n", p); + } +} + +static void invalid_free() { + free((void*)0xBADBEEF); + realloc((void*)0xBADBEEF,10); +} + +static void block_overflow1() { + uint8_t* p = (uint8_t*)mi_malloc(17); + p[18] = 0; + free(p); +} + +// The double free samples come ArcHeap [1] by Insu Yun (issue #161) +// [1]: https://arxiv.org/pdf/1903.00503.pdf + +static void double_free1() { + void* p[256]; + //uintptr_t buf[256]; + + p[0] = mi_malloc(622616); + p[1] = mi_malloc(655362); + p[2] = mi_malloc(786432); + mi_free(p[2]); + // [VULN] Double free + mi_free(p[2]); + p[3] = mi_malloc(786456); + // [BUG] Found overlap + // p[3]=0x429b2ea2000 (size=917504), p[1]=0x429b2e42000 (size=786432) + fprintf(stderr, "p3: %p-%p, p1: %p-%p, p2: %p\n", p[3], (uint8_t*)(p[3]) + 786456, p[1], (uint8_t*)(p[1]) + 655362, p[2]); +} + +static void double_free2() { + void* p[256]; + //uintptr_t buf[256]; + // [INFO] Command buffer: 0x327b2000 + // [INFO] Input size: 182 + p[0] = malloc(712352); + p[1] = malloc(786432); + free(p[0]); + // [VULN] Double free + free(p[0]); + p[2] = malloc(786440); + p[3] = malloc(917504); + p[4] = malloc(786440); + // [BUG] Found overlap + // p[4]=0x433f1402000 (size=917504), p[1]=0x433f14c2000 (size=786432) + fprintf(stderr, "p1: %p-%p, p2: %p-%p\n", p[4], (uint8_t*)(p[4]) + 917504, p[1], (uint8_t*)(p[1]) + 786432); +} + + +// Try to corrupt the heap through buffer overflow +#define N 256 +#define SZ 64 + +static void corrupt_free() { + void* p[N]; + // allocate + for (int i = 0; i < N; i++) { + p[i] = malloc(SZ); + } + // free some + for (int i = 0; i < N; i += (N/10)) { + free(p[i]); + p[i] = NULL; + } + // try to corrupt the free list + for (int i = 0; i < N; i++) { + if (p[i] != NULL) { + memset(p[i], 0, SZ+8); + } + } + // allocate more.. 
trying to trigger an allocation from a corrupted entry
+  // this may need many allocations to get there (if at all)
+  for (int i = 0; i < 4096; i++) {
+    malloc(SZ);
+  }
+}
+
+static void test_aslr(void) {
+  void* p[256];
+  p[0] = malloc(378200);
+  p[1] = malloc(1134626);
+  printf("p1: %p, p2: %p\n", p[0], p[1]);
+}
+
+static void test_process_info(void) {
+  size_t elapsed = 0;
+  size_t user_msecs = 0;
+  size_t system_msecs = 0;
+  size_t current_rss = 0;
+  size_t peak_rss = 0;
+  size_t current_commit = 0;
+  size_t peak_commit = 0;
+  size_t page_faults = 0;
+  for (int i = 0; i < 100000; i++) {
+    void* p = calloc(100,10);
+    free(p);
+  }
+  mi_process_info(&elapsed, &user_msecs, &system_msecs, &current_rss, &peak_rss, &current_commit, &peak_commit, &page_faults);
+  printf("\n\n*** process info: elapsed %3zd.%03zd s, user: %3zd.%03zd s, rss: %zd b, commit: %zd b\n\n", elapsed/1000, elapsed%1000, user_msecs/1000, user_msecs%1000, peak_rss, peak_commit);
+}
+
+static void test_reserved(void) {
+#define KiB 1024ULL
+#define MiB (KiB*KiB)
+#define GiB (MiB*KiB)
+  mi_reserve_os_memory(4*GiB, false, true);
+  void* p1 = malloc(100);
+  void* p2 = malloc(100000);
+  void* p3 = malloc(2*GiB);
+  void* p4 = malloc(1*GiB + 100000);
+  free(p1);
+  free(p2);
+  free(p3);
+  p3 = malloc(1*GiB);
+  free(p4);
+}
+
+
+
+static void negative_stat(void) {
+  int* p = mi_malloc(60000);
+  mi_stats_print_out(NULL, NULL);
+  *p = 100;
+  mi_free(p);
+  mi_stats_print_out(NULL, NULL);
+}
+
+static void alloc_huge(void) {
+  void* p = mi_malloc(67108872);
+  mi_free(p);
+}
+
+static bool test_visit(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
+  if (block == NULL) {
+    printf("visiting an area with blocks of size %zu (including padding)\n", area->full_block_size);
+  }
+  else {
+    printf("  block of size %zu (allocated size is %zu)\n", block_size, mi_usable_size(block));
+  }
+  return true;
+}
+
+static void test_heap_walk(void) {
+  mi_heap_t* heap = mi_heap_new();
+  mi_heap_malloc(heap, 16*2097152);
+  mi_heap_malloc(heap, 2067152);
+  mi_heap_malloc(heap, 2097160);
+  mi_heap_malloc(heap, 24576);
+  mi_heap_visit_blocks(heap, true, &test_visit, NULL);
+}
+
+static void test_heap_arena(void) {
+  mi_arena_id_t arena_id;
+  int err = mi_reserve_os_memory_ex(100 * 1024 * 1024, false /* commit */, false /* allow large */, true /* exclusive */, &arena_id);
+  if (err) abort();
+  mi_heap_t* heap = mi_heap_new_in_arena(arena_id);
+  for (int i = 0; i < 500000; i++) {
+    void* p = mi_heap_malloc(heap, 1024);
+    if (p == NULL) {
+      printf("out of memory after %d kb (expecting about 100_000kb)\n", i);
+      break;
+    }
+  }
+}
+
+// ----------------------------
+// bin size experiments
+// ------------------------------
+
+#if 0
+#include
+#include
+
+#define MI_INTPTR_SIZE 8
+#define MI_LARGE_WSIZE_MAX (4*1024*1024 / MI_INTPTR_SIZE)
+
+#define MI_BIN_HUGE 100
+//#define MI_ALIGN2W
+
+// Bit scan reverse: return the index of the highest bit.
+static inline uint8_t mi_bsr32(uint32_t x); + +#if defined(_MSC_VER) +#include +#include +static inline uint8_t mi_bsr32(uint32_t x) { + uint32_t idx; + _BitScanReverse((DWORD*)&idx, x); + return idx; +} +#elif defined(__GNUC__) || defined(__clang__) +static inline uint8_t mi_bsr32(uint32_t x) { + return (31 - __builtin_clz(x)); +} +#else +static inline uint8_t mi_bsr32(uint32_t x) { + // de Bruijn multiplication, see + static const uint8_t debruijn[32] = { + 31, 0, 22, 1, 28, 23, 18, 2, 29, 26, 24, 10, 19, 7, 3, 12, + 30, 21, 27, 17, 25, 9, 6, 11, 20, 16, 8, 5, 15, 4, 14, 13, + }; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x++; + return debruijn[(x*0x076be629) >> 27]; +} +#endif + +/* +// Bit scan reverse: return the index of the highest bit. +uint8_t _mi_bsr(uintptr_t x) { + if (x == 0) return 0; + #if MI_INTPTR_SIZE==8 + uint32_t hi = (x >> 32); + return (hi == 0 ? mi_bsr32((uint32_t)x) : 32 + mi_bsr32(hi)); + #elif MI_INTPTR_SIZE==4 + return mi_bsr32(x); + #else + # error "define bsr for non-32 or 64-bit platforms" + #endif +} +*/ + + +static inline size_t _mi_wsize_from_size(size_t size) { + return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t); +} + +// Return the bin for a given field size. +// Returns MI_BIN_HUGE if the size is too large. +// We use `wsize` for the size in "machine word sizes", +// i.e. byte size == `wsize*sizeof(void*)`. +extern inline uint8_t _mi_bin8(size_t size) { + size_t wsize = _mi_wsize_from_size(size); + uint8_t bin; + if (wsize <= 1) { + bin = 1; + } +#if defined(MI_ALIGN4W) + else if (wsize <= 4) { + bin = (uint8_t)((wsize+1)&~1); // round to double word sizes + } +#elif defined(MI_ALIGN2W) + else if (wsize <= 8) { + bin = (uint8_t)((wsize+1)&~1); // round to double word sizes + } +#else + else if (wsize <= 8) { + bin = (uint8_t)wsize; + } +#endif + else if (wsize > MI_LARGE_WSIZE_MAX) { + bin = MI_BIN_HUGE; + } + else { +#if defined(MI_ALIGN4W) + if (wsize <= 16) { wsize = (wsize+3)&~3; } // round to 4x word sizes +#endif + wsize--; + // find the highest bit + uint8_t b = mi_bsr32((uint32_t)wsize); + // and use the top 3 bits to determine the bin (~12.5% worst internal fragmentation). 
+ // - adjust with 3 because we use do not round the first 8 sizes + // which each get an exact bin + bin = ((b << 2) + (uint8_t)((wsize >> (b - 2)) & 0x03)) - 3; + } + return bin; +} + +static inline uint8_t _mi_bin4(size_t size) { + size_t wsize = _mi_wsize_from_size(size); + uint8_t bin; + if (wsize <= 1) { + bin = 1; + } +#if defined(MI_ALIGN4W) + else if (wsize <= 4) { + bin = (uint8_t)((wsize+1)&~1); // round to double word sizes + } +#elif defined(MI_ALIGN2W) + else if (wsize <= 8) { + bin = (uint8_t)((wsize+1)&~1); // round to double word sizes + } +#else + else if (wsize <= 8) { + bin = (uint8_t)wsize; + } +#endif + else if (wsize > MI_LARGE_WSIZE_MAX) { + bin = MI_BIN_HUGE; + } + else { + uint8_t b = mi_bsr32((uint32_t)wsize); + bin = ((b << 1) + (uint8_t)((wsize >> (b - 1)) & 0x01)) + 3; + } + return bin; +} + +static size_t _mi_binx4(size_t bsize) { + if (bsize==0) return 0; + uint8_t b = mi_bsr32((uint32_t)bsize); + if (b <= 1) return bsize; + size_t bin = ((b << 1) | (bsize >> (b - 1))&0x01); + return bin; +} + +static size_t _mi_binx8(size_t bsize) { + if (bsize<=1) return bsize; + uint8_t b = mi_bsr32((uint32_t)bsize); + if (b <= 2) return bsize; + size_t bin = ((b << 2) | (bsize >> (b - 2))&0x03) - 5; + return bin; +} + +static void mi_bins(void) { + //printf(" QNULL(1), /* 0 */ \\\n "); + size_t last_bin = 0; + size_t min_bsize = 0; + size_t last_bsize = 0; + for (size_t bsize = 1; bsize < 2*1024; bsize++) { + size_t size = bsize * 64 * 1024; + size_t bin = _mi_binx8(bsize); + if (bin != last_bin) { + printf("min bsize: %6zd, max bsize: %6zd, bin: %6zd\n", min_bsize, last_bsize, last_bin); + //printf("QNULL(%6zd), ", wsize); + //if (last_bin%8 == 0) printf("/* %i */ \\\n ", last_bin); + last_bin = bin; + min_bsize = bsize; + } + last_bsize = bsize; + } +} +#endif diff --git a/ww/managers/mimalloc/test/main-override.c b/ww/managers/mimalloc/test/main-override.c new file mode 100644 index 00000000..284fdd20 --- /dev/null +++ b/ww/managers/mimalloc/test/main-override.c @@ -0,0 +1,36 @@ +#include +#include +#include +#include + +#include + +int main() { + mi_version(); // ensure mimalloc library is linked + void* p1 = malloc(78); + void* p2 = malloc(24); + free(p1); + p1 = malloc(8); + //char* s = strdup("hello\n"); + free(p2); + p2 = malloc(16); + p1 = realloc(p1, 32); + free(p1); + free(p2); + //free(s); + //mi_collect(true); + + /* now test if override worked by allocating/freeing across the api's*/ + //p1 = mi_malloc(32); + //free(p1); + //p2 = malloc(32); + //mi_free(p2); + p1 = malloc(24); + p2 = reallocarray(p1, 16, 16); + free(p2); + p1 = malloc(24); + assert(reallocarr(&p1, 16, 16) == 0); + free(p1); + mi_stats_print(NULL); + return 0; +} diff --git a/ww/managers/mimalloc/test/main-override.cpp b/ww/managers/mimalloc/test/main-override.cpp new file mode 100644 index 00000000..582f24ee --- /dev/null +++ b/ww/managers/mimalloc/test/main-override.cpp @@ -0,0 +1,400 @@ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef _WIN32 +#include +#endif + +#ifdef _WIN32 +#include +static void msleep(unsigned long msecs) { Sleep(msecs); } +#else +#include +static void msleep(unsigned long msecs) { usleep(msecs * 1000UL); } +#endif + +static void heap_thread_free_large(); // issue #221 +static void heap_no_delete(); // issue #202 +static void heap_late_free(); // issue #204 +static void padding_shrink(); // issue #209 +static void various_tests(); +static void test_mt_shutdown(); +static 
void large_alloc(void); // issue #363 +static void fail_aslr(); // issue #372 +static void tsan_numa_test(); // issue #414 +static void strdup_test(); // issue #445 +static void bench_alloc_large(void); // issue #xxx +//static void test_large_migrate(void); // issue #691 +static void heap_thread_free_huge(); +static void test_std_string(); // issue #697 + +static void test_stl_allocators(); + + +int main() { + // mi_stats_reset(); // ignore earlier allocations + + // test_std_string(); + // heap_thread_free_huge(); + /* + heap_thread_free_huge(); + heap_thread_free_large(); + heap_no_delete(); + heap_late_free(); + padding_shrink(); + various_tests(); + large_alloc(); + tsan_numa_test(); + strdup_test(); + */ + // test_stl_allocators(); + // test_mt_shutdown(); + // test_large_migrate(); + + //fail_aslr(); + // bench_alloc_large(); + // mi_stats_print(NULL); + return 0; +} + +static void* p = malloc(8); + +void free_p() { + free(p); + return; +} + +class Test { +private: + int i; +public: + Test(int x) { i = x; } + ~Test() { } +}; + + +static void various_tests() { + atexit(free_p); + void* p1 = malloc(78); + void* p2 = mi_malloc_aligned(24, 16); + free(p1); + p1 = malloc(8); + char* s = mi_strdup("hello\n"); + + mi_free(p2); + p2 = malloc(16); + p1 = realloc(p1, 32); + free(p1); + free(p2); + mi_free(s); + + Test* t = new Test(42); + delete t; + t = new (std::nothrow) Test(42); + delete t; + auto tbuf = new unsigned char[sizeof(Test)]; + t = new (tbuf) Test(42); + t->~Test(); + delete[] tbuf; +} + +class Static { +private: + void* p; +public: + Static() { + p = malloc(64); + return; + } + ~Static() { + free(p); + return; + } +}; + +static Static s = Static(); + + +static bool test_stl_allocator1() { + std::vector > vec; + vec.push_back(1); + vec.pop_back(); + return vec.size() == 0; +} + +struct some_struct { int i; int j; double z; }; + +static bool test_stl_allocator2() { + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +} + +#if MI_HAS_HEAP_STL_ALLOCATOR +static bool test_stl_allocator3() { + std::vector > vec; + vec.push_back(1); + vec.pop_back(); + return vec.size() == 0; +} + +static bool test_stl_allocator4() { + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +} + +static bool test_stl_allocator5() { + std::vector > vec; + vec.push_back(1); + vec.pop_back(); + return vec.size() == 0; +} + +static bool test_stl_allocator6() { + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +} +#endif + +static void test_stl_allocators() { + test_stl_allocator1(); + test_stl_allocator2(); +#if MI_HAS_HEAP_STL_ALLOCATOR + test_stl_allocator3(); + test_stl_allocator4(); + test_stl_allocator5(); + test_stl_allocator6(); +#endif +} + +#if 0 +// issue #691 +static char* cptr; + +static void* thread1_allocate() +{ + cptr = mi_calloc_tp(char,22085632); + return NULL; +} + +static void* thread2_free() +{ + assert(cptr); + mi_free(cptr); + cptr = NULL; + return NULL; +} + +static void test_large_migrate(void) { + auto t1 = std::thread(thread1_allocate); + t1.join(); + auto t2 = std::thread(thread2_free); + t2.join(); + /* + pthread_t thread1, thread2; + + pthread_create(&thread1, NULL, &thread1_allocate, NULL); + pthread_join(thread1, NULL); + + pthread_create(&thread2, NULL, &thread2_free, NULL); + pthread_join(thread2, NULL); + */ + return; +} +#endif + +// issue 445 +static void strdup_test() { +#ifdef _MSC_VER + char* s = _strdup("hello\n"); + char* buf = NULL; + 
size_t len; + _dupenv_s(&buf, &len, "MIMALLOC_VERBOSE"); + mi_free(buf); + mi_free(s); +#endif +} + +// Issue #202 +static void heap_no_delete_worker() { + mi_heap_t* heap = mi_heap_new(); + void* q = mi_heap_malloc(heap, 1024); (void)(q); + // mi_heap_delete(heap); // uncomment to prevent assertion +} + +static void heap_no_delete() { + auto t1 = std::thread(heap_no_delete_worker); + t1.join(); +} + + +// Issue #697 +static void test_std_string() { + std::string path = "/Users/xxxx/Library/Developer/Xcode/DerivedData/xxxxxxxxxx/Build/Intermediates.noindex/xxxxxxxxxxx/arm64/XX_lto.o/0.arm64.lto.o"; + std::string path1 = "/Users/xxxx/Library/Developer/Xcode/DerivedData/xxxxxxxxxx/Build/Intermediates.noindex/xxxxxxxxxxx/arm64/XX_lto.o/1.arm64.lto.o"; + std::cout << path + "\n>>> " + path1 + "\n>>> " << std::endl; +} + +// Issue #204 +static volatile void* global_p; + +static void t1main() { + mi_heap_t* heap = mi_heap_new(); + global_p = mi_heap_malloc(heap, 1024); + mi_heap_delete(heap); +} + +static void heap_late_free() { + auto t1 = std::thread(t1main); + + msleep(2000); + assert(global_p); + mi_free((void*)global_p); + + t1.join(); +} + +// issue #209 +static void* shared_p; +static void alloc0(/* void* arg */) +{ + shared_p = mi_malloc(8); +} + +static void padding_shrink(void) +{ + auto t1 = std::thread(alloc0); + t1.join(); + mi_free(shared_p); +} + + +// Issue #221 +static void heap_thread_free_large_worker() { + mi_free(shared_p); +} + +static void heap_thread_free_large() { + for (int i = 0; i < 100; i++) { + shared_p = mi_malloc_aligned(2 * 1024 * 1024 + 1, 8); + auto t1 = std::thread(heap_thread_free_large_worker); + t1.join(); + } +} + +static void heap_thread_free_huge_worker() { + mi_free(shared_p); +} + +static void heap_thread_free_huge() { + for (int i = 0; i < 100; i++) { + shared_p = mi_malloc(1024 * 1024 * 1024); + auto t1 = std::thread(heap_thread_free_huge_worker); + t1.join(); + } +} + +static void test_mt_shutdown() +{ + const int threads = 5; + std::vector< std::future< std::vector< char* > > > ts; + + auto fn = [&]() + { + std::vector< char* > ps; + ps.reserve(1000); + for (int i = 0; i < 1000; i++) + ps.emplace_back(new char[1]); + return ps; + }; + + for (int i = 0; i < threads; i++) + ts.emplace_back(std::async(std::launch::async, fn)); + + for (auto& f : ts) + for (auto& p : f.get()) + delete[] p; + + std::cout << "done" << std::endl; +} + +// issue #363 +using namespace std; + +void large_alloc(void) +{ + char* a = new char[1ull << 25]; + thread th([&] { + delete[] a; + }); + th.join(); +} + +// issue #372 +static void fail_aslr() { + size_t sz = (4ULL << 40); // 4TiB + void* p = malloc(sz); + printf("pointer p: %p: area up to %p\n", p, (uint8_t*)p + sz); + *(int*)0x5FFFFFFF000 = 0; // should segfault +} + +// issues #414 +static void dummy_worker() { + void* p = mi_malloc(0); + mi_free(p); +} + +static void tsan_numa_test() { + auto t1 = std::thread(dummy_worker); + dummy_worker(); + t1.join(); +} + +// issue #? 
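+// (bench_alloc_large below times repeated re-allocation of randomly sized 5-25 MiB buffers; see the timing printout at the end)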
+#include <chrono>
+#include <memory>
+#include <random>
+
+static void bench_alloc_large(void) {
+  static constexpr int kNumBuffers = 20;
+  static constexpr size_t kMinBufferSize = 5 * 1024 * 1024;
+  static constexpr size_t kMaxBufferSize = 25 * 1024 * 1024;
+  std::unique_ptr<char[]> buffers[kNumBuffers];
+
+  std::random_device rd; (void)rd;
+  std::mt19937 gen(42); //rd());
+  std::uniform_int_distribution<> size_distribution(kMinBufferSize, kMaxBufferSize);
+  std::uniform_int_distribution<> buf_number_distribution(0, kNumBuffers - 1);
+
+  static constexpr int kNumIterations = 2000;
+  const auto start = std::chrono::steady_clock::now();
+  for (int i = 0; i < kNumIterations; ++i) {
+    int buffer_idx = buf_number_distribution(gen);
+    size_t new_size = size_distribution(gen);
+    buffers[buffer_idx] = std::make_unique<char[]>(new_size);
+  }
+  const auto end = std::chrono::steady_clock::now();
+  const auto num_ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
+  const auto us_per_allocation = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / kNumIterations;
+  std::cout << kNumIterations << " allocations Done in " << num_ms << "ms." << std::endl;
+  std::cout << "Avg " << us_per_allocation << " us per allocation" << std::endl;
+}
+
diff --git a/ww/managers/mimalloc/test/main.c b/ww/managers/mimalloc/test/main.c
new file mode 100644
index 00000000..b148f712
--- /dev/null
+++ b/ww/managers/mimalloc/test/main.c
@@ -0,0 +1,46 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <mimalloc.h>
+
+void test_heap(void* p_out) {
+  mi_heap_t* heap = mi_heap_new();
+  void* p1 = mi_heap_malloc(heap,32);
+  void* p2 = mi_heap_malloc(heap,48);
+  mi_free(p_out);
+  mi_heap_destroy(heap);
+  //mi_heap_delete(heap); mi_free(p1); mi_free(p2);
+}
+
+void test_large() {
+  const size_t N = 1000;
+
+  for (size_t i = 0; i < N; ++i) {
+    size_t sz = 1ull << 21;
+    char* a = mi_mallocn_tp(char,sz);
+    for (size_t k = 0; k < sz; k++) { a[k] = 'x'; }
+    mi_free(a);
+  }
+}
+
+int main() {
+  void* p1 = mi_malloc(16);
+  void* p2 = mi_malloc(1000000);
+  mi_free(p1);
+  mi_free(p2);
+  p1 = mi_malloc(16);
+  p2 = mi_malloc(16);
+  mi_free(p1);
+  mi_free(p2);
+
+  test_heap(mi_malloc(32));
+
+  p1 = mi_malloc_aligned(64, 16);
+  p2 = mi_malloc_aligned(160,24);
+  mi_free(p2);
+  mi_free(p1);
+  //test_large();
+
+  mi_collect(true);
+  mi_stats_print(NULL);
+  return 0;
+}
diff --git a/ww/managers/mimalloc/test/readme.md b/ww/managers/mimalloc/test/readme.md
new file mode 100644
index 00000000..db3524cd
--- /dev/null
+++ b/ww/managers/mimalloc/test/readme.md
@@ -0,0 +1,16 @@
+Testing allocators is difficult as bugs may only surface after particular
+allocation patterns. The main approach to testing _mimalloc_ is therefore
+to have extensive internal invariant checking (see `page_is_valid` in `page.c`
+for example), which is enabled in debug mode with `-DMI_DEBUG_FULL=ON`.
+The main testing strategy is then to run [`mimalloc-bench`][bench] using full
+invariant checking to catch any potential problems over a wide range of intensive
+allocation benchmarks and programs.
+
+However, this does not exercise the entire API surface well; that is covered
+by `test-api.c` when using `make test` (from `out/debug` etc.). (This is
+not complete yet, please add to it.)
+
+The `main.c` and `main-override.c` are there to test if building and overriding
+from a local install works; they are therefore built via the separate `test/CMakeLists.txt`.
+ +[bench]: https://github.com/daanx/mimalloc-bench diff --git a/ww/managers/mimalloc/test/test-api-fill.c b/ww/managers/mimalloc/test/test-api-fill.c new file mode 100644 index 00000000..3fca3b9d --- /dev/null +++ b/ww/managers/mimalloc/test/test-api-fill.c @@ -0,0 +1,343 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2020, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#include "mimalloc.h" +#include "mimalloc/types.h" + +#include "testhelper.h" + +// --------------------------------------------------------------------------- +// Helper functions +// --------------------------------------------------------------------------- +bool check_zero_init(uint8_t* p, size_t size); +#if MI_DEBUG >= 2 +bool check_debug_fill_uninit(uint8_t* p, size_t size); +bool check_debug_fill_freed(uint8_t* p, size_t size); +#endif + +// --------------------------------------------------------------------------- +// Main testing +// --------------------------------------------------------------------------- +int main(void) { + mi_option_disable(mi_option_verbose); + + // --------------------------------------------------- + // Zeroing allocation + // --------------------------------------------------- + CHECK_BODY("zeroinit-zalloc-small") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_zalloc(zalloc_size); + result = check_zero_init(p, zalloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-zalloc-large") { + size_t zalloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_zalloc(zalloc_size); + result = check_zero_init(p, zalloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-zalloc_small") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_zalloc_small(zalloc_size); + result = check_zero_init(p, zalloc_size); + mi_free(p); + }; + + CHECK_BODY("zeroinit-calloc-small") { + size_t calloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_calloc(calloc_size, 1); + result = check_zero_init(p, calloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-calloc-large") { + size_t calloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_calloc(calloc_size, 1); + result = check_zero_init(p, calloc_size); + mi_free(p); + }; + + CHECK_BODY("zeroinit-rezalloc-small") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_zalloc(zalloc_size); + result = check_zero_init(p, zalloc_size); + zalloc_size *= 3; + p = (uint8_t*)mi_rezalloc(p, zalloc_size); + result &= check_zero_init(p, zalloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-rezalloc-large") { + size_t zalloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_zalloc(zalloc_size); + result = check_zero_init(p, zalloc_size); + zalloc_size *= 3; + p = (uint8_t*)mi_rezalloc(p, zalloc_size); + result &= check_zero_init(p, zalloc_size); + mi_free(p); + }; + + CHECK_BODY("zeroinit-recalloc-small") { + size_t calloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_calloc(calloc_size, 1); + result = check_zero_init(p, calloc_size); + calloc_size *= 3; + p = (uint8_t*)mi_recalloc(p, calloc_size, 1); + result &= check_zero_init(p, calloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-recalloc-large") { + size_t calloc_size = MI_SMALL_SIZE_MAX * 2; + 
uint8_t* p = (uint8_t*)mi_calloc(calloc_size, 1); + result = check_zero_init(p, calloc_size); + calloc_size *= 3; + p = (uint8_t*)mi_recalloc(p, calloc_size, 1); + result &= check_zero_init(p, calloc_size); + mi_free(p); + }; + + // --------------------------------------------------- + // Zeroing in aligned API + // --------------------------------------------------- + CHECK_BODY("zeroinit-zalloc_aligned-small") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, zalloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-zalloc_aligned-large") { + size_t zalloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, zalloc_size); + mi_free(p); + }; + + CHECK_BODY("zeroinit-calloc_aligned-small") { + size_t calloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_calloc_aligned(calloc_size, 1, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, calloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-calloc_aligned-large") { + size_t calloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_calloc_aligned(calloc_size, 1, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, calloc_size); + mi_free(p); + }; + + CHECK_BODY("zeroinit-rezalloc_aligned-small") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, zalloc_size); + zalloc_size *= 3; + p = (uint8_t*)mi_rezalloc_aligned(p, zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result &= check_zero_init(p, zalloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-rezalloc_aligned-large") { + size_t zalloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, zalloc_size); + zalloc_size *= 3; + p = (uint8_t*)mi_rezalloc_aligned(p, zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result &= check_zero_init(p, zalloc_size); + mi_free(p); + }; + + CHECK_BODY("zeroinit-recalloc_aligned-small") { + size_t calloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_calloc_aligned(calloc_size, 1, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, calloc_size); + calloc_size *= 3; + p = (uint8_t*)mi_recalloc_aligned(p, calloc_size, 1, MI_MAX_ALIGN_SIZE * 2); + result &= check_zero_init(p, calloc_size); + mi_free(p); + }; + CHECK_BODY("zeroinit-recalloc_aligned-large") { + size_t calloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_calloc_aligned(calloc_size, 1, MI_MAX_ALIGN_SIZE * 2); + result = check_zero_init(p, calloc_size); + calloc_size *= 3; + p = (uint8_t*)mi_recalloc_aligned(p, calloc_size, 1, MI_MAX_ALIGN_SIZE * 2); + result &= check_zero_init(p, calloc_size); + mi_free(p); + }; + +#if (MI_DEBUG >= 2) && !MI_TSAN + // --------------------------------------------------- + // Debug filling + // --------------------------------------------------- + CHECK_BODY("uninit-malloc-small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_malloc(malloc_size); + result = check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + CHECK_BODY("uninit-malloc-large") { + size_t malloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_malloc(malloc_size); + result = check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + + CHECK_BODY("uninit-malloc_small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = 
(uint8_t*)mi_malloc_small(malloc_size); + result = check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + + CHECK_BODY("uninit-realloc-small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_malloc(malloc_size); + result = check_debug_fill_uninit(p, malloc_size); + malloc_size *= 3; + p = (uint8_t*)mi_realloc(p, malloc_size); + result &= check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + CHECK_BODY("uninit-realloc-large") { + size_t malloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_malloc(malloc_size); + result = check_debug_fill_uninit(p, malloc_size); + malloc_size *= 3; + p = (uint8_t*)mi_realloc(p, malloc_size); + result &= check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + + CHECK_BODY("uninit-mallocn-small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_mallocn(malloc_size, 1); + result = check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + CHECK_BODY("uninit-mallocn-large") { + size_t malloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_mallocn(malloc_size, 1); + result = check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + + CHECK_BODY("uninit-reallocn-small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_mallocn(malloc_size, 1); + result = check_debug_fill_uninit(p, malloc_size); + malloc_size *= 3; + p = (uint8_t*)mi_reallocn(p, malloc_size, 1); + result &= check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + CHECK_BODY("uninit-reallocn-large") { + size_t malloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_mallocn(malloc_size, 1); + result = check_debug_fill_uninit(p, malloc_size); + malloc_size *= 3; + p = (uint8_t*)mi_reallocn(p, malloc_size, 1); + result &= check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + + CHECK_BODY("uninit-malloc_aligned-small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_malloc_aligned(malloc_size, MI_MAX_ALIGN_SIZE * 2); + result = check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + CHECK_BODY("uninit-malloc_aligned-large") { + size_t malloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_malloc_aligned(malloc_size, MI_MAX_ALIGN_SIZE * 2); + result = check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + + CHECK_BODY("uninit-realloc_aligned-small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_malloc_aligned(malloc_size, MI_MAX_ALIGN_SIZE * 2); + result = check_debug_fill_uninit(p, malloc_size); + malloc_size *= 3; + p = (uint8_t*)mi_realloc_aligned(p, malloc_size, MI_MAX_ALIGN_SIZE * 2); + result &= check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + CHECK_BODY("uninit-realloc_aligned-large") { + size_t malloc_size = MI_SMALL_SIZE_MAX * 2; + uint8_t* p = (uint8_t*)mi_malloc_aligned(malloc_size, MI_MAX_ALIGN_SIZE * 2); + result = check_debug_fill_uninit(p, malloc_size); + malloc_size *= 3; + p = (uint8_t*)mi_realloc_aligned(p, malloc_size, MI_MAX_ALIGN_SIZE * 2); + result &= check_debug_fill_uninit(p, malloc_size); + mi_free(p); + }; + + #if !(MI_TRACK_VALGRIND || MI_TRACK_ASAN) + CHECK_BODY("fill-freed-small") { + size_t malloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_malloc(malloc_size); + mi_free(p); + // First sizeof(void*) bytes will contain housekeeping data, skip these + result = check_debug_fill_freed(p + sizeof(void*), malloc_size - sizeof(void*)); + }; + CHECK_BODY("fill-freed-large") { + size_t malloc_size = MI_SMALL_SIZE_MAX * 2; + 
uint8_t* p = (uint8_t*)mi_malloc(malloc_size); + mi_free(p); + // First sizeof(void*) bytes will contain housekeeping data, skip these + result = check_debug_fill_freed(p + sizeof(void*), malloc_size - sizeof(void*)); + }; + #endif +#endif + + // --------------------------------------------------- + // Done + // ---------------------------------------------------[] + return print_test_summary(); +} + +// --------------------------------------------------------------------------- +// Helper functions +// --------------------------------------------------------------------------- +bool check_zero_init(uint8_t* p, size_t size) { + if(!p) + return false; + bool result = true; + for (size_t i = 0; i < size; ++i) { + result &= p[i] == 0; + } + return result; +} + +#if MI_DEBUG >= 2 +bool check_debug_fill_uninit(uint8_t* p, size_t size) { +#if MI_TRACK_VALGRIND || MI_TRACK_ASAN + (void)p; (void)size; + return true; // when compiled with valgrind we don't init on purpose +#else + if(!p) + return false; + + bool result = true; + for (size_t i = 0; i < size; ++i) { + result &= p[i] == MI_DEBUG_UNINIT; + } + return result; +#endif +} + +bool check_debug_fill_freed(uint8_t* p, size_t size) { +#if MI_TRACK_VALGRIND + (void)p; (void)size; + return true; // when compiled with valgrind we don't fill on purpose +#else + if(!p) + return false; + + bool result = true; + for (size_t i = 0; i < size; ++i) { + result &= p[i] == MI_DEBUG_FREED; + } + return result; +#endif +} +#endif diff --git a/ww/managers/mimalloc/test/test-api.c b/ww/managers/mimalloc/test/test-api.c new file mode 100644 index 00000000..76101980 --- /dev/null +++ b/ww/managers/mimalloc/test/test-api.c @@ -0,0 +1,451 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2020, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic ignored "-Walloc-size-larger-than=" +#endif + +/* +Testing allocators is difficult as bugs may only surface after particular +allocation patterns. The main approach to testing _mimalloc_ is therefore +to have extensive internal invariant checking (see `page_is_valid` in `page.c` +for example), which is enabled in debug mode with `-DMI_DEBUG_FULL=ON`. +The main testing is then to run `mimalloc-bench` [1] using full invariant checking +to catch any potential problems over a wide range of intensive allocation bench +marks. + +However, this does not test well for the entire API surface. In this test file +we therefore test the API over various inputs. 
Please add more tests :-) + +[1] https://github.com/daanx/mimalloc-bench +*/ + +#include +#include +#include +#include + +#ifdef __cplusplus +#include +#endif + +#include "mimalloc.h" +// #include "mimalloc/internal.h" +#include "mimalloc/types.h" // for MI_DEBUG and MI_BLOCK_ALIGNMENT_MAX + +#include "testhelper.h" + +// --------------------------------------------------------------------------- +// Test functions +// --------------------------------------------------------------------------- +bool test_heap1(void); +bool test_heap2(void); +bool test_stl_allocator1(void); +bool test_stl_allocator2(void); + +bool test_stl_heap_allocator1(void); +bool test_stl_heap_allocator2(void); +bool test_stl_heap_allocator3(void); +bool test_stl_heap_allocator4(void); + +bool mem_is_zero(uint8_t* p, size_t size) { + if (p==NULL) return false; + for (size_t i = 0; i < size; ++i) { + if (p[i] != 0) return false; + } + return true; +} + +// --------------------------------------------------------------------------- +// Main testing +// --------------------------------------------------------------------------- +int main(void) { + mi_option_disable(mi_option_verbose); + + // --------------------------------------------------- + // Malloc + // --------------------------------------------------- + + CHECK_BODY("malloc-zero") { + void* p = mi_malloc(0); + result = (p != NULL); + mi_free(p); + }; + CHECK_BODY("malloc-nomem1") { + result = (mi_malloc((size_t)PTRDIFF_MAX + (size_t)1) == NULL); + }; + CHECK_BODY("malloc-null") { + mi_free(NULL); + }; + CHECK_BODY("calloc-overflow") { + // use (size_t)&mi_calloc to get some number without triggering compiler warnings + result = (mi_calloc((size_t)&mi_calloc,SIZE_MAX/1000) == NULL); + }; + CHECK_BODY("calloc0") { + void* p = mi_calloc(0,1000); + result = (mi_usable_size(p) <= 16); + mi_free(p); + }; + CHECK_BODY("malloc-large") { // see PR #544. 
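+    // 67108872 = 64*1024*1024 + 8, i.e. just over 64 MiB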
+ void* p = mi_malloc(67108872); + mi_free(p); + }; + + // --------------------------------------------------- + // Extended + // --------------------------------------------------- + CHECK_BODY("posix_memalign1") { + void* p = &p; + int err = mi_posix_memalign(&p, sizeof(void*), 32); + result = ((err==0 && (uintptr_t)p % sizeof(void*) == 0) || p==&p); + mi_free(p); + }; + CHECK_BODY("posix_memalign_no_align") { + void* p = &p; + int err = mi_posix_memalign(&p, 3, 32); + result = (err==EINVAL && p==&p); + }; + CHECK_BODY("posix_memalign_zero") { + void* p = &p; + int err = mi_posix_memalign(&p, sizeof(void*), 0); + mi_free(p); + result = (err==0); + }; + CHECK_BODY("posix_memalign_nopow2") { + void* p = &p; + int err = mi_posix_memalign(&p, 3*sizeof(void*), 32); + result = (err==EINVAL && p==&p); + }; + CHECK_BODY("posix_memalign_nomem") { + void* p = &p; + int err = mi_posix_memalign(&p, sizeof(void*), SIZE_MAX); + result = (err==ENOMEM && p==&p); + }; + + // --------------------------------------------------- + // Aligned API + // --------------------------------------------------- + CHECK_BODY("malloc-aligned1") { + void* p = mi_malloc_aligned(32,32); result = (p != NULL && (uintptr_t)(p) % 32 == 0); mi_free(p); + }; + CHECK_BODY("malloc-aligned2") { + void* p = mi_malloc_aligned(48,32); result = (p != NULL && (uintptr_t)(p) % 32 == 0); mi_free(p); + }; + CHECK_BODY("malloc-aligned3") { + void* p1 = mi_malloc_aligned(48,32); bool result1 = (p1 != NULL && (uintptr_t)(p1) % 32 == 0); + void* p2 = mi_malloc_aligned(48,32); bool result2 = (p2 != NULL && (uintptr_t)(p2) % 32 == 0); + mi_free(p2); + mi_free(p1); + result = (result1&&result2); + }; + CHECK_BODY("malloc-aligned4") { + void* p; + bool ok = true; + for (int i = 0; i < 8 && ok; i++) { + p = mi_malloc_aligned(8, 16); + ok = (p != NULL && (uintptr_t)(p) % 16 == 0); mi_free(p); + } + result = ok; + }; + CHECK_BODY("malloc-aligned5") { + void* p = mi_malloc_aligned(4097,4096); + size_t usable = mi_usable_size(p); + result = (usable >= 4097 && usable < 16000); + printf("malloc_aligned5: usable size: %zi\n", usable); + mi_free(p); + }; + CHECK_BODY("malloc-aligned6") { + bool ok = true; + for (size_t align = 1; align <= MI_BLOCK_ALIGNMENT_MAX && ok; align *= 2) { + void* ps[8]; + for (int i = 0; i < 8 && ok; i++) { + ps[i] = mi_malloc_aligned(align*13 // size + , align); + if (ps[i] == NULL || (uintptr_t)(ps[i]) % align != 0) { + ok = false; + } + } + for (int i = 0; i < 8 && ok; i++) { + mi_free(ps[i]); + } + } + result = ok; + }; + CHECK_BODY("malloc-aligned7") { + void* p = mi_malloc_aligned(1024,MI_BLOCK_ALIGNMENT_MAX); + mi_free(p); + result = ((uintptr_t)p % MI_BLOCK_ALIGNMENT_MAX) == 0; + }; + CHECK_BODY("malloc-aligned8") { + bool ok = true; + for (int i = 0; i < 5 && ok; i++) { + int n = (1 << i); + void* p = mi_malloc_aligned(1024, n * MI_BLOCK_ALIGNMENT_MAX); + ok = ((uintptr_t)p % (n*MI_BLOCK_ALIGNMENT_MAX)) == 0; + mi_free(p); + } + result = ok; + }; + CHECK_BODY("malloc-aligned9") { + bool ok = true; + void* p[8]; + size_t sizes[8] = { 8, 512, 1024 * 1024, MI_BLOCK_ALIGNMENT_MAX, MI_BLOCK_ALIGNMENT_MAX + 1, 2 * MI_BLOCK_ALIGNMENT_MAX, 8 * MI_BLOCK_ALIGNMENT_MAX, 0 }; + for (int i = 0; i < 28 && ok; i++) { + int align = (1 << i); + for (int j = 0; j < 8 && ok; j++) { + p[j] = mi_zalloc_aligned(sizes[j], align); + ok = ((uintptr_t)p[j] % align) == 0; + } + for (int j = 0; j < 8; j++) { + mi_free(p[j]); + } + } + result = ok; + }; + CHECK_BODY("malloc-aligned10") { + bool ok = true; + void* p[10+1]; + int align; + int j; + 
for(j = 0, align = 1; j <= 10 && ok; align *= 2, j++ ) { + p[j] = mi_malloc_aligned(43 + align, align); + ok = ((uintptr_t)p[j] % align) == 0; + } + for ( ; j > 0; j--) { + mi_free(p[j-1]); + } + result = ok; + } + CHECK_BODY("malloc_aligned11") { + mi_heap_t* heap = mi_heap_new(); + void* p = mi_heap_malloc_aligned(heap, 33554426, 8); + result = mi_heap_contains_block(heap, p); + mi_heap_destroy(heap); + } + CHECK_BODY("mimalloc-aligned12") { + void* p = mi_malloc_aligned(0x100, 0x100); + result = (((uintptr_t)p % 0x100) == 0); // #602 + mi_free(p); + } + CHECK_BODY("mimalloc-aligned13") { + bool ok = true; + for( size_t size = 1; size <= (MI_SMALL_SIZE_MAX * 2) && ok; size++ ) { + for(size_t align = 1; align <= size && ok; align *= 2 ) { + void* p[10]; + for(int i = 0; i < 10 && ok; i++) { + p[i] = mi_malloc_aligned(size,align);; + ok = (p[i] != NULL && ((uintptr_t)(p[i]) % align) == 0); + } + for(int i = 0; i < 10 && ok; i++) { + mi_free(p[i]); + } + /* + if (ok && align <= size && ((size + MI_PADDING_SIZE) & (align-1)) == 0) { + size_t bsize = mi_good_size(size); + ok = (align <= bsize && (bsize & (align-1)) == 0); + } + */ + } + } + result = ok; + } + CHECK_BODY("malloc-aligned-at1") { + void* p = mi_malloc_aligned_at(48,32,0); result = (p != NULL && ((uintptr_t)(p) + 0) % 32 == 0); mi_free(p); + }; + CHECK_BODY("malloc-aligned-at2") { + void* p = mi_malloc_aligned_at(50,32,8); result = (p != NULL && ((uintptr_t)(p) + 8) % 32 == 0); mi_free(p); + }; + CHECK_BODY("memalign1") { + void* p; + bool ok = true; + for (int i = 0; i < 8 && ok; i++) { + p = mi_memalign(16,8); + ok = (p != NULL && (uintptr_t)(p) % 16 == 0); mi_free(p); + } + result = ok; + }; + CHECK_BODY("zalloc-aligned-small1") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = mem_is_zero(p, zalloc_size); + mi_free(p); + }; + CHECK_BODY("rezalloc_aligned-small1") { + size_t zalloc_size = MI_SMALL_SIZE_MAX / 2; + uint8_t* p = (uint8_t*)mi_zalloc_aligned(zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = mem_is_zero(p, zalloc_size); + zalloc_size *= 3; + p = (uint8_t*)mi_rezalloc_aligned(p, zalloc_size, MI_MAX_ALIGN_SIZE * 2); + result = result && mem_is_zero(p, zalloc_size); + mi_free(p); + }; + + // --------------------------------------------------- + // Reallocation + // --------------------------------------------------- + CHECK_BODY("realloc-null") { + void* p = mi_realloc(NULL,4); + result = (p != NULL); + mi_free(p); + }; + + CHECK_BODY("realloc-null-sizezero") { + void* p = mi_realloc(NULL,0); // "If ptr is NULL, the behavior is the same as calling malloc(new_size)." 
+ result = (p != NULL); + mi_free(p); + }; + + CHECK_BODY("realloc-sizezero") { + void* p = mi_malloc(4); + void* q = mi_realloc(p, 0); + result = (q != NULL); + mi_free(q); + }; + + CHECK_BODY("reallocarray-null-sizezero") { + void* p = mi_reallocarray(NULL,0,16); // issue #574 + result = (p != NULL && errno == 0); + mi_free(p); + }; + + // --------------------------------------------------- + // Heaps + // --------------------------------------------------- + CHECK("heap_destroy", test_heap1()); + CHECK("heap_delete", test_heap2()); + + //mi_stats_print(NULL); + + // --------------------------------------------------- + // various + // --------------------------------------------------- + #if !defined(MI_TRACK_ASAN) // realpath may leak with ASAN enabled (as the ASAN allocator intercepts it) + CHECK_BODY("realpath") { + char* s = mi_realpath( ".", NULL ); + // printf("realpath: %s\n",s); + mi_free(s); + }; + #endif + + CHECK("stl_allocator1", test_stl_allocator1()); + CHECK("stl_allocator2", test_stl_allocator2()); + + CHECK("stl_heap_allocator1", test_stl_heap_allocator1()); + CHECK("stl_heap_allocator2", test_stl_heap_allocator2()); + CHECK("stl_heap_allocator3", test_stl_heap_allocator3()); + CHECK("stl_heap_allocator4", test_stl_heap_allocator4()); + + // --------------------------------------------------- + // Done + // ---------------------------------------------------[] + return print_test_summary(); +} + +// --------------------------------------------------- +// Larger test functions +// --------------------------------------------------- + +bool test_heap1(void) { + mi_heap_t* heap = mi_heap_new(); + int* p1 = mi_heap_malloc_tp(heap,int); + int* p2 = mi_heap_malloc_tp(heap,int); + *p1 = *p2 = 43; + mi_heap_destroy(heap); + return true; +} + +bool test_heap2(void) { + mi_heap_t* heap = mi_heap_new(); + int* p1 = mi_heap_malloc_tp(heap,int); + int* p2 = mi_heap_malloc_tp(heap,int); + mi_heap_delete(heap); + *p1 = 42; + mi_free(p1); + mi_free(p2); + return true; +} + +bool test_stl_allocator1(void) { +#ifdef __cplusplus + std::vector > vec; + vec.push_back(1); + vec.pop_back(); + return vec.size() == 0; +#else + return true; +#endif +} + +struct some_struct { int i; int j; double z; }; + +bool test_stl_allocator2(void) { +#ifdef __cplusplus + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +#else + return true; +#endif +} + +bool test_stl_heap_allocator1(void) { +#ifdef __cplusplus + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +#else + return true; +#endif +} + +bool test_stl_heap_allocator2(void) { +#ifdef __cplusplus + std::vector > vec; + vec.push_back(some_struct()); + vec.pop_back(); + return vec.size() == 0; +#else + return true; +#endif +} + +bool test_stl_heap_allocator3(void) { +#ifdef __cplusplus + mi_heap_t* heap = mi_heap_new(); + bool good = false; + { + mi_heap_stl_allocator myAlloc(heap); + std::vector > vec(myAlloc); + vec.push_back(some_struct()); + vec.pop_back(); + good = vec.size() == 0; + } + mi_heap_delete(heap); + return good; +#else + return true; +#endif +} + +bool test_stl_heap_allocator4(void) { +#ifdef __cplusplus + mi_heap_t* heap = mi_heap_new(); + bool good = false; + { + mi_heap_destroy_stl_allocator myAlloc(heap); + std::vector > vec(myAlloc); + vec.push_back(some_struct()); + vec.pop_back(); + good = vec.size() == 0; + } + mi_heap_destroy(heap); + return good; +#else + return true; +#endif +} diff --git a/ww/managers/mimalloc/test/test-stress.c 
b/ww/managers/mimalloc/test/test-stress.c new file mode 100644 index 00000000..15d0e25b --- /dev/null +++ b/ww/managers/mimalloc/test/test-stress.c @@ -0,0 +1,364 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2020 Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. +-----------------------------------------------------------------------------*/ + +/* This is a stress test for the allocator, using multiple threads and + transferring objects between threads. It tries to reflect real-world workloads: + - allocation size is distributed linearly in powers of two + - with some fraction extra large (and some very large) + - the allocations are initialized and read again at free + - pointers transfer between threads + - threads are terminated and recreated with some objects surviving in between + - uses deterministic "randomness", but execution can still depend on + (random) thread scheduling. Do not use this test as a benchmark! +*/ + +#include +#include +#include +#include +#include +#include + +// > mimalloc-test-stress [THREADS] [SCALE] [ITER] +// +// argument defaults +static int THREADS = 32; // more repeatable if THREADS <= #processors +static int SCALE = 25; // scaling factor + +#if defined(MI_TSAN) +static int ITER = 10; // N full iterations destructing and re-creating all threads (on tsan reduce for azure pipeline limits) +#else +static int ITER = 50; // N full iterations destructing and re-creating all threads +#endif + +// static int THREADS = 8; // more repeatable if THREADS <= #processors +// static int SCALE = 100; // scaling factor + +#define STRESS // undefine for leak test + +static bool allow_large_objects = true; // allow very large objects? (set to `true` if SCALE>100) +static size_t use_one_size = 0; // use single object size of `N * sizeof(uintptr_t)`? 
+ +static bool main_participates = false; // main thread participates as a worker too + +// #define USE_STD_MALLOC +#ifdef USE_STD_MALLOC +#define custom_calloc(n,s) calloc(n,s) +#define custom_realloc(p,s) realloc(p,s) +#define custom_free(p) free(p) +#else +#include +#define custom_calloc(n,s) mi_calloc(n,s) +#define custom_realloc(p,s) mi_realloc(p,s) +#define custom_free(p) mi_free(p) +#endif + +// transfer pointer between threads +#define TRANSFERS (1000) +static volatile void* transfer[TRANSFERS]; + + +#if (UINTPTR_MAX != UINT32_MAX) +const uintptr_t cookie = 0xbf58476d1ce4e5b9UL; +#else +const uintptr_t cookie = 0x1ce4e5b9UL; +#endif + +static void* atomic_exchange_ptr(volatile void** p, void* newval); + +typedef uintptr_t* random_t; + +static uintptr_t pick(random_t r) { + uintptr_t x = *r; +#if (UINTPTR_MAX > UINT32_MAX) + // by Sebastiano Vigna, see: + x ^= x >> 30; + x *= 0xbf58476d1ce4e5b9UL; + x ^= x >> 27; + x *= 0x94d049bb133111ebUL; + x ^= x >> 31; +#else + // by Chris Wellons, see: + x ^= x >> 16; + x *= 0x7feb352dUL; + x ^= x >> 15; + x *= 0x846ca68bUL; + x ^= x >> 16; +#endif + *r = x; + return x; +} + +static bool chance(size_t perc, random_t r) { + return (pick(r) % 100 <= perc); +} + +static void* alloc_items(size_t items, random_t r) { + if (chance(1, r)) { + if (chance(1, r) && allow_large_objects) items *= 10000; // 0.01% giant + else if (chance(10, r) && allow_large_objects) items *= 1000; // 0.1% huge + else items *= 100; // 1% large objects; + } + if (items == 40) items++; // pthreads uses that size for stack increases + if (use_one_size > 0) items = (use_one_size / sizeof(uintptr_t)); + if (items==0) items = 1; + uintptr_t* p = (uintptr_t*)custom_calloc(items,sizeof(uintptr_t)); + if (p != NULL) { + for (uintptr_t i = 0; i < items; i++) { + assert(p[i] == 0); + p[i] = (items - i) ^ cookie; + } + } + return p; +} + +static void free_items(void* p) { + if (p != NULL) { + uintptr_t* q = (uintptr_t*)p; + uintptr_t items = (q[0] ^ cookie); + for (uintptr_t i = 0; i < items; i++) { + if ((q[i] ^ cookie) != items - i) { + fprintf(stderr, "memory corruption at block %p at %zu\n", p, i); + abort(); + } + } + } + custom_free(p); +} + + +static void stress(intptr_t tid) { + //bench_start_thread(); + uintptr_t r = ((tid + 1) * 43); // rand(); + const size_t max_item_shift = 5; // 128 + const size_t max_item_retained_shift = max_item_shift + 2; + size_t allocs = 100 * ((size_t)SCALE) * (tid % 8 + 1); // some threads do more + size_t retain = allocs / 2; + void** data = NULL; + size_t data_size = 0; + size_t data_top = 0; + void** retained = (void**)custom_calloc(retain,sizeof(void*)); + size_t retain_top = 0; + + while (allocs > 0 || retain > 0) { + if (retain == 0 || (chance(50, &r) && allocs > 0)) { + // 50%+ alloc + allocs--; + if (data_top >= data_size) { + data_size += 100000; + data = (void**)custom_realloc(data, data_size * sizeof(void*)); + } + data[data_top++] = alloc_items(1ULL << (pick(&r) % max_item_shift), &r); + } + else { + // 25% retain + retained[retain_top++] = alloc_items( 1ULL << (pick(&r) % max_item_retained_shift), &r); + retain--; + } + if (chance(66, &r) && data_top > 0) { + // 66% free previous alloc + size_t idx = pick(&r) % data_top; + free_items(data[idx]); + data[idx] = NULL; + } + if (chance(25, &r) && data_top > 0) { + // 25% exchange a local pointer with the (shared) transfer buffer. 
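+      // (the exchange below is what causes blocks to be freed by a different thread than the one that allocated them)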
+ size_t data_idx = pick(&r) % data_top; + size_t transfer_idx = pick(&r) % TRANSFERS; + void* p = data[data_idx]; + void* q = atomic_exchange_ptr(&transfer[transfer_idx], p); + data[data_idx] = q; + } + } + // free everything that is left + for (size_t i = 0; i < retain_top; i++) { + free_items(retained[i]); + } + for (size_t i = 0; i < data_top; i++) { + free_items(data[i]); + } + custom_free(retained); + custom_free(data); + //bench_end_thread(); +} + +static void run_os_threads(size_t nthreads, void (*entry)(intptr_t tid)); + +static void test_stress(void) { + uintptr_t r = rand(); + for (int n = 0; n < ITER; n++) { + run_os_threads(THREADS, &stress); + for (int i = 0; i < TRANSFERS; i++) { + if (chance(50, &r) || n + 1 == ITER) { // free all on last run, otherwise free half of the transfers + void* p = atomic_exchange_ptr(&transfer[i], NULL); + free_items(p); + } + } + #ifndef NDEBUG + //mi_collect(false); + //mi_debug_show_arenas(); + #endif + #if !defined(NDEBUG) || defined(MI_TSAN) + if ((n + 1) % 10 == 0) { printf("- iterations left: %3d\n", ITER - (n + 1)); } + #endif + } +} + +#ifndef STRESS +static void leak(intptr_t tid) { + uintptr_t r = rand(); + void* p = alloc_items(1 /*pick(&r)%128*/, &r); + if (chance(50, &r)) { + intptr_t i = (pick(&r) % TRANSFERS); + void* q = atomic_exchange_ptr(&transfer[i], p); + free_items(q); + } +} + +static void test_leak(void) { + for (int n = 0; n < ITER; n++) { + run_os_threads(THREADS, &leak); + mi_collect(false); +#ifndef NDEBUG + if ((n + 1) % 10 == 0) { printf("- iterations left: %3d\n", ITER - (n + 1)); } +#endif + } +} +#endif + +int main(int argc, char** argv) { + #ifndef USE_STD_MALLOC + mi_stats_reset(); + #endif + + // > mimalloc-test-stress [THREADS] [SCALE] [ITER] + if (argc >= 2) { + char* end; + long n = strtol(argv[1], &end, 10); + if (n > 0) THREADS = n; + } + if (argc >= 3) { + char* end; + long n = (strtol(argv[2], &end, 10)); + if (n > 0) SCALE = n; + } + if (argc >= 4) { + char* end; + long n = (strtol(argv[3], &end, 10)); + if (n > 0) ITER = n; + } + if (SCALE > 100) { + allow_large_objects = true; + } + printf("Using %d threads with a %d%% load-per-thread and %d iterations %s\n", THREADS, SCALE, ITER, (allow_large_objects ? "(allow large objects)" : "")); + //mi_reserve_os_memory(1024*1024*1024ULL, false, true); + //int res = mi_reserve_huge_os_pages(4,1); + //printf("(reserve huge: %i\n)", res); + + //bench_start_program(); + + // Run ITER full iterations where half the objects in the transfer buffer survive to the next round. + srand(0x7feb352d); + + //mi_reserve_os_memory(512ULL << 20, true, true); + +#if !defined(NDEBUG) && !defined(USE_STD_MALLOC) + mi_stats_reset(); +#endif + +#ifdef STRESS + test_stress(); +#else + test_leak(); +#endif + +#ifndef USE_STD_MALLOC + #ifndef NDEBUG + // mi_collect(true); + mi_debug_show_arenas(true,true,true); + #endif + mi_stats_print(NULL); +#endif + //bench_end_program(); + return 0; +} + + +static void (*thread_entry_fun)(intptr_t) = &stress; + +#ifdef _WIN32 + +#include + +static DWORD WINAPI thread_entry(LPVOID param) { + thread_entry_fun((intptr_t)param); + return 0; +} + +static void run_os_threads(size_t nthreads, void (*fun)(intptr_t)) { + thread_entry_fun = fun; + DWORD* tids = (DWORD*)custom_calloc(nthreads,sizeof(DWORD)); + HANDLE* thandles = (HANDLE*)custom_calloc(nthreads,sizeof(HANDLE)); + const size_t start = (main_participates ? 
1 : 0); + for (size_t i = start; i < nthreads; i++) { + thandles[i] = CreateThread(0, 8*1024, &thread_entry, (void*)(i), 0, &tids[i]); + } + if (main_participates) fun(0); // run the main thread as well + for (size_t i = start; i < nthreads; i++) { + WaitForSingleObject(thandles[i], INFINITE); + } + for (size_t i = start; i < nthreads; i++) { + CloseHandle(thandles[i]); + } + custom_free(tids); + custom_free(thandles); +} + +static void* atomic_exchange_ptr(volatile void** p, void* newval) { +#if (INTPTR_MAX == INT32_MAX) + return (void*)InterlockedExchange((volatile LONG*)p, (LONG)newval); +#else + return (void*)InterlockedExchange64((volatile LONG64*)p, (LONG64)newval); +#endif +} +#else + +#include + +static void* thread_entry(void* param) { + thread_entry_fun((uintptr_t)param); + return NULL; +} + +static void run_os_threads(size_t nthreads, void (*fun)(intptr_t)) { + thread_entry_fun = fun; + pthread_t* threads = (pthread_t*)custom_calloc(nthreads,sizeof(pthread_t)); + memset(threads, 0, sizeof(pthread_t) * nthreads); + const size_t start = (main_participates ? 1 : 0); + //pthread_setconcurrency(nthreads); + for (size_t i = start; i < nthreads; i++) { + pthread_create(&threads[i], NULL, &thread_entry, (void*)i); + } + if (main_participates) fun(0); // run the main thread as well + for (size_t i = start; i < nthreads; i++) { + pthread_join(threads[i], NULL); + } + custom_free(threads); +} + +#ifdef __cplusplus +#include +static void* atomic_exchange_ptr(volatile void** p, void* newval) { + return std::atomic_exchange((volatile std::atomic*)p, newval); +} +#else +#include +static void* atomic_exchange_ptr(volatile void** p, void* newval) { + return atomic_exchange((volatile _Atomic(void*)*)p, newval); +} +#endif + +#endif diff --git a/ww/managers/mimalloc/test/test-wrong.c b/ww/managers/mimalloc/test/test-wrong.c new file mode 100644 index 00000000..56a2339a --- /dev/null +++ b/ww/managers/mimalloc/test/test-wrong.c @@ -0,0 +1,92 @@ +/* ---------------------------------------------------------------------------- +Copyright (c) 2018-2020, Microsoft Research, Daan Leijen +This is free software; you can redistribute it and/or modify it under the +terms of the MIT license. A copy of the license can be found in the file +"LICENSE" at the root of this distribution. +-----------------------------------------------------------------------------*/ + +/* test file for valgrind/asan support. + + VALGRIND: + ---------- + Compile in an "out/debug" folder: + + > cd out/debug + > cmake ../.. -DMI_TRACK_VALGRIND=1 + > make -j8 + + and then compile this file as: + + > gcc -g -o test-wrong -I../../include ../../test/test-wrong.c libmimalloc-valgrind-debug.a -lpthread + + and test as: + + > valgrind ./test-wrong + + + ASAN + ---------- + Compile in an "out/debug" folder: + + > cd out/debug + > cmake ../.. 
-DMI_TRACK_ASAN=1
+  > make -j8
+
+  and then compile this file as:
+
+  > clang -g -o test-wrong -I../../include ../../test/test-wrong.c libmimalloc-asan-debug.a -lpthread -fsanitize=address -fsanitize-recover=address
+
+  and test as:
+
+  > ASAN_OPTIONS=verbosity=1:halt_on_error=0 ./test-wrong
+
+
+*/
+#include <stdio.h>
+#include <stdlib.h>
+#include "mimalloc.h"
+
+#ifdef USE_STD_MALLOC
+# define mi(x) x
+#else
+# define mi(x) mi_##x
+#endif
+
+int main(int argc, char** argv) {
+  int* p = (int*)mi(malloc)(3*sizeof(int));
+
+  int* r = (int*)mi_malloc_aligned(8,16);
+  mi_free(r);
+
+  // illegal byte wise read
+  char* c = (char*)mi(malloc)(3);
+  printf("invalid byte: over: %d, under: %d\n", c[4], c[-1]);
+  mi(free)(c);
+
+  // undefined access
+  int* q = (int*)mi(malloc)(sizeof(int));
+  printf("undefined: %d\n", *q);
+
+  // illegal int read
+  printf("invalid: over: %d, under: %d\n", q[1], q[-1]);
+
+  *q = 42;
+
+  // buffer overflow
+  q[1] = 43;
+
+  // buffer underflow
+  q[-1] = 44;
+
+  mi(free)(q);
+
+  // double free
+  mi(free)(q);
+
+  // use after free
+  printf("use-after-free: %d\n", *q);
+
+  // leak p
+  // mi_free(p)
+  return 0;
+}
\ No newline at end of file
diff --git a/ww/managers/mimalloc/test/testhelper.h b/ww/managers/mimalloc/test/testhelper.h
new file mode 100644
index 00000000..a9727584
--- /dev/null
+++ b/ww/managers/mimalloc/test/testhelper.h
@@ -0,0 +1,49 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2020, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#ifndef TESTHELPER_H_
+#define TESTHELPER_H_
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+
+// ---------------------------------------------------------------------------
+// Test macros: CHECK(name,predicate) and CHECK_BODY(name,body)
+// ---------------------------------------------------------------------------
+static int ok = 0;
+static int failed = 0;
+
+static bool check_result(bool result, const char* testname, const char* fname, long lineno) {
+  if (!(result)) {
+    failed++;
+    fprintf(stderr,"\n  FAILED: %s: %s:%ld\n", testname, fname, lineno);
+    /* exit(1); */
+  }
+  else {
+    ok++;
+    fprintf(stderr, "ok.\n");
+  }
+  return true;
+}
+
+#define CHECK_BODY(name) \
+  fprintf(stderr,"test: %s...  ", name ); \
+  errno = 0; \
+  for(bool done = false, result = true; !done; done = check_result(result,name,__FILE__,__LINE__))
+
+#define CHECK(name,expr)  CHECK_BODY(name){ result = (expr); }
+
+// Print summary of tests. The return value can be used directly as the return value for main().
+static inline int print_test_summary(void)
+{
+  fprintf(stderr,"\n\n---------------------------------------------\n"
+                 "succeeded: %i\n"
+                 "failed   : %i\n\n", ok, failed);
+  return failed;
+}
+
+#endif // TESTHELPER_H_
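For quick reference, a minimal standalone test built on the `testhelper.h` macros above could look like the sketch below (illustrative only: the file name `use-testhelper.c` and the test names are made up, and the compile line is just one plausible way to link against the vendored mimalloc):

  // use-testhelper.c (illustrative sketch, not part of this patch)
  // e.g.: gcc -I ww/managers/mimalloc/include -I ww/managers/mimalloc/test use-testhelper.c libmimalloc.a -lpthread -o use-testhelper
  #include <mimalloc.h>
  #include "testhelper.h"

  int main(void) {
    // CHECK records a single predicate as ok/failed.
    CHECK("usable-size-null", mi_usable_size(NULL) == 0);

    // CHECK_BODY allows several statements; assign the outcome to `result`.
    CHECK_BODY("malloc-roundtrip") {
      void* p = mi_malloc(64);
      result = (p != NULL);
      mi_free(p);
    };

    // Returns the number of failed tests, suitable as the process exit code.
    return print_test_summary();
  }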