From d1cb3311cd8a305896d72c2f1f0dff5495d4da29 Mon Sep 17 00:00:00 2001
From: wf9a5m75
Date: Thu, 25 Oct 2018 14:48:39 -0700
Subject: [PATCH] Bump up to v1.1.0 (redis-5.0.0)

---
 .idea/misc.xml | 8 +-
 CHANGELOGS.md | 67 +-
 README.md | 15 +-
 build.gradle | 2 +-
 gradle/wrapper/gradle-wrapper.properties | 4 +-
 redis-android/build.gradle | 6 +-
 .../gradle/wrapper/gradle-wrapper.properties | 2 +-
 .../java/io/wf9a5m75/redis/RedisAndroid.java | 20 +-
 redis-android/src/main/jni/Application.mk | 2 +-
 .../deps => }/hiredis/.gitignore | 0
 .../deps => }/hiredis/.travis.yml | 0
 .../deps => }/hiredis/Android.mk | 0
 .../deps => }/hiredis/CHANGELOG.md | 0
 .../{redis-4.0.11/deps => }/hiredis/COPYING | 0
 .../{redis-4.0.11/deps => }/hiredis/Makefile | 0
 .../{redis-4.0.11/deps => }/hiredis/README.md | 0
 .../deps => }/hiredis/adapters/ae.h | 0
 .../deps => }/hiredis/adapters/glib.h | 0
 .../deps => }/hiredis/adapters/ivykis.h | 0
 .../deps => }/hiredis/adapters/libev.h | 0
 .../deps => }/hiredis/adapters/libevent.h | 0
 .../deps => }/hiredis/adapters/libuv.h | 0
 .../deps => }/hiredis/adapters/macosx.h | 0
 .../deps => }/hiredis/adapters/qt.h | 0
 .../deps => }/hiredis/appveyor.yml | 0
 .../{redis-4.0.11/deps => }/hiredis/async.c | 0
 .../{redis-4.0.11/deps => }/hiredis/async.h | 0
 .../{redis-4.0.11/deps => }/hiredis/dict.c | 0
 .../{redis-4.0.11/deps => }/hiredis/dict.h | 0
 .../deps => }/hiredis/examples/example-ae.c | 0
 .../deps => }/hiredis/examples/example-glib.c | 0
 .../hiredis/examples/example-ivykis.c | 0
 .../hiredis/examples/example-libev.c | 0
 .../hiredis/examples/example-libevent.c | 0
 .../hiredis/examples/example-libuv.c | 0
 .../hiredis/examples/example-macosx.c | 0
 .../deps => }/hiredis/examples/example-qt.cpp | 0
 .../deps => }/hiredis/examples/example-qt.h | 0
 .../deps => }/hiredis/examples/example.c | 0
 .../{redis-4.0.11/deps => }/hiredis/fmacros.h | 0
 .../{redis-4.0.11/deps => }/hiredis/hiredis.c | 0
 .../{redis-4.0.11/deps => }/hiredis/hiredis.h | 0
 .../jni/{redis-4.0.11/deps => }/hiredis/net.c | 0
 .../jni/{redis-4.0.11/deps => }/hiredis/net.h | 0
 .../{redis-4.0.11/deps => }/hiredis/read.c | 0
 .../{redis-4.0.11/deps => }/hiredis/read.h | 0
 .../jni/{redis-4.0.11/deps => }/hiredis/sds.c | 0
 .../jni/{redis-4.0.11/deps => }/hiredis/sds.h | 0
 .../deps => }/hiredis/sdsalloc.h | 0
 .../{redis-4.0.11/deps => }/hiredis/test.c | 0
 .../{redis-4.0.11/deps => }/hiredis/win32.h | 0
 .../{redis-4.0.11/deps => }/lua/Android.mk | 0
 .../jni/{redis-4.0.11/deps => }/lua/COPYRIGHT | 0
 .../jni/{redis-4.0.11/deps => }/lua/HISTORY | 0
 .../jni/{redis-4.0.11/deps => }/lua/INSTALL | 0
 .../jni/{redis-4.0.11/deps => }/lua/Makefile | 0
 .../jni/{redis-4.0.11/deps => }/lua/README | 0
 redis-android/src/main/jni/lua/_Android.mk | 33 +
 .../deps => }/lua/doc/contents.html | 0
 .../{redis-4.0.11/deps => }/lua/doc/cover.png | Bin
 .../{redis-4.0.11/deps => }/lua/doc/logo.gif | Bin
 .../jni/{redis-4.0.11/deps => }/lua/doc/lua.1 | 0
 .../{redis-4.0.11/deps => }/lua/doc/lua.css | 0
 .../{redis-4.0.11/deps => }/lua/doc/lua.html | 0
 .../{redis-4.0.11/deps => }/lua/doc/luac.1 | 0
 .../{redis-4.0.11/deps => }/lua/doc/luac.html | 0
 .../deps => }/lua/doc/manual.css | 0
 .../deps => }/lua/doc/manual.html | 0
 .../deps => }/lua/doc/readme.html | 0
 .../{redis-4.0.11/deps => }/lua/etc/Makefile | 0
 .../{redis-4.0.11/deps => }/lua/etc/README | 0
 .../jni/{redis-4.0.11/deps => }/lua/etc/all.c | 0
 .../{redis-4.0.11/deps => }/lua/etc/lua.hpp | 0
 .../{redis-4.0.11/deps => }/lua/etc/lua.ico | Bin
 .../{redis-4.0.11/deps => }/lua/etc/lua.pc | 0
 .../{redis-4.0.11/deps => }/lua/etc/luavs.bat | 0
 .../jni/{redis-4.0.11/deps => }/lua/etc/min.c | 0
 .../deps => }/lua/etc/noparser.c | 0
 .../deps => }/lua/etc/strict.lua | 0
 .../{redis-4.0.11/deps => }/lua/src/Makefile | 0
 .../{redis-4.0.11/deps => }/lua/src/fpconv.c | 0
 .../{redis-4.0.11/deps => }/lua/src/fpconv.h | 0
 .../{redis-4.0.11/deps => }/lua/src/lapi.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lapi.h | 0
 .../{redis-4.0.11/deps => }/lua/src/lauxlib.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lauxlib.h | 0
 .../deps => }/lua/src/lbaselib.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lcode.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lcode.h | 0
 .../{redis-4.0.11/deps => }/lua/src/ldblib.c | 0
 .../{redis-4.0.11/deps => }/lua/src/ldebug.c | 0
 .../{redis-4.0.11/deps => }/lua/src/ldebug.h | 0
 .../jni/{redis-4.0.11/deps => }/lua/src/ldo.c | 0
 .../jni/{redis-4.0.11/deps => }/lua/src/ldo.h | 0
 .../{redis-4.0.11/deps => }/lua/src/ldump.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lfunc.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lfunc.h | 0
 .../jni/{redis-4.0.11/deps => }/lua/src/lgc.c | 0
 .../jni/{redis-4.0.11/deps => }/lua/src/lgc.h | 0
 .../{redis-4.0.11/deps => }/lua/src/linit.c | 0
 .../{redis-4.0.11/deps => }/lua/src/liolib.c | 0
 .../{redis-4.0.11/deps => }/lua/src/llex.c | 0
 .../{redis-4.0.11/deps => }/lua/src/llex.h | 0
 .../{redis-4.0.11/deps => }/lua/src/llimits.h | 0
 .../deps => }/lua/src/lmathlib.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lmem.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lmem.h | 0
 .../{redis-4.0.11/deps => }/lua/src/loadlib.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lobject.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lobject.h | 0
 .../deps => }/lua/src/lopcodes.c | 0
 .../deps => }/lua/src/lopcodes.h | 0
 .../{redis-4.0.11/deps => }/lua/src/loslib.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lparser.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lparser.h | 0
 .../{redis-4.0.11/deps => }/lua/src/lstate.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lstate.h | 0
 .../{redis-4.0.11/deps => }/lua/src/lstring.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lstring.h | 0
 .../{redis-4.0.11/deps => }/lua/src/lstrlib.c | 0
 .../{redis-4.0.11/deps => }/lua/src/ltable.c | 0
 .../{redis-4.0.11/deps => }/lua/src/ltable.h | 0
 .../{redis-4.0.11/deps => }/lua/src/ltablib.c | 0
 .../jni/{redis-4.0.11/deps => }/lua/src/ltm.c | 0
 .../jni/{redis-4.0.11/deps => }/lua/src/ltm.h | 0
 .../jni/{redis-4.0.11/deps => }/lua/src/lua.c | 0
 .../jni/{redis-4.0.11/deps => }/lua/src/lua.h | 0
 .../{redis-4.0.11/deps => }/lua/src/lua_bit.c | 0
 .../deps => }/lua/src/lua_cjson.c | 2 +-
 .../deps => }/lua/src/lua_cmsgpack.c | 0
 .../deps => }/lua/src/lua_struct.c | 0
 .../{redis-4.0.11/deps => }/lua/src/luac.c | 0
 .../{redis-4.0.11/deps => }/lua/src/luaconf.h | 0
 .../{redis-4.0.11/deps => }/lua/src/lualib.h | 0
 .../{redis-4.0.11/deps => }/lua/src/lundump.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lundump.h | 0
 .../jni/{redis-4.0.11/deps => }/lua/src/lvm.c | 0
 .../jni/{redis-4.0.11/deps => }/lua/src/lvm.h | 0
 .../{redis-4.0.11/deps => }/lua/src/lzio.c | 0
 .../{redis-4.0.11/deps => }/lua/src/lzio.h | 0
 .../{redis-4.0.11/deps => }/lua/src/print.c | 0
 .../{redis-4.0.11/deps => }/lua/src/strbuf.c | 0
 .../{redis-4.0.11/deps => }/lua/src/strbuf.h | 0
 .../{redis-4.0.11/deps => }/lua/test/README | 0
 .../deps => }/lua/test/bisect.lua | 0
 .../{redis-4.0.11/deps => }/lua/test/cf.lua | 0
 .../{redis-4.0.11/deps => }/lua/test/echo.lua | 0
 .../{redis-4.0.11/deps => }/lua/test/env.lua | 0
 .../deps => }/lua/test/factorial.lua | 0
 .../{redis-4.0.11/deps => }/lua/test/fib.lua | 0
 .../deps => }/lua/test/fibfor.lua | 0
 .../deps => }/lua/test/globals.lua | 0
 .../deps => }/lua/test/hello.lua | 0
 .../{redis-4.0.11/deps => }/lua/test/life.lua | 0
 .../{redis-4.0.11/deps => }/lua/test/luac.lua | 0
 .../deps => }/lua/test/printf.lua | 0
 .../deps => }/lua/test/readonly.lua | 0
 .../deps => }/lua/test/sieve.lua | 0
 .../{redis-4.0.11/deps => }/lua/test/sort.lua | 0
 .../deps => }/lua/test/table.lua | 0
 .../deps => }/lua/test/trace-calls.lua | 0
 .../deps => }/lua/test/trace-globals.lua | 0
 .../{redis-4.0.11/deps => }/lua/test/xd.lua | 0
 .../src/main/jni/redis-4.0.11/00-RELEASENOTES | 4752 ------------
 .../deps/jemalloc/doc/stylesheet.xsl | 7 -
 .../include/jemalloc/internal/atomic.h | 651 --
 .../include/jemalloc/internal/bitmap.h | 230 -
 .../jemalloc/include/jemalloc/internal/ckh.h | 88 -
 .../jemalloc/include/jemalloc/internal/ctl.h | 111 -
 .../internal/jemalloc_internal_macros.h | 57 -
 .../include/jemalloc/internal/mutex.h | 111 -
 .../include/jemalloc/internal/pages.h | 26 -
 .../jemalloc/internal/private_namespace.sh | 5 -
 .../jemalloc/include/jemalloc/internal/prng.h | 60 -
 .../include/jemalloc/internal/rtree.h | 294 -
 .../include/jemalloc/internal/stats.h | 183 -
 .../jemalloc/include/jemalloc/internal/tsd.h | 665 --
 .../jemalloc/include/jemalloc/internal/util.h | 314 -
 .../include/jemalloc/jemalloc_typedefs.h.in | 57 -
 .../jemalloc/include/msvc_compat/strings.h | 29 -
 .../include/msvc_compat/windows_extra.h | 26 -
 .../redis-4.0.11/deps/jemalloc/src/arena.c | 3318 --------
 .../jni/redis-4.0.11/deps/jemalloc/src/base.c | 174 -
 .../redis-4.0.11/deps/jemalloc/src/bitmap.c | 78 -
 .../jni/redis-4.0.11/deps/jemalloc/src/ctl.c | 2123 ------
 .../redis-4.0.11/deps/jemalloc/src/extent.c | 53 -
 .../jni/redis-4.0.11/deps/jemalloc/src/hash.c | 2 -
 .../redis-4.0.11/deps/jemalloc/src/jemalloc.c | 2625 -------
 .../redis-4.0.11/deps/jemalloc/src/mutex.c | 153 -
 .../redis-4.0.11/deps/jemalloc/src/pages.c | 173 -
 .../redis-4.0.11/deps/jemalloc/src/rtree.c | 127 -
 .../redis-4.0.11/deps/jemalloc/src/stats.c | 640 --
 .../redis-4.0.11/deps/jemalloc/src/tcache.c | 537 --
 .../jni/redis-4.0.11/deps/jemalloc/src/tsd.c | 193 -
 .../jni/redis-4.0.11/deps/jemalloc/src/zone.c | 274 -
 .../deps/jemalloc/test/include/test/test.h | 329 -
 .../deps/jemalloc/test/include/test/timer.h | 26 -
 .../deps/jemalloc/test/integration/mallocx.c | 182 -
 .../test/integration/thread_tcache_enabled.c | 113 -
 .../deps/jemalloc/test/src/btalloc.c | 8 -
 .../deps/jemalloc/test/src/test.c | 107 -
 .../deps/jemalloc/test/src/timer.c | 85 -
 .../deps/jemalloc/test/test.sh.in | 53 -
 .../deps/jemalloc/test/unit/atomic.c | 122 -
 .../deps/jemalloc/test/unit/bitmap.c | 159 -
 .../deps/jemalloc/test/unit/junk.c | 254 -
 .../deps/jemalloc/test/unit/junk_alloc.c | 3 -
 .../deps/jemalloc/test/unit/junk_free.c | 3 -
 .../deps/jemalloc/test/unit/mallctl.c | 633 --
 .../deps/jemalloc/test/unit/rtree.c | 151 -
 .../deps/jemalloc/test/unit/size_classes.c | 89 -
 .../deps/jemalloc/test/unit/stats.c | 447 --
 .../deps/jemalloc/test/unit/tsd.c | 107 -
 .../redis-4.0.11/deps/linenoise/.gitignore | 3 -
 .../jni/redis-4.0.11/deps/linenoise/Makefile | 21 -
 .../deps/linenoise/README.markdown | 224 -
 .../jni/redis-4.0.11/deps/linenoise/example.c | 74 -
 .../redis-4.0.11/deps/linenoise/linenoise.c | 1199 ---
 .../redis-4.0.11/deps/linenoise/linenoise.h | 73 -
 .../src/main/jni/redis-4.0.11/src/blocked.c | 195 -
 .../src/main/jni/redis-4.0.11/src/defrag.c | 579 --
 .../src/main/jni/redis-4.0.11/src/redis-cli.c | 2985 --------
 .../main/jni/redis-4.0.11/src/redis-trib.rb | 1830 -----
 .../src/main/jni/redis-4.0.11/src/version.h | 1 -
 .../tests/unit/introspection-2.tcl | 23 -
 .../redis-4.0.11/tests/unit/memefficiency.tcl | 85 -
 .../{redis-4.0.11 => redis-5.0.0}/.gitignore | 0
 .../src/main/jni/redis-5.0.0/00-RELEASENOTES | 2150 ++++
 .../{redis-4.0.11 => redis-5.0.0}/Android.mk | 49 +-
 .../jni/{redis-4.0.11 => redis-5.0.0}/BUGS | 0
 .../CONTRIBUTING | 6 +-
 .../jni/{redis-4.0.11 => redis-5.0.0}/COPYING | 0
 .../jni/{redis-4.0.11 => redis-5.0.0}/INSTALL | 0
 .../{redis-4.0.11 => redis-5.0.0}/MANIFESTO | 0
 .../{redis-4.0.11 => redis-5.0.0}/Makefile | 0
 .../{redis-4.0.11 => redis-5.0.0}/README.md | 20 +-
 .../deps/Android.mk | 0
 .../deps/Makefile | 2 +-
 .../deps/README.md | 32 +-
 .../redis-5.0.0/deps/jemalloc/.appveyor.yml | 42 +
 .../deps/jemalloc/.autom4te.cfg | 0
 .../deps/jemalloc/.gitattributes | 0
 .../deps/jemalloc/.gitignore | 46 +-
 .../jni/redis-5.0.0/deps/jemalloc/.travis.yml | 156 +
 .../jni/redis-5.0.0/deps/jemalloc/Android.mk | 35 +
 .../deps/jemalloc/COPYING | 4 +-
 .../deps/jemalloc/ChangeLog | 596 +-
 .../deps/jemalloc/INSTALL | 0
 .../jni/redis-5.0.0/deps/jemalloc/INSTALL.md | 423 ++
 .../deps/jemalloc/Makefile.in | 329 +-
 .../deps/jemalloc/README | 14 +-
 .../jni/redis-5.0.0/deps/jemalloc/TUNING.md | 129 +
 .../deps/jemalloc/autogen.sh | 0
 .../deps/jemalloc/bin/jemalloc-config.in | 4 +
 .../deps/jemalloc/bin/jemalloc.sh.in | 0
 .../deps/jemalloc/bin/jeprof.in | 172 +-
 .../deps/jemalloc/build-aux/config.guess | 1462 ++++
 .../deps/jemalloc/build-aux/config.sub | 1825 +++++
 .../deps/jemalloc/build-aux}/install-sh | 0
 .../deps/jemalloc/config.guess | 0
 .../deps/jemalloc/config.stamp.in | 0
 .../deps/jemalloc/config.sub | 0
 .../deps/jemalloc/configure.ac | 1371 +++-
 .../deps/jemalloc/coverage.sh | 0
 .../deps/jemalloc/doc/html.xsl.in | 1 +
 .../deps/jemalloc/doc/jemalloc.xml.in | 1899 +++--
 .../deps/jemalloc/doc/manpages.xsl.in | 0
 .../deps/jemalloc/doc/stylesheet.xsl | 10 +
 .../include/jemalloc/internal/arena.h | 0
 .../include/jemalloc/internal/arena_externs.h | 94 +
 .../jemalloc/internal/arena_inlines_a.h | 57 +
 .../jemalloc/internal/arena_inlines_b.h | 354 +
 .../include/jemalloc/internal/arena_stats.h | 237 +
 .../jemalloc/internal/arena_structs_a.h | 11 +
 .../jemalloc/internal/arena_structs_b.h | 229 +
 .../include/jemalloc/internal/arena_types.h | 43 +
 .../include/jemalloc/internal/assert.h | 56 +
 .../include/jemalloc/internal/atomic.h | 77 +
 .../include/jemalloc/internal/atomic_c11.h | 97 +
 .../jemalloc/internal/atomic_gcc_atomic.h | 127 +
 .../jemalloc/internal/atomic_gcc_sync.h | 191 +
 .../include/jemalloc/internal/atomic_msvc.h | 158 +
 .../internal/background_thread_externs.h | 33 +
 .../internal/background_thread_inlines.h | 57 +
 .../internal/background_thread_structs.h | 53 +
 .../jemalloc/include/jemalloc/internal/base.h | 0
 .../include/jemalloc/internal/base_externs.h | 22 +
 .../include/jemalloc/internal/base_inlines.h | 13 +
 .../include/jemalloc/internal/base_structs.h | 59 +
 .../include/jemalloc/internal/base_types.h | 33 +
 .../jemalloc/include/jemalloc/internal/bin.h | 106 +
 .../include/jemalloc/internal/bin_stats.h | 51 +
 .../include/jemalloc/internal/bit_util.h | 165 +
 .../include/jemalloc/internal/bitmap.h | 369 +
 .../include/jemalloc/internal/cache_bin.h | 114 +
 .../include/jemalloc/internal/chunk.h | 0
 .../include/jemalloc/internal/chunk_dss.h | 0
 .../include/jemalloc/internal/chunk_mmap.h | 0
 .../jemalloc/include/jemalloc/internal/ckh.h | 101 +
 .../jemalloc/include/jemalloc/internal/ctl.h | 131 +
 .../jemalloc/include/jemalloc/internal/div.h | 41 +
 .../include/jemalloc/internal/emitter.h | 435 ++
 .../include/jemalloc/internal/extent.h | 0
 .../include/jemalloc/internal/extent_dss.h | 26 +
 .../jemalloc/internal/extent_externs.h | 73 +
 .../jemalloc/internal/extent_inlines.h | 433 ++
 .../include/jemalloc/internal/extent_mmap.h | 10 +
 .../jemalloc/internal/extent_structs.h | 219 +
 .../include/jemalloc/internal/extent_types.h | 17 +
 .../jemalloc/include/jemalloc/internal/hash.h | 156 +-
 .../include/jemalloc/internal/hooks.h | 19 +
 .../jemalloc/include/jemalloc/internal/huge.h | 0
 .../jemalloc/internal/jemalloc_internal.h.in | 0
 .../internal/jemalloc_internal_decls.h | 37 +-
 .../internal/jemalloc_internal_defs.h.in | 204 +-
 .../internal/jemalloc_internal_externs.h | 53 +
 .../internal/jemalloc_internal_includes.h | 94 +
 .../internal/jemalloc_internal_inlines_a.h | 172 +
 .../internal/jemalloc_internal_inlines_b.h | 86 +
 .../internal/jemalloc_internal_inlines_c.h | 246 +
 .../internal/jemalloc_internal_macros.h | 43 +
 .../internal/jemalloc_internal_types.h | 185 +
 .../jemalloc/internal/jemalloc_preamble.h.in | 194 +
 .../include/jemalloc/internal/large_externs.h | 26 +
 .../jemalloc/include/jemalloc/internal/log.h | 115 +
 .../include/jemalloc/internal/malloc_io.h | 102 +
 .../jemalloc/include/jemalloc/internal/mb.h | 0
 .../include/jemalloc/internal/mutex.h | 248 +
 .../include/jemalloc/internal/mutex_pool.h | 94 +
 .../include/jemalloc/internal/mutex_prof.h | 99 +
 .../include/jemalloc/internal/nstime.h | 34 +
 .../include/jemalloc/internal/pages.h | 88 +
 .../jemalloc/include/jemalloc/internal/ph.h | 391 +
 .../jemalloc/internal/private_namespace.sh | 5 +
 .../jemalloc/internal/private_symbols.sh | 51 +
 .../jemalloc/internal/private_symbols.txt | 0
 .../jemalloc/internal/private_unnamespace.sh | 0
 .../jemalloc/include/jemalloc/internal/prng.h | 185 +
 .../jemalloc/include/jemalloc/internal/prof.h | 0
 .../include/jemalloc/internal/prof_externs.h | 92 +
 .../jemalloc/internal/prof_inlines_a.h | 83 +
 .../jemalloc/internal/prof_inlines_b.h | 206 +
 .../include/jemalloc/internal/prof_structs.h | 201 +
 .../include/jemalloc/internal/prof_types.h | 56 +
 .../jemalloc/internal/public_namespace.sh | 2 +-
 .../jemalloc/internal/public_unnamespace.sh | 0
 .../jemalloc/include/jemalloc/internal/ql.h | 43 +-
 .../jemalloc/include/jemalloc/internal/qr.h | 35 +-
 .../include/jemalloc/internal/quarantine.h | 0
 .../jemalloc/include/jemalloc/internal/rb.h | 325 +-
 .../include/jemalloc/internal/rtree.h | 492 ++
 .../include/jemalloc/internal/rtree_tsd.h | 50 +
 .../include/jemalloc/internal/size_classes.sh | 173 +-
 .../include/jemalloc/internal/smoothstep.h | 232 +
 .../include/jemalloc/internal/smoothstep.sh | 101 +
 .../jemalloc/include/jemalloc/internal/spin.h | 40 +
 .../include/jemalloc/internal/stats.h | 30 +
 .../jemalloc/include/jemalloc/internal/sz.h | 317 +
 .../include/jemalloc/internal/tcache.h | 0
 .../jemalloc/internal/tcache_externs.h | 55 +
 .../jemalloc/internal/tcache_inlines.h | 223 +
 .../jemalloc/internal/tcache_structs.h | 61 +
 .../include/jemalloc/internal/tcache_types.h | 56 +
 .../include/jemalloc/internal/ticker.h | 78 +
 .../jemalloc/include/jemalloc/internal/tsd.h | 326 +
 .../include/jemalloc/internal/tsd_generic.h | 157 +
 .../internal/tsd_malloc_thread_cleanup.h | 60 +
 .../include/jemalloc/internal/tsd_tls.h | 59 +
 .../include/jemalloc/internal/tsd_types.h | 10 +
 .../include/jemalloc/internal/tsd_win.h | 139 +
 .../jemalloc/include/jemalloc/internal/util.h | 67 +
 .../include/jemalloc/internal/valgrind.h | 0
 .../include/jemalloc/internal/witness.h | 346 +
 .../jemalloc/include/jemalloc/jemalloc.sh | 3 +-
 .../include/jemalloc/jemalloc_defs.h.in | 8 +
 .../include/jemalloc/jemalloc_macros.h.in | 106 +-
 .../include/jemalloc/jemalloc_mangle.sh | 2 +-
 .../include/jemalloc/jemalloc_protos.h.in | 0
 .../include/jemalloc/jemalloc_rename.sh | 0
 .../include/jemalloc/jemalloc_typedefs.h.in | 77 +
 .../include/msvc_compat/C99/stdbool.h | 0
 .../jemalloc/include/msvc_compat/C99/stdint.h | 0
 .../jemalloc/include/msvc_compat/strings.h | 58 +
 .../include/msvc_compat/windows_extra.h | 6 +
 .../jni/redis-5.0.0/deps/jemalloc/install-sh | 250 +
 .../deps/jemalloc/jemalloc.pc.in | 4 +-
 .../deps/jemalloc/m4/ax_cxx_compile_stdcxx.m4 | 562 ++
 .../redis-5.0.0/deps/jemalloc/msvc/ReadMe.txt | 23 +
 .../deps/jemalloc/msvc/jemalloc_vc2015.sln | 63 +
 .../deps/jemalloc/msvc/jemalloc_vc2017.sln | 63 +
 .../projects/vc2015/jemalloc/jemalloc.vcxproj | 348 +
 .../vc2015/jemalloc/jemalloc.vcxproj.filters | 101 +
 .../vc2015/test_threads/test_threads.vcxproj | 327 +
 .../test_threads/test_threads.vcxproj.filters | 26 +
 .../projects/vc2017/jemalloc/jemalloc.vcxproj | 347 +
 .../vc2017/jemalloc/jemalloc.vcxproj.filters | 101 +
 .../vc2017/test_threads/test_threads.vcxproj | 326 +
 .../test_threads/test_threads.vcxproj.filters | 26 +
 .../msvc/test_threads/test_threads.cpp | 88 +
 .../jemalloc/msvc/test_threads/test_threads.h | 3 +
 .../msvc/test_threads/test_threads_main.cpp | 11 +
 .../jni/redis-5.0.0/deps/jemalloc/release.c | 0
 .../redis-5.0.0/deps/jemalloc/run_tests.sh | 1 +
 .../deps/jemalloc/scripts/gen_run_tests.py | 112 +
 .../deps/jemalloc/scripts/gen_travis.py | 107 +
 .../jni/redis-5.0.0/deps/jemalloc/src/arena.c | 2043 +++++
 .../deps/jemalloc/src/atomic.c | 0
 .../deps/jemalloc/src/background_thread.c | 909 +++
 .../jni/redis-5.0.0/deps/jemalloc/src/base.c | 514 ++
 .../jni/redis-5.0.0/deps/jemalloc/src/bin.c | 50 +
 .../redis-5.0.0/deps/jemalloc/src/bitmap.c | 121 +
 .../deps/jemalloc/src/chunk.c | 0
 .../deps/jemalloc/src/chunk_dss.c | 0
 .../deps/jemalloc/src/chunk_mmap.c | 0
 .../deps/jemalloc/src/ckh.c | 233 +-
 .../jni/redis-5.0.0/deps/jemalloc/src/ctl.c | 2883 +++++++
 .../jni/redis-5.0.0/deps/jemalloc/src/div.c | 55 +
 .../redis-5.0.0/deps/jemalloc/src/extent.c | 2177 ++++++
 .../deps/jemalloc/src/extent_dss.c | 270 +
 .../deps/jemalloc/src/extent_mmap.c | 42 +
 .../jni/redis-5.0.0/deps/jemalloc/src/hash.c | 3 +
 .../jni/redis-5.0.0/deps/jemalloc/src/hooks.c | 12 +
 .../deps/jemalloc/src/huge.c | 0
 .../redis-5.0.0/deps/jemalloc/src/jemalloc.c | 3337 ++++++++
 .../deps/jemalloc/src/jemalloc_cpp.cpp | 141 +
 .../jni/redis-5.0.0/deps/jemalloc/src/large.c | 371 +
 .../jni/redis-5.0.0/deps/jemalloc/src/log.c | 78 +
 .../redis-5.0.0/deps/jemalloc/src/malloc_io.c | 676 ++
 .../deps/jemalloc/src/mb.c | 0
 .../jni/redis-5.0.0/deps/jemalloc/src/mutex.c | 224 +
 .../deps/jemalloc/src/mutex_pool.c | 18 +
 .../redis-5.0.0/deps/jemalloc/src/nstime.c | 170 +
 .../jni/redis-5.0.0/deps/jemalloc/src/pages.c | 606 ++
 .../jni/redis-5.0.0/deps/jemalloc/src/prng.c | 3 +
 .../deps/jemalloc/src/prof.c | 1610 ++--
 .../deps/jemalloc/src/quarantine.c | 0
 .../jni/redis-5.0.0/deps/jemalloc/src/rtree.c | 320 +
 .../jni/redis-5.0.0/deps/jemalloc/src/stats.c | 1286 ++++
 .../jni/redis-5.0.0/deps/jemalloc/src/sz.c | 107 +
 .../redis-5.0.0/deps/jemalloc/src/tcache.c | 717 ++
 .../redis-5.0.0/deps/jemalloc/src/ticker.c | 3 +
 .../jni/redis-5.0.0/deps/jemalloc/src/tsd.c | 351 +
 .../deps/jemalloc/src/util.c | 0
 .../deps/jemalloc/src/valgrind.c | 0
 .../redis-5.0.0/deps/jemalloc/src/witness.c | 100 +
 .../jni/redis-5.0.0/deps/jemalloc/src/zone.c | 469 ++
 .../jemalloc/test/include/test/SFMT-alti.h | 12 +-
 .../jemalloc/test/include/test/SFMT-params.h | 0
 .../test/include/test/SFMT-params11213.h | 0
 .../test/include/test/SFMT-params1279.h | 0
 .../test/include/test/SFMT-params132049.h | 0
 .../test/include/test/SFMT-params19937.h | 0
 .../test/include/test/SFMT-params216091.h | 0
 .../test/include/test/SFMT-params2281.h | 0
 .../test/include/test/SFMT-params4253.h | 0
 .../test/include/test/SFMT-params44497.h | 0
 .../test/include/test/SFMT-params607.h | 0
 .../test/include/test/SFMT-params86243.h | 0
 .../jemalloc/test/include/test/SFMT-sse2.h | 12 +-
 .../deps/jemalloc/test/include/test/SFMT.h | 49 +-
 .../deps/jemalloc/test/include/test/btalloc.h | 13 +-
 .../jemalloc/test/include/test/extent_hooks.h | 289 +
 .../test/include/test/jemalloc_test.h.in | 114 +-
 .../test/include/test/jemalloc_test_defs.h.in | 0
 .../deps/jemalloc/test/include/test/math.h | 95 +-
 .../deps/jemalloc/test/include/test/mq.h | 44 +-
 .../deps/jemalloc/test/include/test/mtx.h | 2 +
 .../deps/jemalloc/test/include/test/test.h | 338 +
 .../deps/jemalloc/test/include/test/thd.h | 0
 .../deps/jemalloc/test/include/test/timer.h | 11 +
 .../jemalloc/test/integration/MALLOCX_ARENA.c | 25 +-
 .../jemalloc/test/integration/aligned_alloc.c | 42 +-
 .../jemalloc/test/integration/allocated.c | 51 +-
 .../deps/jemalloc/test/integration/chunk.c | 0
 .../deps/jemalloc/test/integration/extent.c | 248 +
 .../deps/jemalloc/test/integration/extent.sh | 5 +
 .../deps/jemalloc/test/integration/mallocx.c | 228 +
 .../deps/jemalloc/test/integration/mallocx.sh | 5 +
 .../deps/jemalloc/test/integration/overflow.c | 25 +-
 .../test/integration/posix_memalign.c | 42 +-
 .../deps/jemalloc/test/integration/rallocx.c | 118 +-
 .../deps/jemalloc/test/integration/sdallocx.c | 24 +-
 .../jemalloc/test/integration/thread_arena.c | 49 +-
 .../test/integration/thread_tcache_enabled.c | 87 +
 .../deps/jemalloc/test/integration/xallocx.c | 311 +-
 .../deps/jemalloc/test/integration/xallocx.sh | 5 +
 .../deps/jemalloc/test/src/SFMT.c | 76 +-
 .../deps/jemalloc/test/src/btalloc.c | 6 +
 .../deps/jemalloc/test/src/btalloc_0.c | 0
 .../deps/jemalloc/test/src/btalloc_1.c | 0
 .../deps/jemalloc/test/src/math.c | 2 +-
 .../deps/jemalloc/test/src/mq.c | 4 +-
 .../deps/jemalloc/test/src/mtx.c | 40 +-
 .../redis-5.0.0/deps/jemalloc/test/src/test.c | 217 +
 .../deps/jemalloc/test/src/thd.c | 21 +-
 .../deps/jemalloc/test/src/timer.c | 56 +
 .../deps/jemalloc/test/stress/microbench.c | 70 +-
 .../redis-5.0.0/deps/jemalloc/test/test.sh.in | 80 +
 .../deps/jemalloc/test/unit/SFMT.c | 28 +-
 .../redis-5.0.0/deps/jemalloc/test/unit/a0.c | 16 +
 .../deps/jemalloc/test/unit/arena_reset.c | 344 +
 .../jemalloc/test/unit/arena_reset_prof.c | 4 +
 .../jemalloc/test/unit/arena_reset_prof.sh | 3 +
 .../deps/jemalloc/test/unit/atomic.c | 229 +
 .../jemalloc/test/unit/background_thread.c | 119 +
 .../test/unit/background_thread_enable.c | 83 +
 .../deps/jemalloc/test/unit/base.c | 234 +
 .../deps/jemalloc/test/unit/bit_util.c | 57 +
 .../deps/jemalloc/test/unit/bitmap.c | 431 ++
 .../deps/jemalloc/test/unit/ckh.c | 33 +-
 .../deps/jemalloc/test/unit/decay.c | 599 ++
 .../deps/jemalloc/test/unit/decay.sh | 3 +
 .../redis-5.0.0/deps/jemalloc/test/unit/div.c | 29 +
 .../deps/jemalloc/test/unit/emitter.c | 413 +
 .../deps/jemalloc/test/unit/extent_quantize.c | 141 +
 .../deps/jemalloc/test/unit/fork.c | 141 +
 .../deps/jemalloc/test/unit/hash.c | 76 +-
 .../deps/jemalloc/test/unit/hooks.c | 38 +
 .../deps/jemalloc/test/unit/junk.c | 141 +
 .../deps/jemalloc/test/unit/junk.sh | 5 +
 .../deps/jemalloc/test/unit/junk_alloc.c | 1 +
 .../deps/jemalloc/test/unit/junk_alloc.sh | 5 +
 .../deps/jemalloc/test/unit/junk_free.c | 1 +
 .../deps/jemalloc/test/unit/junk_free.sh | 5 +
 .../deps/jemalloc/test/unit/lg_chunk.c | 0
 .../redis-5.0.0/deps/jemalloc/test/unit/log.c | 193 +
 .../deps/jemalloc/test/unit/mallctl.c | 805 ++
 .../deps/jemalloc/test/unit/malloc_io.c | 258 +
 .../deps/jemalloc/test/unit/math.c | 52 +-
 .../deps/jemalloc/test/unit/mq.c | 34 +-
 .../deps/jemalloc/test/unit/mtx.c | 29 +-
 .../deps/jemalloc/test/unit/nstime.c | 249 +
 .../deps/jemalloc/test/unit/pack.c | 166 +
 .../deps/jemalloc/test/unit/pack.sh | 4 +
 .../deps/jemalloc/test/unit/pages.c | 29 +
 .../redis-5.0.0/deps/jemalloc/test/unit/ph.c | 318 +
 .../deps/jemalloc/test/unit/prng.c | 237 +
 .../deps/jemalloc/test/unit/prof_accum.c | 48 +-
 .../deps/jemalloc/test/unit/prof_accum.sh | 5 +
 .../deps/jemalloc/test/unit/prof_active.c | 57 +-
 .../deps/jemalloc/test/unit/prof_active.sh | 5 +
 .../deps/jemalloc/test/unit/prof_gdump.c | 41 +-
 .../deps/jemalloc/test/unit/prof_gdump.sh | 6 +
 .../deps/jemalloc/test/unit/prof_idump.c | 27 +-
 .../deps/jemalloc/test/unit/prof_idump.sh | 8 +
 .../deps/jemalloc/test/unit/prof_reset.c | 82 +-
 .../deps/jemalloc/test/unit/prof_reset.sh | 5 +
 .../deps/jemalloc/test/unit/prof_tctx.c | 46 +
 .../deps/jemalloc/test/unit/prof_tctx.sh | 5 +
 .../jemalloc/test/unit/prof_thread_name.c | 63 +-
 .../jemalloc/test/unit/prof_thread_name.sh | 5 +
 .../deps/jemalloc/test/unit/ql.c | 51 +-
 .../deps/jemalloc/test/unit/qr.c | 65 +-
 .../deps/jemalloc/test/unit/quarantine.c | 0
 .../deps/jemalloc/test/unit/rb.c | 157 +-
 .../deps/jemalloc/test/unit/retained.c | 181 +
 .../deps/jemalloc/test/unit/rtree.c | 227 +
 .../deps/jemalloc/test/unit/size_classes.c | 183 +
 .../deps/jemalloc/test/unit/slab.c | 32 +
 .../deps/jemalloc/test/unit/smoothstep.c | 102 +
 .../deps/jemalloc/test/unit/spin.c | 18 +
 .../deps/jemalloc/test/unit/stats.c | 368 +
 .../deps/jemalloc/test/unit/stats_print.c | 999 +++
 .../deps/jemalloc/test/unit/ticker.c | 73 +
 .../redis-5.0.0/deps/jemalloc/test/unit/tsd.c | 139 +
 .../deps/jemalloc/test/unit/util.c | 0
 .../deps/jemalloc/test/unit/witness.c | 280 +
 .../deps/jemalloc/test/unit/zero.c | 51 +-
 .../deps/jemalloc/test/unit/zero.sh | 5 +
 .../deps/update-jemalloc.sh | 0
 .../{redis-4.0.11 => redis-5.0.0}/redis.conf | 315 +-
 .../jni/{redis-4.0.11 => redis-5.0.0}/runtest | 2 +-
 .../runtest-cluster | 0
 .../runtest-sentinel | 0
 .../sentinel.conf | 73 +-
 .../src/.gitignore | 0
 .../src/Makefile | 10 +-
 .../src/adlist.c | 0
 .../src/adlist.h | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/ae.c | 9 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/ae.h | 0
 .../src/ae_epoll.c | 2 +
 .../src/ae_evport.c | 2 +
 .../src/ae_kqueue.c | 1 +
 .../src/ae_select.c | 1 +
 .../{redis-4.0.11 => redis-5.0.0}/src/anet.c | 2 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/anet.h | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/aof.c | 194 +-
 .../src/asciilogo.h | 0
 .../src/atomicvar.h | 2 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/bio.c | 10 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/bio.h | 0
 .../src/bitops.c | 8 +-
 .../src/main/jni/redis-5.0.0/src/blocked.c | 608 ++
 .../src/childinfo.c | 0
 .../src/cluster.c | 236 +-
 .../src/cluster.h | 29 +-
 .../src/config.c | 347 +-
 .../src/config.h | 36 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/crc16.c | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/crc64.c | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/crc64.h | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/db.c | 113 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/debug.c | 147 +-
 .../src/debugmacro.h | 0
 .../src/main/jni/redis-5.0.0/src/defrag.c | 1140 +++
 .../{redis-4.0.11 => redis-5.0.0}/src/dict.c | 14 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/dict.h | 0
 .../src/endianconv.c | 0
 .../src/endianconv.h | 8 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/evict.c | 92 +-
 .../src/expire.c | 2 +-
 .../src/fmacros.h | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/geo.c | 2 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/geo.h | 0
 .../src/geohash.c | 8 +-
 .../src/geohash.h | 0
 .../src/geohash_helper.c | 0
 .../src/geohash_helper.h | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/help.h | 177 +-
 .../src/hyperloglog.c | 267 +-
 .../src/intset.c | 0
 .../src/intset.h | 0
 .../src/latency.c | 7 +-
 .../src/latency.h | 0
 .../src/lazyfree.c | 15 +-
 .../src/main/jni/redis-5.0.0/src/listpack.c | 803 ++
 .../src/main/jni/redis-5.0.0/src/listpack.h | 61 +
 .../jni/redis-5.0.0/src/listpack_malloc.h | 45 +
 .../src/main/jni/redis-5.0.0/src/localtime.c | 123 +
 .../src/main/jni/redis-5.0.0/src/lolwut.c | 56 +
 .../src/main/jni/redis-5.0.0/src/lolwut5.c | 282 +
 .../{redis-4.0.11 => redis-5.0.0}/src/lzf.h | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/lzfP.h | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/lzf_c.c | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/lzf_d.c | 28 +-
 .../src/memtest.c | 0
 .../src/mkreleasehdr.sh | 3 +
 .../src/module.c | 1014 ++-
 .../src/modules/.gitignore | 0
 .../src/modules/Makefile | 17 +-
 .../src/modules/gendoc.rb | 2 +-
 .../src/modules/helloblock.c | 25 +-
 .../redis-5.0.0/src/modules/hellocluster.c | 118 +
 .../jni/redis-5.0.0/src/modules/hellodict.c | 132 +
 .../jni/redis-5.0.0/src/modules/hellotimer.c | 76 +
 .../src/modules/hellotype.c | 0
 .../src/modules/helloworld.c | 0
 .../src/modules/testmodule.c | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/multi.c | 2 +-
 .../src/networking.c | 672 +-
 .../src/notify.c | 6 +-
 .../src/object.c | 311 +-
 .../src/pqsort.c | 0
 .../src/pqsort.h | 0
 .../src/pubsub.c | 16 +-
 .../src/quicklist.c | 4 +-
 .../src/quicklist.h | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/rand.c | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/rand.h | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/rax.c | 345 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/rax.h | 61 +-
 .../src/rax_malloc.h | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/rdb.c | 491 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/rdb.h | 27 +-
 .../src/redis-benchmark.c | 0
 .../src/redis-check-aof.c | 1 +
 .../src/redis-check-rdb.c | 37 +-
 .../src/main/jni/redis-5.0.0/src/redis-cli.c | 6753 +++++++++++++++++
 .../main/jni/redis-5.0.0/src/redis-trib.rb | 129 +
 .../src/redisassert.h | 2 +-
 .../src/redismodule.h | 142 +-
 .../src/release.c | 0
 .../src/replication.c | 116 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/rio.c | 2 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/rio.h | 0
 .../src/scripting.c | 95 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/sds.c | 13 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/sds.h | 1 +
 .../src/sdsalloc.h | 0
 .../src/sentinel.c | 234 +-
 .../src/server.c | 507 +-
 .../src/server.h | 197 +-
 .../src/setproctitle.c | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/sha1.c | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/sha1.h | 0
 .../src/siphash.c | 24 +-
 .../src/slowlog.c | 15 +-
 .../src/slowlog.h | 0
 .../src/solarisfixes.h | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/sort.c | 10 +-
 .../src/sparkline.c | 0
 .../src/sparkline.h | 0
 .../src/main/jni/redis-5.0.0/src/stream.h | 113 +
 .../src/sync_file_range_flags.h | 2 +-
 .../src/syncio.c | 0
 .../src/t_hash.c | 0
 .../src/t_list.c | 218 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/t_set.c | 10 +-
 .../src/main/jni/redis-5.0.0/src/t_stream.c | 2523 ++++++
 .../src/t_string.c | 2 +-
 .../src/t_zset.c | 267 +-
 .../src/testhelp.h | 0
 .../{redis-4.0.11 => redis-5.0.0}/src/util.c | 113 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/util.h | 0
 .../src/valgrind.sup | 0
 .../src/main/jni/redis-5.0.0/src/version.h | 1 +
 .../{redis-4.0.11 => redis-5.0.0}/src/wait3.c | 2 +-
 .../{redis-4.0.11 => redis-5.0.0}/src/wait3.h | 3 +-
 .../src/ziplist.c | 8 +-
 .../src/ziplist.h | 0
 .../src/zipmap.c | 0
 .../src/zipmap.h | 0
 .../src/zmalloc.c | 62 +-
 .../src/zmalloc.h | 14 +-
 .../tests/assets/default.conf | 0
 .../tests/assets/encodings.rdb | Bin
 .../tests/assets/hash-zipmap.rdb | Bin
 .../tests/cluster/cluster.tcl | 0
 .../tests/cluster/run.tcl | 0
 .../tests/cluster/tests/00-base.tcl | 0
 .../tests/cluster/tests/01-faildet.tcl | 0
 .../tests/cluster/tests/02-failover.tcl | 0
 .../tests/cluster/tests/03-failover-loop.tcl | 0
 .../tests/cluster/tests/04-resharding.tcl | 10 +-
 .../cluster/tests/05-slave-selection.tcl | 77 +
 .../cluster/tests/06-slave-stop-cond.tcl | 0
 .../cluster/tests/07-replica-migration.tcl | 0
 .../tests/cluster/tests/08-update-msg.tcl | 0
 .../tests/cluster/tests/09-pubsub.tcl | 0
 .../cluster/tests/10-manual-failover.tcl | 0
 .../cluster/tests/11-manual-takeover.tcl | 0
 .../cluster/tests/12-replica-migration-2.tcl | 14 +-
 .../cluster/tests/13-no-failover-option.tcl | 0
 .../tests/cluster/tests/helpers/onlydots.tcl | 0
 .../cluster/tests/includes/init-tests.tcl | 0
 .../tests/cluster/tmp/.gitignore | 0
 .../redis-5.0.0/tests/helpers/bg_block_op.tcl | 52 +
 .../tests/helpers/bg_complex_data.tcl | 0
 .../tests/helpers/gen_write_load.tcl | 0
 .../tests/instances.tcl | 0
 .../tests/integration/aof-race.tcl | 2 +-
 .../tests/integration/aof.tcl | 0
 .../tests/integration/block-repl.tcl | 58 +
 .../convert-zipmap-hash-on-load.tcl | 0
 .../tests/integration/logging.tcl | 0
 .../tests/integration/psync2-reg.tcl | 6 +-
 .../tests/integration/psync2.tcl | 24 +-
 .../tests/integration/rdb.tcl | 19 +
 .../tests/integration/redis-cli.tcl | 0
 .../tests/integration/replication-2.tcl | 8 +-
 .../tests/integration/replication-3.tcl | 4 +-
 .../tests/integration/replication-4.tcl | 2 +-
 .../tests/integration/replication-psync.tcl | 6 +-
 .../tests/integration/replication.tcl | 70 +-
 .../tests/sentinel/run.tcl | 0
 .../tests/sentinel/tests/00-base.tcl | 4 +-
 .../tests/sentinel/tests/01-conf-update.tcl | 4 +-
 .../tests/sentinel/tests/02-slaves-reconf.tcl | 2 +-
 .../sentinel/tests/03-runtime-reconf.tcl | 0
 .../sentinel/tests/04-slave-selection.tcl | 0
 .../tests/sentinel/tests/05-manual.tcl | 2 +-
 .../tests/sentinel/tests/06-ckquorum.tcl | 0
 .../sentinel/tests/07-down-conditions.tcl | 10 +
 .../sentinel/tests/includes/init-tests.tcl | 0
 .../tests/sentinel/tmp/.gitignore | 0
 .../tests/support/cluster.tcl | 0
 .../tests/support/redis.tcl | 0
 .../tests/support/server.tcl | 6 +
 .../tests/support/test.tcl | 20 +
 .../tests/support/tmpfile.tcl | 0
 .../tests/support/util.tcl | 16 +
 .../tests/test_helper.tcl | 97 +-
 .../tests/unit/aofrw.tcl | 2 +-
 .../tests/unit/auth.tcl | 0
 .../tests/unit/bitfield.tcl | 0
 .../tests/unit/bitops.tcl | 0
 .../tests/unit/dump.tcl | 35 +-
 .../tests/unit/expire.tcl | 2 +-
 .../tests/unit/geo.tcl | 0
 .../tests/unit/hyperloglog.tcl | 0
 .../tests/unit/introspection-2.tcl | 80 +
 .../tests/unit/introspection.tcl | 2 +-
 .../tests/unit/keyspace.tcl | 0
 .../tests/unit/latency-monitor.tcl | 0
 .../tests/unit/lazyfree.tcl | 0
 .../tests/unit/limits.tcl | 0
 .../tests/unit/maxmemory.tcl | 98 +
 .../redis-5.0.0/tests/unit/memefficiency.tcl | 212 +
 .../tests/unit/multi.tcl | 0
 .../tests/unit/obuf-limits.tcl | 0
 .../tests/unit/other.tcl | 2 +
 .../tests/unit/pendingquerybuf.tcl | 35 +
 .../tests/unit/printver.tcl | 0
 .../tests/unit/protocol.tcl | 0
 .../tests/unit/pubsub.tcl | 0
 .../tests/unit/quit.tcl | 0
 .../tests/unit/scan.tcl | 46 +
 .../tests/unit/scripting.tcl | 32 +-
 .../tests/unit/slowlog.tcl | 10 +
 .../tests/unit/sort.tcl | 0
 .../tests/unit/type/hash.tcl | 0
 .../tests/unit/type/incr.tcl | 0
 .../tests/unit/type/list-2.tcl | 0
 .../tests/unit/type/list-3.tcl | 0
 .../tests/unit/type/list-common.tcl | 0
 .../tests/unit/type/list.tcl | 0
 .../tests/unit/type/set.tcl | 0
 .../tests/unit/type/stream-cgroups.tcl | 159 +
 .../redis-5.0.0/tests/unit/type/stream.tcl | 413 +
 .../tests/unit/type/string.tcl | 0
 .../tests/unit/type/zset.tcl | 182 +-
 .../tests/unit/wait.tcl | 0
 .../utils/build-static-symbols.tcl | 0
 .../utils/cluster_fail_time.tcl | 0
 .../utils/corrupt_rdb.c | 0
 .../utils/create-cluster/.gitignore | 0
 .../utils/create-cluster/README | 2 +-
 .../utils/create-cluster/create-cluster | 4 +-
 .../utils/generate-command-help.rb | 3 +-
 .../utils/graphs/commits-over-time/README.md | 0
 .../graphs/commits-over-time/genhtml.tcl | 0
 .../utils/hashtable/README | 2 +-
 .../utils/hashtable/rehashing.c | 0
 .../utils/hyperloglog/.gitignore | 0
 .../utils/hyperloglog/hll-err.rb | 2 +-
 .../utils/hyperloglog/hll-gnuplot-graph.rb | 0
 .../utils/install_server.sh | 0
 .../utils/lru/README | 0
 .../utils/lru/lfu-simulation.c | 0
 .../utils/lru/test-lru.rb | 0
 .../utils/redis-copy.rb | 0
 .../utils/redis-sha1.rb | 0
 .../utils/redis_init_script | 0
 .../utils/redis_init_script.tpl | 0
 .../utils/releasetools/01_create_tarball.sh | 0
 .../utils/releasetools/02_upload_tarball.sh | 0
 .../utils/releasetools/03_test_release.sh | 0
 .../utils/releasetools/04_release_hash.sh | 0
 .../utils/releasetools/changelog.tcl | 11 +-
 .../utils/speed-regression.tcl | 0
 .../utils/whatisdoing.sh | 0
 redis-android/src/main/jni/release.h | 6 +-
 .../src/main/libs/arm64-v8a/libredis.so | Bin 1244032 -> 1344064 bytes
 .../src/main/libs/arm64-v8a/redis-check-aof | Bin 994376 -> 1069832 bytes
 .../src/main/libs/arm64-v8a/redis-check-rdb | Bin 994376 -> 108744 bytes
 .../src/main/libs/arm64-v8a/redis-cli | Bin 137272 -> 200440 bytes
 .../src/main/libs/armeabi-v7a/libredis.so | Bin 922776 -> 1014076 bytes
 .../src/main/libs/armeabi-v7a/redis-check-aof | Bin 738588 -> 813520 bytes
 .../src/main/libs/armeabi-v7a/redis-check-rdb | Bin 738588 -> 83732 bytes
 .../src/main/libs/armeabi-v7a/redis-cli | Bin 108316 -> 158332 bytes
 redis-android/src/main/libs/x86/libredis.so | Bin 1257808 -> 1381804 bytes
 .../src/main/libs/x86/redis-check-aof | Bin 1032612 -> 1136128 bytes
 .../src/main/libs/x86/redis-check-rdb | Bin 1032612 -> 108248 bytes
 redis-android/src/main/libs/x86/redis-cli | Bin 128720 -> 191024 bytes
 .../src/main/libs/x86_64/libredis.so | Bin 1277112 -> 1401720 bytes
 .../src/main/libs/x86_64/redis-check-aof | Bin 1015064 -> 1115096 bytes
 .../src/main/libs/x86_64/redis-check-rdb | Bin 1015064 -> 117120 bytes
 redis-android/src/main/libs/x86_64/redis-cli | Bin 141560 -> 212928 bytes
 .../1.1.0/redis-android-1.1.0.aar | Bin 0 -> 2325240 bytes
 .../1.1.0/redis-android-1.1.0.aar.md5 | 1 +
 .../1.1.0/redis-android-1.1.0.aar.sha1 | 1 +
 .../1.1.0/redis-android-1.1.0.pom | 9 +
 .../1.1.0/redis-android-1.1.0.pom.md5 | 1 +
 .../1.1.0/redis-android-1.1.0.pom.sha1 | 1 +
 .../wf9a5m75/redis-android/maven-metadata.xml | 5 +-
 .../redis-android/maven-metadata.xml.md5 | 2 +-
 .../redis-android/maven-metadata.xml.sha1 | 2 +-
 868 files changed, 73565 insertions(+), 33847 deletions(-)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/.gitignore (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/.travis.yml (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/Android.mk (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/CHANGELOG.md (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/COPYING (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/Makefile (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/README.md (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/adapters/ae.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/adapters/glib.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/adapters/ivykis.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/adapters/libev.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/adapters/libevent.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/adapters/libuv.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/adapters/macosx.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/adapters/qt.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/appveyor.yml (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/async.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/async.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/dict.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/dict.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/examples/example-ae.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/examples/example-glib.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/examples/example-ivykis.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/examples/example-libev.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/examples/example-libevent.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/examples/example-libuv.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/examples/example-macosx.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/examples/example-qt.cpp (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/examples/example-qt.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/examples/example.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/fmacros.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/hiredis.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/hiredis.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/net.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/net.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/read.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/read.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/sds.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/sds.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/sdsalloc.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/test.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/hiredis/win32.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/Android.mk (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/COPYRIGHT (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/HISTORY (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/INSTALL (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/Makefile (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/README (100%)
 create mode 100644 redis-android/src/main/jni/lua/_Android.mk
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/doc/contents.html (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/doc/cover.png (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/doc/logo.gif (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/doc/lua.1 (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/doc/lua.css (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/doc/lua.html (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/doc/luac.1 (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/doc/luac.html (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/doc/manual.css (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/doc/manual.html (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/doc/readme.html (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/etc/Makefile (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/etc/README (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/etc/all.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/etc/lua.hpp (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/etc/lua.ico (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/etc/lua.pc (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/etc/luavs.bat (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/etc/min.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/etc/noparser.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/etc/strict.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/Makefile (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/fpconv.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/fpconv.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lapi.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lapi.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lauxlib.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lauxlib.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lbaselib.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lcode.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lcode.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/ldblib.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/ldebug.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/ldebug.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/ldo.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/ldo.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/ldump.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lfunc.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lfunc.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lgc.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lgc.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/linit.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/liolib.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/llex.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/llex.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/llimits.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lmathlib.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lmem.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lmem.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/loadlib.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lobject.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lobject.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lopcodes.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lopcodes.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/loslib.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lparser.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lparser.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lstate.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lstate.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lstring.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lstring.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lstrlib.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/ltable.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/ltable.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/ltablib.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/ltm.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/ltm.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lua.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lua.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lua_bit.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lua_cjson.c (99%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lua_cmsgpack.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lua_struct.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/luac.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/luaconf.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lualib.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lundump.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lundump.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lvm.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lvm.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lzio.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/lzio.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/print.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/strbuf.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/src/strbuf.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/README (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/bisect.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/cf.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/echo.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/env.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/factorial.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/fib.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/fibfor.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/globals.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/hello.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/life.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/luac.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/printf.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/readonly.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/sieve.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/sort.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/table.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/trace-calls.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/trace-globals.lua (100%)
 rename redis-android/src/main/jni/{redis-4.0.11/deps => }/lua/test/xd.lua (100%)
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/00-RELEASENOTES
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/doc/stylesheet.xsl
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/atomic.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/bitmap.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/ckh.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/ctl.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/mutex.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/pages.h
 delete mode 100755 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/private_namespace.sh
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/prng.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/rtree.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/stats.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/tsd.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/util.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/msvc_compat/strings.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/msvc_compat/windows_extra.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/arena.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/base.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/bitmap.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/ctl.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/extent.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/hash.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/jemalloc.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/mutex.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/pages.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/rtree.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/stats.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/tcache.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/tsd.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/zone.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/test.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/timer.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/mallocx.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/thread_tcache_enabled.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/btalloc.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/test.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/timer.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/test.sh.in
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/atomic.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/bitmap.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/junk.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/junk_alloc.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/junk_free.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/mallctl.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/rtree.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/size_classes.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/stats.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/tsd.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/linenoise/.gitignore
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/linenoise/Makefile
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/linenoise/README.markdown
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/linenoise/example.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/linenoise/linenoise.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/deps/linenoise/linenoise.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/src/blocked.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/src/defrag.c
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/src/redis-cli.c
 delete mode 100755 redis-android/src/main/jni/redis-4.0.11/src/redis-trib.rb
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/src/version.h
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/tests/unit/introspection-2.tcl
 delete mode 100644 redis-android/src/main/jni/redis-4.0.11/tests/unit/memefficiency.tcl
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/.gitignore (100%)
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/00-RELEASENOTES
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/Android.mk (86%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/BUGS (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/CONTRIBUTING (87%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/COPYING (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/INSTALL (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/MANIFESTO (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/Makefile (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/README.md (97%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/Android.mk (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/Makefile (92%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/README.md (64%)
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/.appveyor.yml
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/.autom4te.cfg (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/.gitattributes (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/.gitignore (64%)
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/.travis.yml
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/Android.mk
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/COPYING (92%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/ChangeLog (55%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/INSTALL (100%)
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/INSTALL.md
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/Makefile.in (52%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/README (56%)
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/TUNING.md
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/autogen.sh (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/bin/jemalloc-config.in (93%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/bin/jemalloc.sh.in (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/bin/jeprof.in (97%)
 create mode 100755 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/build-aux/config.guess
 create mode 100755 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/build-aux/config.sub
 rename redis-android/src/main/jni/{redis-4.0.11/deps/jemalloc => redis-5.0.0/deps/jemalloc/build-aux}/install-sh (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/config.guess (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/config.stamp.in (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/config.sub (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/configure.ac (59%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/coverage.sh (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/doc/html.xsl.in (81%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/doc/jemalloc.xml.in (58%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/doc/manpages.xsl.in (100%)
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/doc/stylesheet.xsl
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/arena.h (100%)
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_externs.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_inlines_a.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_stats.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_structs_a.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_structs_b.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_types.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/assert.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_c11.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_msvc.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/background_thread_externs.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/background_thread_inlines.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/background_thread_structs.h
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/base.h (100%)
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_externs.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_inlines.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_structs.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_types.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bin.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bin_stats.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bit_util.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bitmap.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/cache_bin.h
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/chunk.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/chunk_dss.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h (100%)
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ckh.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ctl.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/div.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/emitter.h
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/extent.h (100%)
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_dss.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_externs.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_inlines.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_mmap.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_structs.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_types.h
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/hash.h (67%)
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/hooks.h
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/huge.h (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h (60%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in (53%)
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h
 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
 create mode 100644
redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/large_externs.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/log.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/malloc_io.h rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/mb.h (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/mutex.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/mutex_pool.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/mutex_prof.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/nstime.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/pages.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ph.h create mode 100755 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/private_namespace.sh create mode 100755 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/private_symbols.sh rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/private_symbols.txt (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/private_unnamespace.sh (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prng.h rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/prof.h (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_externs.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_inlines_a.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_structs.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_types.h rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/public_namespace.sh (67%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/ql.h (59%) rename redis-android/src/main/jni/{redis-4.0.11 => 
redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/qr.h (68%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/quarantine.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/rb.h (80%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/rtree.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/rtree_tsd.h rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/size_classes.sh (58%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/smoothstep.h create mode 100755 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/smoothstep.sh create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/spin.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/stats.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/sz.h rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/tcache.h (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_externs.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_inlines.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_structs.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_types.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ticker.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_generic.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_tls.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_types.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_win.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/util.h rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/internal/valgrind.h (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/witness.h rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/jemalloc.sh (87%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/jemalloc_defs.h.in (89%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/jemalloc_macros.h.in (65%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh (98%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/jemalloc/jemalloc_protos.h.in (100%) rename redis-android/src/main/jni/{redis-4.0.11 => 
redis-5.0.0}/deps/jemalloc/include/jemalloc/jemalloc_rename.sh (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/msvc_compat/C99/stdbool.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/include/msvc_compat/C99/stdint.h (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/msvc_compat/strings.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/msvc_compat/windows_extra.h create mode 100755 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/install-sh rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/jemalloc.pc.in (70%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/m4/ax_cxx_compile_stdcxx.m4 create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/ReadMe.txt create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/jemalloc_vc2015.sln create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/jemalloc_vc2017.sln create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj.filters create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/test_threads/test_threads.cpp create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/test_threads/test_threads.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/test_threads/test_threads_main.cpp create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/release.c create mode 100755 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/run_tests.sh create mode 100755 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/scripts/gen_run_tests.py create mode 100755 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/scripts/gen_travis.py create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/arena.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/src/atomic.c (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/background_thread.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/base.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/bin.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/bitmap.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/src/chunk.c (100%) rename 
redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/src/chunk_dss.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/src/chunk_mmap.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/src/ckh.c (76%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/ctl.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/div.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/extent.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/extent_dss.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/extent_mmap.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/hash.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/hooks.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/src/huge.c (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/jemalloc.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/jemalloc_cpp.cpp create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/large.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/log.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/malloc_io.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/src/mb.c (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/mutex.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/mutex_pool.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/nstime.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/pages.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/prng.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/src/prof.c (53%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/src/quarantine.c (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/rtree.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/stats.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/sz.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/tcache.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/ticker.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/tsd.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/src/util.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/src/valgrind.c (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/witness.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/zone.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/SFMT-alti.h (96%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/SFMT-params.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/SFMT-params11213.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/SFMT-params1279.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => 
redis-5.0.0}/deps/jemalloc/test/include/test/SFMT-params132049.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/SFMT-params19937.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/SFMT-params216091.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/SFMT-params2281.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/SFMT-params4253.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/SFMT-params44497.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/SFMT-params607.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/SFMT-params86243.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/SFMT-sse2.h (97%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/SFMT.h (83%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/btalloc.h (77%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/extent_hooks.h rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/jemalloc_test.h.in (77%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/math.h (84%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/mq.h (77%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/mtx.h (89%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/test.h rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/include/test/thd.h (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/timer.h rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/integration/MALLOCX_ARENA.c (80%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/integration/aligned_alloc.c (78%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/integration/allocated.c (73%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/integration/chunk.c (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/extent.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/extent.sh create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/mallocx.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/mallocx.sh rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/integration/overflow.c (68%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/integration/posix_memalign.c (77%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/integration/rallocx.c (63%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/integration/sdallocx.c (74%) rename 
redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/integration/thread_arena.c (53%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/thread_tcache_enabled.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/integration/xallocx.c (50%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/xallocx.sh rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/src/SFMT.c (91%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/btalloc.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/src/btalloc_0.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/src/btalloc_1.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/src/math.c (66%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/src/mq.c (93%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/src/mtx.c (57%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/test.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/src/thd.c (65%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/timer.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/stress/microbench.c (77%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/test.sh.in rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/SFMT.c (99%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/a0.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/arena_reset.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/arena_reset_prof.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/arena_reset_prof.sh create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/atomic.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/background_thread.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/background_thread_enable.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/base.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/bit_util.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/bitmap.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/ckh.c (91%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/decay.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/decay.sh create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/div.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/emitter.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/extent_quantize.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/fork.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/hash.c (74%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/hooks.c create mode 100644 
redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk.sh create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_alloc.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_alloc.sh create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_free.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_free.sh rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/lg_chunk.c (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/log.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/mallctl.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/malloc_io.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/math.c (97%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/mq.c (82%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/mtx.c (74%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/nstime.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/pack.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/pack.sh create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/pages.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/ph.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prng.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/prof_accum.c (68%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_accum.sh rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/prof_active.c (78%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_active.sh rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/prof_gdump.c (68%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_gdump.sh rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/prof_idump.c (60%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_idump.sh rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/prof_reset.c (85%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_reset.sh create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_tctx.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_tctx.sh rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/prof_thread_name.c (68%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_thread_name.sh rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/ql.c (88%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/qr.c (82%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/quarantine.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => 
redis-5.0.0}/deps/jemalloc/test/unit/rb.c (71%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/retained.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/rtree.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/size_classes.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/slab.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/smoothstep.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/spin.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/stats.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/stats_print.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/ticker.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/tsd.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/util.c (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/witness.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/jemalloc/test/unit/zero.c (56%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/zero.sh rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/deps/update-jemalloc.sh (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/redis.conf (82%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/runtest (86%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/runtest-cluster (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/runtest-sentinel (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/sentinel.conf (70%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/.gitignore (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/Makefile (96%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/adlist.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/adlist.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/ae.c (98%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/ae.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/ae_epoll.c (98%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/ae_evport.c (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/ae_kqueue.c (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/ae_select.c (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/anet.c (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/anet.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/aof.c (89%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/asciilogo.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/atomicvar.h (98%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/bio.c (98%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/bio.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/bitops.c (99%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/src/blocked.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/childinfo.c (100%) rename 
redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/cluster.c (95%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/cluster.h (93%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/config.c (86%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/config.h (91%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/crc16.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/crc64.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/crc64.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/db.c (94%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/debug.c (90%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/debugmacro.h (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/src/defrag.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/dict.c (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/dict.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/endianconv.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/endianconv.h (94%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/evict.c (89%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/expire.c (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/fmacros.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/geo.c (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/geo.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/geohash.c (97%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/geohash.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/geohash_helper.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/geohash_helper.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/help.h (80%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/hyperloglog.c (90%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/intset.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/intset.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/latency.c (98%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/latency.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/lazyfree.c (91%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/src/listpack.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/src/listpack.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/src/listpack_malloc.h create mode 100644 redis-android/src/main/jni/redis-5.0.0/src/localtime.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/src/lolwut.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/src/lolwut5.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/lzf.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/lzfP.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/lzf_c.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/lzf_d.c (88%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/memtest.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/mkreleasehdr.sh (77%) rename 
redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/module.c (80%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/modules/.gitignore (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/modules/Makefile (67%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/modules/gendoc.rb (95%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/modules/helloblock.c (86%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/src/modules/hellocluster.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/src/modules/hellodict.c create mode 100644 redis-android/src/main/jni/redis-5.0.0/src/modules/hellotimer.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/modules/hellotype.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/modules/helloworld.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/modules/testmodule.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/multi.c (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/networking.c (78%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/notify.c (97%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/object.c (73%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/pqsort.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/pqsort.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/pubsub.c (95%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/quicklist.c (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/quicklist.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/rand.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/rand.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/rax.c (84%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/rax.h (67%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/rax_malloc.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/rdb.c (79%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/rdb.h (86%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/redis-benchmark.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/redis-check-aof.c (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/redis-check-rdb.c (92%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/src/redis-cli.c create mode 100755 redis-android/src/main/jni/redis-5.0.0/src/redis-trib.rb rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/redisassert.h (96%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/redismodule.h (71%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/release.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/replication.c (96%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/rio.c (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/rio.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/scripting.c (95%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/sds.c (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/sds.h (99%) rename redis-android/src/main/jni/{redis-4.0.11 => 
redis-5.0.0}/src/sdsalloc.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/sentinel.c (95%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/server.c (88%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/server.h (91%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/setproctitle.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/sha1.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/sha1.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/siphash.c (94%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/slowlog.c (93%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/slowlog.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/solarisfixes.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/sort.c (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/sparkline.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/sparkline.h (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/src/stream.h rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/sync_file_range_flags.h (66%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/syncio.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/t_hash.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/t_list.c (75%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/t_set.c (99%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/src/t_stream.c rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/t_string.c (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/t_zset.c (92%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/testhelp.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/util.c (92%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/util.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/valgrind.sup (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/src/version.h rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/wait3.c (98%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/wait3.h (98%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/ziplist.c (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/ziplist.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/zipmap.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/zipmap.h (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/zmalloc.c (85%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/src/zmalloc.h (91%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/assets/default.conf (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/assets/encodings.rdb (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/assets/hash-zipmap.rdb (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/cluster.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/run.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/00-base.tcl (100%) rename 
redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/01-faildet.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/02-failover.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/03-failover-loop.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/04-resharding.tcl (96%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/05-slave-selection.tcl (54%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/06-slave-stop-cond.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/07-replica-migration.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/08-update-msg.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/09-pubsub.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/10-manual-failover.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/11-manual-takeover.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/12-replica-migration-2.tcl (81%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/13-no-failover-option.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/helpers/onlydots.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tests/includes/init-tests.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/cluster/tmp/.gitignore (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/tests/helpers/bg_block_op.tcl rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/helpers/bg_complex_data.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/helpers/gen_write_load.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/instances.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/integration/aof-race.tcl (97%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/integration/aof.tcl (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/tests/integration/block-repl.tcl rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/integration/convert-zipmap-hash-on-load.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/integration/logging.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/integration/psync2-reg.tcl (92%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/integration/psync2.tcl (92%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/integration/rdb.tcl (86%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/integration/redis-cli.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/integration/replication-2.tcl (93%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/integration/replication-3.tcl (97%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/integration/replication-4.tcl (98%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/integration/replication-psync.tcl (96%) rename redis-android/src/main/jni/{redis-4.0.11 => 
redis-5.0.0}/tests/integration/replication.tcl (78%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/sentinel/run.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/sentinel/tests/00-base.tcl (96%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/sentinel/tests/01-conf-update.tcl (89%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/sentinel/tests/02-slaves-reconf.tcl (97%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/sentinel/tests/03-runtime-reconf.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/sentinel/tests/04-slave-selection.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/sentinel/tests/05-manual.tcl (94%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/sentinel/tests/06-ckquorum.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/sentinel/tests/07-down-conditions.tcl (84%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/sentinel/tests/includes/init-tests.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/sentinel/tmp/.gitignore (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/support/cluster.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/support/redis.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/support/server.tcl (98%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/support/test.tcl (85%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/support/tmpfile.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/support/util.tcl (95%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/test_helper.tcl (84%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/aofrw.tcl (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/auth.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/bitfield.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/bitops.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/dump.tcl (92%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/expire.tcl (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/geo.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/hyperloglog.tcl (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/tests/unit/introspection-2.tcl rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/introspection.tcl (96%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/keyspace.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/latency-monitor.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/lazyfree.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/limits.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/maxmemory.tcl (55%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/tests/unit/memefficiency.tcl rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/multi.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => 
redis-5.0.0}/tests/unit/obuf-limits.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/other.tcl (98%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/tests/unit/pendingquerybuf.tcl rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/printver.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/protocol.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/pubsub.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/quit.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/scan.tcl (80%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/scripting.tcl (95%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/slowlog.tcl (90%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/sort.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/type/hash.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/type/incr.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/type/list-2.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/type/list-3.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/type/list-common.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/type/list.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/type/set.tcl (100%) create mode 100644 redis-android/src/main/jni/redis-5.0.0/tests/unit/type/stream-cgroups.tcl create mode 100644 redis-android/src/main/jni/redis-5.0.0/tests/unit/type/stream.tcl rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/type/string.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/type/zset.tcl (86%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/tests/unit/wait.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/build-static-symbols.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/cluster_fail_time.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/corrupt_rdb.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/create-cluster/.gitignore (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/create-cluster/README (93%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/create-cluster/create-cluster (93%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/generate-command-help.rb (99%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/graphs/commits-over-time/README.md (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/graphs/commits-over-time/genhtml.tcl (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/hashtable/README (83%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/hashtable/rehashing.c (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/hyperloglog/.gitignore (100%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/hyperloglog/hll-err.rb (94%) rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/hyperloglog/hll-gnuplot-graph.rb (100%) rename 
redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/install_server.sh (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/lru/README (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/lru/lfu-simulation.c (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/lru/test-lru.rb (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/redis-copy.rb (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/redis-sha1.rb (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/redis_init_script (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/redis_init_script.tpl (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/releasetools/01_create_tarball.sh (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/releasetools/02_upload_tarball.sh (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/releasetools/03_test_release.sh (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/releasetools/04_release_hash.sh (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/releasetools/changelog.tcl (73%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/speed-regression.tcl (100%)
 rename redis-android/src/main/jni/{redis-4.0.11 => redis-5.0.0}/utils/whatisdoing.sh (100%)
 create mode 100644 repository/io/wf9a5m75/redis-android/1.1.0/redis-android-1.1.0.aar
 create mode 100644 repository/io/wf9a5m75/redis-android/1.1.0/redis-android-1.1.0.aar.md5
 create mode 100644 repository/io/wf9a5m75/redis-android/1.1.0/redis-android-1.1.0.aar.sha1
 create mode 100644 repository/io/wf9a5m75/redis-android/1.1.0/redis-android-1.1.0.pom
 create mode 100644 repository/io/wf9a5m75/redis-android/1.1.0/redis-android-1.1.0.pom.md5
 create mode 100644 repository/io/wf9a5m75/redis-android/1.1.0/redis-android-1.1.0.pom.sha1

diff --git a/.idea/misc.xml b/.idea/misc.xml
index 99202cc..e0d5b93 100644
--- a/.idea/misc.xml
+++ b/.idea/misc.xml
@@ -5,22 +5,26 @@
diff --git a/CHANGELOGS.md b/CHANGELOGS.md
index deef04c..e40c91c 100644
--- a/CHANGELOGS.md
+++ b/CHANGELOGS.md
@@ -1,52 +1,41 @@
 Redis on Android 1.0 release notes
 ====================================
-================================================================================
-Redis on Android 1.0.8    Released Aug/6/2018 9:43 PDT
-================================================================================
-
-Upgrade the redis 4.0.11
-
-================================================================================
-Redis on Android 1.0.7    Released Jul/2/2018 10:42 PDT
-================================================================================
-
-Upgrade the redis 4.0.10
-
-================================================================================
-Redis on Android 1.0.6    Released Mar/30/2018 11:15 PDT
-================================================================================
-
-Upgrade the redis 4.0.9
-
-================================================================================
-Redis on Android 1.0.5    Released Feb/21/2018 17:47 PST
-================================================================================
-
-(fix) `redis-check-aof`, `redis-check-rdb` commands do not work
-
-================================================================================
-Redis on Android 1.0.4    Released Feb/13/2018 15:32 PST
-================================================================================
-
-Upgrade the redis 4.0.8
-
-================================================================================
-Redis on Android 1.0.3    Released Jan/03/2018 17:10 PST
-================================================================================
-
-(fix) Change the "wait3()" implementation
-
-================================================================================
-Redis on Android 1.0.2
-================================================================================
-
-(fix) Error message "wait3() returned an error: No child processes. rdb_child_pid = 25182, aof_child_pid = -1"
-
-================================================================================
-Redis on Android 1.0.1    Released Dec/22/2017 15:30 PST
-================================================================================
-
-(fix) Error message "wait3() returned an error: No child processes. rdb_child_pid = 25182, aof_child_pid = -1"
-
-(fix) `zfree(argv);` in network.c causes crash on some Android devices.
+
+## Redis on Android 1.1.0
+  - Released Oct/25/2018 14:48 PDT
+  - Upgrade Redis v5.0.0
+
+## Redis on Android 1.0.8
+  - Released Aug/6/2018 9:43 PDT
+  - Upgrade Redis 4.0.11
+
+## Redis on Android 1.0.7
+  - Released Jul/2/2018 10:42 PDT
+  - Upgrade Redis 4.0.10
+
+## Redis on Android 1.0.6
+  - Released Mar/30/2018 11:15 PDT
+  - Upgrade Redis 4.0.9
+
+## Redis on Android 1.0.5
+  - Released Feb/21/2018 17:47 PST
+  - (fix) `redis-check-aof`, `redis-check-rdb` commands do not work
+
+## Redis on Android 1.0.4
+  - Released Feb/13/2018 15:32 PST
+  - Upgrade Redis 4.0.8
+
+## Redis on Android 1.0.3
+  - Released Jan/03/2018 17:10 PST
+  - (fix) Change the "wait3()" implementation
+
+## Redis on Android 1.0.2
+
+  - (fix) Error message "wait3() returned an error: No child processes. rdb_child_pid = 25182, aof_child_pid = -1"
+
+## Redis on Android 1.0.1
+  - Released Dec/22/2017 15:30 PST
+
+  - (fix) Error message "wait3() returned an error: No child processes. rdb_child_pid = 25182, aof_child_pid = -1"
+
+  - (fix) `zfree(argv);` in network.c causes crash on some Android devices.
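The three wait3() entries above (v1.0.1 through v1.0.3) all trace back to the same problem: Redis reaps its finished RDB/AOF child processes with wait3(), and on some Android (bionic) builds that call misbehaved, producing the quoted "No child processes" error. As a rough sketch of the kind of compatibility shim such a fix involves (illustrative only; `wait3_compat` is a name invented here, not the code this project actually shipped), POSIX allows wait3() to be expressed through wait4():

```c
/* Illustrative sketch only -- not the actual redis-android patch.
 * POSIX defines wait3(status, options, rusage) as equivalent to
 * wait4(-1, status, options, rusage), where pid -1 means "any child".
 * A missing or broken wait3() can therefore be forwarded to wait4(). */
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/wait.h>

static pid_t wait3_compat(int *status, int options, struct rusage *rusage)
{
    return wait4(-1, status, options, rusage);
}
```

Redis issues this call with WNOHANG from its cron loop so that it never blocks; a direct wait4() forward preserves that behavior because the options flags pass through unchanged.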
diff --git a/README.md b/README.md
index e40c774..22badd4 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 
 ## Redis version
 
-Aug/6/2018 Redis 4.0.11
+Oct/25/2018 **Redis 5.0.0**
 
 ## Description
 
@@ -55,7 +55,7 @@ repositories {
 }
 
 dependencies {
-    compile 'io.wf9a5m75:redis-android:1.0.8'
+    compile 'io.wf9a5m75:redis-android:1.1.0'
 }
 ```
@@ -168,3 +168,14 @@ OK
 ```
 
 ![](https://github.com/wf9a5m75/redis-android/blob/master/images/playing.gif?raw=true)
+
+# Version compatibility
+
+| Redis version | redis-android version |
+|---------------|-----------------------|
+| Redis 5.0.0   | v1.1.0                |
+| Redis 4.0.11  | v1.0.8                |
+| Redis 4.0.10  | v1.0.7                |
+| Redis 4.0.9   | v1.0.6                |
+| Redis 4.0.8   | v1.0.4 - v1.0.5       |
+| Redis 4.0.6   | v1.0.0 - v1.0.3       |
diff --git a/build.gradle b/build.gradle
index 47825b2..4e8009d 100644
--- a/build.gradle
+++ b/build.gradle
@@ -7,7 +7,7 @@ buildscript {
         jcenter()
     }
     dependencies {
-        classpath 'com.android.tools.build:gradle:3.1.0'
+        classpath 'com.android.tools.build:gradle:3.2.0'
 
         // NOTE: Do not place your application dependencies here; they belong
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index e0f4f6d..632a661 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,6 +1,6 @@
-#Fri Mar 30 09:52:06 PDT 2018
+#Thu Oct 25 09:31:17 PDT 2018
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-4.4-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-4.6-all.zip
diff --git a/redis-android/build.gradle b/redis-android/build.gradle
index 055df99..6e799f5 100644
--- a/redis-android/build.gradle
+++ b/redis-android/build.gradle
@@ -4,7 +4,7 @@ buildscript {
         google()
     }
     dependencies {
-        classpath 'com.android.tools.build:gradle:3.1.0'
+        classpath 'com.android.tools.build:gradle:3.2.0'
     }
 }
 
@@ -12,7 +12,7 @@ apply plugin: 'com.android.library'
 
 android {
     compileSdkVersion 16
-    buildToolsVersion '27.0.3'
+    buildToolsVersion '28.0.3'
 
     defaultConfig {
         minSdkVersion 16
@@ -36,7 +36,7 @@ uploadArchives {
         repositories {
             mavenDeployer {
                 repository url: "file://${repo.absolutePath}"
-                pom.version = "1.0.8"
+                pom.version = "1.1.0"
                 pom.groupId = "io.wf9a5m75"
                 pom.artifactId = "redis-android"
             }
diff --git a/redis-android/gradle/wrapper/gradle-wrapper.properties b/redis-android/gradle/wrapper/gradle-wrapper.properties
index 0dbd08b..e52b1b5 100644
--- a/redis-android/gradle/wrapper/gradle-wrapper.properties
+++ b/redis-android/gradle/wrapper/gradle-wrapper.properties
@@ -3,4 +3,4 @@ distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-4.4-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-4.10.2-bin.zip
diff --git a/redis-android/src/main/java/io/wf9a5m75/redis/RedisAndroid.java b/redis-android/src/main/java/io/wf9a5m75/redis/RedisAndroid.java
index 3c95a67..5a69048 100644
--- a/redis-android/src/main/java/io/wf9a5m75/redis/RedisAndroid.java
+++ b/redis-android/src/main/java/io/wf9a5m75/redis/RedisAndroid.java
@@ -72,20 +72,20 @@ public static void start(Context context, Bundle options) {
         //-------------------
         // Replication
         //-------------------
-        //configs.putString("slaveof", "no one");
+        //configs.putString("replicaof", "no one");
         //configs.putString("masterauth", "");
-        configs.putString("slave-serve-stale-data", "yes");
-        configs.putString("slave-read-only", "yes");
+        configs.putString("replica-serve-stale-data", "yes");
+        configs.putString("replica-read-only", "yes");
         configs.putString("repl-diskless-sync", "no");
         configs.putString("repl-diskless-sync-delay", "5");
-//        configs.putString("repl-ping-slave-period", "10");
+//        configs.putString("repl-ping-replica-period", "10");
 //        configs.putString("repl-timeout", "60");
         configs.putString("repl-disable-tcp-nodelay", "no");
 //        configs.putString("repl-backlog-size", "1mb");
 //        configs.putString("repl-backlog-ttl", "3600");
-        configs.putString("slave-priority", "100");
-//        configs.putString("slave-announce-ip", "5.5.5.5");
-//        configs.putString("slave-announce-port", "1234");
+        configs.putString("replica-priority", "100");
+//        configs.putString("replica-announce-ip", "5.5.5.5");
+//        configs.putString("replica-announce-port", "1234");
 
         //-------------------
         // Security
@@ -110,7 +110,7 @@ public static void start(Context context, Bundle options) {
         configs.putString("lazyfree-lazy-eviction", "no");
         configs.putString("lazyfree-lazy-expire", "no");
         configs.putString("lazyfree-lazy-server-del", "no");
-        configs.putString("slave-lazy-flush", "no");
+        configs.putString("replica-lazy-flush", "no");
 
         //-------------------
         // Append only mode
@@ -135,7 +135,7 @@ public static void start(Context context, Bundle options) {
 //        configs.putString("cluster-enabled", "no");
 //        configs.putString("cluster-config-file", "nodes-" + hashCode + ".conf");
 //        configs.putString("cluster-node-timeout", "15000");
-//        configs.putString("cluster-slave-validity-factor", "10");
+//        configs.putString("cluster-replica-validity-factor", "10");
 //        configs.putString("cluster-migration-barrier", "1");
 //        configs.putString("cluster-require-full-coverage", "yes");
 
@@ -177,7 +177,7 @@ public static void start(Context context, Bundle options) {
 
         ArrayList clientOutputBufferLimits = new ArrayList();
         clientOutputBufferLimits.add("normal 0 0 0");
-        clientOutputBufferLimits.add("slave 256mb 64mb 60");
+        clientOutputBufferLimits.add("replica 256mb 64mb 60");
         clientOutputBufferLimits.add("pubsub 32mb 8mb 60");
         configs.putStringArrayList("client-output-buffer-limit", clientOutputBufferLimits);
diff --git a/redis-android/src/main/jni/Application.mk b/redis-android/src/main/jni/Application.mk
index 708f933..3dcddac 100644
--- a/redis-android/src/main/jni/Application.mk
+++ b/redis-android/src/main/jni/Application.mk
@@ -2,4 +2,4 @@
 #APP_CFLAGS := -fexceptions -std=c11
 APP_PLATFORM := android-16
 APP_PIE := true
-#APP_ABI := armeabi-v7a
\ No newline at end of file
+#APP_ABI := armeabi-v7a
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/.gitignore b/redis-android/src/main/jni/hiredis/.gitignore
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/.gitignore
rename to redis-android/src/main/jni/hiredis/.gitignore
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/.travis.yml b/redis-android/src/main/jni/hiredis/.travis.yml
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/.travis.yml
rename to redis-android/src/main/jni/hiredis/.travis.yml
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/Android.mk b/redis-android/src/main/jni/hiredis/Android.mk
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/Android.mk
rename to redis-android/src/main/jni/hiredis/Android.mk
diff --git
a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/CHANGELOG.md b/redis-android/src/main/jni/hiredis/CHANGELOG.md similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/CHANGELOG.md rename to redis-android/src/main/jni/hiredis/CHANGELOG.md diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/COPYING b/redis-android/src/main/jni/hiredis/COPYING similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/COPYING rename to redis-android/src/main/jni/hiredis/COPYING diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/Makefile b/redis-android/src/main/jni/hiredis/Makefile similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/Makefile rename to redis-android/src/main/jni/hiredis/Makefile diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/README.md b/redis-android/src/main/jni/hiredis/README.md similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/README.md rename to redis-android/src/main/jni/hiredis/README.md diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/ae.h b/redis-android/src/main/jni/hiredis/adapters/ae.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/ae.h rename to redis-android/src/main/jni/hiredis/adapters/ae.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/glib.h b/redis-android/src/main/jni/hiredis/adapters/glib.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/glib.h rename to redis-android/src/main/jni/hiredis/adapters/glib.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/ivykis.h b/redis-android/src/main/jni/hiredis/adapters/ivykis.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/ivykis.h rename to redis-android/src/main/jni/hiredis/adapters/ivykis.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/libev.h b/redis-android/src/main/jni/hiredis/adapters/libev.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/libev.h rename to redis-android/src/main/jni/hiredis/adapters/libev.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/libevent.h b/redis-android/src/main/jni/hiredis/adapters/libevent.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/libevent.h rename to redis-android/src/main/jni/hiredis/adapters/libevent.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/libuv.h b/redis-android/src/main/jni/hiredis/adapters/libuv.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/libuv.h rename to redis-android/src/main/jni/hiredis/adapters/libuv.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/macosx.h b/redis-android/src/main/jni/hiredis/adapters/macosx.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/macosx.h rename to redis-android/src/main/jni/hiredis/adapters/macosx.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/qt.h b/redis-android/src/main/jni/hiredis/adapters/qt.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/adapters/qt.h rename to redis-android/src/main/jni/hiredis/adapters/qt.h diff --git 
a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/appveyor.yml b/redis-android/src/main/jni/hiredis/appveyor.yml similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/appveyor.yml rename to redis-android/src/main/jni/hiredis/appveyor.yml diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/async.c b/redis-android/src/main/jni/hiredis/async.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/async.c rename to redis-android/src/main/jni/hiredis/async.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/async.h b/redis-android/src/main/jni/hiredis/async.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/async.h rename to redis-android/src/main/jni/hiredis/async.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/dict.c b/redis-android/src/main/jni/hiredis/dict.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/dict.c rename to redis-android/src/main/jni/hiredis/dict.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/dict.h b/redis-android/src/main/jni/hiredis/dict.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/dict.h rename to redis-android/src/main/jni/hiredis/dict.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-ae.c b/redis-android/src/main/jni/hiredis/examples/example-ae.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-ae.c rename to redis-android/src/main/jni/hiredis/examples/example-ae.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-glib.c b/redis-android/src/main/jni/hiredis/examples/example-glib.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-glib.c rename to redis-android/src/main/jni/hiredis/examples/example-glib.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-ivykis.c b/redis-android/src/main/jni/hiredis/examples/example-ivykis.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-ivykis.c rename to redis-android/src/main/jni/hiredis/examples/example-ivykis.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-libev.c b/redis-android/src/main/jni/hiredis/examples/example-libev.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-libev.c rename to redis-android/src/main/jni/hiredis/examples/example-libev.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-libevent.c b/redis-android/src/main/jni/hiredis/examples/example-libevent.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-libevent.c rename to redis-android/src/main/jni/hiredis/examples/example-libevent.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-libuv.c b/redis-android/src/main/jni/hiredis/examples/example-libuv.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-libuv.c rename to redis-android/src/main/jni/hiredis/examples/example-libuv.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-macosx.c b/redis-android/src/main/jni/hiredis/examples/example-macosx.c similarity index 100% rename from 
redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-macosx.c rename to redis-android/src/main/jni/hiredis/examples/example-macosx.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-qt.cpp b/redis-android/src/main/jni/hiredis/examples/example-qt.cpp similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-qt.cpp rename to redis-android/src/main/jni/hiredis/examples/example-qt.cpp diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-qt.h b/redis-android/src/main/jni/hiredis/examples/example-qt.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example-qt.h rename to redis-android/src/main/jni/hiredis/examples/example-qt.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example.c b/redis-android/src/main/jni/hiredis/examples/example.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/examples/example.c rename to redis-android/src/main/jni/hiredis/examples/example.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/fmacros.h b/redis-android/src/main/jni/hiredis/fmacros.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/fmacros.h rename to redis-android/src/main/jni/hiredis/fmacros.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/hiredis.c b/redis-android/src/main/jni/hiredis/hiredis.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/hiredis.c rename to redis-android/src/main/jni/hiredis/hiredis.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/hiredis.h b/redis-android/src/main/jni/hiredis/hiredis.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/hiredis.h rename to redis-android/src/main/jni/hiredis/hiredis.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/net.c b/redis-android/src/main/jni/hiredis/net.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/net.c rename to redis-android/src/main/jni/hiredis/net.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/net.h b/redis-android/src/main/jni/hiredis/net.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/net.h rename to redis-android/src/main/jni/hiredis/net.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/read.c b/redis-android/src/main/jni/hiredis/read.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/read.c rename to redis-android/src/main/jni/hiredis/read.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/read.h b/redis-android/src/main/jni/hiredis/read.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/read.h rename to redis-android/src/main/jni/hiredis/read.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/sds.c b/redis-android/src/main/jni/hiredis/sds.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/sds.c rename to redis-android/src/main/jni/hiredis/sds.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/sds.h b/redis-android/src/main/jni/hiredis/sds.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/sds.h rename to redis-android/src/main/jni/hiredis/sds.h diff --git 
a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/sdsalloc.h b/redis-android/src/main/jni/hiredis/sdsalloc.h
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/sdsalloc.h
rename to redis-android/src/main/jni/hiredis/sdsalloc.h
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/test.c b/redis-android/src/main/jni/hiredis/test.c
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/test.c
rename to redis-android/src/main/jni/hiredis/test.c
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/hiredis/win32.h b/redis-android/src/main/jni/hiredis/win32.h
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/hiredis/win32.h
rename to redis-android/src/main/jni/hiredis/win32.h
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/Android.mk b/redis-android/src/main/jni/lua/Android.mk
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/Android.mk
rename to redis-android/src/main/jni/lua/Android.mk
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/COPYRIGHT b/redis-android/src/main/jni/lua/COPYRIGHT
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/COPYRIGHT
rename to redis-android/src/main/jni/lua/COPYRIGHT
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/HISTORY b/redis-android/src/main/jni/lua/HISTORY
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/HISTORY
rename to redis-android/src/main/jni/lua/HISTORY
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/INSTALL b/redis-android/src/main/jni/lua/INSTALL
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/INSTALL
rename to redis-android/src/main/jni/lua/INSTALL
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/Makefile b/redis-android/src/main/jni/lua/Makefile
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/Makefile
rename to redis-android/src/main/jni/lua/Makefile
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/README b/redis-android/src/main/jni/lua/README
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/README
rename to redis-android/src/main/jni/lua/README
diff --git a/redis-android/src/main/jni/lua/_Android.mk b/redis-android/src/main/jni/lua/_Android.mk
new file mode 100644
index 0000000..12030a2
--- /dev/null
+++ b/redis-android/src/main/jni/lua/_Android.mk
@@ -0,0 +1,33 @@
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := lua
+LOCAL_CFLAGS := -O2 -D__ANDROID__
+
+# To get ARM stack traces if Redis crashes we need a special C flag.
+LOCAL_CFLAGS += -funwind-tables
+
+
+SRC := $(LOCAL_PATH)/src
+LOCAL_C_INCLUDES += $(SRC)
+LOCAL_CFLAGS += -I$(SRC)
+
+LOCAL_SRC_FILES += \
+    $(SRC)/fpconv.c $(SRC)/lapi.c $(SRC)/lauxlib.c $(SRC)/lbaselib.c $(SRC)/lcode.c \
+    $(SRC)/ldblib.c $(SRC)/ldebug.c $(SRC)/ldo.c \
+    $(SRC)/ldump.c $(SRC)/lfunc.c $(SRC)/lgc.c $(SRC)/linit.c $(SRC)/liolib.c \
+    $(SRC)/llex.c $(SRC)/lmathlib.c $(SRC)/lmem.c $(SRC)/loadlib.c \
+    $(SRC)/lobject.c $(SRC)/lopcodes.c $(SRC)/loslib.c $(SRC)/lparser.c \
+    $(SRC)/lstate.c $(SRC)/lstring.c $(SRC)/lstrlib.c $(SRC)/ltable.c $(SRC)/ltablib.c \
+    $(SRC)/ltm.c $(SRC)/lua.c $(SRC)/lua_bit.c $(SRC)/lua_cjson.c \
+    $(SRC)/lua_cmsgpack.c $(SRC)/lua_struct.c $(SRC)/luac.c $(SRC)/lundump.c $(SRC)/lvm.c \
+    $(SRC)/lzio.c $(SRC)/print.c $(SRC)/strbuf.c
+
+
+# Allow dependent modules to use the header files of this library.
+# http://serenegiant.com/blog/?p=2119
+LOCAL_EXPORT_C_INCLUDES := $(SRC)
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/contents.html b/redis-android/src/main/jni/lua/doc/contents.html
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/contents.html
rename to redis-android/src/main/jni/lua/doc/contents.html
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/cover.png b/redis-android/src/main/jni/lua/doc/cover.png
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/cover.png
rename to redis-android/src/main/jni/lua/doc/cover.png
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/logo.gif b/redis-android/src/main/jni/lua/doc/logo.gif
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/logo.gif
rename to redis-android/src/main/jni/lua/doc/logo.gif
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/lua.1 b/redis-android/src/main/jni/lua/doc/lua.1
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/lua.1
rename to redis-android/src/main/jni/lua/doc/lua.1
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/lua.css b/redis-android/src/main/jni/lua/doc/lua.css
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/lua.css
rename to redis-android/src/main/jni/lua/doc/lua.css
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/lua.html b/redis-android/src/main/jni/lua/doc/lua.html
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/lua.html
rename to redis-android/src/main/jni/lua/doc/lua.html
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/luac.1 b/redis-android/src/main/jni/lua/doc/luac.1
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/luac.1
rename to redis-android/src/main/jni/lua/doc/luac.1
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/luac.html b/redis-android/src/main/jni/lua/doc/luac.html
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/luac.html
rename to redis-android/src/main/jni/lua/doc/luac.html
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/manual.css b/redis-android/src/main/jni/lua/doc/manual.css
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/manual.css
rename to redis-android/src/main/jni/lua/doc/manual.css
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/manual.html b/redis-android/src/main/jni/lua/doc/manual.html
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/manual.html rename to redis-android/src/main/jni/lua/doc/manual.html diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/readme.html b/redis-android/src/main/jni/lua/doc/readme.html similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/doc/readme.html rename to redis-android/src/main/jni/lua/doc/readme.html diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/Makefile b/redis-android/src/main/jni/lua/etc/Makefile similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/Makefile rename to redis-android/src/main/jni/lua/etc/Makefile diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/README b/redis-android/src/main/jni/lua/etc/README similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/README rename to redis-android/src/main/jni/lua/etc/README diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/all.c b/redis-android/src/main/jni/lua/etc/all.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/all.c rename to redis-android/src/main/jni/lua/etc/all.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/lua.hpp b/redis-android/src/main/jni/lua/etc/lua.hpp similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/lua.hpp rename to redis-android/src/main/jni/lua/etc/lua.hpp diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/lua.ico b/redis-android/src/main/jni/lua/etc/lua.ico similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/lua.ico rename to redis-android/src/main/jni/lua/etc/lua.ico diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/lua.pc b/redis-android/src/main/jni/lua/etc/lua.pc similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/lua.pc rename to redis-android/src/main/jni/lua/etc/lua.pc diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/luavs.bat b/redis-android/src/main/jni/lua/etc/luavs.bat similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/luavs.bat rename to redis-android/src/main/jni/lua/etc/luavs.bat diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/min.c b/redis-android/src/main/jni/lua/etc/min.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/min.c rename to redis-android/src/main/jni/lua/etc/min.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/noparser.c b/redis-android/src/main/jni/lua/etc/noparser.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/noparser.c rename to redis-android/src/main/jni/lua/etc/noparser.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/strict.lua b/redis-android/src/main/jni/lua/etc/strict.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/etc/strict.lua rename to redis-android/src/main/jni/lua/etc/strict.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/Makefile b/redis-android/src/main/jni/lua/src/Makefile similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/Makefile rename to redis-android/src/main/jni/lua/src/Makefile diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/fpconv.c b/redis-android/src/main/jni/lua/src/fpconv.c similarity index 100% rename from 
redis-android/src/main/jni/redis-4.0.11/deps/lua/src/fpconv.c rename to redis-android/src/main/jni/lua/src/fpconv.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/fpconv.h b/redis-android/src/main/jni/lua/src/fpconv.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/fpconv.h rename to redis-android/src/main/jni/lua/src/fpconv.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lapi.c b/redis-android/src/main/jni/lua/src/lapi.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lapi.c rename to redis-android/src/main/jni/lua/src/lapi.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lapi.h b/redis-android/src/main/jni/lua/src/lapi.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lapi.h rename to redis-android/src/main/jni/lua/src/lapi.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lauxlib.c b/redis-android/src/main/jni/lua/src/lauxlib.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lauxlib.c rename to redis-android/src/main/jni/lua/src/lauxlib.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lauxlib.h b/redis-android/src/main/jni/lua/src/lauxlib.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lauxlib.h rename to redis-android/src/main/jni/lua/src/lauxlib.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lbaselib.c b/redis-android/src/main/jni/lua/src/lbaselib.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lbaselib.c rename to redis-android/src/main/jni/lua/src/lbaselib.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lcode.c b/redis-android/src/main/jni/lua/src/lcode.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lcode.c rename to redis-android/src/main/jni/lua/src/lcode.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lcode.h b/redis-android/src/main/jni/lua/src/lcode.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lcode.h rename to redis-android/src/main/jni/lua/src/lcode.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ldblib.c b/redis-android/src/main/jni/lua/src/ldblib.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ldblib.c rename to redis-android/src/main/jni/lua/src/ldblib.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ldebug.c b/redis-android/src/main/jni/lua/src/ldebug.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ldebug.c rename to redis-android/src/main/jni/lua/src/ldebug.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ldebug.h b/redis-android/src/main/jni/lua/src/ldebug.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ldebug.h rename to redis-android/src/main/jni/lua/src/ldebug.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ldo.c b/redis-android/src/main/jni/lua/src/ldo.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ldo.c rename to redis-android/src/main/jni/lua/src/ldo.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ldo.h b/redis-android/src/main/jni/lua/src/ldo.h similarity index 100% rename from 
redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ldo.h rename to redis-android/src/main/jni/lua/src/ldo.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ldump.c b/redis-android/src/main/jni/lua/src/ldump.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ldump.c rename to redis-android/src/main/jni/lua/src/ldump.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lfunc.c b/redis-android/src/main/jni/lua/src/lfunc.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lfunc.c rename to redis-android/src/main/jni/lua/src/lfunc.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lfunc.h b/redis-android/src/main/jni/lua/src/lfunc.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lfunc.h rename to redis-android/src/main/jni/lua/src/lfunc.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lgc.c b/redis-android/src/main/jni/lua/src/lgc.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lgc.c rename to redis-android/src/main/jni/lua/src/lgc.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lgc.h b/redis-android/src/main/jni/lua/src/lgc.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lgc.h rename to redis-android/src/main/jni/lua/src/lgc.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/linit.c b/redis-android/src/main/jni/lua/src/linit.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/linit.c rename to redis-android/src/main/jni/lua/src/linit.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/liolib.c b/redis-android/src/main/jni/lua/src/liolib.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/liolib.c rename to redis-android/src/main/jni/lua/src/liolib.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/llex.c b/redis-android/src/main/jni/lua/src/llex.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/llex.c rename to redis-android/src/main/jni/lua/src/llex.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/llex.h b/redis-android/src/main/jni/lua/src/llex.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/llex.h rename to redis-android/src/main/jni/lua/src/llex.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/llimits.h b/redis-android/src/main/jni/lua/src/llimits.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/llimits.h rename to redis-android/src/main/jni/lua/src/llimits.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lmathlib.c b/redis-android/src/main/jni/lua/src/lmathlib.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lmathlib.c rename to redis-android/src/main/jni/lua/src/lmathlib.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lmem.c b/redis-android/src/main/jni/lua/src/lmem.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lmem.c rename to redis-android/src/main/jni/lua/src/lmem.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lmem.h b/redis-android/src/main/jni/lua/src/lmem.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lmem.h rename to 
redis-android/src/main/jni/lua/src/lmem.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/loadlib.c b/redis-android/src/main/jni/lua/src/loadlib.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/loadlib.c rename to redis-android/src/main/jni/lua/src/loadlib.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lobject.c b/redis-android/src/main/jni/lua/src/lobject.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lobject.c rename to redis-android/src/main/jni/lua/src/lobject.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lobject.h b/redis-android/src/main/jni/lua/src/lobject.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lobject.h rename to redis-android/src/main/jni/lua/src/lobject.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lopcodes.c b/redis-android/src/main/jni/lua/src/lopcodes.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lopcodes.c rename to redis-android/src/main/jni/lua/src/lopcodes.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lopcodes.h b/redis-android/src/main/jni/lua/src/lopcodes.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lopcodes.h rename to redis-android/src/main/jni/lua/src/lopcodes.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/loslib.c b/redis-android/src/main/jni/lua/src/loslib.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/loslib.c rename to redis-android/src/main/jni/lua/src/loslib.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lparser.c b/redis-android/src/main/jni/lua/src/lparser.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lparser.c rename to redis-android/src/main/jni/lua/src/lparser.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lparser.h b/redis-android/src/main/jni/lua/src/lparser.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lparser.h rename to redis-android/src/main/jni/lua/src/lparser.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lstate.c b/redis-android/src/main/jni/lua/src/lstate.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lstate.c rename to redis-android/src/main/jni/lua/src/lstate.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lstate.h b/redis-android/src/main/jni/lua/src/lstate.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lstate.h rename to redis-android/src/main/jni/lua/src/lstate.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lstring.c b/redis-android/src/main/jni/lua/src/lstring.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lstring.c rename to redis-android/src/main/jni/lua/src/lstring.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lstring.h b/redis-android/src/main/jni/lua/src/lstring.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lstring.h rename to redis-android/src/main/jni/lua/src/lstring.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lstrlib.c b/redis-android/src/main/jni/lua/src/lstrlib.c similarity index 100% rename from 
redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lstrlib.c rename to redis-android/src/main/jni/lua/src/lstrlib.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ltable.c b/redis-android/src/main/jni/lua/src/ltable.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ltable.c rename to redis-android/src/main/jni/lua/src/ltable.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ltable.h b/redis-android/src/main/jni/lua/src/ltable.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ltable.h rename to redis-android/src/main/jni/lua/src/ltable.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ltablib.c b/redis-android/src/main/jni/lua/src/ltablib.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ltablib.c rename to redis-android/src/main/jni/lua/src/ltablib.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ltm.c b/redis-android/src/main/jni/lua/src/ltm.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ltm.c rename to redis-android/src/main/jni/lua/src/ltm.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ltm.h b/redis-android/src/main/jni/lua/src/ltm.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/ltm.h rename to redis-android/src/main/jni/lua/src/ltm.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lua.c b/redis-android/src/main/jni/lua/src/lua.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lua.c rename to redis-android/src/main/jni/lua/src/lua.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lua.h b/redis-android/src/main/jni/lua/src/lua.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lua.h rename to redis-android/src/main/jni/lua/src/lua.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lua_bit.c b/redis-android/src/main/jni/lua/src/lua_bit.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lua_bit.c rename to redis-android/src/main/jni/lua/src/lua_bit.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lua_cjson.c b/redis-android/src/main/jni/lua/src/lua_cjson.c similarity index 99% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lua_cjson.c rename to redis-android/src/main/jni/lua/src/lua_cjson.c index c26c0d7..45ac24b 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lua_cjson.c +++ b/redis-android/src/main/jni/lua/src/lua_cjson.c @@ -46,7 +46,7 @@ #include "strbuf.h" #include "fpconv.h" -#include "../../../src/solarisfixes.h" +#include "../redis-5.0.0/src/solarisfixes.h" #ifndef CJSON_MODNAME #define CJSON_MODNAME "cjson" diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lua_cmsgpack.c b/redis-android/src/main/jni/lua/src/lua_cmsgpack.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lua_cmsgpack.c rename to redis-android/src/main/jni/lua/src/lua_cmsgpack.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lua_struct.c b/redis-android/src/main/jni/lua/src/lua_struct.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lua_struct.c rename to redis-android/src/main/jni/lua/src/lua_struct.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/luac.c 
b/redis-android/src/main/jni/lua/src/luac.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/luac.c rename to redis-android/src/main/jni/lua/src/luac.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/luaconf.h b/redis-android/src/main/jni/lua/src/luaconf.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/luaconf.h rename to redis-android/src/main/jni/lua/src/luaconf.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lualib.h b/redis-android/src/main/jni/lua/src/lualib.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lualib.h rename to redis-android/src/main/jni/lua/src/lualib.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lundump.c b/redis-android/src/main/jni/lua/src/lundump.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lundump.c rename to redis-android/src/main/jni/lua/src/lundump.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lundump.h b/redis-android/src/main/jni/lua/src/lundump.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lundump.h rename to redis-android/src/main/jni/lua/src/lundump.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lvm.c b/redis-android/src/main/jni/lua/src/lvm.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lvm.c rename to redis-android/src/main/jni/lua/src/lvm.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lvm.h b/redis-android/src/main/jni/lua/src/lvm.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lvm.h rename to redis-android/src/main/jni/lua/src/lvm.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lzio.c b/redis-android/src/main/jni/lua/src/lzio.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lzio.c rename to redis-android/src/main/jni/lua/src/lzio.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lzio.h b/redis-android/src/main/jni/lua/src/lzio.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/lzio.h rename to redis-android/src/main/jni/lua/src/lzio.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/print.c b/redis-android/src/main/jni/lua/src/print.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/print.c rename to redis-android/src/main/jni/lua/src/print.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/strbuf.c b/redis-android/src/main/jni/lua/src/strbuf.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/strbuf.c rename to redis-android/src/main/jni/lua/src/strbuf.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/src/strbuf.h b/redis-android/src/main/jni/lua/src/strbuf.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/src/strbuf.h rename to redis-android/src/main/jni/lua/src/strbuf.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/README b/redis-android/src/main/jni/lua/test/README similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/README rename to redis-android/src/main/jni/lua/test/README diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/bisect.lua b/redis-android/src/main/jni/lua/test/bisect.lua similarity index 100% 
rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/bisect.lua rename to redis-android/src/main/jni/lua/test/bisect.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/cf.lua b/redis-android/src/main/jni/lua/test/cf.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/cf.lua rename to redis-android/src/main/jni/lua/test/cf.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/echo.lua b/redis-android/src/main/jni/lua/test/echo.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/echo.lua rename to redis-android/src/main/jni/lua/test/echo.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/env.lua b/redis-android/src/main/jni/lua/test/env.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/env.lua rename to redis-android/src/main/jni/lua/test/env.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/factorial.lua b/redis-android/src/main/jni/lua/test/factorial.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/factorial.lua rename to redis-android/src/main/jni/lua/test/factorial.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/fib.lua b/redis-android/src/main/jni/lua/test/fib.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/fib.lua rename to redis-android/src/main/jni/lua/test/fib.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/fibfor.lua b/redis-android/src/main/jni/lua/test/fibfor.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/fibfor.lua rename to redis-android/src/main/jni/lua/test/fibfor.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/globals.lua b/redis-android/src/main/jni/lua/test/globals.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/globals.lua rename to redis-android/src/main/jni/lua/test/globals.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/hello.lua b/redis-android/src/main/jni/lua/test/hello.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/hello.lua rename to redis-android/src/main/jni/lua/test/hello.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/life.lua b/redis-android/src/main/jni/lua/test/life.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/life.lua rename to redis-android/src/main/jni/lua/test/life.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/luac.lua b/redis-android/src/main/jni/lua/test/luac.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/luac.lua rename to redis-android/src/main/jni/lua/test/luac.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/printf.lua b/redis-android/src/main/jni/lua/test/printf.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/printf.lua rename to redis-android/src/main/jni/lua/test/printf.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/readonly.lua b/redis-android/src/main/jni/lua/test/readonly.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/readonly.lua rename to redis-android/src/main/jni/lua/test/readonly.lua diff --git 
a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/sieve.lua b/redis-android/src/main/jni/lua/test/sieve.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/sieve.lua rename to redis-android/src/main/jni/lua/test/sieve.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/sort.lua b/redis-android/src/main/jni/lua/test/sort.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/sort.lua rename to redis-android/src/main/jni/lua/test/sort.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/table.lua b/redis-android/src/main/jni/lua/test/table.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/table.lua rename to redis-android/src/main/jni/lua/test/table.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/trace-calls.lua b/redis-android/src/main/jni/lua/test/trace-calls.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/trace-calls.lua rename to redis-android/src/main/jni/lua/test/trace-calls.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/trace-globals.lua b/redis-android/src/main/jni/lua/test/trace-globals.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/trace-globals.lua rename to redis-android/src/main/jni/lua/test/trace-globals.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/lua/test/xd.lua b/redis-android/src/main/jni/lua/test/xd.lua similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/lua/test/xd.lua rename to redis-android/src/main/jni/lua/test/xd.lua diff --git a/redis-android/src/main/jni/redis-4.0.11/00-RELEASENOTES b/redis-android/src/main/jni/redis-4.0.11/00-RELEASENOTES deleted file mode 100644 index 1e9aa55..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/00-RELEASENOTES +++ /dev/null @@ -1,4752 +0,0 @@ -Redis 4.0 release notes -======================= - --------------------------------------------------------------------------------- -Upgrade urgency levels: - -LOW: No need to upgrade unless there are new features you want to use. -MODERATE: Program an upgrade of the server, but it's not urgent. -HIGH: There is a critical bug that may affect a subset of users. Upgrade! -CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP. --------------------------------------------------------------------------------- - -================================================================================ -Redis 4.0.11 Released Fri Aug 03 17:09:24 CEST 2018 -================================================================================ - -Upgrade urgency HIGH: not critical but very important bugs fixed. - -Dear users, this is just a bugfix release of Redis 4. All new work -is now focused on Redis 5, however we backported a number of bug fixes here: - -* The disconnection time between the master and slave was reset in an - incorrect place, sometimes a good slave will not be able to failover - because it claims it was disconnected for too much time from the master. -* A replication bug, rare to trigger but non impossible, is in Redis for - years. It was lately discovered at Redis Labs and fixed by Oran Agra. - It may cause disconnections, desynchronizations and other issues. -* RANDOMKEY may go in infinite loop on rare situations. Now fixed. -* EXISTS now works in a more consistent way on slaves. 
-* Sentinel: backport of an option to deny a potential security problem - when the SENTINEL command is used to configure an arbitrary script - to execute. - -Many of these issues are there for a very long time, however upgrading -is a good idea. - -This is the full list of commits: - -antirez in commit 677f7585: - Set repl_down_since to zero on state change. - 1 file changed, 2 insertions(+), 1 deletion(-) - -WuYunlong in commit 8c6223f9: - fix server.repl_down_since resetting, so that slaves could failover automatically as expected. - 1 file changed, 1 insertion(+), 1 deletion(-) - -Oran Agra in commit 9535c215: - fix rare replication stream corruption with disk-based replication - 3 files changed, 19 insertions(+), 9 deletions(-) - -zhaozhao.zz in commit 5f1fcc59: - fix exists command on slave - 1 file changed, 1 insertion(+), 2 deletions(-) - -antirez in commit ab145a9f: - Fix infinite loop in dbRandomKey(). - 1 file changed, 13 insertions(+) - -antirez in commit 2fa43ece: - Sentinel: add an option to deny online script reconfiguration. - 2 files changed, 41 insertions(+) - -================================================================================ -Redis 4.0.10 Released Wed Jun 13 12:49:13 CEST 2018 -================================================================================ - -Upgrade urgency CRITICAL: This release fixes important security issues. - HIGH: This release fixes a SCAN commands family bug. - MODERATE: This release fixes a PSYNC2 edge case with expires. - MODERATE: Sentinel related fixes. - LOW: All the other issues - -Redis 4.0.10 fixes a number of important issues: - -* Important security issues related to the Lua scripting engine. - Please check https://github.com/antirez/redis/issues/5017 - for more information. - -* A bug with SCAN, SSCAN, HSCAN and ZSCAN, that may not return all the elements. - We also add a regression test that can trigger the issue often when present, and - may in theory be able to find unrelated regressions. - -* A PSYNC2 bug is fixed: Redis should not expire keys when saving RDB files - because otherwise it is no longer possible to use such RDB file as a base - for partial resynchronization. It no longer represents the right state. - -* Compatibility of AOF with RDB preamble when the RDB checksum is disabled. - -* Sentinel bug that in some cases prevented Sentinel to detect that the master - was down immediately. A delay was added to the detection. - -* Other minor issues. - -The following is the list of commits composing the release, please check -the commit messages and authors for credits. - -antirez in commit 9fdcc159: - Security: fix redis-cli buffer overflow. - 1 file changed, 16 insertions(+), 11 deletions(-) - -antirez in commit cf760071: - Security: fix Lua struct package offset handling. - 1 file changed, 6 insertions(+), 2 deletions(-) - -antirez in commit a57595ca: - Security: more cmsgpack fixes by @soloestoy. - 1 file changed, 7 insertions(+) - -antirez in commit 8783fb94: - Security: update Lua struct package for security. - 1 file changed, 23 insertions(+), 23 deletions(-) - -antirez in commit 8cb9344b: - Security: fix Lua cmsgpack library stack overflow. - 1 file changed, 3 insertions(+) - -赵磊 in commit 59080f60: - Fix dictScan(): It can't scan all buckets when dict is shrinking. - 1 file changed, 14 insertions(+), 11 deletions(-) - -dejun.xdj in commit ac2a824a: - Fix redis-cli memory leak when sending set preference command. 
-
-The following is the list of commits composing the release, please check
-the commit messages and authors for credits.
-
-antirez in commit 9fdcc159:
- Security: fix redis-cli buffer overflow.
- 1 file changed, 16 insertions(+), 11 deletions(-)
-
-antirez in commit cf760071:
- Security: fix Lua struct package offset handling.
- 1 file changed, 6 insertions(+), 2 deletions(-)
-
-antirez in commit a57595ca:
- Security: more cmsgpack fixes by @soloestoy.
- 1 file changed, 7 insertions(+)
-
-antirez in commit 8783fb94:
- Security: update Lua struct package for security.
- 1 file changed, 23 insertions(+), 23 deletions(-)
-
-antirez in commit 8cb9344b:
- Security: fix Lua cmsgpack library stack overflow.
- 1 file changed, 3 insertions(+)
-
-赵磊 in commit 59080f60:
- Fix dictScan(): It can't scan all buckets when dict is shrinking.
- 1 file changed, 14 insertions(+), 11 deletions(-)
-
-dejun.xdj in commit ac2a824a:
- Fix redis-cli memory leak when sending set preference command.
- 1 file changed, 2 insertions(+)
-
-dejun.xdj in commit c7197ff5:
- Check if the repeat value is positive in while loop of cliSendCommand().
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-dejun.xdj in commit 3f77777f:
- Change the type of repeat argument to long for function cliSendCommand.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-dejun.xdj in commit 7a565d72:
- Fix negtive repeat command value issue.
- 1 file changed, 11 insertions(+), 3 deletions(-)
-
-dejun.xdj in commit 64bf60fb:
- Detect and stop saving history for auth command with repeat option.
- 1 file changed, 17 insertions(+), 10 deletions(-)
-
-dejun.xdj in commit 5bed12aa:
- Change the warning message a little bit to avoid trademark issuses.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-dejun.xdj in commit d71c4961:
- Stop saving auth command in redis-cli history.
- 1 file changed, 4 insertions(+), 2 deletions(-)
-
-dejun.xdj in commit fca99e41:
- Add warning message when using password on command line
- 1 file changed, 1 insertion(+)
-
-antirez in commit 01407a3a:
- Don't expire keys while loading RDB from AOF preamble.
- 3 files changed, 5 insertions(+), 5 deletions(-)
-
-WuYunlong in commit fb5408cf:
- Fix rdb save by allowing dumping of expire keys, so that when we add a new slave, and do a failover, eighter by manual or not, other local slaves will delete the expired keys properly.
- 2 files changed, 3 insertions(+), 7 deletions(-)
-
-antirez in commit 0b8b6df4:
- Backport hiredis issue 525 fix to compile on FreeBSD.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit e98627c5:
- Add INIT INFO to the provided init script.
- 1 file changed, 8 insertions(+)
-
-antirez in commit 17f5de89:
- Fix ae.c when a timer finalizerProc adds an event.
- 2 files changed, 10 insertions(+), 6 deletions(-)
-
-antirez in commit 266e6423:
- Sentinel: fix delay in detecting ODOWN.
- 1 file changed, 9 insertions(+), 5 deletions(-)
-
-zhaozhao.zz in commit eafaf172:
- AOF & RDB: be compatible with rdbchecksum no
- 1 file changed, 9 insertions(+), 7 deletions(-)
-
-huijing.whj in commit 4630da37:
- fix int overflow problem in freeMemoryIfNeeded
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-
-================================================================================
-Redis 4.0.9 Released Mon Mar 26 17:52:32 CEST 2018
-================================================================================
-
-Upgrade urgency CRITICAL: Critical upgrade for users using AOF with the
-                          fsync policy set to "always".
-
-Dear Redis users,
-
-Redis version 4.0.9 adds a few interesting new features and fixes a very
-critical bug regarding the Append Only File. Let's start with the bad news
-(the critical bug), explaining what happens and under what conditions:
-
-Critical AOF bug explained
---------------------------
-
-When AOF is enabled with the fsync policy set to "always", we have a
-(rarely used) setup where Redis fsyncs every new write to disk. In this
-setup, Redis MUST reply to the client with an OK code only after the
-write has already been persisted on disk.
-
-Because of a bug, under particular conditions it sometimes happened
-(verified experimentally: the condition can actually be created) that,
-in the same event loop cycle, the command was both processed and the
-reply sent before the beforeSleep() function had a chance to fsync the
-write to disk.
-
-The Redis 4.0.9 release fixes this problem by introducing the concept
-of write barriers in the Redis event loop (ae.c). The affected
-configuration is sketched below.
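-
-This is the (rarely used) setup the fix is about; the directives are
-standard redis.conf options, only the comments are ours:
-
-    # redis.conf
-    appendonly yes
-    appendfsync always    # fsync before replying OK to every write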
-
-If you are using a different AOF setup, like fsync everysec, you are
-not affected, because such a guarantee is not provided anyway.
-Similarly, if you have fsync set to always but do not semantically
-rely on the fact that the reply is only sent after a successful fsync,
-you may avoid upgrading.
-
-Other bugfixes
---------------
-
-Other things that we fixed in this release include:
-
-* The latency monitor could report wrong latencies under certain
-  conditions.
-* AOF rewriting could fail when a background rewrite was triggered and
-  at the same time the AOF was switched on/off.
-* Redis Cluster crash-recovery safety improved.
-* Other smaller fixes (check the commits).
-
-New features
-------------
-
-* Redis Cluster now has the ability to configure certain slaves so
-  that they'll never attempt a failover (see the sketch after this
-  list).
-* Keyspace notifications API in modules.
-* RM_Call() is now faster by reusing the same client.
-* Tracking of the percentage of keys already logically expired but not
-  yet evicted.
-* Other smaller improvements (check the commits).
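-
-A minimal sketch of the new no-failover setting; the directive name is
-assumed from the redis.conf template introduced along with this
-feature:
-
-    # redis.conf, on a slave that must never be promoted to master:
-    cluster-slave-no-failover yes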
-
-This is the list of commits composing this release:
-
-zhaozhao.zz in commit 5b722bd7:
- fix missed call on freeaddrinfo
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-zhaozhao.zz in commit 2551b0f6:
- anet: avoid double close
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit 8d92885b:
- Cluster: add test for the nofailover flag.
- 2 files changed, 71 insertions(+)
-
-antirez in commit 70597a30:
- Cluster: ability to prevent slaves from failing over their masters.
- 6 files changed, 70 insertions(+), 2 deletions(-)
-
-antirez in commit 16cad10a:
- redis-cli: fix missed unit in array. Change define name.
- 1 file changed, 5 insertions(+), 5 deletions(-)
-
-charsyam in commit 640fa434:
- fix-out-of-index-range-for-redis-cli-findbigkey
- 1 file changed, 6 insertions(+), 4 deletions(-)
-
-antirez in commit 83390f55:
- expireIfNeeded() needed a top comment documenting the behavior.
- 1 file changed, 19 insertions(+)
-
-antirez in commit 888039ca:
- expireIfNeeded() comment: claim -> pretend.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit e09c8c10:
- Actually use ae_flags to add AE_BARRIER if needed.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-charsyam in commit fb7560bc:
- refactoring-make-condition-clear-for-rdb
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-antirez in commit 1e2f0d69:
- ae.c: insetad of not firing, on AE_BARRIER invert the sequence.
- 1 file changed, 38 insertions(+), 22 deletions(-)
-
-antirez in commit b2e4aad9:
- AOF: fix a bug that may prevent proper fsyncing when fsync=always.
- 1 file changed, 18 insertions(+), 6 deletions(-)
-
-antirez in commit 93bad8ae:
- Cluster: improve crash-recovery safety after failover auth vote.
- 1 file changed, 3 insertions(+), 2 deletions(-)
-
-antirez in commit e32752e8:
- ae.c: introduce the concept of read->write barrier.
- 2 files changed, 29 insertions(+), 6 deletions(-)
-
-antirez in commit 262f4039:
- Fix ziplist prevlen encoding description. See #4705.
- 1 file changed, 6 insertions(+), 6 deletions(-)
-
-antirez in commit 83923afa:
- Track number of logically expired keys still in memory.
- 3 files changed, 28 insertions(+), 1 deletion(-)
-
-antirez in commit 256ddbf6:
- Remove non semantical spaces from module.c.
- 1 file changed, 36 insertions(+), 41 deletions(-)
-
-antirez in commit 280c3e39:
- Fix typo in notifyKeyspaceEvent() comment.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Dvir Volk in commit 7c4623b0:
- Add doc comment about notification flags
- 1 file changed, 1 insertion(+)
-
-Dvir Volk in commit f4e7502e:
- Fix indentation and comment style in testmodule
- 1 file changed, 92 insertions(+), 98 deletions(-)
-
-Dvir Volk in commit 3c8456c6:
- Use one static client for all keyspace notification callbacks
- 1 file changed, 11 insertions(+), 7 deletions(-)
-
-Dvir Volk in commit aaaff8bd:
- Remove the NOTIFY_MODULE flag and simplify the module notification flow if there aren't subscribers
- 3 files changed, 5 insertions(+), 9 deletions(-)
-
-Dvir Volk in commit 0be51b8f:
- Document flags for notifications
- 1 file changed, 17 insertions(+), 1 deletion(-)
-
-Dvir Volk in commit 3b95c89c:
- removed some trailing whitespaces
- 1 file changed, 2 deletions(-)
-
-Dvir Volk in commit 84c6f1e3:
- removed hellonotify.c
- 3 files changed, 1 insertion(+), 87 deletions(-)
-
-Dvir Volk in commit 53b85e53:
- fixed test
- 1 file changed, 7 insertions(+), 1 deletion(-)
-
-Dvir Volk in commit b43f66c9:
- finished implementation of notifications. Tests unfinished
- 7 files changed, 339 insertions(+), 3 deletions(-)
-
-antirez in commit eddf5deb:
- More verbose logging when slave sends errors to master.
- 1 file changed, 6 insertions(+), 2 deletions(-)
-
-oranagra in commit c09cc0a9:
- when a slave experiances an error on commands that come from master, print to the log
- 1 file changed, 2 insertions(+)
-
-charsyam in commit 5c374f94:
- getting rid of duplicated code
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-Guy Benoish in commit a64f36e5:
- enlarged buffer given to ld2string
- 3 files changed, 7 insertions(+), 2 deletions(-)
-
-antirez in commit f1705801:
- Make it explicit with a comment why we kill the old AOF rewrite.
- 1 file changed, 3 insertions(+)
-
-Guy Benoish in commit 0c030dea:
- rewriteAppendOnlyFileBackground() failure fix
- 1 file changed, 31 insertions(+), 21 deletions(-)
-
-Oran Agra in commit 58073974:
- fix to latency monitor reporting wrong max latency
- 1 file changed, 1 insertion(+)
-
-================================================================================
-Redis 4.0.8 Released Fri Feb 2 11:17:40 CET 2018
-================================================================================
-
-Upgrade urgency CRITICAL ONLY for Redis Cluster users. Otherwise there is
-no reason to upgrade at all.
-
-Redis 4.0.8 fixes a single critical bug in the radix tree data structure
-used for Redis Cluster keys slot tracking. The problem was actually fixed
-in unstable 10 months ago, but the fix was part of a commit related to
-Streams, so by mistake it was never backported to the 4.0 branch.
-
-The problem can crash Redis Cluster instances during deletions, but it is
-very hard to trigger: the conditions for an issue exist only when the
-removed node lies at the edge of a memory mapped area; otherwise the code
-just reads an out of range word, in a read-only way, inside an allocated
-structure, which is almost always harmless.
-
-The single commit in this release:
-
-f603940f Rax updated to latest antirez/rax commit. (Salvatore Sanfilippo)
-
-Cheers,
-Salvatore
-
-================================================================================
-Redis 4.0.7 Released Wed Jan 24 11:01:40 CET 2018
-================================================================================
-
-Upgrade urgency MODERATE: Several bugs fixed, but none of critical level.
-
-Dear Redis Users,
-
-Redis 4.0.7 addresses a number of problems and adds a few things that
-are very useful to have and were worth backporting into a patch-level
-release. Here is a list of the most important things, but you can find
-the full list of commits below as usual:
-
-* Many 32 bit overflows were addressed in order to allow using Redis
-  with a very significant amount of data, memory size permitting.
-  (zhaozhao.zz, Oran Agra)
-
-* MEMORY USAGE fixed for the list type. (gnuhpc)
-
-* Allow read-only scripts in Redis Cluster. (Salvatore Sanfilippo)
-
-* Fix AOF pipes setup in edge case. (heqin)
-
-* AUTH option for MIGRATE. (AlexStocks, Salvatore Sanfilippo, Fabio
-  Nicotra)
-
-* HyperLogLogs are no longer converted from sparse to dense in order
-  to be merged. (Salvatore Sanfilippo)
-
-* Fix AOF rewrite dead loop under edge cases. (heqin)
-
-* Fix processing of large bulk strings (>= 2GB). (Oran Agra)
-
-* Added RM_UnlinkKey in modules API. (Dvir Volk)
-
-* Fix Redis Cluster crashes when certain commands with a variable
-  number of arguments are called in an improper way. (Salvatore
-  Sanfilippo)
-
-* Fix memory leak in lazyfree engine. (zhaozhao.zz)
-
-* Fix many potentially successful partial synchronizations that ended
-  up doing a full SYNC because of a bug destroying the replication
-  backlog on the slave. So, after a failover, the slave was often not
-  able to PSYNC with masters, and a full SYNC was triggered. The bug
-  only happened after 1 hour of uptime, so it escaped the unit tests.
-  (Oran Agra)
-
-* Improve anti-affinity in master/slave allocation for Redis Cluster
-  when the cluster is created. (Salvatore Sanfilippo)
-
-* Improve output buffer handling for slaves, by not limiting the
-  amount of writes a slave could receive. (Guy Benoish)
-
-The full list of commits follows.
-
-Enjoy,
-Salvatore
-
-jianqingdu in commit 2b99d77a:
- fix not call va_end when syncWrite() failed
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-Yusaku Kaneta in commit 5f9b9e11:
- Fix the firstkey, lastkey, and keystep of moduleCommand
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Mark Nunberg in commit ba2d3e8e:
- redismodule.h: Check ModuleNameBusy before calling it
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit 05c1f18d:
- Fix integration test NOREPLICAS error time dependent false positive.
- 1 file changed, 6 insertions(+), 3 deletions(-)
-
-antirez in commit 4acd6973:
- Fix migrateCommand() access of not initialized byte.
- 1 file changed, 5 insertions(+), 2 deletions(-)
-
-Guy Benoish in commit 548e4fe0:
- Replication buffer fills up on high rate traffic.
- 1 file changed, 7 insertions(+), 2 deletions(-)
-
-antirez in commit efa7063c:
- Cluster: improve anti-affinity algo in redis-trib.rb.
- 1 file changed, 131 insertions(+), 1 deletion(-)
-
-antirez in commit 48568ab6:
- Remove useless comment from serverCron().
- 1 file changed, 2 insertions(+), 3 deletions(-)
-
-heqin in commit 0201dea5:
- fixbug for #4545 dead loop aof rewrite
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit 926beaa3:
- Hopefully more clear comment to explain the change in #4607.
- 1 file changed, 4 insertions(+), 3 deletions(-) - -qinchao in commit 019ad3e2: - fix assert problem in ZIP_DECODE_PREVLENSIZE , see issue: https://github.com/antirez/redis/issues/4587 - 1 file changed, 1 insertion(+), 1 deletion(-) - -Oran Agra in commit 8d9dff84: - PSYNC2 fix - promoted slave should hold on to it's backlog - 1 file changed, 5 insertions(+) - -zhaozhao.zz in commit fba2e169: - aof: format code and comment - 1 file changed, 5 insertions(+), 5 deletions(-) - -antirez in commit 7777be7b: - Put more details in the comment introduced by #4601. - 1 file changed, 8 insertions(+), 3 deletions(-) - -zhaozhao.zz in commit 91c1568b: - lazyfree: fix memory leak for lazyfree-lazy-server-del - 1 file changed, 4 insertions(+), 3 deletions(-) - -antirez in commit f9c2c1ac: - Fix getKeysUsingCommandTable() in the case of nagative arity. - 1 file changed, 7 insertions(+), 5 deletions(-) - -antirez in commit 61135f18: - Document new protocol options in #4568 into redis.conf. - 1 file changed, 14 insertions(+) - -antirez in commit e77fba4d: - proto-max-querybuf-len -> client-query-buffer-limit. - 1 file changed, 4 insertions(+), 4 deletions(-) - -antirez in commit 87fe813b: - New config options about protocol prefixed with "proto". - 4 files changed, 13 insertions(+), 13 deletions(-) - -gnuhpc in commit 2e0d2414: - Fix a typo(maybe instruction?) in crash log - 1 file changed, 1 insertion(+), 1 deletion(-) - -Dvir Volk in commit 9f7e214e: - Added RM_UnlinkKey - a low level analog to UNLINK command - 3 files changed, 56 insertions(+) - -zhaozhao.zz in commit 947077bb: - redis-benchmark: bugfix - handle zero liveclients in right way - 1 file changed, 1 insertion(+), 1 deletion(-) - -Oran Agra in commit ff2e628f: - Add config options for max-bulk-len and max-querybuf-len mainly to support RESTORE of large keys - 4 files changed, 16 insertions(+), 1 deletion(-) - -Oran Agra in commit aefa9caa: - fix processing of large bulks (above 2GB) - 8 files changed, 39 insertions(+), 33 deletions(-) - -heqin in commit 896cf1a9: - fixbug for #4545 dead loop aof rewrite - 1 file changed, 3 insertions(+), 1 deletion(-) - -antirez in commit 5abb12e0: - Hyperloglog: refresh hdr variable correctly. - 1 file changed, 3 insertions(+), 1 deletion(-) - -antirez in commit c39a0f7c: - Hyperloglog: Support for PFMERGE sparse encoding as target. - 1 file changed, 14 insertions(+), 3 deletions(-) - -antirez in commit 8a012df9: - Hyperloglog: refactoring of sparse/dense add function. - 1 file changed, 38 insertions(+), 20 deletions(-) - -antirez in commit 549409ff: - Test: MIGRATE AUTH test added. - 1 file changed, 24 insertions(+) - -antirez in commit 47717222: - Rewrite MIGRATE AUTH option. - 1 file changed, 38 insertions(+), 12 deletions(-) - -heqin in commit d8da89ea: - fixbug for #4538 Error opening /setting AOF rewrite IPC pipes: No such file or directory - 1 file changed, 6 insertions(+), 4 deletions(-) - -antirez in commit 4fcc564a: - safe_write -> aofWrite. Function commented. - 1 file changed, 9 insertions(+), 2 deletions(-) - -zhaozhao.zz in commit 27d9c729: - aof: cast sdslen to ssize_t - 1 file changed, 1 insertion(+), 1 deletion(-) - -zhaozhao.zz in commit de4fb877: - aof: fix the short write - 1 file changed, 22 insertions(+), 1 deletion(-) - -Tomasz Poradowski in commit 1fade3d3: - always enable command history in redis-cli - 1 file changed, 2 insertions(+), 1 deletion(-) - -antirez in commit 9f4d4eef: - Cluster: allow read-only EVAL/EVALSHA in slaves. 
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-nashe in commit 8eeceabd:
- Prevent off-by-one read in stringmatchlen() (fixes #4527)
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-gnuhpc in commit 733af148:
- Fix memory usage list bug
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-zhaozhao.zz in commit c9cb699b:
- dict: fix the int problem for defrag
- 3 files changed, 5 insertions(+), 5 deletions(-)
-
-zhaozhao.zz in commit b37099a1:
- dict: fix the int problem
- 1 file changed, 9 insertions(+), 9 deletions(-)
-
-zhaozhao.zz in commit 8fe586d3:
- set: fix the int problem for qsort
- 1 file changed, 8 insertions(+), 2 deletions(-)
-
-zhaozhao.zz in commit 219e29af:
- set: fix the int problem for SPOP & SRANDMEMBER
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-================================================================================
-Redis 4.0.6 Released Thu Dec 4 17:54:10 CET 2017
-================================================================================
-
-Upgrade urgency CRITICAL: More errors in the fixes for PSYNC2 in Redis 4.0.5
-                          were identified.
-
-This release fixes yet more errors present in the 4.0.5 fixes that could
-affect slaves. Moreover, another critical issue in quicklists, when they
-are used at a massive memory scale, was fixed in this release. Upgrading
-from any 4.0.x release, especially if you are running 4.0.4 or 4.0.5, is
-highly recommended.
-
-Note that while this fix for 4.0.6 was written in a hurry as well, this
-time we took extra precautions in order to avoid writing a broken patch:
-
-1. The code was reviewed by two developers independently.
-2. A regression test about the problem introduced in 4.0.4/5 was added.
-3. Resisting duplicated Lua scripts being loaded into the Lua engine is
-   now the default behavior of the loading function, thus it's simpler
-   to stress its behavior.
-4. The code section was tested with Valgrind.
-
-The following is the list of commits included in this release:
-
-zhaozhao.zz in commit 57786b14:
- quicklist: change the len of quicklist to unsigned long
- 2 files changed, 4 insertions(+), 4 deletions(-)
-
-zhaozhao.zz in commit 2211540d:
- quicklist: fix the return value of quicklistCount
- 2 files changed, 2 insertions(+), 2 deletions(-)
-
-antirez in commit c85c84be:
- Refactoring: improve luaCreateFunction() API.
- 3 files changed, 38 insertions(+), 58 deletions(-)
-
-antirez in commit 85b24770:
- Remove useless variable check from luaCreateFunction().
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit a945e5c0:
- Fix issue #4505, Lua RDB AUX field loading of existing scripts.
- 1 file changed, 9 insertions(+), 3 deletions(-)
-
-antirez in commit 65a2e40a:
- Regression test for #4505 (Lua AUX field loading).
- 1 file changed, 22 insertions(+), 1 deletion(-)
-
-antirez in commit d6c70f22:
- DEBUG change-repl-id implemented.
- 1 file changed, 7 insertions(+)
-
-================================================================================
-Redis 4.0.5 Released Thu Dec 1 16:03:32 CET 2017
-================================================================================
-
-Upgrade urgency CRITICAL: The Redis 4.0.4 fix for PSYNC2 was broken, causing
-                          the slave to crash when receiving an RDB file from
-                          the master that contained a duplicated Lua script.
-
-Please upgrade ASAP if you are on 4.0.4 and use any form of Lua
-scripting, because this problem will easily crash Redis.
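-
-Some context on why a "duplicated Lua script" can reach a slave at
-all: script bodies are cached server-side under their SHA1 and, since
-4.0.4, are also persisted in the RDB file used for PSYNC (see the
-4.0.4 notes below). A typical interaction looks like this (an
-illustrative session; the hash shown is a placeholder, not a real
-SHA1):
-
-    redis-cli SCRIPT LOAD "return redis.call('GET', KEYS[1])"
-    "<40-character SHA1 of the script body>"
-    redis-cli EVALSHA <sha1> 1 mykey
-
-A slave replaying EVALSHA must already know the script body, which is
-exactly the state the 4.0.4/4.0.5/4.0.6 fixes persist and reload.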
-
-================================================================================
-Redis 4.0.4 Released Thu Nov 30 18:42:12 CET 2017
-================================================================================
-
-Upgrade urgency CRITICAL: Several PSYNC2 bugs can corrupt the slave data set
-                          after a restart and a successful PSYNC2 handshake.
-
-This is a quick followup to Redis 4.0.3, since I forgot to add a few
-fixes... that are actually described in the 4.0.3 changelog (but not in
-its list of commits). Basically it's the following commits, implementing
-the ability to persist scripts into RDB files so that a PSYNC can
-succeed; otherwise a corruption could happen when a slave was restarted
-and received EVALSHA commands from the master about scripts it did not
-know:
-
-8449227f PSYNC2: Fix off by one buffer size in luaCreateFunction().
-eeac1d35 PSYNC2: just store script bodies into RDB.
-fb0441a8 PSYNC2: luaCreateFunction() should handle NULL client parameter.
-0429db3c PSYNC2: Save Lua scripts state into RDB file.
-d06fbbdd Regression test: Slave restart with EVALSHA in backlog issue #4483.
-ab3d3aca Prevent corruption of server.executable after DEBUG RESTART.
-b7c7edf9 Be more verbose when DEBUG RESTART fails.
-
-Please upgrade ASAP to 4.0.4, because 4.0.3 had an incomplete set of
-fixes.
-
-Cheers and sorry for the 4.0.3 fiasco ;-)
-Salvatore
-
-================================================================================
-Redis 4.0.3 Released Thu Nov 30 13:14:50 CET 2017
-================================================================================
-
-Upgrade urgency CRITICAL: Several PSYNC2 bugs can corrupt the slave data set
-                          after a restart and a successful PSYNC2 handshake.
-
-Hi all, Redis 4.0.3 contains several bug fixes to different parts of
-Redis 4.0, but the highlight is definitely the "PSYNC after restart"
-capability that the new RDB format, containing replication metadata,
-was able to provide to Redis 4.0: several bugs in this area are
-addressed by this release. Moreover, several LFU fixes improve the
-ability of Redis to correctly estimate the popularity of keys. This
-release also fixes important bugs in Redis modules, including bugs
-related to the replication of module commands, to reloading the same
-module multiple times, and other related things. Finally, there is even
-a security fix related to loading the Cluster state from a corrupted
-file. We advise upgrading ASAP. Check the list of commits for credits:
-several people helped a lot with this release. I'm grateful to each of
-them.
-
-Cheers,
-Salvatore
-
-antirez in commit d766322e:
- LFU: Fix LFUDecrAndReturn() to just decrement.
- 1 file changed, 3 insertions(+), 13 deletions(-)
-
-zhaozhao.zz in commit 6544796a:
- LFU: add hotkeys option to redis-cli
- 1 file changed, 135 insertions(+)
-
-zhaozhao.zz in commit e2355c19:
- LFU: do some changes about LFU to find hotkeys
- 4 files changed, 39 insertions(+), 19 deletions(-)
-
-zhaozhao.zz in commit 22969a13:
- LFU: change lfu* parameters to int
- 2 files changed, 3 insertions(+), 3 deletions(-)
-
-zhaozhao.zz in commit 6b71f714:
- LFU: fix the missing of config get and rewrite
- 1 file changed, 6 insertions(+), 2 deletions(-)
-
-Felix Krause in commit 2090052e:
- Update link to https and use inline link
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Bo Cai in commit a75f2025:
- redis-cli.c typo: Requets -> Requests.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Bo Cai in commit 76aab08f:
- redis-cli.c typo: helpe -> helper.
- 1 file changed, 1 insertion(+), 1 deletion(-) - -Sébastien Fievet in commit b6fe5074: - Fix some typos - 1 file changed, 3 insertions(+), 3 deletions(-) - -antirez in commit eda5cb0a: - t_hash.c: clarify calling two times the same function. - 1 file changed, 2 insertions(+), 2 deletions(-) - -antirez in commit 4a60fbd8: - adlist: fix listJoin() in the case the second list is empty. - 1 file changed, 1 insertion(+), 1 deletion(-) - -Chris Lamb in commit 060eb3b2: - Correct spelling of "faield". - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 3c942b12: - Improve OBJECT HELP descriptions. - 1 file changed, 2 insertions(+), 2 deletions(-) - -antirez in commit 6b6a83c7: - Fix entry command table entry for OBJECT for HELP option. - 1 file changed, 1 insertion(+), 1 deletion(-) - -Itamar Haber in commit 048097ad: - Adds `OBJECT help` - 1 file changed, 18 insertions(+), 3 deletions(-) - -David Carlier in commit 906134fe: - Fix undefined behavior constant defined. - 2 files changed, 10 insertions(+), 2 deletions(-) - -rouzier in commit 03657e88: - Fix file descriptor leak and error handling - 1 file changed, 6 insertions(+), 3 deletions(-) - -Itamar Haber in commit 52fda013: - Prevents `OBJECT freq` with `noeviction` - 1 file changed, 2 insertions(+), 2 deletions(-) - -Itamar Haber in commit 15bc8e97: - Adds -u option to redis-cli. - 1 file changed, 89 insertions(+) - -antirez in commit f30454c1: - Test: regression test for latency expire events logging bug. - 1 file changed, 14 insertions(+) - -zhaozhao.zz in commit 1e7227f4: - expire & latency: fix the missing latency records generated by expire - 1 file changed, 11 insertions(+), 8 deletions(-) - -antirez in commit 9524fce0: - Modules: fix memory leak in RM_IsModuleNameBusy(). - 1 file changed, 3 insertions(+), 7 deletions(-) - -antirez in commit 2a27da1c: - PSYNC2: reorganize comments related to recent fixes. - 2 files changed, 24 insertions(+), 26 deletions(-) - -zhaozhao.zz in commit e0c2a0ec: - PSYNC2: persist cached_master's dbid inside the RDB - 1 file changed, 16 insertions(+), 2 deletions(-) - -zhaozhao.zz in commit 2eca8aed: - PSYNC2: make repl_stream_db never be -1 - 1 file changed, 6 insertions(+), 9 deletions(-) - -zhaozhao.zz in commit 35942383: - PSYNC2: clarify the scenario when repl_stream_db can be -1 - 2 files changed, 21 insertions(+), 9 deletions(-) - -zhaozhao.zz in commit be1b9ee0: - PSYNC2 & RDB: fix the missing rdbSaveInfo for BGSAVE - 1 file changed, 4 insertions(+), 1 deletion(-) - -zhaozhao.zz in commit 9f69e179: - PSYNC2: safe free backlog when reach the time limit - 1 file changed, 12 insertions(+) - -zhaozhao.zz in commit 0205dd01: - Modules: handle the busy module name - 2 files changed, 19 insertions(+), 2 deletions(-) - -zhaozhao.zz in commit 3cce566e: - Modules: handle the conflict of registering commands - 1 file changed, 28 insertions(+), 21 deletions(-) - -Oran Agra in commit d01f163c: - fix string to double conversion, stopped parsing on \0 even if the string has more data. - 2 files changed, 9 insertions(+), 2 deletions(-) - -antirez in commit 9a3e15c6: - Modules: fix for scripting replication of modules commands. - 2 files changed, 9 insertions(+), 7 deletions(-) - -Yossi Gottlieb in commit fa87879b: - Nested MULTI/EXEC may replicate in different cases. 
- 2 files changed, 10 insertions(+)
-
-zhaozhao.zz in commit bc7076b0:
- rehash: handle one db until finished
- 1 file changed, 5 insertions(+), 2 deletions(-)
-
-kmiku7 in commit 7675b00a:
- fix boundary case for _dictNextPower
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Itamar Haber in commit f31d9b12:
- Fixes an off-by-one in argument handling of `MEMORY USAGE`
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit 897d8571:
- SDS: improve sdsRemoveFreeSpace() to avoid useless data copy.
- 1 file changed, 12 insertions(+), 5 deletions(-)
-
-antirez in commit 1ee6af4d:
- Fix saving of zero-length lists.
- 1 file changed, 3 insertions(+), 2 deletions(-)
-
-antirez in commit 1740300f:
- Fix buffer overflows occurring reading redis.conf.
- 1 file changed, 3 insertions(+)
-
-antirez in commit b25c2451:
- Regression test for issue #4391.
- 1 file changed, 4 insertions(+)
-
-antirez in commit 1847b987:
- More robust object -> double conversion.
- 1 file changed, 8 insertions(+), 4 deletions(-)
-
-antirez in commit c94cd1d8:
- Limit statement in RM_BlockClient() to 80 cols.
- 1 file changed, 5 insertions(+), 4 deletions(-)
-
-Dvir Volk in commit 193e4acc:
- Added safety net preventing redis from crashing if a module decide to block in MULTI
- 1 file changed, 8 insertions(+), 5 deletions(-)
-
-Dvir Volk in commit d131921c:
- Renamed GetCtxFlags to GetContextFlags
- 3 files changed, 11 insertions(+), 11 deletions(-)
-
-Dvir Volk in commit 2e71edcc:
- Added support for module context flags with RM_GetCtxFlags
- 3 files changed, 177 insertions(+)
-
-================================================================================
-Redis 4.0.2 Released Thu Sep 21 15:47:53 CEST 2017
-================================================================================
-
-Upgrade urgency HIGH: Several potentially critical bugs fixed.
-
-Hello, this release addresses several significant bugs in Redis 4.0:
-
-1. A number of bugs were fixed in the area of PSYNC2 replication,
-specifically around restarting an instance with an RDB file carrying
-the replication meta-data needed to continue without a full
-resynchronization. The old code allowed several inconsistencies under
-certain conditions, like starting a master with an RDB file generated
-by a slave, and later using that master to connect previous slaves
-having the same replication history. Because of other bugs, sometimes
-replication resulted in a full synchronization even when a partial
-resynchronization was actually possible, and so forth. Several commits
-by different authors fix different bugs here.
-
-2. The AOF flush on SHUTDOWN did not take care to really write the AOF
-buffers (not the kernel ones: those in the Redis process memory) to
-disk before exiting. Calling SHUTDOWN during traffic meant that not
-every operation was persisted on disk.
-
-3. The SLOWLOG could reference values inside string objects stored at
-keys, creating a race condition during FLUSHALL ASYNC while the DB was
-being reclaimed in another thread (illustrated below).
-
-There are other smaller bugs addressed in this release; see the full
-commit history below for more information.
-
-A big thank you to all the contributors of this release. Without the
-help I received, Redis 4.0 would have taken a much longer time to
-mature. It's a real pleasure to work together with people around the
-world, while making Redis better.
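-
-To make the third item concrete, the following is the kind of sequence
-that could race with the lazy-free thread before this fix (an
-illustrative session; the key name and the zero threshold are made up
-for the example):
-
-    redis-cli CONFIG SET slowlog-log-slower-than 0    (log everything)
-    redis-cli SET bigkey "some large value"
-    redis-cli FLUSHALL ASYNC      (the DB is reclaimed in another thread)
-    redis-cli SLOWLOG GET         (entries could point at freed values)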
-
-antirez in commit 1c60b7a6:
- Clarify comment in change fixing #4323.
- 1 file changed, 6 insertions(+), 2 deletions(-)
-
-zhaozhao.zz in commit 368124e8:
- Lazyfree: avoid memory leak when free slowlog entry
- 1 file changed, 5 insertions(+), 2 deletions(-)
-
-antirez in commit 79567b6e:
- PSYNC2: More refinements related to #4316.
- 2 files changed, 14 insertions(+), 11 deletions(-)
-
-zhaozhao.zz in commit f1194649:
- PSYNC2: make persisiting replication info more solid
- 4 files changed, 33 insertions(+), 9 deletions(-)
-
-antirez in commit 097a5556:
- PSYNC2: Fix the way replication info is saved/loaded from RDB.
- 4 files changed, 49 insertions(+), 23 deletions(-)
-
-antirez in commit f1a2cbfd:
- PSYNC2: Create backlog on slave partial sync as well.
- 1 file changed, 5 insertions(+)
-
-antirez in commit 0c0b77d1:
- Add MEMORY DOCTOR to MEMORY HELP.
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
-Mota in commit fa6bd1b2:
- redis-benchmark: default value size usage update.
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-jybaek in commit ad0ddcf3:
- Remove Duplicate Processing
- 1 file changed, 1 deletion(-)
-
-Oran Agra (and also Buğra Gedik) in commit 8651e5d5:
- Flush append only buffers before existing.
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-antirez in commit f2b2897f:
- Changelog: note that 4.0 CLUSTER NODES output changed.
- 1 file changed, 6 insertions(+)
-
-Itamar Haber in commit 363be783:
- Changes command stats iteration to being dict-based
- 1 file changed, 17 insertions(+), 10 deletions(-)
-
-================================================================================
-Redis 4.0.1 Released Mon Jul 24 15:51:31 CEST 2017
-================================================================================
-
-Upgrade urgency MODERATE: A few serious but non critical bugs in the modules
-                          subsystem. A rare Redis Cluster crash fixed.
-                          Many other minor fixes.
-
-Hi everybody. This is the first patch level release of Redis 4.0, fixing
-the obvious last minute issues discovered just after the 4.0.0 GA
-release, plus a few other things that were no-brainers. The highlights
-here in terms of bug fixing are:
-
-1. Loading two or more modules exporting native data types resulted in
-   the inability to reload the RDB file.
-2. A crash in modules when module commands that would block were called
-   from Lua scripts.
-3. A Redis Cluster crash due to mis-handling of the "migrate-to"
-   internal flag.
-4. Other smaller fixes, not worth a release per se, but nice to add
-   here.
-
-If you upgraded to 4.0 to use modules, this is definitely a required
-upgrade. If you are using Redis Cluster, it is also a good idea to
-upgrade. Otherwise... you can definitely wait for the next one :-)
-
-Cheers,
-Salvatore
-
-Here are the commits in detail:
-
-Jan-Erik Rediger in commit a8c2ef76:
- Check that the whole first argument is a number
- 1 file changed, 3 insertions(+), 2 deletions(-)
-
-WuYunlong in commit bfe5008b:
- fix rewrite config: auto-aof-rewrite-min-size
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-Chris Lamb in commit a6abc216:
- Correct proceding -> proceeding typo.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Byron Grobe in commit 1d901b02:
- Fixed issue #1996 (Missing '-' in help message for redis-benchmark)
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Jan-Erik Rediger in commit 19e5e5ea:
- Don't use extended Regexp Syntax
- 1 file changed, 6 insertions(+), 6 deletions(-)
-
-Leon Chen in commit 62474219:
- fix return wrong value of clusterDelNodeSlots
- 1 file changed, 4 insertions(+), 2 deletions(-)
-
-Leon Chen in commit dc782ceb:
- fix mismatch argument
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-liangsijian in commit 07631ff1:
- Fix lua ldb command log
- 1 file changed, 1 insertion(+)
-
-antirez in commit 41e3617d:
- Modules: don't crash when Lua calls a module blocking command.
- 1 file changed, 12 insertions(+), 2 deletions(-)
-
-antirez in commit 10370b20:
- Fix typo in unblockClientFromModule() top comment.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit b6c55a89:
- Make representClusterNodeFlags() more robust.
- 1 file changed, 18 insertions(+), 17 deletions(-)
-
-antirez in commit 9a4f3d72:
- Fix two bugs in moduleTypeLookupModuleByID().
- 1 file changed, 7 insertions(+), 4 deletions(-)
-
-antirez in commit 7302e186:
- Allow certain modules APIs only defining REDISMODULE_EXPERIMENTAL_API.
- 2 files changed, 20 insertions(+), 12 deletions(-)
-
-================================================================================
-Redis 4.0.0 Released Fri Jul 14 13:04:44 CEST 2017
-================================================================================
-
-Upgrade urgency CRITICAL: 4.0.0 GA fixes many important bugs.
-
-Dear Redis users,
-
-this is the first stable version of Redis 4.0. There are a number
-of bug fixes and improvements compared to the previous RC, mainly:
-
-* Different replication fixes to PSYNC2, the new 4.0 replication
-  engine.
-* Modules thread safe contexts were introduced. They are an
-  experimental API right now, but the API is considered to be stable
-  and usable when needed.
-* SLOWLOG now logs the offending client name and address. Note that
-  this is a backward compatibility breakage in case old code assumes
-  that the slowlog entry is composed of exactly three entries.
-* The modules native data types RDB format changed.
-* The AOF check utility is now able to deal with RDB preambles.
-* GEORADIUS_RO and GEORADIUSBYMEMBER_RO variants, not supporting the
-  STORE option, were added in order to allow read-only scaling of such
-  queries.
-* HSET is now variadic, and HMSET is considered deprecated (but will
-  be supported for years to come). Please use HSET in new code (see
-  the example after this section).
-* GEORADIUS huge radius (>= ~6000 km) corner cases fixed: certain
-  elements near the edges were not returned.
-* DEBUG DIGEST modules API added.
-* HyperLogLog commands no longer crash on certain (non-HLL) input
-  strings.
-* Fixed SLAVEOF inside MULTI/EXEC blocks.
-* Many other minor bug fixes and improvements.
-
-Note that 4.0 is probably one of the most extreme releases of Redis
-ever made in terms of changes inside the internals: all the aggregated
-data types no longer use Redis Objects structures but directly SDS
-objects, certain deletion operations are now threaded, the replication
-engine was modified in many ways. So please handle this release with
-care. A few patch-level releases will follow in the next weeks and
-months, fixing the important issues discovered by the users.
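-
-As a quick illustration of the variadic HSET mentioned above (key,
-field and value names are invented for the example):
-
-    redis-cli HSET user:1 name "Ada" year 1815 lang "Lua"
-    (integer) 3       (number of new fields created)
-
-which replaces the now-deprecated form:
-
-    redis-cli HMSET user:1 name "Ada" year 1815 lang "Lua"
-
-Similarly, GEORADIUS_RO and GEORADIUSBYMEMBER_RO take the same
-arguments as their writable counterparts, minus the STORE/STOREDIST
-options, so such queries can be safely served by read-only slaves.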
-
-You can read the full set of new features below in this file; there
-are a lot of improvements that can make a real difference in real
-world use cases. Also note that Redis 4.0 is, as usual, almost a
-perfect superset of Redis 3.2, so it is very rare that compatibility
-with the past is broken in terms of exported commands.
-
-IMPORTANT: Redis Cluster users, please note that, as specified in the
-list of incompatibilities, the Redis 4.0 cluster bus protocol is not
-compatible with Redis 3.2, so in order to upgrade, a mass reboot of
-the instances is needed and rolling upgrades are not possible. This
-change was needed in order to add compatibility for Containers/NAT,
-where the bus port at a fixed offset was not an acceptable design, so
-we had to change many things, resulting in the incompatible protocol.
-
-Have fun with Redis 4.0!
-Salvatore
-
-antirez in commit c29852ff:
- Modules: fix thread safe context DB selection.
- 1 file changed, 3 insertions(+)
-
-antirez in commit b73f186a:
- Modules documentation removed from source.
- 4 files changed, 2830 deletions(-)
-
-antirez in commit 09d93ec9:
- Markdown generation of Redis Modules API reference improved.
- 2 files changed, 83 insertions(+), 74 deletions(-)
-
-antirez in commit 87aabb1a:
- Fix replication of SLAVEOF inside transaction.
- 2 files changed, 19 insertions(+), 3 deletions(-)
-
-antirez in commit 44f89d1d:
- CLUSTER GETKEYSINSLOT: avoid overallocating.
- 1 file changed, 5 insertions(+)
-
-antirez in commit 0df24b68:
- Fix isHLLObjectOrReply() to handle integer encoded strings.
- 1 file changed, 1 insertion(+)
-
-antirez in commit 884ceb69:
- Clients blocked in modules: free argv/argc later.
- 2 files changed, 15 insertions(+), 3 deletions(-)
-
-antirez in commit ccbdd762:
- Event loop: call after sleep() only from top level.
- 2 files changed, 4 insertions(+), 2 deletions(-)
-
-antirez in commit 10925e46:
- redis-check-aof: tell users there is a --fix option.
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-Guy Benoish in commit 99bb1c74:
- Modules: Fix io->bytes calculation in RDB save
- 1 file changed, 55 insertions(+), 30 deletions(-)
-
-antirez in commit cfdcd440:
- AOF check utility: ability to check files with RDB preamble.
- 6 files changed, 61 insertions(+), 35 deletions(-)
-
-sunweinan in commit 1cefb1c5:
- minor fix in listJoin().
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit db791a1e:
- Free IO context if any in RDB loading code.
- 1 file changed, 4 insertions(+)
-
-antirez in commit 419dacfe:
- Modules: DEBUG DIGEST interface.
- 5 files changed, 108 insertions(+), 1 deletion(-)
-
-spinlock in commit 5d03b831:
- update Makefile for test-sds
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-spinlock in commit ed437b82:
- Optimize addReplyBulkSds for better performance
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
-antirez in commit 4ebfe265:
- Avoid closing invalid FDs to make Valgrind happier.
- 1 file changed, 5 insertions(+), 1 deletion(-)
-
-antirez in commit b6cab88c:
- Modules: no MULTI/EXEC for commands replicated from async contexts.
- 1 file changed, 5 insertions(+)
-
-antirez in commit 5c5e8a50:
- Add symmetrical assertion to track c->reply_buffer infinite growth.
- 1 file changed, 4 insertions(+)
-
-Dvir Volk in commit c63a97f8:
- fixed #4100
- 1 file changed, 1 insertion(+)
-
-antirez in commit eeb90571:
- Fix GEORADIUS edge case with huge radius.
- 2 files changed, 39 insertions(+), 20 deletions(-) - -antirez in commit 670456a7: - redis-cli --latency: ability to run non interactively. - 1 file changed, 39 insertions(+), 3 deletions(-) - -antirez in commit 64db8044: - HMSET and MSET implementations unified. HSET now variadic. - 2 files changed, 18 insertions(+), 22 deletions(-) - -antirez in commit e43c890e: - Aesthetic changes to #4068 PR to conform to Redis coding standard. - 1 file changed, 6 insertions(+), 7 deletions(-) - -itamar in commit 3f3dc3b8: - Sets up fake client to select current db in RM_Call() - 1 file changed, 1 insertion(+) - -antirez in commit ba773724: - Fix abort typo in Lua debugger help screen. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit bdd6de96: - Added GEORADIUS(BYMEMBER)_RO variants for read-only operations. - 3 files changed, 32 insertions(+), 11 deletions(-) - -Suraj Narkhede in commit de391ff1: - Fix brpop command table entry and redirect blocked clients. - 2 files changed, 3 insertions(+), 2 deletions(-) - -antirez in commit 5af0fc0c: - RDB modules values serialization format version 2. - 4 files changed, 127 insertions(+), 28 deletions(-) - -antirez in commit 6516958e: - ARM: Fix stack trace generation on crash. - 1 file changed, 5 insertions(+) - -antirez in commit 3669f96e: - Issue #4027: unify comment and modify return value in freeMemoryIfNeeded(). - 2 files changed, 7 insertions(+), 7 deletions(-) - -Suraj Narkhede in commit 896c4690: - Fix following issues in blocking commands: 1. brpop last key index, thus checking all keys for slots. 2. Memory leak in clusterRedirectBlockedClientIfNeeded. 3. Remove while loop in clusterRedirectBlockedClientIfNeeded. - 1 file changed, 1 insertion(+) - -Zachary Marquez in commit deeb795a: - Prevent expirations and evictions while paused - 2 files changed, 10 insertions(+) - -antirez in commit a6615423: - Upgrade 4.0 changelog with more backward incompatibilities. - 1 file changed, 8 insertions(+) - -xuzhou in commit 0b367871: - Optimize set command with ex/px when updating aof. - 1 file changed, 3 insertions(+), 3 deletions(-) - -antirez in commit 2ae733d9: - redis-benchmark: add -t hset target. - 1 file changed, 7 insertions(+) - -xuzhou in commit 63e1c9f2: - Fix set with ex/px option when propagated to aof - 4 files changed, 36 insertions(+), 1 deletion(-) - -minghang.zmh in commit 0231156f: - fix server.stat_net_output_bytes calc bug - 1 file changed, 1 insertion(+), 1 deletion(-) - -xuchengxuan in commit e99954e4: - Fixed comments of slowlog duration - 1 file changed, 1 insertion(+), 1 deletion(-) - -cbgbt in commit d048f972: - cli: Only print elapsed time on OUTPUT_STANDARD - 1 file changed, 3 insertions(+), 1 deletion(-) - -Aric Huang in commit b5f22939: - (fix) Update create-cluster README - 1 file changed, 4 insertions(+), 4 deletions(-) - -antirez in commit 0b7ba621: - SLOWLOG: log offending client address and name. - 4 files changed, 27 insertions(+), 7 deletions(-) - -Antonio Mallia in commit 1fbc90fe: - Removed duplicate 'sys/socket.h' include - 1 file changed, 1 deletion(-) - -Antonio Mallia in commit c7a6b711: - Fixed comment in clusterMsg version field - 1 file changed, 1 insertion(+), 1 deletion(-) - -Qu Chen in commit 73d358f7: - Implement getKeys procedure for georadius and georadiusbymember commands. - 3 files changed, 41 insertions(+), 2 deletions(-) - -antirez in commit c782d189: - Fix PERSIST expired key resuscitation issue #4048. 
- 2 files changed, 4 insertions(+), 7 deletions(-) - -antirez in commit cb548bf3: - More informative -MISCONF error message. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 8cd6a2bd: - Collect fork() timing info only if fork succeeded. - 1 file changed, 4 insertions(+), 3 deletions(-) - -antirez in commit a3941aa5: - redis-cli --bigkeys: show error when TYPE fails. - 1 file changed, 7 insertions(+), 2 deletions(-) - -antirez in commit 6b21cebd: - Modules TSC: use atomic var for server.unixtime. - 3 files changed, 15 insertions(+), 5 deletions(-) - -antirez in commit 54bd224f: - atomicvar.h: show used API in INFO. Add macro to force __sync builtin. - 2 files changed, 13 insertions(+), 6 deletions(-) - -antirez in commit a864d25c: - zmalloc.c: remove thread safe mode, it's the default way. - 3 files changed, 3 insertions(+), 23 deletions(-) - -antirez in commit b338f2b9: - Modules TSC: Add mutex for server.lruclock. - 2 files changed, 2 insertions(+) - -antirez in commit 7e9c658d: - Modules TSC: Improve inter-thread synchronization. - 5 files changed, 75 insertions(+), 20 deletions(-) - -antirez in commit e69af32f: - Simplify atomicvar.h usage by having the mutex name implicit. - 3 files changed, 25 insertions(+), 27 deletions(-) - -antirez in commit 26e57f17: - Lazyfree: fix lazyfreeGetPendingObjectsCount() race reading counter. - 1 file changed, 3 insertions(+), 1 deletion(-) - -antirez in commit 2acf003c: - Modules TSC: HELLO.KEYS reply format fixed. - 1 file changed, 15 insertions(+), 13 deletions(-) - -antirez in commit 12fd298f: - Modules TSC: put the client in the pending write list. - 1 file changed, 13 insertions(+), 1 deletion(-) - -antirez in commit 5b1afa4a: - adlist: fix final list count in listJoin(). - 1 file changed, 1 insertion(+) - -antirez in commit 717b2eea: - adlist: fix listJoin() to handle empty lists. - 1 file changed, 8 insertions(+), 2 deletions(-) - -antirez in commit a839036a: - Modules: remove unused var in example module. - 1 file changed, 2 insertions(+), 3 deletions(-) - -antirez in commit eda5ee5e: - Modules TSC: HELLO.KEYS example draft finished. - 1 file changed, 35 insertions(+), 6 deletions(-) - -antirez in commit fb8734fe: - Module: fix RedisModule_Call() "l" specifier to create a raw string. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit c4b88495: - Modules TSC: Release the GIL for all the time we are blocked. - 6 files changed, 100 insertions(+), 22 deletions(-) - -antirez in commit fcd9a07d: - Modules TSC: Export symbols of the new API. - 2 files changed, 12 insertions(+) - -antirez in commit 8affa3e7: - Modules TSC: Handling of RM_Reply* functions. - 3 files changed, 82 insertions(+), 14 deletions(-) - -antirez in commit 31b1f3c1: - Modules TSC: Basic TS context creeation and handling. - 1 file changed, 60 insertions(+), 1 deletion(-) - -antirez in commit 74f3a843: - Modules TSC: GIL and cooperative multi tasking setup. - 3 files changed, 31 insertions(+), 1 deletion(-) - -antirez in commit 5021fda2: - Regression test for #3899 fixed. - 1 file changed, 36 insertions(+), 19 deletions(-) - -antirez in commit 166bdbda: - Regression test for PSYNC2 issue #3899 added. - 2 files changed, 62 insertions(+) - -antirez in commit b506eb74: - Check event loop creation return value. Fix #3951. - 1 file changed, 6 insertions(+) - -antirez in commit 80690562: - PSYNC2: fix master cleanup when caching it. 
- 3 files changed, 20 insertions(+), 7 deletions(-)
-
-antirez in commit 8c4b0f41:
- Defrag: test currently disabled, too many false positives.
- 1 file changed, 40 insertions(+), 38 deletions(-)
-
-================================================================================
-Redis 4.0-RC3 Released Sat Apr 22 11:19:56 CEST 2017
-================================================================================
-
-Upgrade urgency HIGH: Many bugs were fixed and many improvements made, so
-                      anyone using 4.0 RC2 should upgrade if possible, or
-                      at least carefully read these release notes to
-                      understand whether they may be affected by some bug.
-
-Redis 3.9.103 (4.0 RC3 for the friends) is finally out, and the list of
-commits is pretty long, so here is a synopsis of all the major changes
-in this release.
-
-Major things:
-
-* Finally, the infamous leakage of keys with an expire in slaves that
-  are configured as writable is fixed. (Salvatore Sanfilippo)
-* A serious MIGRATE issue, forgetting to store the TTL of the key under
-  certain conditions, is fixed. (Reported by Jinbei Wang, fixed by
-  Jan-Erik Rediger)
-* An in-depth investigation of the ziplist implementation was
-  performed. The final result was:
-  1. An in-depth audit (Salvatore Sanfilippo and Oran Agra).
-  2. The discovery of a bug and its fix (Salvatore Sanfilippo).
-  3. The creation of a new specification to replace ziplists with
-     something better, listpacks:
-     https://gist.github.com/antirez/66ffab20190ece8a7485bd9accfbc175
-     (Yuval Inbar, Salvatore Sanfilippo, Oran Agra).
-  4. Refactoring, and a function to dump a ziplist for debugging
-     purposes (Salvatore Sanfilippo).
-* New major feature: memory defragmentation. This feature, contributed
-  by Oran Agra, allows Redis to perform "online defragmentation" of
-  memory if the Jemalloc allocator is used. There is some documentation
-  in the example `redis.conf` file, and a sketch of the switch after
-  this list. (Oran Agra)
-* Support for ARM. (Salvatore Sanfilippo)
-* Fix of a very important "family of bugs" about PSYNC2, thanks to the
-  help of Kevin McGehee, Siran Yang and Oran Agra.
-* Hash function moved to the SipHash 1-2 variant. (Salvatore
-  Sanfilippo)
-* Redis Cluster failure detection improved in different ways; the most
-  important result is that nodes exchange a lot fewer messages.
-  (Salvatore Sanfilippo)
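-
-A minimal sketch of how the new defragmentation feature is enabled;
-the directives follow the example redis.conf mentioned above, and the
-threshold value here is only an example:
-
-    # redis.conf -- requires a Redis built against the bundled,
-    # modified Jemalloc (CONFIG SET reports an error otherwise).
-    activedefrag yes
-    active-defrag-ignore-bytes 100mb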
-
-All the rest:
-
-* A bug in the modules API ziplist iterator was fixed. (Dvir Volk)
-* The final cardinality approximation function of the HyperLogLog
-  implementation was improved to use the LogLog-Beta algorithm.
-  (Harish Murthy and Salvatore Sanfilippo)
-* Edge case copy-on-write disaster fix. (Oran Agra)
-* Now the Redis ASCII art logo is shown only if logging to stdout and
-  if the stdout is a tty. (Salvatore Sanfilippo, by user request)
-* A new GEO bug fixed (and there is another one pending when huge
-  radiuses are used, where elements at the edge may be mis-reported).
-  (Salvatore Sanfilippo) ***IMPORTANT*** See the list of
-  incompatibilities below, since the GEO API has some potentially
-  backward incompatible changes.
-* ZADD crash on syntax error fixed. (Itamar Haber)
-* Fixed a division by zero bug in MEMORY DOCTOR. (Jan-Erik Rediger)
-* More MIGRATE bugs fixed. (Salvatore Sanfilippo)
-* Several potential file descriptor leaks fixed. (Multiple authors)
-* Support for Solaris improved. (Salvatore Sanfilippo)
-* Improved memory reclaiming in freeMemoryIfNeeded(). (Oran Agra and
-  Salvatore Sanfilippo)
-* Fixed multiple bugs in the test suite. (Salvatore Sanfilippo)
-* Fix asynchronous commands in modules API. (Dvir Volk and Salvatore
-  Sanfilippo)
-* A new data structure, the radix tree (rax.c), was introduced into
-  Redis in order to fix a major Redis Cluster slowdown. (Salvatore
-  Sanfilippo)
-* Fix `lua-time-limit` config ignored in `redis.conf`. (Salvatore
-  Sanfilippo)
-* Jemalloc downgraded to an apparently safe version, given that recent
-  releases apparently hang under certain conditions. See Redis issue
-  #3799.
-
-For details and full credits, see the list of commits here. However,
-note that much of the information is inside the full commit messages,
-while here only the first line is shown.
-
-antirez in commit 6bc6bd4c:
- PSYNC2: discard pending transactions from cached master.
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
-antirez in commit a91cc5bc:
- Fix PSYNC2 incomplete command bug as described in #3899.
- 3 files changed, 47 insertions(+), 8 deletions(-)
-
-antirez in commit 278972ce:
- Fix getKeysUsingCommandTable() in cluster mode.
- 1 file changed, 14 insertions(+), 1 deletion(-)
-
-张文康 in commit 20285017:
- update block->free after some diff data are written to the child process
- 1 file changed, 1 insertion(+)
-
-Jan-Erik Rediger in commit 05ac217f:
- Reorder to make dict-benchmark compile on Linux
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-antirez in commit 8d44c52a:
- Fix #3848 by closing the descriptor on error.
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-antirez in commit 5c107c62:
- Clarify why we save ziplist elements in revserse order.
- 1 file changed, 13 insertions(+), 7 deletions(-)
-
-spinlock in commit 22996414:
- rdb: saving skiplist in reversed order to accelerate the deserialisation process
- 1 file changed, 8 insertions(+), 7 deletions(-)
-
-antirez in commit d98ef35a:
- Cluster: discard pong times in the future.
- 1 file changed, 8 insertions(+), 1 deletion(-)
-
-antirez in commit e47c8e3f:
- Test: fix, hopefully, false PSYNC failure like in issue #2715.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit 1e659a04:
- Cluster: always add PFAIL nodes at end of gossip section.
- 2 files changed, 71 insertions(+), 23 deletions(-)
-
-antirez in commit 39d34487:
- Cluster: fix gossip section ping/pong times encoding.
- 1 file changed, 4 insertions(+), 3 deletions(-)
-
-antirez in commit 78148d0e:
- Cluster: add clean-logs command to create-cluster script.
- 2 files changed, 9 insertions(+), 1 deletion(-)
-
-antirez in commit a5c1c77e:
- Cluster: decrease ping/pong traffic by trusting other nodes reports.
- 1 file changed, 13 insertions(+)
-
-antirez in commit 51901396:
- Cluster: collect more specific bus messages stats.
- 2 files changed, 82 insertions(+), 31 deletions(-)
-
-antirez in commit f7b91b6c:
- Add a top comment in crucial functions inside networking.c.
- 1 file changed, 23 insertions(+), 1 deletion(-)
-
-antirez in commit 6e1489ae:
- Set lua-time-limit default value at safe place.
- 2 files changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit 5fd841c0:
- Fix preprocessor if/else chain broken in order to fix #3927.
- 1 file changed, 3 insertions(+)
-
-antirez in commit 185b361a:
- Fix typo in feedReplicationBacklog() top comment.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-lorneli in commit b740fc1e:
- Expire: Update comment of activeExpireCycle function
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit 56cafcce:
- Fix zmalloc_get_memory_size() ifdefs to actually use the else branch.
- 1 file changed, 2 deletions(-) - -antirez in commit a5b66da8: - Make more obvious why there was issue #3843. - 1 file changed, 3 insertions(+) - -antirez in commit f60d6f09: - Fix modules blocking commands awake delay. - 3 files changed, 42 insertions(+) - -antirez in commit c56668c8: - Rax library updated. - 3 files changed, 152 insertions(+), 265 deletions(-) - -antirez in commit c4716d33: - Cluster: hash slots tracking using a radix tree. - 9 files changed, 2115 insertions(+), 65 deletions(-) - -vienna in commit a9fefbce: - fix #3847: add close socket before return ANET_ERR. - 1 file changed, 3 insertions(+), 1 deletion(-) - -Dvir Volk in commit 17250409: - fixed free of blocked client before refering to it - 1 file changed, 1 insertion(+), 1 deletion(-) - -Oran Agra in commit 8aced9e9: - add LFU policies to the test suite, just for coverage - 1 file changed, 2 insertions(+), 2 deletions(-) - -antirez in commit 3aa656ab: - Use sha256 instead of sha1 to generate tarball hashes. - 1 file changed, 2 insertions(+), 2 deletions(-) - -Salvatore Sanfilippo in commit 42d6a6c3: - Makefile: fix building with Solaris C compiler, 64 bit. - 1 file changed, 7 insertions(+) - -Salvatore Sanfilippo in commit e082d056: - Use ARM unaligned accesses ifdefs for SPARC as well. - 3 files changed, 11 insertions(+), 2 deletions(-) - -Salvatore Sanfilippo in commit 7269d547: - Fix BITPOS unaligned memory access. - 1 file changed, 14 insertions(+), 7 deletions(-) - -antirez in commit 15520588: - Solaris fixes about tail usage and atomic vars. - 4 files changed, 7 insertions(+), 7 deletions(-) - -antirez in commit 9faeed04: - Test: replication-psync, wait more to detect write load. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit b3440b35: - Test: fix conditional execution of HINCRBYFLOAT representation test. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 5a413303: - SipHash 2-4 -> SipHash 1-2. - 1 file changed, 15 insertions(+), 14 deletions(-) - -antirez in commit a8cbc3ec: - freeMemoryIfNeeded(): improve code and lazyfree handling. - 1 file changed, 42 insertions(+), 19 deletions(-) - -antirez in commit 857e6d56: - Use locale agnostic tolower() in dict.c hash function. - 3 files changed, 27 insertions(+), 17 deletions(-) - -antirez in commit 34387cea: - SipHash x86 optimizations. - 1 file changed, 23 insertions(+), 2 deletions(-) - -antirez in commit ba647598: - Use SipHash hash function to mitigate HashDos attempts. - 12 files changed, 361 insertions(+), 82 deletions(-) - -Salvatore Sanfilippo in commit 2ee19d98: - ARM: Avoid fast path for BITOP. - 1 file changed, 5 insertions(+), 1 deletion(-) - -Salvatore Sanfilippo in commit eb62cfea: - ARM: Use libc malloc by default. - 1 file changed, 6 insertions(+), 3 deletions(-) - -Salvatore Sanfilippo in commit 620e48b1: - ARM: Avoid memcpy() in MurmurHash64A() if we are using 64 bit ARM. - 3 files changed, 5 insertions(+), 1 deletion(-) - -Salvatore Sanfilippo in commit 980d8805: - ARM: Fix 64 bit unaligned access in MurmurHash64A(). - 2 files changed, 10 insertions(+) - -John.Koepi in commit 522b10e4: - fix #2883, #2857 pipe fds leak when fork() failed on bg aof rw - 1 file changed, 1 insertion(+) - -antirez in commit 03f55722: - Don't leak file descriptor on syncWithMaster(). - 1 file changed, 2 insertions(+), 1 deletion(-) - -antirez in commit 8d55aeb5: - Fix MIGRATE closing of cached socket on error. - 1 file changed, 23 insertions(+), 6 deletions(-) - -antirez in commit 7c22d768: - Fix ziplist fix... 
- 1 file changed, 2 insertions(+), 2 deletions(-) - -antirez in commit 8327b813: - Ziplist: insertion bug under particular conditions fixed. - 1 file changed, 9 insertions(+), 1 deletion(-) - -antirez in commit 1688ccff: - ziplist: better comments, some refactoring. - 1 file changed, 250 insertions(+), 100 deletions(-) - -antirez in commit 36c1acc2: - Jemalloc updated to 4.4.0. - 150 files changed, 17242 insertions(+), 6359 deletions(-) - -Jan-Erik Rediger in commit 37b4c954: - Don't divide by zero - 1 file changed, 1 insertion(+), 1 deletion(-) - -miter in commit aee1ddca: - Change switch statment to if statment - 1 file changed, 2 insertions(+), 4 deletions(-) - -oranagra in commit af292b54: - fix rare assertion in DEBUG DIGEST - 1 file changed, 1 insertion(+), 1 deletion(-) - -Itamar Haber in commit c3c2aa3b: - Verify pairs are provided after subcommands - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 7c2153da: - Add panic() into redisassert.h. - 1 file changed, 2 insertions(+) - -antirez in commit dc83ddf0: - serverPanic(): allow printf() alike formatting. - 2 files changed, 14 insertions(+), 4 deletions(-) - -antirez in commit 3ef81eb3: - Ziplist: remove static from functions, they prevent good crash reports. - 1 file changed, 14 insertions(+), 14 deletions(-) - -Jan-Erik Rediger in commit 96f75faa: - Initialize help only in repl mode - 1 file changed, 5 insertions(+), 5 deletions(-) - -antirez in commit bcd51a6a: - Use const in modules types mem_usage method. - 3 files changed, 4 insertions(+), 4 deletions(-) - -antirez in commit 354ccf0c: - Add memory defragmenting capability in 4.0 release notes. - 1 file changed, 3 insertions(+) - -antirez in commit 57c81853: - Defrag: don't crash when a module value is encountered. - 1 file changed, 3 insertions(+) - -antirez in commit e36d5222: - MEMORY USAGE: support for modules data types. - 3 files changed, 16 insertions(+), 5 deletions(-) - -antirez in commit 82ec0fe6: - Defrag: document the feature in redis.conf. - 1 file changed, 49 insertions(+), 6 deletions(-) - -antirez in commit 19bf0249: - Defrag: not enabled by default. Error on CONFIG SET if not available. - 2 files changed, 11 insertions(+), 1 deletion(-) - -antirez in commit fa0d8b62: - Defrag: fix function name typo defarg -> defrag. - 1 file changed, 2 insertions(+), 2 deletions(-) - -antirez in commit ebb9a7e7: - Defrag: do not crash on empty quicklist. - 1 file changed, 3 insertions(+), 2 deletions(-) - -antirez in commit da84b9c4: - Defrag: fix comments & code to conform to the Redis code base. - 1 file changed, 84 insertions(+), 54 deletions(-) - -antirez in commit a18f3cf3: - Defrag: activate it only if running modified version of Jemalloc. - 4 files changed, 40 insertions(+), 26 deletions(-) - -oranagra in commit 1ad48837: - active defrag improvements - 4 files changed, 63 insertions(+), 44 deletions(-) - -oranagra in commit 67def261: - active memory defragmentation - 13 files changed, 755 insertions(+), 8 deletions(-) - -antirez in commit b4f3c5a4: - deps/hiredis updated to latest version. - 34 files changed, 2393 insertions(+), 817 deletions(-) - -antirez in commit 6549c6cf: - Fix test "server is up" detection after logging changes. - 2 files changed, 2 insertions(+), 1 deletion(-) - -Alexander Zhukov in commit b87fd120: - Fix an article usage - 1 file changed, 1 insertion(+), 1 deletion(-) - -whatacold in commit bd845493: - fix the wrong description of intsetGet(). 
- 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 952e8706: - 4.0 release notes updated with API incompatibility notice about GEO. - 1 file changed, 12 insertions(+) - -antirez in commit f3add069: - Geo: fuzzy test inconsistency report fixed to show all points. - 1 file changed, 3 insertions(+), 2 deletions(-) - -antirez in commit 056c81e4: - Geo: fix GEOHASH return value for consistency. - 1 file changed, 3 insertions(+), 4 deletions(-) - -antirez in commit d5036018: - Geo: fix edge case return values for uniformity. - 1 file changed, 5 insertions(+), 6 deletions(-) - -Justin Carvalho in commit 47b46253: - Fix missing brackets around encoding variable in ZIP_DECODE_LENGTH macro - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit a0e95118: - Remove first version of ASCII wave, later discarded. - 1 file changed, 2 deletions(-) - -antirez in commit 3334a409: - Only show Redis logo if logging to stdout / TTY. - 4 files changed, 44 insertions(+), 11 deletions(-) - -antirez in commit db53c230: - adjustOpenFilesLimit() comment made hopefully more clear. - 1 file changed, 4 insertions(+), 1 deletion(-) - -antirez in commit bc00ef45: - Hopefully improve code comments for issue #3616. - 1 file changed, 8 insertions(+), 7 deletions(-) - -itamar in commit 075a3381: - Corrects a couple of omissions in the modules docs - 2 files changed, 3 insertions(+), 2 deletions(-) - -andyli in commit 8d82b3b1: - Modify MIN->MAX - 1 file changed, 1 insertion(+), 1 deletion(-) - -oranagra in commit 69282df8: - when a slave loads an RDB, stop an AOFRW fork before flusing db and parsing rdb file, to avoid a CoW disaster. - 1 file changed, 19 insertions(+), 13 deletions(-) - -hylepo in commit 869dda84: - Update redis-benchmark.c - 1 file changed, 1 insertion(+), 1 deletion(-) - -oranagra in commit 7f870fad: - fix unsigned int overflow in adjustOpenFilesLimit - 1 file changed, 3 insertions(+), 3 deletions(-) - -antirez in commit 2e375d4f: - Switch PFCOUNT to LogLog-Beta algorithm. - 4 files changed, 16 insertions(+), 57 deletions(-) - -antirez in commit 735b928b: - Use llroundl() before converting loglog-beta output to integer. - 1 file changed, 4 insertions(+), 4 deletions(-) - -antirez in commit 6cae609e: - Fix HLL gnuplot graph generator script for new redis-rb versions. - 1 file changed, 1 insertion(+), 1 deletion(-) - -Harish Murthy in commit 4d475e0f: - LogLog-Beta Algorithm support within HLL Config option to use LogLog-Beta Algorithm for Cardinality - 4 files changed, 57 insertions(+), 27 deletions(-) - -Dvir Volk in commit 90d918bd: - fixed stop condition in RM_ZsetRangeNext and RM_ZsetRangePrev - 1 file changed, 3 insertions(+), 3 deletions(-) - -antirez in commit 3b19580a: - ziplist.c explanation of format improved a bit. - 1 file changed, 18 insertions(+), 11 deletions(-) - -antirez in commit 457c6878: - DEBUG: new "ziplist" subcommand added. Dumps a ziplist on stdout. - 4 files changed, 35 insertions(+), 13 deletions(-) - -antirez in commit 17cda261: - MIGRATE: Remove upfront ttl initialization. - 1 file changed, 3 insertions(+), 4 deletions(-) - -Jan-Erik Rediger in commit 9515648d: - Reset the ttl for additional keys - 1 file changed, 1 insertion(+) - -antirez in commit 1eec780e: - Writable slaves expires: unit test. - 1 file changed, 12 insertions(+) - -antirez in commit 9a8bc6d2: - Writable slaves expires: fix leak in key tracking. - 2 files changed, 16 insertions(+), 2 deletions(-) - -antirez in commit 746d70b0: - INFO: show num of slave-expires keys tracked. 
- 3 files changed, 11 insertions(+), 2 deletions(-)
-
-antirez in commit 1469c4ed:
- Fix created->created typo in expire.c
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit c65dfb43:
- Replication: fix the infamous key leakage of writable slaves + EXPIRE.
- 8 files changed, 161 insertions(+), 11 deletions(-)
-
-Yossi Gottlieb in commit 80944aac:
- Fix redis-cli rare crash.
- 1 file changed, 4 insertions(+)
-
-================================================================================
-Redis 4.0.0-RC2 Released Tue Dec 06 09:25:35 CET 2016
-================================================================================
-
-Upgrade urgency LOW: This release mainly fixes a rare GEO API bug and a crash
-related to the modules subsystem.
-
-Redis 4.0.0-RC2 (version number 3.9.102) fixes just two bugs:
-
-1. GEORADIUS could fail to report items when very large radius lengths were
-   used, because of a bug in the bounding-box computation function. This was
-   fixed, and tests with large radius sizes were added.
-
-2. A crash in the modules subsystem: the fix was already present in the
-   "unstable" branch but, by mistake, was not merged into RC1.
-
-The list of commits follows:
-
-wangshaonan in commit 77241e8:
- Add '\n' to MEMORY DOCTOR command output message when num_reports is 0 or empty is 1
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-Chris Lamb in commit 0ee6a23:
- src/rdb.c: Correct "whenver" -> "whenever" typo.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Yossi Gottlieb in commit 2d0d2c8:
- Fix typo in RedisModuleTypeMethods declaration.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-Dvir Volk in commit 0fb9f34:
- fix memory corruption on RM_FreeCallReply
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
-antirez in commit 41994f2:
- Geo: improve fuzz test.
- 1 file changed, 11 insertions(+), 3 deletions(-)
-
-antirez in commit ef9b4cf:
- Geo: fix computation of bounding box.
- 2 files changed, 33 insertions(+), 44 deletions(-)
-
-================================================================================
-Redis 4.0.0-RC1 Released Fri Dec 2 10:40:01 CEST 2016
-================================================================================
-
-Redis 4.0.0-RC1 (version number 3.9.101) is the first release candidate of
-Redis 4.0. It is not a production-quality release, but all the major features
-have received some testing at this point, and the release is in feature
-freeze, excluding only changes that sit in the middle ground between fixes
-and improvements.
-
-Redis 4.0 is not called 3.4 because it is a major release that adds a number
-of important and non-trivial features. Many core functionalities of Redis
-were significantly reworked. In the next release candidates, with your help,
-we'll gather information about the stability of this release and will produce
-a new RC every 2-4 weeks. When the severity and frequency of bug reports drop
-below a certain level, Redis 4.0 final will be released.
-
-As usual, certain changes to Cluster and Sentinel may not follow the Redis
-release cycle and could be incorporated into Redis 4.0 at the RC stage or
-even after the final version, whenever a change is considered to benefit the
-user base enough to be worth incorporating ASAP rather than waiting for
-Redis 4.2.
-
-So, what's new in Redis 4.0?
-
-Major features
----
-* Redis modules system. Redis now allows developers to write modules that can extend Redis functionality and implement new data types.
- The module API implements a complete abstraction layer that separates the Redis core from the module implementation, allowing the same module to be loaded by different versions of Redis without modifications. See the modules documentation for more info: https://github.com/antirez/redis/blob/unstable/src/modules/INTRO.md
-
-* Partial Replication (PSYNC) version 2. The replication protocol was enhanced (in a backward-compatible way, so that 4.0 can still act as a slave of older instances) in order to make it possible to partially resynchronize slaves and masters in conditions where it was impossible in the past: after a master switch because of a failover, and when a slave instance is restarted. Even masters turned into slaves will usually be able to partially resynchronize with the new master, if the new master was a former slave of the old master instance. The way replication works for chained slaves (sub-slaves) is now very different: each slave now receives the same replication stream generated by the top-level master.
-
-* Cache eviction improvements. Redis 4.0 implements LFU (Least Frequently Used) as a new eviction algorithm, and improves the functionality, performance and precision of the existing algorithms. This blog post contains info about the changes: http://antirez.com/news/109
-
-* Lazy freeing of keys. Redis is now able to delete keys in the background, in a different thread, without blocking the server. The new `UNLINK` command is the same as `DEL` but works in a non-blocking way. Similarly, an `ASYNC` option was added to `FLUSHALL` and `FLUSHDB` so that the entire dataset or a single database can be freed asynchronously.
-
-* Mixed RDB-AOF format. If enabled, the new format is used when rewriting the AOF file: the rewrite uses the more compact and faster-to-generate RDB format, and an AOF stream is appended to the file. This allows faster rewrites and reloads when using the AOF persistence.
-
-* A new MEMORY command, able to perform memory analysis of different kinds: troubleshooting of memory issues (with MEMORY DOCTOR, similar to LATENCY DOCTOR), reporting of the amount of memory used by a single key, and more in-depth reporting of Redis memory usage compared to what the INFO command offers.
-
-* Redis Cluster support for NAT / Docker. New functionality makes it possible to force cluster instances to announce specific sets of IP addresses, client ports and bus ports to the rest of the cluster, regardless of the auto-detected IP. This required a bus protocol change that will force users to mass-restart all the nodes of a Redis 3.2 installation in order to upgrade to 4.0.
-
-* Redis now uses less memory to store the same amount of data. The gain depends a lot on the kind of dataset stored.
-
-* Redis is now able to defragment the used memory and reclaim space incrementally while running. See the example `redis.conf` for more information.
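-
-To give a feeling for the modules API, here is a minimal module sketch,
-adapted from the helloworld example in the INTRO.md linked above. The module
-and command names are illustrative, and error handling is kept to the bare
-minimum.
-
-    #include "redismodule.h"
-    #include <stdlib.h>
-
-    /* HELLOWORLD.RAND -- reply with a random integer. */
-    int HelloworldRand_RedisCommand(RedisModuleCtx *ctx,
-                                    RedisModuleString **argv, int argc) {
-        REDISMODULE_NOT_USED(argv);
-        REDISMODULE_NOT_USED(argc);
-        RedisModule_ReplyWithLongLong(ctx, rand());
-        return REDISMODULE_OK;
-    }
-
-    /* Entry point: called by Redis when the module is loaded. */
-    int RedisModule_OnLoad(RedisModuleCtx *ctx,
-                           RedisModuleString **argv, int argc) {
-        REDISMODULE_NOT_USED(argv);
-        REDISMODULE_NOT_USED(argc);
-        if (RedisModule_Init(ctx, "helloworld", 1, REDISMODULE_APIVER_1)
-            == REDISMODULE_ERR) return REDISMODULE_ERR;
-        if (RedisModule_CreateCommand(ctx, "helloworld.rand",
-            HelloworldRand_RedisCommand, "fast random",
-            0, 0, 0) == REDISMODULE_ERR) return REDISMODULE_ERR;
-        return REDISMODULE_OK;
-    }
-
-Once compiled into a shared object, the module can be loaded with the
-loadmodule directive in redis.conf or at runtime with MODULE LOAD (the
-path below is hypothetical):
-
-    MODULE LOAD /path/to/helloworld.so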
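-
-Several of the features above are controlled by new redis.conf directives.
-The excerpt below collects the relevant directive names as found in the 4.0
-example configuration file; the values shown are illustrative, not
-recommendations.
-
-    # Cache eviction: the new LFU policy and its tuning knobs.
-    maxmemory-policy allkeys-lfu
-    lfu-log-factor 10
-    lfu-decay-time 1
-
-    # Lazy freeing: reclaim memory in a background thread in more contexts.
-    lazyfree-lazy-eviction yes
-    lazyfree-lazy-expire yes
-
-    # Mixed RDB-AOF format: use an RDB preamble when rewriting the AOF.
-    aof-use-rdb-preamble yes
-
-    # Incremental defragmentation. Not enabled by default, and only
-    # available when Redis is built with the modified bundled Jemalloc.
-    activedefrag yes
-
-    # Cluster support for NAT / Docker: announce an explicit address/ports.
-    cluster-announce-ip 10.1.1.5
-    cluster-announce-port 6379
-    cluster-announce-bus-port 16379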
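-
-The remaining features are plain commands and can be tried from any client.
-Below is a short tour using the hiredis library bundled under deps/; the
-connection details are illustrative, and the last two commands preview two
-entries from the "Smaller features" list that follows (SWAPDB and the now
-variadic RPUSHX).
-
-    #include <stdio.h>
-    #include <hiredis/hiredis.h>
-
-    int main(void) {
-        redisContext *c = redisConnect("127.0.0.1", 6379);
-        if (c == NULL || c->err) return 1;
-        redisReply *r;
-
-        /* MEMORY USAGE: bytes used by a single key (nil if missing). */
-        r = redisCommand(c, "SET bigkey somevalue");
-        freeReplyObject(r);
-        r = redisCommand(c, "MEMORY USAGE bigkey");
-        printf("MEMORY USAGE -> %lld bytes\n", r->integer);
-        freeReplyObject(r);
-
-        /* UNLINK: same as DEL, but memory is reclaimed in the background. */
-        r = redisCommand(c, "UNLINK bigkey");
-        printf("UNLINK -> %lld key(s) unlinked\n", r->integer);
-        freeReplyObject(r);
-
-        /* SWAPDB: atomically exchange the content of databases 0 and 1. */
-        r = redisCommand(c, "SWAPDB 0 1");
-        printf("SWAPDB -> %s\n", r->str);
-        freeReplyObject(r);
-
-        /* Variadic RPUSHX: push many elements, only if the list exists. */
-        r = redisCommand(c, "RPUSH mylist first");
-        freeReplyObject(r);
-        r = redisCommand(c, "RPUSHX mylist second third fourth");
-        printf("RPUSHX -> list length now %lld\n", r->integer);
-        freeReplyObject(r);
-
-        redisFree(c);
-        return 0;
-    }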
-
-Smaller features
----
-* Improvements to the RDB format to support 64-bit lengths, binary sorted set scores, and more. The RDB file check utility now uses the same code base as Redis itself in order to load the RDB file in memory.
-
-* SWAPDB command: ability to completely and immediately (no latency) swap two Redis databases.
-
-* Improvements to dict.c, the Redis hash table implementation.
-
-* Security improvements mapping POST and Host: commands to QUIT in order to prevent cross-protocol scripting attacks.
-
-* RPUSHX and LPUSHX now accept a variable number of elements.
-
-* Reporting of additional memory used by copy-on-write in the INFO output.
-
-* Serious refactoring of many core parts of Redis.
-
-List of commits
----
-antirez in commit 434e6b2:
- PSYNC2: Do not accept WAIT in slave instances.
- 1 file changed, 5 insertions(+)
-
-antirez in commit 71e8d15:
- Modules: change type registration API to use a struct of methods.
- 4 files changed, 65 insertions(+), 10 deletions(-)
-
-antirez in commit ce1f9cf:
- PSYNC2 test: check ability to resync after restart.
- 1 file changed, 35 insertions(+), 1 deletion(-)
-
-antirez in commit 93c5198:
- PSYNC2 test: 20 seconds are enough...
- 1 file changed, 5 insertions(+), 1 deletion(-)
-
-antirez in commit f6e42f0:
- PSYNC2 test: test added to the default tests.
- 1 file changed, 1 insertion(+)
-
-antirez in commit 6eb720f:
- PSYNC2: Minor memory leak reading -NOMASTERLINK master reply fixed.
- 1 file changed, 1 insertion(+)
-
-antirez in commit c8f0690:
- PSYNC2 test: modify the test for production.
- 1 file changed, 33 insertions(+), 17 deletions(-)
-
-antirez in commit eab865a:
- PSYNC2: stop sending newlines to sub-slaves when master is down.
- 3 files changed, 6 insertions(+), 30 deletions(-)
-
-antirez in commit 16559a0:
- PSYNC2: Test (WIP).
- 1 file changed, 127 insertions(+)
-
-antirez in commit 790310d:
- Better protocol errors logging.
- 1 file changed, 29 insertions(+), 10 deletions(-)
-
-antirez in commit e09e31b:
- PSYNC2: on transient error jump to error, not write_error.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit 1f55170:
- Modules: fix client blocking calls access to invalid struct field.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit 5b7d42f:
- PSYNC2: bugfixing pre release.
- 2 files changed, 39 insertions(+), 11 deletions(-)
-
-antirez in commit 73dd51c:
- Merge branch 'unstable' of github.com:/antirez/redis into unstable
-antirez in commit f115461:
- Test: WAIT tests added in wait.tcl unit.
- 2 files changed, 43 insertions(+)
-
-Salvatore Sanfilippo in commit 5b83fa4:
- Merge pull request #3612 from deep011/unstable
-antirez in commit 8fb3ad2:
- Merge branch 'psync2' into unstable
-antirez in commit 59f2e7c:
- Merge branch 'unstable' of github.com:/antirez/redis into unstable
-antirez in commit cfdb3a2:
- Cluster: handle zero bytes at the end of nodes.conf.
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-deep011 in commit 13a92a5:
- fix a possible bug for 'replconf getack'
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-antirez in commit 28c96d7:
- PSYNC2: Save replication ID/offset on RDB file.
- 4 files changed, 29 insertions(+), 3 deletions(-)
-
-antirez in commit 4e5e366:
- PSYNC2: Wrap debugging code with if(0)
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
-antirez in commit 2669fb8:
- PSYNC2: different improvements to Redis replication.
- 10 files changed, 440 insertions(+), 143 deletions(-)
-
-Salvatore Sanfilippo in commit b399288:
- Merge pull request #3568 from MichaelTSS/patch-1
-antirez in commit 18d32c7:
- redis-cli typo fixed: perferences -> preferences.
- 1 file changed, 2 insertions(+), 2 deletions(-) - -Salvatore Sanfilippo in commit fa2dc4b: - Merge pull request #3514 from charsyam/feature/simple-refactoring -Salvatore Sanfilippo in commit 25811bc: - Merge pull request #3547 from yyoshiki41/refactor/redis-trib -Salvatore Sanfilippo in commit b3e7073: - Merge pull request #3575 from deep011/unstable -Salvatore Sanfilippo in commit c17fc39: - Merge pull request #3581 from dvirsky/fix_io_ctx_sizeof -Dvir Volk in commit ec8fd6e: - fixed sizeof in allocating io RedisModuleCtx* - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 9749e96: - Test: regression test for #3564 added. - 1 file changed, 9 insertions(+) - -Salvatore Sanfilippo in commit 77b1abf: - Merge pull request #3565 from sunheehnus/bitfield-fix-highest_write_offset -Salvatore Sanfilippo in commit f48ca55: - Merge pull request #3573 from jybaek/module-io-context -Salvatore Sanfilippo in commit fbfa0a1: - Merge pull request #3579 from guybe7/unstable -Guy Benoish in commit 8b070b5: - Fixed wrong sizeof(client) in object.c - 1 file changed, 7 insertions(+), 7 deletions(-) - -deep in commit 7f1bb22: - fix a bug for quicklistDup() function - 1 file changed, 3 insertions(+), 3 deletions(-) - -jybaek in commit a06d59b: - Add missing fclose() - 1 file changed, 1 insertion(+) - -Michel Tresseras in commit 20feafd: - Typo - 1 file changed, 1 insertion(+), 1 deletion(-) - -sunhe in commit 949a274: - bitops.c/bitfieldCommand: update higest_write_offset with check - 1 file changed, 3 insertions(+), 2 deletions(-) - -antirez in commit f39e7d4: - Remove "Hey!" warning... - 1 file changed, 1 deletion(-) - -antirez in commit a9f50a3: - Better target MacOS on __atomic macros conditional compilation. - 1 file changed, 2 insertions(+), 1 deletion(-) - -Salvatore Sanfilippo in commit ea95262: - Merge pull request #3560 from melo/fix-macos-10-8-compile -Pedro Melo in commit 2000abc: - Fixes compilation on MacOS 10.8.5, Clang tags/Apple/clang-421.0.57 - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit f633212: - Fix SELECT test, broken cause change in error msg. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit c7a4e69: - SWAPDB command. - 3 files changed, 88 insertions(+), 1 deletion(-) - -antirez in commit a3b3ca7: - Modules: use RedisModule_AbortBlock() in the example. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 95c17c0: - Modules: AbortBlock() API implemented. - 3 files changed, 12 insertions(+), 1 deletion(-) - -antirez in commit 58601c8: - Modules: blocking API documented. - 1 file changed, 265 insertions(+) - -antirez in commit 553aa0e: - module.c: trim comment to 80 cols. - 1 file changed, 4 insertions(+), 4 deletions(-) - -antirez in commit 870274b: - Example modules: remove warnings about types and not used args. - 2 files changed, 13 insertions(+), 2 deletions(-) - -yyoshiki41 in commit 16f6506: - Refactor redis-trib.rb - 1 file changed, 1 insertion(+), 2 deletions(-) - -antirez in commit 7dde8bf: - Modules: blocking command example added. - 1 file changed, 115 insertions(+) - -antirez in commit 3459969: - Modules: fixes to the blocking commands API: examples now works. - 1 file changed, 10 insertions(+), 2 deletions(-) - -antirez in commit f156038: - Modules: RM_Milliseconds() API added. - 2 files changed, 8 insertions(+) - -antirez in commit ffb00fb: - Modules: blocking commands WIP: API exported, a first example. 
- 3 files changed, 38 insertions(+), 3 deletions(-) - -antirez in commit 3aa816e: - Modules: introduce warning suppression macro for unused args. - 3 files changed, 22 insertions(+) - -antirez in commit 3879923: - Enable warning in example modules Makefile. - 1 file changed, 2 insertions(+), 2 deletions(-) - -antirez in commit 8fadfe5: - Module: API to block clients with threading support. - 4 files changed, 185 insertions(+), 11 deletions(-) - -antirez in commit a5998d1: - Fix typos in GetContextFromIO API declaration. - 1 file changed, 2 insertions(+), 3 deletions(-) - -antirez in commit 799208d: - Fix name of mispelled function. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 152c1b6: - Module: Ability to get context from IO context. - 5 files changed, 30 insertions(+) - -antirez in commit 72279e3: - Copyright notice added to module.c. - 2 files changed, 30 insertions(+), 1 deletion(-) - -antirez in commit 3dc84c5: - Modules: API to save/load single precision floating point numbers. - 4 files changed, 48 insertions(+), 2 deletions(-) - -antirez in commit a1b1fd4: - Modules: API to log from module I/O callbacks. - 2 files changed, 40 insertions(+), 16 deletions(-) - -antirez in commit 4674efd: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -antirez in commit 0d9febf: - Add compiler optimizations to example module makefile. - 1 file changed, 2 insertions(+), 2 deletions(-) - -antirez in commit 6782e77: - debug.c: include dlfcn.h regardless of BACKTRACE support. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 2564031: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -antirez in commit 6d9f8e2: - Security: CONFIG SET client-output-buffer-limit overflow fixed. - 1 file changed, 5 insertions(+), 3 deletions(-) - -charsyam in commit ca6fc4f: - Simple change just using slaves instead of server.slaves - 1 file changed, 1 insertion(+), 1 deletion(-) - -Salvatore Sanfilippo in commit 6e866ee: - Merge pull request #3511 from dvirsky/create_string_printf -Dvir Volk in commit a91650f: - added RM_CreateStringPrintf - 4 files changed, 52 insertions(+), 3 deletions(-) - -antirez in commit 6705867: - dict.c: fix dictGenericDelete() return ASAP condition. - 1 file changed, 2 insertions(+), 1 deletion(-) - -antirez in commit e9d861e: - Clear child data when opening the pipes. - 1 file changed, 2 insertions(+) - -antirez in commit e565632: - Child -> Parent pipe for COW info transferring. - 6 files changed, 132 insertions(+), 3 deletions(-) - -antirez in commit e1eccf9: - zmalloc: Make fp var non local to fix build. - 1 file changed, 3 insertions(+), 2 deletions(-) - -antirez in commit 945a2f9: - zmalloc: zmalloc_get_smap_bytes_by_field() modified to work for any PID. - 5 files changed, 24 insertions(+), 12 deletions(-) - -antirez in commit b13759e: - redis-cli: "allocator-stats" -> "malloc-stats". - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 4263b12: - Typo fixed from MEMORY DOCTOR output. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 8a00ffc: - Surround allocator name with quotes in MEMORY DOCTOR output. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 44e714a: - MEMORY DOCTOR initial implementation. - 4 files changed, 104 insertions(+), 8 deletions(-) - -antirez in commit d9325ac: - Provide percentage of memory peak used info. 
- 3 files changed, 13 insertions(+), 1 deletion(-) - -oranagra in commit 309c2bc: - add zmalloc used mem to DEBUG SDSLEN - 2 files changed, 7 insertions(+), 3 deletions(-) - -antirez in commit 78f35f8: - Memory related subcommands of DEBUG moved to MEMORY. - 3 files changed, 43 insertions(+), 41 deletions(-) - -antirez in commit 123891d: - Group MEMORY command related APIs together in the source code. - 1 file changed, 53 insertions(+), 53 deletions(-) - -antirez in commit adcfb77: - objectComputeSize(): skiplist nodes have different sizes. - 1 file changed, 6 insertions(+), 6 deletions(-) - -antirez in commit e9629e1: - MEMORY command: HELP + dataset percentage (like in INFO). - 3 files changed, 20 insertions(+), 8 deletions(-) - -antirez in commit 5443726: - MEMORY USAGE: SAMPLES option added + fixes to size computation. - 1 file changed, 27 insertions(+), 18 deletions(-) - -antirez in commit 7229af3: - INFO: new memory reporting fields added. - 1 file changed, 15 insertions(+), 1 deletion(-) - -antirez in commit bf2624e: - C struct memoh renamed redisMemOverhead. API prototypes added. - 2 files changed, 28 insertions(+), 26 deletions(-) - -antirez in commit be5439b: - MEMORY OVERHEAD refactored into a generic API. - 1 file changed, 130 insertions(+), 73 deletions(-) - -antirez in commit 09a50d3: - dict.c: dictReplaceRaw() -> dictAddOrFind(). - 3 files changed, 4 insertions(+), 4 deletions(-) - -antirez in commit 041ab04: - Trim comment to 80 cols. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit a636aea: - Apply the new dictUnlink() where possible. - 3 files changed, 9 insertions(+), 8 deletions(-) - -oranagra in commit afcbcc0: - dict.c: introduce dictUnlink(). - 3 files changed, 45 insertions(+), 11 deletions(-) - -antirez in commit 8c84c96: - MEMORY OVERHEAD implemented (using Oran Agra initial implementation). - 3 files changed, 104 insertions(+), 2 deletions(-) - -antirez in commit 89dec69: - objectComputeSize(): estimate collections sampling N elements. - 3 files changed, 51 insertions(+), 31 deletions(-) - -oranagra in commit 8c24325: - Adding objectComputeSize() function. - 1 file changed, 113 insertions(+) - -oranagra in commit 68bf45f: - Optimize repeated keyname hashing. - 5 files changed, 56 insertions(+), 59 deletions(-) - -Salvatore Sanfilippo in commit d680eb6: - Merge pull request #3492 from wyxustcsa09/fix-memory -antirez in commit c6dc8d5: - Merge branch 'unstable' of github.com:antirez/redis into unstable -antirez in commit 56dba3a: - Example modules: Add C99 standard to cflags. - 1 file changed, 2 insertions(+), 2 deletions(-) - -antirez in commit 3793afa: - Merge branch 'aofrdb' into unstable -antirez in commit f962481: - fix the fix for the TCP binding. - 1 file changed, 15 insertions(+), 10 deletions(-) - -oranagra in commit 9203828: - fix tcp binding when IPv6 is unsupported - 2 files changed, 14 insertions(+), 10 deletions(-) - -antirez in commit d35deb2: - debug.c: no need to define _GNU_SOURCE, is defined in fmacros.h. - 1 file changed, 1 deletion(-) - -antirez in commit 6211e77: - crash log - improve code dump with more info and called symbols. - 1 file changed, 59 insertions(+), 20 deletions(-) - -wyx in commit f9c9b4b: - fix memory error on module unload - 1 file changed, 1 insertion(+), 1 deletion(-) - -oranagra in commit 24811fc: - crash log - add hex dump of function code - 1 file changed, 22 insertions(+) - -antirez in commit 0d179d1: - dict.c benchmark minor improvements. 
- 1 file changed, 19 insertions(+), 1 deletion(-) - -antirez in commit bd6c4ca: - dict.c benchmark: mixed del/insert benchmark. - 1 file changed, 11 insertions(+) - -antirez in commit 0f708ab: - dict.c benchmark: finish rehashing before testing lookups. - 1 file changed, 5 insertions(+) - -antirez in commit ed6a451: - dict.c benchmark improvements. - 1 file changed, 27 insertions(+), 4 deletions(-) - -antirez in commit 1074f73: - dict.c benchmark: take optional count argument. - 1 file changed, 11 insertions(+), 3 deletions(-) - -antirez in commit 91a59e0: - dict.c benchmark. - 2 files changed, 60 insertions(+), 1 deletion(-) - -antirez in commit 57a0db9: - Fix rdb.c var types when calling rdbLoadLen(). - 1 file changed, 3 insertions(+), 3 deletions(-) - -antirez in commit 9f76d82: - sds: don't check for impossible string size in 32 bit systems. - 1 file changed, 3 insertions(+) - -antirez in commit dacb69e: - RDB AOF preamble: test it in the aofrw unit. - 1 file changed, 52 insertions(+), 49 deletions(-) - -antirez in commit 764cc69: - Document RDB preamble in AOF rewrites in redis.conf. - 1 file changed, 14 insertions(+) - -antirez in commit e0d4146: - Sentinel example config: warn about protected mode. - 1 file changed, 16 insertions(+), 1 deletion(-) - -antirez in commit 543e25e: - RDB AOF preamble: WIP 4 (Mixed RDB/AOF loading). - 2 files changed, 33 insertions(+), 6 deletions(-) - -antirez in commit f1c32f0: - RDB AOF preamble: WIP 3 (RDB loading refactoring). - 1 file changed, 38 insertions(+), 29 deletions(-) - -antirez in commit feda523: - RDB AOF preamble: WIP 2. - 5 files changed, 37 insertions(+), 28 deletions(-) - -antirez in commit 4426cb1: - RDB AOF preamble: WIP 1. - 4 files changed, 72 insertions(+), 35 deletions(-) - -Salvatore Sanfilippo in commit 9f779b3: - Merge pull request #3340 from rojingeorge/unstable -Salvatore Sanfilippo in commit c5414ce: - Merge pull request #3429 from guoxiao/warning -Guo Xiao in commit 4bd72ab: - Use the standard predefined identifier __func__ (since C99) - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit e7f1798: - Modules: basic call/reply tests in test module. - 1 file changed, 71 insertions(+) - -antirez in commit 13f18d2: - Modules: handle NULL replies more gracefully. - 1 file changed, 6 insertions(+) - -antirez in commit a81a92c: - Security: Cross Protocol Scripting protection. - 3 files changed, 27 insertions(+), 2 deletions(-) - -antirez in commit ede6e22: - Fix comment over 80 cols. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 04340e1: - Modules: initial draft for a testing module. - 3 files changed, 235 insertions(+), 93 deletions(-) - -antirez in commit 7829e4e: - Modules: StringAppendBuffer() and ability to retain strings. - 3 files changed, 91 insertions(+), 5 deletions(-) - -Qu Chen in commit d982f44: - Fix a bug to delay bgsave while AOF rewrite in progress for replication - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 9424fe4: - Remove extra "-" from ASCII horizontal bar in comment. - 2 files changed, 2 insertions(+), 2 deletions(-) - -antirez in commit 9d52411: - Update linenoise to fix insecure redis-cli history file creation. - 1 file changed, 6 insertions(+), 1 deletion(-) - -antirez in commit 8966d4c: - Changelog format modified to be less verbose. - 1 file changed, 5 insertions(+), 1 deletion(-) - -antirez in commit 55385f9: - Ability of slave to announce arbitrary ip/port to master. 
- 6 files changed, 129 insertions(+), 17 deletions(-) - -antirez in commit 356a630: - Multiple GEORADIUS bugs fixed. - 6 files changed, 199 insertions(+), 21 deletions(-) - -antirez in commit 03f5b50: - Replication: when possible start RDB saving ASAP. - 1 file changed, 8 insertions(+), 2 deletions(-) - -antirez in commit 8b76d55: - Sentinel: new test unit 07 that tests master down conditions. - 1 file changed, 68 insertions(+) - -antirez in commit 3e9ce38: - Sentinel: check Slave INFO state more often when disconnected. - 2 files changed, 10 insertions(+), 3 deletions(-) - -antirez in commit 0a628e5: - Avoid simultaneous RDB and AOF child process. - 4 files changed, 51 insertions(+), 8 deletions(-) - -antirez in commit 780a8b1: - Replication: start BGSAVE for replication always in replicationCron(). - 1 file changed, 15 insertions(+), 12 deletions(-) - -antirez in commit e0582b3: - Fix maxmemory shared integer check bug introduced with LFU. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 2d5eb1f: - Volatile-ttl eviction policy implemented in terms of the pool. - 2 files changed, 25 insertions(+), 48 deletions(-) - -antirez in commit 9f1b7ab: - test-lru.rb: support for testing volatile-ttl policy. - 1 file changed, 49 insertions(+), 14 deletions(-) - -antirez in commit 6854c7b: - LFU: make counter log factor and decay time configurable. - 5 files changed, 95 insertions(+), 14 deletions(-) - -antirez in commit 6416ab1: - LFU: Use the LRU pool for the LFU algorithm. - 1 file changed, 25 insertions(+), 36 deletions(-) - -antirez in commit dbce190: - LFU: Fix bugs in frequency decay code. - 1 file changed, 2 insertions(+), 2 deletions(-) - -antirez in commit a8e2d08: - LFU: Initial naive eviction cycle. - 3 files changed, 49 insertions(+), 4 deletions(-) - -antirez in commit 24dd4a8: - redis-cli LRU test mode: randomize value of key when setting. - 1 file changed, 4 insertions(+), 1 deletion(-) - -antirez in commit b8450d7: - redis-cli LRU test mode: remove newline from key names. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 5d07984: - LFU: Redis object level implementation. - 5 files changed, 142 insertions(+), 15 deletions(-) - -antirez in commit ada70c7: - LFU simulator: remove dead code. - 1 file changed, 5 deletions(-) - -antirez in commit fc92c66: - LRU simulator: fix new entry creation decr time. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit f50dc38: - LRU simulator: fix new entry creation. - 1 file changed, 2 insertions(+), 2 deletions(-) - -antirez in commit 09fcb00: - LFU: Simulation of the algorithm planned for Redis. - 1 file changed, 163 insertions(+) - -antirez in commit e423f76: - LRU: Make cross-database choices for eviction. - 3 files changed, 161 insertions(+), 110 deletions(-) - -antirez in commit e64bf05: - LRU: cache SDS strings in the eviction pool. - 1 file changed, 29 insertions(+), 13 deletions(-) - -antirez in commit 965905c: - Move the struct evictionPoolEntry() into only file using it. - 2 files changed, 22 insertions(+), 12 deletions(-) - -antirez in commit d8e92a8: - Move prototype of evictionPoolAlloc() in server.h. - 2 files changed, 3 insertions(+), 2 deletions(-) - -antirez in commit 3b9495d: - LRU: use C99 variable len stack array in evictionPoolPopulate(). - 1 file changed, 1 insertion(+), 11 deletions(-) - -antirez in commit 2a12473: - redis-benchmark: new option to show server errors on stdout. 
- 1 file changed, 17 insertions(+), 1 deletion(-) - -antirez in commit 382991f: - Remove useless memmove() from freeMemoryIfNeeded(). - 1 file changed, 2 insertions(+), 7 deletions(-) - -antirez in commit b19b2df: - LRU: Fix output fixes to new test-lru.rb. - 1 file changed, 6 insertions(+), 6 deletions(-) - -antirez in commit 6a1c00c: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -antirez in commit 32a5494: - LRU: test-lru.rb improved in different ways. - 2 files changed, 180 insertions(+), 98 deletions(-) - -antirez in commit 51c1d40: - redis_check_rdb(): the rio structure must be global. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit eee878c: - redis_check_rdb_main(): create shared objects only if needed. - 1 file changed, 5 insertions(+), 1 deletion(-) - -antirez in commit 24882e3: - Fix redis_check_rdb() return value. - 1 file changed, 3 insertions(+), 2 deletions(-) - -antirez in commit 1e6bb9e: - Remove dead code from geohash_helper.c. - 1 file changed, 6 deletions(-) - -antirez in commit 3961071: - Fix signess issue in geohashEstimateStepsByRadius(). - 1 file changed, 2 insertions(+), 1 deletion(-) - -antirez in commit 504ccad: - Fix definition of M_PI in geohash_helper.c. - 2 files changed, 2 insertions(+), 1 deletion(-) - -antirez in commit eaa713e: - geohash.c and geohash_helper.c are part of Redis. - 11 files changed, 686 insertions(+), 717 deletions(-) - -antirez in commit 4a140d3: - Add expire.c and evict.c. - 2 files changed, 718 insertions(+) - -antirez in commit b46239e: - Expire and LRU related code moved into different files. - 5 files changed, 4 insertions(+), 633 deletions(-) - -antirez in commit 0610683: - Makefile: don't build dependencies file for clean, distclean. - 1 file changed, 3 insertions(+) - -antirez in commit 1036182: - Generate Makefile.dep at every build. - 3 files changed, 6 insertions(+), 194 deletions(-) - -antirez in commit abb3385: - Regression test for issue #3333. - 1 file changed, 6 insertions(+) - -antirez in commit 2379182: - getLongLongFromObject: use string2ll() instead of strict_strtoll(). - 1 file changed, 1 insertion(+), 15 deletions(-) - -antirez in commit ef6a4df: - redis-cli: check SELECT reply type just in state updated. - 1 file changed, 1 insertion(+), 1 deletion(-) - -Salvatore Sanfilippo in commit 0df2865: - Merge pull request #3365 from sskorgal/unstable -antirez in commit c383be3: - Sentinel: fix cross-master Sentinel address update. - 1 file changed, 9 insertions(+), 2 deletions(-) - -antirez in commit b2cc8bc: - CONFIG GET is now no longer case sensitive. - 1 file changed, 12 insertions(+), 12 deletions(-) - -antirez in commit a0dd014: - Fix test for new RDB checksum failure message. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit b99ad1b: - Make tcp-keepalive default to 300 in internal conf. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 7e220a9: - In Redis RDB check: more details in error reportings. - 1 file changed, 21 insertions(+), 13 deletions(-) - -antirez in commit e697153: - In Redis RDB check: log decompression errors. - 2 files changed, 23 insertions(+), 2 deletions(-) - -antirez in commit df3c69e: - In Redis RDB check: log object type on error. - 1 file changed, 27 insertions(+), 2 deletions(-) - -antirez in commit c0f4d19: - Added a trivial program to randomly corrupt RDB files in /utils. - 1 file changed, 44 insertions(+) - -antirez in commit 2ab7097: - In Redis RDB check: minor output message changes. 
- 1 file changed, 4 insertions(+), 1 deletion(-) - -antirez in commit e9f31ba: - In Redis RDB check: better error reporting. - 4 files changed, 71 insertions(+), 14 deletions(-) - -sskorgal in commit 9dfd9d1: - Fix for redis_cli printing default DB when select command fails. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit e97fadb: - In Redis RDB check: initial POC. - 2 files changed, 189 insertions(+), 660 deletions(-) - -Rojin George in commit d0f5307: - Merge remote-tracking branch 'refs/remotes/antirez/unstable' into unstable -antirez in commit 24bd9b1: - Test: new randomized stress tester for #3343 alike bugs. - 1 file changed, 27 insertions(+), 2 deletions(-) - -antirez in commit f983318: - Stress tester WIP. - 1 file changed, 3 insertions(+) - -antirez in commit 4989986: - Regression test for issue #3343 exact min crash sequence. - 1 file changed, 16 insertions(+) - -antirez in commit dc18a6a: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -antirez in commit 5e176e1: - Fix quicklistReplaceAtIndex() by updating the quicklist ziplist size. - 1 file changed, 1 insertion(+) - -Salvatore Sanfilippo in commit ae4f5b3: - Merge pull request #3342 from yossigo/fix_calloc -Yossi Gottlieb in commit 19c401d: - Fix RedisModule_Calloc() definition typo. - 1 file changed, 1 insertion(+), 1 deletion(-) - -rojingeorge in commit 4242fdf: - Display the nodes summary once the cluster is established using redis-trib.rb - 1 file changed, 5 insertions(+) - -rojingeorge in commit 4aab50a: - Merge remote-tracking branch 'refs/remotes/antirez/unstable' into unstable -Salvatore Sanfilippo in commit f60aa4d: - Merge pull request #3324 from mishan/fix-wrong-comment-about-sentinel-mode -antirez in commit 1898311: - Modules: mention RedisModule_Calloc() in the doc. - 1 file changed, 1 insertion(+) - -Salvatore Sanfilippo in commit 3a0b776: - Merge pull request #3335 from dvirsky/rm_calloc -antirez in commit c026b5c: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -antirez in commit 0f484d8: - Actually remove static from #3331. - 1 file changed, 1 insertion(+), 2 deletions(-) - -Salvatore Sanfilippo in commit 28ea585: - Merge pull request #3336 from yossigo/create_string_from_string -antirez in commit c0ca87d: - Minor change to conform PR #3331 to Redis code base style. - 1 file changed, 1 insertion(+), 2 deletions(-) - -Salvatore Sanfilippo in commit a66dd43: - Merge pull request #3331 from yossigo/fix_openkey_crash -Salvatore Sanfilippo in commit 393c468: - Merge pull request #3338 from tielei/unstable -tielei in commit f648c5a: - A string with 21 chars is not representable as a 64-bit integer. - 1 file changed, 2 insertions(+), 2 deletions(-) - -Salvatore Sanfilippo in commit 3d48c93: - Merge pull request #3330 from yossigo/fix_const -antirez in commit 4b12c6a: - Modules: changes to logging function. - 3 files changed, 39 insertions(+), 19 deletions(-) - -Yossi Gottlieb in commit 715794b: - Add RedisModule_Log() logging API function. - 3 files changed, 42 insertions(+) - -antirez in commit b507289: - Commit change in autoMemoryFreed(): first -> last. - 1 file changed, 2 insertions(+), 1 deletion(-) - -antirez in commit f2dbc02: - Modules: implement zig-zag scanning in autoMemoryFreed(). - 1 file changed, 20 insertions(+), 16 deletions(-) - -Salvatore Sanfilippo in commit 2fe9b79: - Merge pull request #3244 from dvirsky/optimize_autoMemoryFreed -Yossi Gottlieb in commit 61172ed: - Add RedisModule_CreateStringFromString(). 
- 5 files changed, 26 insertions(+), 2 deletions(-) - -Dvir Volk in commit dc7f3fe: - added RM_Calloc implementation - 2 files changed, 13 insertions(+), 1 deletion(-) - -antirez in commit 4e10b08: - Modules doc: hint about replacing libc malloc calls. - 1 file changed, 15 insertions(+), 1 deletion(-) - -Yossi Gottlieb in commit e22f3e4: - Cleanup: remove zset reset function from RM_ZsetRangeStop(). - 1 file changed, 13 insertions(+), 7 deletions(-) - -Yossi Gottlieb in commit a8e2034: - Fix occasional RM_OpenKey() crashes. - 1 file changed, 1 insertion(+) - -Yossi Gottlieb in commit 8f3a4df: - Use const in Redis Module API where possible. - 13 files changed, 38 insertions(+), 38 deletions(-) - -Salvatore Sanfilippo in commit 0b4b7eb: - Merge pull request #3252 from oranagra/config_fix -Misha Nasledov in commit 7a5538d: - Fix incorrect comment for checkForSentinelMode function - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit f7351f4: - Fix Sentinel pending commands counting. - 1 file changed, 1 insertion(+) - -antirez in commit 7c8f275: - redis-cli: really connect to the right server. - 1 file changed, 5 insertions(+), 2 deletions(-) - -antirez in commit a3f893b: - RESTORE: accept RDB dumps with older versions. - 1 file changed, 1 insertion(+), 1 deletion(-) - -Salvatore Sanfilippo in commit 8272cea: - Merge pull request #3255 from oranagra/error_string -Salvatore Sanfilippo in commit 64b834b: - Merge pull request #3256 from oranagra/georasius_neg -antirez in commit 2f2fd64: - Minor aesthetic fixes to PR #3264. - 1 file changed, 5 insertions(+), 5 deletions(-) - -Salvatore Sanfilippo in commit 33a9836: - Merge pull request #3264 from oranagra/bitfield_fix2 -Salvatore Sanfilippo in commit 5d83f6c: - Merge pull request #3274 from MOON-CLJ/fix_promoted_slave -antirez in commit 3bd20ea: - Test TOUCH and new TTL / TYPE behavior about object access time. - 2 files changed, 24 insertions(+) - -Salvatore Sanfilippo in commit 226f679: - Merge pull request #3283 from ideal/unstable -Salvatore Sanfilippo in commit bd2cd70: - Merge pull request #3281 from jamespedwards42/unstable -antirez in commit 2d86995: - GETRANGE: return empty string with negative, inverted start/end. - 2 files changed, 6 insertions(+), 2 deletions(-) - -antirez in commit eb45e11: - Remove additional round brackets from fix for #3282. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit ca54335: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -Salvatore Sanfilippo in commit 001cadc: - Merge pull request #3282 from wenduo/unstable -antirez in commit 212f157: - Regression test for #3282. - 1 file changed, 10 insertions(+) - -Salvatore Sanfilippo in commit 3deb7ba: - Merge pull request #3226 from MichielDeMey/patch-1 -Salvatore Sanfilippo in commit 82554ca: - Merge pull request #3313 from zshipko/unstable -zach shipko in commit b7b9aa6: - BSDs don't have -ldl - 1 file changed, 15 insertions(+), 5 deletions(-) - -antirez in commit 41d804d: - TTL and TYPE LRU access fixed. TOUCH implemented. - 3 files changed, 60 insertions(+), 8 deletions(-) - -antirez in commit cd8e688: - redis-cli help.h updated. - 1 file changed, 13 insertions(+), 8 deletions(-) - -antirez in commit c6e3ce3: - Enable tcp-keepalive by default. - 1 file changed, 3 insertions(+), 2 deletions(-) - -antirez in commit 5ba9bde: - Modules: document how to pass config params to modules. 
- 1 file changed, 19 insertions(+), 1 deletion(-) - -antirez in commit 5831dd8: - Fix example modules to have the right OnLoad() prototype. - 2 files changed, 8 insertions(+), 2 deletions(-) - -antirez in commit a4bce77: - Don't assume no padding or specific ordering in moduleLoadQueueEntry structure. - 2 files changed, 4 insertions(+), 3 deletions(-) - -antirez in commit 9a02dac: - Free module context after loading. - 1 file changed, 1 insertion(+) - -antirez in commit b6cd008: - Make sure modules arguments are raw strings. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 1ad5c22: - Minor changes to unifor C style to Redis code base for PR #3293. - 3 files changed, 8 insertions(+), 6 deletions(-) - -Salvatore Sanfilippo in commit e8d5387: - Merge pull request #3293 from yossigo/module_config -antirez in commit e71f22f: - Fix typo: after -> before. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit e4567f2: - Explain why module type names are 9 chars. - 1 file changed, 52 insertions(+) - -Salvatore Sanfilippo in commit 688996f: - Merge pull request #3295 from catwell/pr-1-warnings -Salvatore Sanfilippo in commit b4327ae: - Merge pull request #3294 from yossigo/fix_unload -antirez in commit a1684ff: - Remove tryObjectEncoding() calls from list type. - 1 file changed, 3 deletions(-) - -antirez in commit 5beec97: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -Michiel De Mey in commit 90781de: - Added documentation for non-interactive install procedure - 1 file changed, 19 insertions(+), 3 deletions(-) - -Salvatore Sanfilippo in commit ab73544: - Merge pull request #3296 from catwell/pr-2-variadic-pushx -andyli in commit 93a0987: - fix comment "b>a" to "a > b" - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 2a57ad5: - Fixed typo in Sentinel compareSlavesForPromotion() comment. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 243c9dc: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -Salvatore Sanfilippo in commit 01a34b1: - Merge pull request #2870 from SaurabhJha/documenation-fixes -antirez in commit 67fcd26: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -Salvatore Sanfilippo in commit bac4de7: - Merge pull request #3180 from bogdanvlviv/fix_pidfile -antirez in commit 5481336: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -Salvatore Sanfilippo in commit 53c008a: - Merge pull request #3303 from jspraul/patch-1 -antirez in commit b664aeb: - Improve timer callback creation comment. - 1 file changed, 3 insertions(+), 2 deletions(-) - -jspraul in commit 4df95e8: - Include 'fd_set' type name - 1 file changed, 1 insertion(+) - -Salvatore Sanfilippo in commit 30c1a85: - Merge pull request #3301 from yossigo/fix_unused_warning -Yossi Gottlieb in commit 2fd6ca3: - Remove gcc warning when redismodule.h is included by a multi-file module. 
- 1 file changed, 1 insertion(+) - -Saurabh Jha in commit 319b126: - Fixup - 1 file changed, 8 insertions(+), 8 deletions(-) - -Saurabh Jha in commit 61717ac: - More edits to README - 1 file changed, 15 insertions(+), 15 deletions(-) - -Saurabh Jha in commit 0f10b16: - Address grammatical comments - 1 file changed, 2 insertions(+), 2 deletions(-) - -Saurabh Jha in commit 90a3647: - Fix typos in documentation - 2 files changed, 15 insertions(+), 16 deletions(-) - -Pierre Chapuis in commit d88c3c7: - make RPUSHX and LPUSHX variadic - 3 files changed, 18 insertions(+), 9 deletions(-) - -Pierre Chapuis in commit b670a16: - remove unused variable - 1 file changed, 2 insertions(+), 2 deletions(-) - -Pierre Chapuis in commit 3e9c20f: - untangle LINSERT and {L,R}PUSHX implementations - 1 file changed, 45 insertions(+), 40 deletions(-) - -Pierre Chapuis in commit 188d90f: - fix some compiler warnings - 3 files changed, 17 insertions(+), 10 deletions(-) - -Yossi Gottlieb in commit 87312ff: - Fix MODULE UNLOAD crash and/or wrong error message. - 1 file changed, 14 insertions(+), 7 deletions(-) - -Yossi Gottlieb in commit cc58f11: - Use RedisModuleString for OnLoad argv. - 3 files changed, 5 insertions(+), 11 deletions(-) - -Yossi Gottlieb in commit 2bd13cf: - Allow passing arguments to modules on load. - 3 files changed, 46 insertions(+), 13 deletions(-) - -antirez in commit 550fa7e: - modules API.md updated. - 1 file changed, 232 insertions(+), 10 deletions(-) - -antirez in commit c3f5b6e: - Modules: native types doc, 70% done. - 1 file changed, 305 insertions(+) - -antirez in commit 5830d88: - Modules: pool allocator doc. - 1 file changed, 53 insertions(+), 1 deletion(-) - -antirez in commit 31eb8ec: - Modules: top comments in helloworld.c and hellotype.c. - 2 files changed, 74 insertions(+) - -antirez in commit 8ec2800: - Modules: support for modules native data types. - 11 files changed, 991 insertions(+), 35 deletions(-) - -antirez in commit 27e5f38: - RDB v8: fix rdbLoadLen() return value. - 3 files changed, 61 insertions(+), 36 deletions(-) - -antirez in commit e6554be: - RDB v8: new ZSET storage format with binary doubles. - 2 files changed, 27 insertions(+), 5 deletions(-) - -antirez in commit 4aae4f7: - RDB v8: ability to save uint64_t lengths. - 3 files changed, 34 insertions(+), 44 deletions(-) - -antirez in commit b64fcbc: - Test: run GEO tests by default. - 1 file changed, 1 insertion(+) - -antirez in commit 231c9db: - Now that SPOP can be called by scripts use BLPOP on 's' flag test. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 2503acf: - Avoid undefined behavior in BITFIELD implementation. - 1 file changed, 15 insertions(+), 8 deletions(-) - -Salvatore Sanfilippo in commit 9200312: - Merge pull request #3278 from itamarhaber/patch-8 -antirez in commit 5d4b5fb: - Geo: fix typo in geohashEstimateStepsByRadius(). - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 18a513f: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -antirez in commit 4eff3dc: - Fix GEORADIUS wrong output with radius > Earth radius. - 1 file changed, 2 insertions(+) - -ideal in commit 4067132: - fix mistake comment in object.c - 1 file changed, 1 insertion(+), 1 deletion(-) - -wenduo in commit 41dacdb: - bitcount bug:return non-zero value when start > end (both negative) - 1 file changed, 4 insertions(+) - -jamespedwards42 in commit 3432061: - Fix modules intro typos. 
- 1 file changed, 3 insertions(+), 4 deletions(-) - -Itamar Haber in commit 2866e02: - Allow SPOP from Lua scripts - 1 file changed, 1 insertion(+), 1 deletion(-) - -MOON_CLJ in commit aa57844: - fix check when can't send the command to the promoted slave - 1 file changed, 1 insertion(+), 1 deletion(-) - -oranagra in commit 5d96b7e: - check WRONGTYPE in BITFIELD before looping on the operations. - 1 file changed, 18 insertions(+), 9 deletions(-) - -oranagra in commit c4433d2: - fix crash in BITFIELD GET on non existing key or wrong type see #3259 - 1 file changed, 5 insertions(+), 3 deletions(-) - -oranagra in commit f3e81de: - fix georadius returns multiple replies - 1 file changed, 5 insertions(+), 2 deletions(-) - -oranagra in commit 8d9d8d1: - CLIENT error message was out of date - 1 file changed, 1 insertion(+), 1 deletion(-) - -Salvatore Sanfilippo in commit 8c4f4d1: - Merge pull request #3249 from badboy/rcli-debug-printing -oranagra in commit 5fa711f: - config set list-max-ziplist-size didn't support negative values, unlike config file - 1 file changed, 3 insertions(+), 3 deletions(-) - -Jan-Erik Rediger in commit 892565f: - Remove debug printing - 1 file changed, 3 deletions(-) - -Dvir Volk in commit 137fd86: - optimized amFree even further - 1 file changed, 9 insertions(+), 4 deletions(-) - -Dvir Volk in commit 46b07cb: - Optimized autoMemoryFreed loop - 1 file changed, 4 insertions(+), 1 deletion(-) - -antirez in commit b09a6b6: - Fix modules compilation when libc malloc is used. - 1 file changed, 2 insertions(+), 2 deletions(-) - -Salvatore Sanfilippo in commit bafed3d: - Merge pull request #3222 from oranagra/more_minir_fixes -antirez in commit e3edae9: - Modules: RM_HashSet() SDS ownership business clarified in comments. - 1 file changed, 5 insertions(+), 5 deletions(-) - -Salvatore Sanfilippo in commit 01a83d0: - Merge pull request #3239 from dvirsky/fix_hashset_crash -antirez in commit bee963c: - Code to access object string bytes repeated 3x refactored into 1 function. - 1 file changed, 39 insertions(+), 35 deletions(-) - -antirez in commit ffd1600: - Clarify that the LOG_STR_SIZE includes null term. - 1 file changed, 1 insertion(+), 1 deletion(-) - -Salvatore Sanfilippo in commit 4c9c9d7: - Merge pull request #3221 from oranagra/bitfield_fix -antirez in commit 078f461: - Test for BITFIELD regression #3221. - 1 file changed, 5 insertions(+) - -antirez in commit c6c86ea: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -antirez in commit 968e838: - Actually use --with-lg-quantum=3 to build jemalloc. - 1 file changed, 1 insertion(+), 1 deletion(-) - -Dvir Volk in commit cfaef8d: - fixed bad transfer of ownership in HashSet causing a potential crash - 1 file changed, 7 insertions(+), 4 deletions(-) - -Salvatore Sanfilippo in commit b1b1f4e: - Merge pull request #3238 from oranagra/struct_fix -oranagra in commit 283a812: - reduce struct padding by reordering members - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 9aff564: - Modules: initial pool allocator and a LEFTPAD usage example. - 3 files changed, 147 insertions(+), 4 deletions(-) - -Michiel De Mey in commit af1e63c: - Allow non-interactive execution of install_server - 1 file changed, 45 insertions(+), 28 deletions(-) - -antirez in commit 646c958: - Modules: doc layout improved. - 4 files changed, 1475 insertions(+), 575 deletions(-) - -antirez in commit 745845d: - Modules doc: mention the functions not yet documented. 
- 1 file changed, 25 insertions(+), 8 deletions(-) - -oranagra in commit 77a9144: - fix crash in BITFIELD GET when key is integer encoded - 1 file changed, 15 insertions(+), 3 deletions(-) - -antirez in commit 5daece2: - RM_ZsetRangeNext()/Prev() typo in define name leading to crash fixed. - 1 file changed, 2 insertions(+), 2 deletions(-) - -antirez in commit 5f977c3: - Modules: commandFlagsFromString() top comment back to 80 cols max. - 1 file changed, 29 insertions(+), 24 deletions(-) - -antirez in commit ef2b4f6: - Trailing spaces removed from moduleCreateArgvFromUserFormat(). - 1 file changed, 4 insertions(+), 4 deletions(-) - -antirez in commit 6250a6b: - Modules: RM_GetClientId() implemented. - 2 files changed, 19 insertions(+) - -Dvir Volk in commit 9a71df5: - fixed crash when calling CreateStringFromCallReply on array elements - 1 file changed, 1 insertion(+) - -Itamar Haber in commit 3816f16: - Avoids reallocating and double String on truncate - 1 file changed, 17 insertions(+), 18 deletions(-) - -Dvir Volk in commit d41bd23: - fixed comment formatting in RM_CreateCommand - 1 file changed, 13 insertions(+), 13 deletions(-) - -Ramon Snir in commit 909a707: - vector of strings is implemented now - 1 file changed, 1 insertion(+), 1 deletion(-) - -Dvir Volk in commit a00e872: - another small comment fix - 1 file changed, 2 insertions(+), 1 deletion(-) - -Dvir Volk in commit 87de31f: - fixed comment - 1 file changed, 3 insertions(+), 1 deletion(-) - -Dvir Volk in commit 8fc6708: - second attempt at vector formtting - 1 file changed, 12 insertions(+), 1 deletion(-) - -antirez in commit 7f5e134: - Modules: add ZADD_INCR flag to zset increment API. - 1 file changed, 1 insertion(+) - -Itamar Haber in commit 1186f92: - typo: %s/Emtpy/Empty/g - 1 file changed, 5 insertions(+), 5 deletions(-) - -antirez in commit 227d680: - Modules: command <-> core interface modified to get flags & keys. - 7 files changed, 177 insertions(+), 40 deletions(-) - -Ramon Snir in commit 676a6a4: - tiny typo in Redis Modules API documentation - 1 file changed, 1 insertion(+), 1 deletion(-) - -Dvir Volk in commit a1f8e22: - fixed return value of HashGet (and a slight error in the documentation) - 1 file changed, 2 insertions(+), 2 deletions(-) - -antirez in commit 02c4a6c: - Modules: REDISMODULE_POSTPONED_ARRAY_LEN doc. - 1 file changed, 45 insertions(+) - -antirez in commit 42f7221: - Modules: Hash API defines made more uniform. - 3 files changed, 38 insertions(+), 42 deletions(-) - -antirez in commit 9b0556c: - Modules: Hash type API WIP #2. - 3 files changed, 60 insertions(+), 4 deletions(-) - -antirez in commit 10993ca: - Modules: Hash type API WIP #1. - 4 files changed, 212 insertions(+), 6 deletions(-) - -antirez in commit 5bf5fd2: - Modules: a few fixes for the zset iterator. - 2 files changed, 13 insertions(+), 6 deletions(-) - -antirez in commit 33e1231: - Modules: postponed array lengths. - 2 files changed, 86 insertions(+), 3 deletions(-) - -antirez in commit 00109e1: - Modules: zset lex iterator #3. - 3 files changed, 49 insertions(+), 3 deletions(-) - -antirez in commit db3ade2: - Modules: zset lex iterator #2. - 3 files changed, 30 insertions(+), 6 deletions(-) - -antirez in commit 2b04f86: - Modules: zset lex iterator #1. - 3 files changed, 77 insertions(+), 4 deletions(-) - -antirez in commit 083f527: - Modules: zset iterator redesign #1. - 4 files changed, 76 insertions(+), 106 deletions(-) - -antirez in commit d998170: - Simple Ruby script to generate reference doc added. 
- 1 file changed, 38 insertions(+) - -antirez in commit 0fd6d54: - Modules: fix top comments to be user-facing doc quality. About 33% done. - 1 file changed, 137 insertions(+), 51 deletions(-) - -antirez in commit f362f7a: - Modules: sorted set iterators WIP #3. - 4 files changed, 134 insertions(+), 24 deletions(-) - -antirez in commit bdbb5a0: - Modules: put zset iterator current element in auto memory pool. - 1 file changed, 6 insertions(+), 2 deletions(-) - -antirez in commit 6eeeda3: - Modules: sorted set iterators WIP #2. - 3 files changed, 34 insertions(+), 4 deletions(-) - -antirez in commit eac5a13: - Modules: sorted set iterators WIP. - 4 files changed, 221 insertions(+), 1 deletion(-) - -antirez in commit 556d593: - Remove useless space. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit d5ecedd: - Modules: ZSET API WIP #4. - 2 files changed, 33 insertions(+), 1 deletion(-) - -antirez in commit e1b34ec: - Modules: ZSET API WIP #3. - 2 files changed, 24 insertions(+) - -antirez in commit 4457e4a: - Modules: ZSET API WIP #2. - 2 files changed, 93 insertions(+), 1 deletion(-) - -antirez in commit f199504: - Modules: ZSET API WIP. - 2 files changed, 14 insertions(+), 5 deletions(-) - -antirez in commit 11b3df2: - Modules: expire API and documentation. - 4 files changed, 101 insertions(+) - -antirez in commit f4e0129: - Modules: RedisModule_ReplyWithCallReply(). - 4 files changed, 31 insertions(+), 1 deletion(-) - -Itamar Haber in commit 6054089: - Stops SPLICE from accepting negative counts - 1 file changed, 6 insertions(+), 2 deletions(-) - -Sun He in commit 2e464bf: - modules/RM_StringTruncate: correct reallocate condition - 1 file changed, 1 insertion(+), 1 deletion(-) - -Sun He in commit 3a7b170: - modules/RM_OpenKey: avoid decrRefCount obj twice - 1 file changed, 1 deletion(-) - -Sun He in commit 1868dee: - modules/RM_StringSet: set key->value - 1 file changed, 1 insertion(+) - -Sun He in commit fded8aa: - modules: correct protolen - 1 file changed, 4 insertions(+), 4 deletions(-) - -antirez in commit 4efe9e1: - Add the last break for consistency in moduleCreateCallReplyFromProto. - 1 file changed, 1 insertion(+), 1 deletion(-) - -Dvir Volk in commit e453d36e: - fixed case in moduleCreateCallReplyFromProto - 1 file changed, 4 insertions(+), 4 deletions(-) - -Dvir Volk in commit ae5cb3f: - renamed RedisModule_ReplyWithNull to RM_ReplyWithNull to fix compilation - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 2967c00: - Modules: RedisModule_ReplyWithNull() implemented. - 2 files changed, 9 insertions(+) - -Dvir Volk in commit e711a9a: - fixed the doc with a right function name - 1 file changed, 3 insertions(+), 2 deletions(-) - -Yossi Gottlieb in commit e443ad9: - Log loadmodule dlopen() errors. - 1 file changed, 4 insertions(+), 1 deletion(-) - -antirez in commit 85919f8: - Modules: avoid conflict between modules func pointers and dynamic symbols. - 2 files changed, 95 insertions(+), 88 deletions(-) - -antirez in commit 6020469: - Modules: remove warnings due to void/function pointer conversion. - 1 file changed, 7 insertions(+), 6 deletions(-) - -Dvir Volk in commit 34f2fb7: - fixed makefile for linux - 1 file changed, 12 insertions(+), 2 deletions(-) - -antirez in commit 6dead2c: - Modules: first preview 31 March 2016. 
- 13 files changed, 2625 insertions(+), 4 deletions(-) - -oranagra in commit 9682b61: - minor fixes - mainly signalModifiedKey, and GEORADIUS - 6 files changed, 19 insertions(+), 16 deletions(-) - -antirez in commit 3b644e8: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -Salvatore Sanfilippo in commit b44ad30: - Merge pull request #732 from evilpacket/remove_dofile -antirez in commit 8eb43bf: - redis-cli: integrate help.h with COMMAND output. - 1 file changed, 70 insertions(+), 10 deletions(-) - -antirez in commit f9ee039: - Scripting test: match new error message. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 2205c46: - Cluster: don't check scripts key slots during AOF loading. - 1 file changed, 4 insertions(+), 2 deletions(-) - -antirez in commit 94dc71f: - redis-cli: remove debugging message. - 1 file changed, 1 deletion(-) - -antirez in commit bdbeb07: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -antirez in commit 68dd1c9: - Revert "Fix commandCommand arity" - 1 file changed, 1 insertion(+), 1 deletion(-) - -Salvatore Sanfilippo in commit f0fcc36: - Merge pull request #2956 from pkulchenko/global-protection-msg-typo -Ruben Bridgewater in commit efa0840: - Fix commandCommand arity - 1 file changed, 1 insertion(+), 1 deletion(-) - -Salvatore Sanfilippo in commit f5ff91f: - Merge pull request #2998 from danielhtshih/unstable -Salvatore Sanfilippo in commit 0c1f84f: - Merge pull request #3006 from baishaofei/unstable -Salvatore Sanfilippo in commit 7b90815: - Merge pull request #3008 from badboy/fix-2911 -antirez in commit 02db338: - redis-cli: don't free historyfile, is used later. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 4c53bab: - Cluster test 12: reshard back just a few slots to speedup the test. - 1 file changed, 7 insertions(+), 7 deletions(-) - -antirez in commit 971e3c5: - Cluster: make getNodeByQuery() responsible of -CLUSTERDOWN errors. - 2 files changed, 21 insertions(+), 18 deletions(-) - -Salvatore Sanfilippo in commit 330715a: - Merge pull request #3039 from itamarhaber/patch-3 -Salvatore Sanfilippo in commit a4df156: - Merge pull request #3077 from Palethorn/ipv6-redirect-parse -antirez in commit c77b95f: - Bind both IPv4 and IPv6 or exit with an error by default. - 1 file changed, 10 insertions(+), 8 deletions(-) - -antirez in commit 0bb787d: - Quick fix to avoid false positive in replica migration test. - 1 file changed, 4 insertions(+), 1 deletion(-) - -Chris Thunes in commit d827dbf: - Ensure slots are rechecked on EXEC. - 1 file changed, 7 insertions(+), 2 deletions(-) - -Salvatore Sanfilippo in commit 91b4966: - Merge pull request #3188 from therealbill/unstable -Salvatore Sanfilippo in commit de3a673: - Merge pull request #3193 from sethbergman/patch-1 -Salvatore Sanfilippo in commit 09153b9: - Merge pull request #3152 from be-hase/fix/check_open_slots -antirez in commit b76d27c: - Added a tool for generating changelogs automatically. - 1 file changed, 26 insertions(+) - -antirez in commit b632f78: - Minor redis-cli wording change in --help output. - 1 file changed, 2 insertions(+), 1 deletion(-) - -antirez in commit 995b9ff: - Allow CONFIG GET during loading. - 2 files changed, 7 insertions(+), 1 deletion(-) - -antirez in commit 5500c51: - Command "r" flag removed from commands not accessing the key space. - 1 file changed, 35 insertions(+), 35 deletions(-) - -antirez in commit 840ac20: - DEBUG command self documentation. 
- 2 files changed, 49 insertions(+), 2 deletions(-) - -Salvatore Sanfilippo in commit b5352ee: - Merge pull request #3191 from oranagra/minor_fix -antirez in commit 2c22f59: - Reply with error on negative geo radius. - 1 file changed, 4 insertions(+), 1 deletion(-) - -antirez in commit 9c48f28: - Cluster regression test for #3043. - 1 file changed, 61 insertions(+) - -antirez in commit 4fdde78: - New masters with slots are now targets of migration if others are. - 1 file changed, 35 insertions(+), 1 deletion(-) - -Seth Bergman in commit da26f2b: - Fixed typo in README.md - 1 file changed, 1 insertion(+), 1 deletion(-) - -Oran Agra in commit 5e3880a: - various cleanups and minor fixes - 12 files changed, 39 insertions(+), 45 deletions(-) - -Oran Agra in commit 6ed8c28: - dict.c minor optimization - 1 file changed, 4 insertions(+), 4 deletions(-) - -Oran Agra in commit 7b52ef1: - networking.c minor optimization - 1 file changed, 5 insertions(+), 6 deletions(-) - -Oran Agra in commit f8909a2: - add DEBUG JEMALLC PURGE and JEMALLOC INFO cleanup - 2 files changed, 17 insertions(+), 2 deletions(-) - -Oran Agra in commit 7ba9022: - fix small issues in redis 3.2 - 2 files changed, 3 insertions(+), 1 deletion(-) - -Oran Agra in commit b554895: - additional fix to issue #2948 - 1 file changed, 3 insertions(+) - -therealbill in commit 14086a4: - fix for #3187 - 3 files changed, 4 insertions(+), 4 deletions(-) - -bogdanvlviv in commit 5565cc6: - fix pidfile in redis.conf - 1 file changed, 1 insertion(+), 1 deletion(-) - -Salvatore Sanfilippo in commit 4cbe044: - Merge pull request #3174 from djanowski/fix-zincrby-return-value -Damian Janowski in commit 0b4bb50: - Fix ZINCRBY return value. - 2 files changed, 8 insertions(+) - -antirez in commit dda0f37: - ZREM refactored into proper API. - 2 files changed, 48 insertions(+), 49 deletions(-) - -antirez in commit 6f926c3: - ZRANK refactored into proper API. - 2 files changed, 72 insertions(+), 50 deletions(-) - -antirez in commit b73c7af: - zsetAdd() API exposed into server.h. - 2 files changed, 16 insertions(+), 15 deletions(-) - -antirez in commit b1f181a: - ZADD refactored into a proper API. - 1 file changed, 182 insertions(+), 86 deletions(-) - -antirez in commit e0eb5f6: - redis-cli preferences and rc file support. - 1 file changed, 85 insertions(+), 17 deletions(-) - -antirez in commit 70b3314: - redis-cli help.h updated. - 1 file changed, 18 insertions(+), 8 deletions(-) - -antirez in commit d1ddf7e: - redis-cli hints. - 3 files changed, 62 insertions(+), 1 deletion(-) - -antirez in commit d6e2cc7: - Linenoise updated again (hints support). - 4 files changed, 248 insertions(+), 8 deletions(-) - -antirez in commit 8a98b8d: - Linenoise updated. - 1 file changed, 45 insertions(+), 15 deletions(-) - -antirez in commit b9feef9: - ae.c: Fix delay until next timer event. - 1 file changed, 12 insertions(+), 9 deletions(-) - -antirez in commit ace780c: - ae.c: comment to explain why we have a useless maxId check. - 1 file changed, 5 insertions(+), 1 deletion(-) - -antirez in commit 67b70a1: - Fix ae.c to avoid timers infinite loop. - 4 files changed, 28 insertions(+), 30 deletions(-) - -Ryosuke Hasebe in commit cad9ea5: - fix variable - 1 file changed, 5 insertions(+), 5 deletions(-) - -Ryosuke Hasebe in commit d5aa7e2: - fix check_open_slots - 1 file changed, 2 insertions(+), 1 deletion(-) - -antirez in commit 28c291c: - BITFIELD: overflow wrap behavior fuzz tester. 
- 1 file changed, 51 insertions(+) - -antirez in commit ae7317e: - BITFIELD basic unit tests. - 1 file changed, 85 insertions(+), 1 deletion(-) - -antirez in commit fc84378: - BITFIELD: Farest bit set is offset+bits-1. Off by one error fixed. - 1 file changed, 4 insertions(+), 2 deletions(-) - -antirez in commit 9a00da0: - BITFIELD: overflow fuzzy testing. - 2 files changed, 53 insertions(+) - -antirez in commit 27fc01a: - Fix typo in bitops.tcl comment. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit fe64960: - More BITFIELD fixes. Overflow conditional simplified. - 1 file changed, 8 insertions(+), 9 deletions(-) - -Salvatore Sanfilippo in commit 235f553: - Merge pull request #3118 from sunheehnus/bitfield-fix-minor-bug -Sun He in commit 93cc8ba: - bitops/bitfield: fix length, overflow condition and *sign - 1 file changed, 8 insertions(+), 5 deletions(-) - -antirez in commit e85d6f2: - Fix INFO commandstats reporting when argv is rewritten. - 2 files changed, 3 insertions(+), 3 deletions(-) - -antirez in commit 32289d5: - BITFIELD: refactoring & fix of retval on FAIL. - 1 file changed, 24 insertions(+), 8 deletions(-) - -antirez in commit 11745e0: - BITFIELD: Fix # form parsing. - 1 file changed, 4 insertions(+), 6 deletions(-) - -antirez in commit 2800d09: - BITFIELD: Support # offsets form. - 1 file changed, 23 insertions(+), 6 deletions(-) - -antirez in commit 70af626: - BITFIELD command initial implementation. - 3 files changed, 476 insertions(+), 32 deletions(-) - -Salvatore Sanfilippo in commit 438ae49: - Merge pull request #3101 from itamarhaber/geohumanized -Itamar Haber in commit b5149f0: - Eliminates engineers near the equator & prime meridian - 1 file changed, 4 insertions(+), 4 deletions(-) - -Itamar Haber in commit 41030ae: - Fixes a typo in a comment - 1 file changed, 1 insertion(+), 1 deletion(-) - -Itamar Haber in commit 4e9c302: - Adjusts accuracy for GEODIST - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit cf42c48: - addReplyHumanLongDouble() API added. - 2 files changed, 10 insertions(+) - -antirez in commit f4befcc: - GEOADD STORE/STOREDIST tests. - 1 file changed, 54 insertions(+) - -antirez in commit bb75ecd: - New options for GEORADIUS: STORE and STOREDIST. - 4 files changed, 116 insertions(+), 49 deletions(-) - -antirez in commit 15f37eb: - Cluster: resharding test provides more state when failing. - 1 file changed, 8 insertions(+), 3 deletions(-) - -antirez in commit b0ec22f: - Include full paths on RDB/AOF files errors. - 2 files changed, 32 insertions(+), 4 deletions(-) - -antirez in commit fcd7df5: - Remove Lua state reference from buffers in lua_cmsgpack. - 1 file changed, 25 insertions(+), 28 deletions(-) - -Salvatore Sanfilippo in commit 66dbc5e: - Merge pull request #3072 from yoav-steinberg/cmsgpack_fix -David Cavar in commit c30ffaa: - Reverse redirect address parse - 1 file changed, 1 insertion(+), 1 deletion(-) - -yoav@monfort.co.il in commit fdbefc9: - cmsgpack: pass correct osize values to lua allocator, update correct buf free space in cmsgpack - 1 file changed, 4 insertions(+), 4 deletions(-) - -Salvatore Sanfilippo in commit aa953b6: - Merge pull request #3059 from itamarhaber/keyspacenotif-lrem -Itamar Haber in commit 31a70a8: - Fixes a typo - 1 file changed, 2 insertions(+), 2 deletions(-) - -Itamar Haber in commit 68e779f: - Adds keyspace notifications for lrem - 1 file changed, 6 insertions(+), 1 deletion(-) - -antirez in commit 5b7b235: - Fix to Cluster test to support @busport format. 
- 1 file changed, 4 insertions(+), 2 deletions(-) - -antirez in commit cbcffed: - Cluster: redis-trib: support @busport format in ClusterNode. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit b841f3a: - Cluster: store busport with different separator in CLUSTER NODES. - 1 file changed, 13 insertions(+), 9 deletions(-) - -antirez in commit 92b9de2: - Cluster announce: WIP, allow building again. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 5ac5e3e: - Cluster announce ip/port/bus-port documented in redis.conf. - 1 file changed, 33 insertions(+) - -antirez in commit e27b9b1: - Merge branch 'cluster-docker' into unstable -antirez in commit cdbe8a6: - Typo ASII -> ASCII fixed in comment. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit c285862: - Cluster: include node IDs in SLOTS output. - 1 file changed, 6 insertions(+), 2 deletions(-) - -antirez in commit d0a8512: - Cluster anounce-ip/port WIP. - 1 file changed, 1 insertion(+) - -antirez in commit 4abf486: - Cluster announce port: set port/bport for myself at startup. - 1 file changed, 7 insertions(+), 2 deletions(-) - -antirez in commit 1c03837: - Cluster: persist bus port in nodes.conf. - 1 file changed, 8 insertions(+), 2 deletions(-) - -antirez in commit dc98907: - Cluster announce ip: take myself->ip always in sync. - 1 file changed, 30 insertions(+), 6 deletions(-) - -antirez in commit 11436b1: - Cluster announce ip / port initial implementation. - 5 files changed, 127 insertions(+), 42 deletions(-) - -antirez in commit b093930: - Cluster announce ip / port configuration handling. - 3 files changed, 12 insertions(+) - -antirez in commit a455e4b: - Cluster: add announce ip field in messages header. - 1 file changed, 3 insertions(+), 2 deletions(-) - -Itamar Haber in commit 9e46bf2: - Fixes a typo - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 8870a7e: - 03_test_release.sh: proper cleanup before testing. - 1 file changed, 2 insertions(+) - -antirez in commit 5bbb09e: - Cluster: check packets length before accessing far fields. - 1 file changed, 10 insertions(+), 5 deletions(-) - -antirez in commit 751b566: - Sentinel: improve handling of known Sentinel instances. - 1 file changed, 34 insertions(+), 19 deletions(-) - -antirez in commit 5bc7e01: - Use a smoother running average for avg_ttl in INFO. - 1 file changed, 9 insertions(+), 5 deletions(-) - -antirez in commit fe44a7c: - Cluster: mismatch sender ID log put back at DEBUG level. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit d6c5922: - Cluster: fix missing ntohs() call to access gossip section port. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 592419b: - Better address udpate strategy when processing gossip sections. - 1 file changed, 15 insertions(+), 6 deletions(-) - -antirez in commit 22892ce: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -antirez in commit bc15586: - Fix memory leak in masterauth config option loading. - 1 file changed, 1 insertion(+) - -Salvatore Sanfilippo in commit 7837c48: - Merge pull request #3023 from itamarhaber/patch-2 -Itamar Haber in commit 57f8230: - Removes an extra space in protected mode message - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 83b862a: - Minor MIGRATE refactoring. - 1 file changed, 12 insertions(+), 5 deletions(-) - -antirez in commit f5a1e60: - More variadic MIGRATE fixes. 
- 1 file changed, 9 insertions(+), 8 deletions(-) - -antirez in commit 00d3a40: - Various fixes to MIGRATE with multiple keys. - 1 file changed, 45 insertions(+), 12 deletions(-) - -antirez in commit cfc879b: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -antirez in commit b01b32b: - Test: Handle LOADING in restart_instance. - 1 file changed, 12 insertions(+) - -Salvatore Sanfilippo in commit 8637384: - Merge pull request #2726 from seppo0010/patch-2 -antirez in commit 5432fc8: - Detect and show crashes on Sentinel/Cluster tests. - 1 file changed, 18 insertions(+) - -antirez in commit fc3ca8f: - Cluster: fix setting nodes slaveof pointer to NULL on node release. - 1 file changed, 3 insertions(+), 14 deletions(-) - -antirez in commit a411d55: - Cluster: clarify node->slave may be NULL. - 1 file changed, 4 insertions(+), 1 deletion(-) - -Jan-Erik Rediger in commit 15dacfe: - Fix nanosecond conversion - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit f984cef: - Cluster: fix rebalancing to always empty nodes. - 1 file changed, 24 insertions(+), 3 deletions(-) - -root in commit 28e80bf: - fix linux compile bug - 2 files changed, 9 insertions(+), 9 deletions(-) - -antirez in commit 152e9f6: - Cluster: redis-trib move_to_slot: don't send SETSLOT to slaves. - 1 file changed, 1 insertion(+) - -Daniel Shih in commit e6d9705: - Fix a possible race condition of sdown detection if the connection to master/slave/sentinel decames disconnected just after the last PONG and before the next PING. - 1 file changed, 2 insertions(+) - -antirez in commit c6e5088: - Cluster: fix redis-trib reference of variable in warning. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 02c40c9: - CLUSTER BUMPEPOCH initial implementation fixed. - 2 files changed, 25 insertions(+), 14 deletions(-) - -antirez in commit e4eb6c7: - Cluster: implement redis-trib fix when slot is open without owners. - 1 file changed, 39 insertions(+), 9 deletions(-) - -antirez in commit 04ae459: - Cluster: implement redis-trib fix for uncovered slots. - 1 file changed, 32 insertions(+), 9 deletions(-) - -antirez in commit b58796f: - Cluster: CLUSTER BUMPEPOCH introduced to help redis-trib fix. - 1 file changed, 7 insertions(+) - -antirez in commit 524be1e: - Cluster: don't allow CLUSTER SETSLOT with slaves. - 1 file changed, 5 insertions(+) - -antirez in commit f43c794: - Scripting: handle trailing comments. - 2 files changed, 5 insertions(+), 1 deletion(-) - -antirez in commit e15e518: - Allow MIGRATE to always be called on local keys for open slots. - 1 file changed, 7 insertions(+), 6 deletions(-) - -antirez in commit 36704d6: - Fix typos & grammar in clusterBumpConfigEpochWithoutConsensus() comment. - 1 file changed, 4 insertions(+), 4 deletions(-) - -antirez in commit 7c1a5ff: - Lua debugger: support direct calls to SCRIPT DEBUG in redis-cli. - 1 file changed, 21 insertions(+) - -antirez in commit a75aa4b: - Lua debugger: fix crash printing nested or deep objects. - 1 file changed, 14 insertions(+), 4 deletions(-) - -antirez in commit 1e7a8f8: - Another typo in protected mode error message. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 08c7bba: - Fix protected mode error message typo. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit edd4d55: - New security feature: Redis protected mode. - 5 files changed, 75 insertions(+), 3 deletions(-) - -antirez in commit 00d637f: - Cluster: don't send -ASK to MIGRATE. 
- 1 file changed, 5 insertions(+), 3 deletions(-) - -antirez in commit 8b3aa73: - Cluster test: do leaks detection with OSX leaks utility. - 1 file changed, 32 insertions(+) - -antirez in commit 190babe: - redis-trib: Remove duplicated key in hash initialization. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit b1f84d4: - Cluster/Sentinel test: report ability to run via valgrind. - 1 file changed, 1 insertion(+) - -Salvatore Sanfilippo in commit 075ea16: - Merge pull request #2954 from pkulchenko/debug-table-pretty-printing -Salvatore Sanfilippo in commit f054b4a: - Merge pull request #2957 from pkulchenko/debug-userdata-pretty-printing -antirez in commit 80b7037: - Cluster: rebalance now supports --threshold option. - 1 file changed, 24 insertions(+), 2 deletions(-) - -antirez in commit 628af70: - Cluster: redis-trib reshard / rebalance --pipeline support. - 1 file changed, 21 insertions(+), 5 deletions(-) - -antirez in commit 77f849b: - Cluster: verify slaves consistency after resharding. - 1 file changed, 23 insertions(+) - -antirez in commit 9b4dd92: - Cluster: resharding test now checks AOF consistency. - 1 file changed, 42 insertions(+) - -antirez in commit bb21537: - Fix a race that may lead to the active (slave) client to be freed. - 1 file changed, 6 insertions(+) - -antirez in commit 218e522: - Fix processCommand() comment about return value. - 1 file changed, 2 insertions(+), 2 deletions(-) - -antirez in commit a1c9c05: - Hopefully better memory test on crash. - 3 files changed, 133 insertions(+), 86 deletions(-) - -antirez in commit b9aeb98: - Suppress harmless warnings. - 3 files changed, 8 insertions(+), 4 deletions(-) - -antirez in commit ac8f4a6: - memtest.c now can be called as API in non interactive mode. - 2 files changed, 73 insertions(+), 43 deletions(-) - -antirez in commit 30f057d: - Crash report format improvements. - 1 file changed, 35 insertions(+), 24 deletions(-) - -Paul Kulchenko in commit b754c8e: - Update pretty printing in debugging to generate valid Lua code for userdata-like types. - 1 file changed, 2 insertions(+), 2 deletions(-) - -Paul Kulchenko in commit 3969e9d: - Update pretty printing in debugging to generate valid Lua code for tables. - 1 file changed, 2 insertions(+), 1 deletion(-) - -Paul Kulchenko in commit 2f3f3fd: - Update global protection error message to fix a typo. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 6db8e85: - Log address causing SIGSEGV. - 1 file changed, 4 insertions(+) - -antirez in commit 8f8c399: - Cluster: allows abbreviated node IDs with rebalance --weight option. - 1 file changed, 19 insertions(+), 3 deletions(-) - -antirez in commit 9df1ae8: - Cluster: rebalancing option --simulate, and a fix. - 1 file changed, 12 insertions(+), 8 deletions(-) - -antirez in commit cba1c29: - Cluster: redis-trib rebalance initial implementation. - 1 file changed, 154 insertions(+), 17 deletions(-) - -antirez in commit 3782902: - Initial implementation of redis-trib info subcommand. - 1 file changed, 22 insertions(+) - -Salvatore Sanfilippo in commit 9f63e75: - Merge pull request #2943 from sunheehnus/issue2855 -Sun He in commit 3a47c8c: - lua_struct.c/getnum: throw error if overflow happen - 1 file changed, 6 insertions(+), 4 deletions(-) - -antirez in commit f0b168e: - Cluster: redis-trib: use variadic MIGRATE. - 1 file changed, 13 insertions(+), 15 deletions(-) - -antirez in commit 4e252e4: - MIGRATE: Fix key extraction for new form. 
- 3 files changed, 29 insertions(+), 1 deletion(-) - -antirez in commit 82fd74a: - MIGRATE: test more corner cases. - 1 file changed, 37 insertions(+) - -antirez in commit ac0a731: - MIGRATE: Fix new argument rewriting refcount handling. - 1 file changed, 2 insertions(+), 3 deletions(-) - -antirez in commit d85fc1e: - MIGRATE: fix replies processing and argument rewriting. - 2 files changed, 40 insertions(+), 15 deletions(-) - -antirez in commit 29d680e: - Test: pipelined MIGRATE tests added. - 1 file changed, 54 insertions(+) - -antirez in commit 9ebf7a6: - Pipelined multiple keys MIGRATE. - 2 files changed, 115 insertions(+), 63 deletions(-) - -antirez in commit e7945cf: - Cluster: redis-trib migrate default timeout set to 60 sec. - 1 file changed, 1 insertion(+), 1 deletion(-) - -daniele in commit 3d254e0: - redis-trib.rb: --timeout XXXXX option added to fix and reshard commands. Defaults to 15000 milliseconds - 1 file changed, 10 insertions(+), 4 deletions(-) - -antirez in commit adc2fe6: - Cluster: replica migration with delay. - 2 files changed, 39 insertions(+), 17 deletions(-) - -antirez in commit 41db54a: - Cluster: more reliable migration tests. - 1 file changed, 24 insertions(+), 43 deletions(-) - -antirez in commit b55affb: - Cluster: more reliable replicas migration test. - 1 file changed, 75 insertions(+) - -antirez in commit 4159055: - Remove debugging message left there for error. - 1 file changed, 1 deletion(-) - -antirez in commit 69897f5: - unlinkClient(): clear flags according to ops performed. - 1 file changed, 2 insertions(+) - -antirez in commit e0f22df: - Fix replicas migration by adding a new flag. - 2 files changed, 31 insertions(+), 20 deletions(-) - -antirez in commit f147225: - Fix typo UNCOMMENT -> COMMENT in example redis.conf. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit acc2336: - Centralize slave replication handshake aborting. - 1 file changed, 23 insertions(+), 22 deletions(-) - -antirez in commit fceaa46: - Test HINCRBYFLOAT rounding only in x86_64 and when valgrind is not in use. - 1 file changed, 16 insertions(+), 5 deletions(-) - -antirez in commit 96628cc: - fix sprintf and snprintf format string - 2 files changed, 3 insertions(+), 3 deletions(-) - -antirez in commit e6a5117: - Fix typo in prepareClientToWrite() comment. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit c2c68c5: - Merge branch 'unstable' of github.com:/antirez/redis into unstable -antirez in commit da82723: - Handle wait3() errors. - 1 file changed, 7 insertions(+), 1 deletion(-) - -Salvatore Sanfilippo in commit 8164418: - Merge pull request #2899 from itamarhaber/patch-1 -Itamar Haber in commit 36801f7: - Revert Lua's `redis.LOG_` to original - 1 file changed, 4 insertions(+), 4 deletions(-) - -antirez in commit a0d41e5: - Redis Cluster: hint about validity factor when slave can't failover. - 1 file changed, 3 insertions(+), 1 deletion(-) - -antirez in commit 4d625bb: - Added Tcl program to show commits graphicaly. - 2 files changed, 112 insertions(+) - -antirez in commit 4b0b28b: - Lua debugger: infinite loop detection. - 1 file changed, 35 insertions(+), 7 deletions(-) - -antirez in commit 1f35f2d: - Lua debugger: fix trace command infinite loop. - 1 file changed, 6 insertions(+), 5 deletions(-) - -antirez in commit 6604e04: - Lua debugger: redis-cli: allow restart after end of session. - 1 file changed, 3 insertions(+), 1 deletion(-) - -antirez in commit 58573f1: - Lua debugger: redis-cli can restart Lua debugging sessions. 
- 1 file changed, 74 insertions(+), 44 deletions(-) - -antirez in commit 0cc1917: - Lua debugger: maxlen command implemented. - 2 files changed, 55 insertions(+), 14 deletions(-) - -antirez in commit c560c64: - Lua debugger: trace command implemented. - 1 file changed, 24 insertions(+) - -antirez in commit 22959e0: - Lua debugger: redis-cli: show compile errors in LDB mode. - 1 file changed, 12 insertions(+), 3 deletions(-) - -antirez in commit 70a5169: - Lua debugger: print without args show all local vars. - 1 file changed, 34 insertions(+), 5 deletions(-) - -antirez in commit fb53459: - Lua debugger: default behavior of "list" command changed. - 1 file changed, 14 insertions(+), 4 deletions(-) - -antirez in commit 6de2306: - Lua debugger: redis-cli error when --ldb is without --eval. - 1 file changed, 7 insertions(+) - -antirez in commit e57cccd: - Lua debugger: use sds_malloc() to allocate eval cli array. - 5 files changed, 35 insertions(+), 1 deletion(-) - -antirez in commit 34aadf7: - Lua debugging: fix error message for SCRIPT DEBUG. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 3d24cd6: - Lua debugger: reply +OK to SCRIPT DEBUG no. - 1 file changed, 1 insertion(+) - -antirez in commit 333547d: - Lua debugger: call wait3() if there are pending forked debugging sessions. - 3 files changed, 10 insertions(+), 1 deletion(-) - -antirez in commit 0163643: - Lua debugger: abort implemented. - 1 file changed, 5 insertions(+) - -antirez in commit d99ce09: - Lua debugger: ldbSendLogs() memory leak fixed. - 1 file changed, 1 insertion(+) - -antirez in commit 87672ad: - Lua debugger: better support for synchronous mode. - 2 files changed, 22 insertions(+), 3 deletions(-) - -antirez in commit 7be9170: - Lua debugger: handle forked sessions children during shutdown. - 3 files changed, 51 insertions(+), 5 deletions(-) - -antirez in commit 3ab0b4d: - Lua debugger: inform user changes are rolled back. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 56d9bb8: - Lua debugger: fix help typo, beark -> break. - 1 file changed, 4 insertions(+), 4 deletions(-) - -antirez in commit e386cd8: - Lua debugger: clear end of session protocol. - 2 files changed, 23 insertions(+), 9 deletions(-) - -antirez in commit 7492237: - Lua debugger: redis.debug() implemented. - 2 files changed, 44 insertions(+), 15 deletions(-) - -antirez in commit 23a4d70: - Lua debugger: redis-cli, mark end of debugging session. - 1 file changed, 1 insertion(+) - -antirez in commit cd112db: - Lua debugger: removing breakpoints now works. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit a076e42: - Lua debugger: redis command implemented. - 1 file changed, 26 insertions(+), 1 deletion(-) - -antirez in commit e6eb6ea: - Lua debugger: try to eval as expression first. - 1 file changed, 13 insertions(+), 4 deletions(-) - -antirez in commit 1f8fdaf: - Lua debugger: much better Lua values pretty printer. - 2 files changed, 70 insertions(+), 17 deletions(-) - -antirez in commit f480580: - Lua debugger: print now handles ARGV and KEYS. - 1 file changed, 10 insertions(+), 2 deletions(-) - -antirez in commit 36392dd: - Lua debugger: added comment about helper functions. - 1 file changed, 3 insertions(+) - -antirez in commit 3a04cb0: - Lua debugger: redis.breakpoint() implemented. - 1 file changed, 27 insertions(+), 2 deletions(-) - -antirez in commit cf4700b: - Lua debugger: output improvements, eval command. 
- 2 files changed, 186 insertions(+), 54 deletions(-) - -antirez in commit 1f8d614: - Lua debugger: breakpoints. - 2 files changed, 148 insertions(+), 12 deletions(-) - -antirez in commit 5c4f492: - Lua debugger: ability to show local vars content. - 1 file changed, 85 insertions(+), 7 deletions(-) - -antirez in commit 5417217: - Lua debugger: log Redis commands. List command. - 2 files changed, 82 insertions(+), 7 deletions(-) - -antirez in commit d3d1fa9: - Lua debugger: initial REPL. - 2 files changed, 164 insertions(+), 20 deletions(-) - -antirez in commit c494db8: - Lua debugger: foundations implemented. - 5 files changed, 206 insertions(+), 21 deletions(-) - -antirez in commit 7cfdccd: - Remove "s" flag for MIGRATE in command table. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit f3dd472: - Update redis-cli help and the script to generate it. - 2 files changed, 183 insertions(+), 9 deletions(-) - -antirez in commit 0cb66fa: - Fix MIGRATE entry in command table. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 8a0258a: - AOF: rewriting child killed by SIGUSR1 is not an error. - 1 file changed, 4 insertions(+), 2 deletions(-) - -antirez in commit 54f5ecf: - call() deserves a good top-comment. - 1 file changed, 38 insertions(+), 2 deletions(-) - -Salvatore Sanfilippo in commit c950fac: - Merge pull request #2848 from badboy/removed-printf -antirez in commit 87a12a6: - Best effort flush of slave buffers before SHUTDOWN. - 2 files changed, 14 insertions(+), 1 deletion(-) - -antirez in commit b719eed: - Use clientHasPendingReplies() in flushSlavesOutputBuffers() - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 1b5d24e: - Scripting: fix redis.call() error reporting. - 1 file changed, 18 insertions(+), 5 deletions(-) - -antirez in commit 111d24f: - Fix error reply in subscribed Pub/Sub mode. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit cd8f19e: - Initialize all Lua scripting related things into scripting.c - 3 files changed, 33 insertions(+), 12 deletions(-) - -antirez in commit 9aa1f94: - scripting.c source code better organized into sections. - 2 files changed, 182 insertions(+), 155 deletions(-) - -antirez in commit 71aa9b7: - Fix HINCRBYFLOAT to work with long doubles. - 3 files changed, 8 insertions(+), 8 deletions(-) - -antirez in commit f625570: - Add regression test for HINCRBYFLOAT formatting change. - 1 file changed, 7 insertions(+) - -Salvatore Sanfilippo in commit de776a4: - Merge pull request #2850 from Joe8Bit/fix_typo-in-readme -antirez in commit dfe90ac: - README new internals section improved a bit more. - 1 file changed, 9 insertions(+), 8 deletions(-) - -Joe Pettersson in commit 8bc8f6f: - Fix typo in READme sever/server - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 462026c: - README: remove garbage at end of line. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 73510f4: - README operations -> commands, is more clear now #2. - 1 file changed, 2 insertions(+), 2 deletions(-) - -antirez in commit b9fb240: - README operations -> commands, is more clear now. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 8badf16: - A few README typos fixed #2. - 1 file changed, 2 insertions(+), 2 deletions(-) - -antirez in commit d80d051: - A few README typos fixed. - 1 file changed, 6 insertions(+), 6 deletions(-) - -antirez in commit 15d57c3: - README now has info about Redis codebase layout. 
- 1 file changed, 257 insertions(+), 2 deletions(-) - -Jan-Erik Rediger in commit 35afefc: - Remove printf - 1 file changed, 1 deletion(-) - -antirez in commit 30b3246: - Test: improve PFCOUNT with multiple keys testing. - 1 file changed, 17 insertions(+), 2 deletions(-) - -antirez in commit 77362b9: - Dependencies updated. - 1 file changed, 150 insertions(+), 106 deletions(-) - -antirez in commit 5b63ae3: - Scripting: commands replication tests. - 1 file changed, 107 insertions(+), 1 deletion(-) - -antirez in commit f26072e: - More reliable DEBUG loadaof. - 1 file changed, 1 insertion(+) - -antirez in commit 073a42b: - Scripting: execute tests with command replication as well. - 1 file changed, 88 insertions(+), 79 deletions(-) - -antirez in commit ff6d296: - Scripting: ability to turn on Lua commands style replication globally. - 4 files changed, 8 insertions(+), 1 deletion(-) - -antirez in commit eda06b5: - Scripting: test Redis provided Lua functions error reporting. - 1 file changed, 9 insertions(+) - -antirez in commit ebaa922: - Scripting: fix error reporting of many Redis provided functions. - 1 file changed, 14 insertions(+), 14 deletions(-) - -antirez in commit 2dabf82: - Fix call() FORCE_REPL/AOF flags setting. - 2 files changed, 16 insertions(+), 15 deletions(-) - -antirez in commit 514a234: - Lua script selective replication fixes. - 3 files changed, 32 insertions(+), 20 deletions(-) - -antirez in commit a3e8de0: - Lua script selective replication WIP. - 2 files changed, 58 insertions(+), 3 deletions(-) - -antirez in commit fc38235: - Scripting: single commands replication mode implemented. - 3 files changed, 72 insertions(+), 8 deletions(-) - -antirez in commit cdda674: - call(): selective ability to prevent propagation on AOF / slaves. - 2 files changed, 35 insertions(+), 7 deletions(-) - -antirez in commit 9dd3d2e: - call(): don't inherit CLIENT_PREVENT_PROP + minor refactoring. - 1 file changed, 6 insertions(+), 3 deletions(-) - -antirez in commit d37ef78: - CONTRIBUTING updated. - 1 file changed, 7 insertions(+), 5 deletions(-) - -antirez in commit 86f0a2e: - CLIENT REPLY command implemented: ON, OFF and SKIP modes. - 2 files changed, 31 insertions(+), 1 deletion(-) - -Salvatore Sanfilippo in commit bdcb145: - Merge pull request #2810 from dwlt/add-copy-replace-parameters-to-redis-trib-import -antirez in commit 6ed12bd: - Redis.conf example: make clear user must pass its path as argument. - 1 file changed, 6 insertions(+), 1 deletion(-) - -antirez in commit 5f0fef5: - Regression test for issue #2813. - 1 file changed, 53 insertions(+) - -antirez in commit ed62288: - PR 2813 fix ported to unstable. - 2 files changed, 25 insertions(+), 20 deletions(-) - -David Thomson in commit 8a85ad2: - Add back blank line - 1 file changed, 1 insertion(+) - -David Thomson in commit 03d0de7: - Update import command to optionally use copy and replace parameters - 1 file changed, 7 insertions(+), 3 deletions(-) - -antirez in commit 35a0c77: - DEBUG RESTART/CRASH-AND-RECOVER [delay] implemented. - 2 files changed, 17 insertions(+) - -antirez in commit 7342746: - Server: restartServer() API. - 2 files changed, 73 insertions(+), 4 deletions(-) - -antirez in commit c372a59: - Cluster: redis-trib fix, coverage for migrating=1 case. - 1 file changed, 12 insertions(+), 2 deletions(-) - -antirez in commit 6ddcba6: - Test: basic lazyfree unit test. - 2 files changed, 40 insertions(+) - -antirez in commit 363c0f6: - Test: fix attach_to_replication_stream to handle newlines. 
- 1 file changed, 5 insertions(+), 2 deletions(-) - -antirez in commit f29e384: - Fix extractLongLatOrReply() sanity check conditionals. - 1 file changed, 9 insertions(+), 9 deletions(-) - -antirez in commit 3f38b51: - Jemalloc configure script fixed to work nested. - 1 file changed, 18 deletions(-) - -antirez in commit a9951b1: - Jemalloc updated to 4.0.3. - 140 files changed, 25333 insertions(+), 15474 deletions(-) - -antirez in commit e3ded02: - Added a README into deps on dependencies and how to upgrade. - 1 file changed, 66 insertions(+) - -antirez in commit ceaf58d: - Regression test for GEORADIUS COUNT arity check. - 1 file changed, 5 insertions(+) - -antirez in commit 319d180: - Fix GEORADIUS COUNT option arity checks. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 19f6ab5: - Lazyfree options documented in the example redis.conf. - 1 file changed, 52 insertions(+), 1 deletion(-) - -antirez in commit 880c606: - Lazyfree options implemented in the configuration. - 1 file changed, 36 insertions(+) - -antirez in commit 252cfa0: - Lazyfree: cond vars to enabled/disable it based on DEL context. - 4 files changed, 51 insertions(+), 35 deletions(-) - -antirez in commit 5359696: - Fixed a bug in the emptyDb() new implementation. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit ecdbc33: - FLUSHDB and FLUSHALL ASYNC option implemented. - 2 files changed, 39 insertions(+), 8 deletions(-) - -antirez in commit 1f26a94: - Lazyfree: pending objects count in INFO output. - 3 files changed, 10 insertions(+), 2 deletions(-) - -antirez in commit c69c6c8: - Lazyfree: ability to free whole DBs in background. - 7 files changed, 106 insertions(+), 14 deletions(-) - -antirez in commit b08c36c: - Lazyfree: keep count of objects to free. - 2 files changed, 11 insertions(+), 3 deletions(-) - -antirez in commit c7b46a4: - zmalloc.c converted to use atomicvar.h. - 1 file changed, 5 insertions(+), 33 deletions(-) - -antirez in commit 7e5d690: - Atomic vars implemented in a more general way. - 1 file changed, 94 insertions(+) - -antirez in commit 7af4eeb: - Lazyfree: incremental removed, only threaded survived. - 3 files changed, 24 insertions(+), 191 deletions(-) - -antirez in commit 9253d85: - Threaded lazyfree WIP #1. - 5 files changed, 36 insertions(+), 8 deletions(-) - -antirez in commit 4d50d69: - bio.c: new API bioWaitStepOfType(). - 2 files changed, 32 insertions(+), 5 deletions(-) - -antirez in commit 5b850d7: - Test: stack_logging var should be initialized to 0. - 1 file changed, 1 insertion(+) - -antirez in commit 1dab60d: - Hash new implementation memleaks fixed. - 2 files changed, 52 insertions(+), 12 deletions(-) - -antirez in commit 97ba4e3: - Lazyfree: Hash converted to use plain SDS WIP 5. - 3 files changed, 30 insertions(+), 34 deletions(-) - -antirez in commit 36be34b: - Test: support for stack logging for OSX malloc/leaks. - 2 files changed, 7 insertions(+) - -antirez in commit 974514b: - Lazyfree: Hash converted to use plain SDS WIP 4. - 9 files changed, 76 insertions(+), 70 deletions(-) - -antirez in commit 4a18352: - Lazyfree: Hash converted to use plain SDS WIP 3. - 2 files changed, 20 insertions(+), 29 deletions(-) - -antirez in commit 777396a: - Lazyfree: Hash converted to use plain SDS WIP 2. - 4 files changed, 64 insertions(+), 37 deletions(-) - -antirez in commit 1c24755: - Lazyfree: Hash converted to use plain SDS WIP 1. 
- 5 files changed, 188 insertions(+), 163 deletions(-) - -antirez in commit afc4b92: - DEBUG DIGEST Set type memory leak fixed. - 1 file changed, 1 insertion(+) - -antirez in commit 34e489c: - SORT memory leak fixed. - 1 file changed, 3 insertions(+), 3 deletions(-) - -antirez in commit a7c5be1: - Lazyfree: Sorted sets convereted to plain SDS. (several commits squashed) - 10 files changed, 306 insertions(+), 266 deletions(-) - -antirez in commit 86d48ef: - Lazyfree: Convert Sets to use plains SDS (several commits squashed). - 9 files changed, 183 insertions(+), 163 deletions(-) - -antirez in commit 4ff3c17: - Lazyfree: client output buffers no longer use Redis Objects. - 2 files changed, 72 insertions(+), 97 deletions(-) - -antirez in commit 0c05436: - Lazyfree: a first implementation of non blocking DEL. - 8 files changed, 341 insertions(+), 53 deletions(-) - -antirez in commit 712ea72: - Call writeToClient() directly instead of the write handler. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 01c08b5: - Fix processEventsWhileBlocked() to handle PENDING_WRITE clients. - 2 files changed, 7 insertions(+), 3 deletions(-) - -antirez in commit 1e71538: - Refactoring: unlinkClient() added to lower freeClient() complexity. - 3 files changed, 48 insertions(+), 51 deletions(-) - -antirez in commit fdb3be9: - Refactoring: new function to test if client has pending output. - 3 files changed, 17 insertions(+), 8 deletions(-) - -antirez in commit 825f65d: - Reverse list of clients with pending writes. - 1 file changed, 1 insertion(+), 1 deletion(-) - -antirez in commit 063ecbd: - writeToClient(): don't remove write handler if not needed. - 1 file changed, 4 insertions(+), 4 deletions(-) - -antirez in commit b741a90: - handleClientsWithPendingWrites(): detect dead clients. - 1 file changed, 17 insertions(+), 7 deletions(-) - -antirez in commit 481a0db: - Move handleClientsWithPendingWrites() in networking.c. - 3 files changed, 29 insertions(+), 28 deletions(-) - -antirez in commit 1c7d87d: - Avoid installing the client write handler when possible. - 4 files changed, 56 insertions(+), 7 deletions(-) - -antirez in commit d1b6a17: - redis-cli pipe mode: don't stay in the write loop forever. - 1 file changed, 6 insertions(+), 1 deletion(-) - -antirez in commit 622366a: - Mark version of unstable branch in an unique way. - 1 file changed, 1 insertion(+), 1 deletion(-) - -Sebastian Waisbrot in commit 97a2248: - Fix race condition in unit/introspection - 1 file changed, 4 insertions(+), 3 deletions(-) - -Adam Baldwin in commit 45fa113: - Removed dofile() from Lua - 1 file changed, 2 insertions(+) -
-Migrating from 3.2 to 4.0
-=========================
-
-Redis 3.2 is mostly a strict subset of 4.0, so you should not have any
-problems upgrading your application from 3.2 to 4.0. However, this is a list
-of small non-backward-compatible changes introduced in the 4.0 release:
-
-* The Redis Cluster bus protocol of 4.0 is no longer compatible with Redis 3.2.
-  This was needed in order to provide Docker / NAT compatibility to Redis
-  Cluster. So, in order to upgrade a Redis Cluster to 4.0, a mass restart of
-  all the instances is needed.
-
-* Redis Cluster CLUSTER NODES output is now slightly different. Node
-  addresses are now in the form host:port@bus-port instead of host:port.
-  Clients should use CLUSTER SLOTS to fetch the cluster configuration;
-  however, if they are still using CLUSTER NODES, they should be modified
-  to ignore the @bus-port part.
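  As an illustration (an editor's sketch, not part of the 4.0 release notes):
  a client that still parses CLUSTER NODES can simply truncate each address at
  the first '@'. The helper name parse_node_addr below is hypothetical; only
  snprintf and strchr from the C standard library are assumed.

      #include <stdio.h>
      #include <string.h>

      /* Copy a CLUSTER NODES address into out, dropping any "@bus-port"
       * suffix, so "127.0.0.1:7000@17000" becomes "127.0.0.1:7000". */
      static void parse_node_addr(const char *addr, char *out, size_t outlen)
      {
          char *at;

          snprintf(out, outlen, "%s", addr);
          at = strchr(out, '@');
          if (at != NULL)
              *at = '\0'; /* ignore the bus port part */
      }

  Addresses without an '@' (Redis 3.2 format) pass through unchanged, so the
  same code works against both cluster versions.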
-
-* Writable slaves do not propagate writes to their sub-slaves, so writes to
-  writable slaves remain just local.
-
-* The RDB format changed. Redis 4.0 is still able to read 3.2 (and all the
-  past versions) files, but not the other way around.
-
-* Certain log formats and sentences are different in Redis 4.0.
-
-* Certain INFO fields, especially those related to replication, are now
-  different.
-
-* GEODIST, GEOPOS and GEOHASH return values changed for non-existing keys:
-  previously the return value was inconsistent between non-existing keys
-  and non-existing elements. Now, if the key does not exist, the return value
-  is always as if the key contained no elements. So, for instance,
-  "GEOHASH non_existing_key A B C" now returns an array of 3 NULL values,
-  as it should. In general this change should not break compatibility with
-  the past, since in the past two different forms were provided by the server
-  and the new behavior is one of the two.
-
-* The SLOWLOG command entries contain two additional fields: the client
-  address and name. This is documented in the SLOWLOG command online
-  documentation.
-
--------------------------------------------------------------------------------
-
-Credits: For each release, a list of changes with the relative author is
-provided. Where not specified, the implementation and design are by
-Salvatore Sanfilippo. Thanks to Redis Labs for making all this possible.
-Also many thanks to all the other contributors and the amazing community
-we have.
-
-Commit messages may contain additional credits.
-
-Enjoy,
-Salvatore
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/doc/stylesheet.xsl b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/doc/stylesheet.xsl
deleted file mode 100644
index 4e334a8..0000000
--- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/doc/stylesheet.xsl
+++ /dev/null
@@ -1,7 +0,0 @@
-
- ansi
-
-
- ""
-
-
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/atomic.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/atomic.h
deleted file mode 100644
index a9aad35..0000000
--- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/atomic.h
+++ /dev/null
@@ -1,651 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
-#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
-#define atomic_read_p(p) atomic_add_p(p, NULL)
-#define atomic_read_z(p) atomic_add_z(p, 0)
-#define atomic_read_u(p) atomic_add_u(p, 0)
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-/*
- * All arithmetic functions return the arithmetic result of the atomic
- * operation. Some atomic operation APIs return the value prior to mutation, in
- * which case the following functions must redundantly compute the result so
- * that it can be returned. These functions are normally inlined, so the extra
- * operations can be optimized away if the return values aren't used by the
- * callers.
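- *
- * (Editor's illustration, not in the original header: the calls below show
- * the intended semantics, assuming the 64-bit variants. Note that the CAS
- * functions return false on success and true on failure.)
- *
- *   uint64_t n = 0;
- *   uint64_t v = atomic_add_uint64(&n, 2);  // v == 2: the post-add value
- *   v = atomic_sub_uint64(&n, 1);           // v == 1
- *   if (!atomic_cas_uint64(&n, 1, 10)) {
- *       // reached: *p matched c (1), so *p was atomically set to s (10)
- *   }
- *   atomic_write_uint64(&n, 0);             // plain atomic store
- *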
- * - * atomic_read_( *p) { return (*p); } - * atomic_add_( *p, x) { return (*p + x); } - * atomic_sub_( *p, x) { return (*p - x); } - * bool atomic_cas_( *p, c, s) - * { - * if (*p != c) - * return (true); - * *p = s; - * return (false); - * } - * void atomic_write_( *p, x) { *p = x; } - */ - -#ifndef JEMALLOC_ENABLE_INLINE -uint64_t atomic_add_uint64(uint64_t *p, uint64_t x); -uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x); -bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s); -void atomic_write_uint64(uint64_t *p, uint64_t x); -uint32_t atomic_add_uint32(uint32_t *p, uint32_t x); -uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x); -bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s); -void atomic_write_uint32(uint32_t *p, uint32_t x); -void *atomic_add_p(void **p, void *x); -void *atomic_sub_p(void **p, void *x); -bool atomic_cas_p(void **p, void *c, void *s); -void atomic_write_p(void **p, const void *x); -size_t atomic_add_z(size_t *p, size_t x); -size_t atomic_sub_z(size_t *p, size_t x); -bool atomic_cas_z(size_t *p, size_t c, size_t s); -void atomic_write_z(size_t *p, size_t x); -unsigned atomic_add_u(unsigned *p, unsigned x); -unsigned atomic_sub_u(unsigned *p, unsigned x); -bool atomic_cas_u(unsigned *p, unsigned c, unsigned s); -void atomic_write_u(unsigned *p, unsigned x); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) -/******************************************************************************/ -/* 64-bit operations. */ -#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) -# if (defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - uint64_t t = x; - - asm volatile ( - "lock; xaddq %0, %1;" - : "+r" (t), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (t + x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - uint64_t t; - - x = (uint64_t)(-(int64_t)x); - t = x; - asm volatile ( - "lock; xaddq %0, %1;" - : "+r" (t), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (t + x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - uint8_t success; - - asm volatile ( - "lock; cmpxchgq %4, %0;" - "sete %1;" - : "=m" (*p), "=a" (success) /* Outputs. */ - : "m" (*p), "a" (c), "r" (s) /* Inputs. */ - : "memory" /* Clobbers. */ - ); - - return (!(bool)success); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - - asm volatile ( - "xchgq %1, %0;" /* Lock is implied by xchgq. */ - : "=m" (*p), "+r" (x) /* Outputs. */ - : "m" (*p) /* Inputs. */ - : "memory" /* Clobbers. 
*/ - ); -} -# elif (defined(JEMALLOC_C11ATOMICS)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; - return (atomic_fetch_add(a, x) + x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; - return (atomic_fetch_sub(a, x) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; - return (!atomic_compare_exchange_strong(a, &c, s)); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; - atomic_store(a, x); -} -# elif (defined(JEMALLOC_ATOMIC9)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - /* - * atomic_fetchadd_64() doesn't exist, but we only ever use this - * function on LP64 systems, so atomic_fetchadd_long() will do. - */ - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (atomic_fetchadd_long(p, (unsigned long)x) + x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s)); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - atomic_store_rel_long(p, x); -} -# elif (defined(JEMALLOC_OSATOMIC)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - - return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p)); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - uint64_t o; - - /*The documented OSAtomic*() API does not expose an atomic exchange. 
*/ - do { - o = atomic_read_uint64(p); - } while (atomic_cas_uint64(p, o, x)); -} -# elif (defined(_MSC_VER)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (InterlockedExchangeAdd64(p, x) + x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - uint64_t o; - - o = InterlockedCompareExchange64(p, s, c); - return (o != c); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - - InterlockedExchange64(p, x); -} -# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \ - defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} - -JEMALLOC_INLINE bool -atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) -{ - - return (!__sync_bool_compare_and_swap(p, c, s)); -} - -JEMALLOC_INLINE void -atomic_write_uint64(uint64_t *p, uint64_t x) -{ - - __sync_lock_test_and_set(p, x); -} -# else -# error "Missing implementation for 64-bit atomic operations" -# endif -#endif - -/******************************************************************************/ -/* 32-bit operations. */ -#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - uint32_t t = x; - - asm volatile ( - "lock; xaddl %0, %1;" - : "+r" (t), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (t + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - uint32_t t; - - x = (uint32_t)(-(int32_t)x); - t = x; - asm volatile ( - "lock; xaddl %0, %1;" - : "+r" (t), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (t + x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - uint8_t success; - - asm volatile ( - "lock; cmpxchgl %4, %0;" - "sete %1;" - : "=m" (*p), "=a" (success) /* Outputs. */ - : "m" (*p), "a" (c), "r" (s) /* Inputs. */ - : "memory" - ); - - return (!(bool)success); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - - asm volatile ( - "xchgl %1, %0;" /* Lock is implied by xchgl. */ - : "=m" (*p), "+r" (x) /* Outputs. */ - : "m" (*p) /* Inputs. */ - : "memory" /* Clobbers. 
*/ - ); -} -# elif (defined(JEMALLOC_C11ATOMICS)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; - return (atomic_fetch_add(a, x) + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; - return (atomic_fetch_sub(a, x) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; - return (!atomic_compare_exchange_strong(a, &c, s)); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; - atomic_store(a, x); -} -#elif (defined(JEMALLOC_ATOMIC9)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (atomic_fetchadd_32(p, x) + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - - return (!atomic_cmpset_32(p, c, s)); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - - atomic_store_rel_32(p, x); -} -#elif (defined(JEMALLOC_OSATOMIC)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - - return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p)); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - uint32_t o; - - /*The documented OSAtomic*() API does not expose an atomic exchange. */ - do { - o = atomic_read_uint32(p); - } while (atomic_cas_uint32(p, o, x)); -} -#elif (defined(_MSC_VER)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (InterlockedExchangeAdd(p, x) + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (InterlockedExchangeAdd(p, -((int32_t)x)) - x); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - uint32_t o; - - o = InterlockedCompareExchange(p, s, c); - return (o != c); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - - InterlockedExchange(p, x); -} -#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \ - defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} - -JEMALLOC_INLINE bool -atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) -{ - - return (!__sync_bool_compare_and_swap(p, c, s)); -} - -JEMALLOC_INLINE void -atomic_write_uint32(uint32_t *p, uint32_t x) -{ - - __sync_lock_test_and_set(p, x); -} -#else -# error "Missing implementation for 32-bit atomic operations" -#endif - -/******************************************************************************/ -/* Pointer operations. 
*/ -JEMALLOC_INLINE void * -atomic_add_p(void **p, void *x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_PTR == 2) - return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE void * -atomic_sub_p(void **p, void *x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((void *)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_PTR == 2) - return ((void *)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); -#endif -} - -JEMALLOC_INLINE bool -atomic_cas_p(void **p, void *c, void *s) -{ - -#if (LG_SIZEOF_PTR == 3) - return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); -#elif (LG_SIZEOF_PTR == 2) - return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); -#endif -} - -JEMALLOC_INLINE void -atomic_write_p(void **p, const void *x) -{ - -#if (LG_SIZEOF_PTR == 3) - atomic_write_uint64((uint64_t *)p, (uint64_t)x); -#elif (LG_SIZEOF_PTR == 2) - atomic_write_uint32((uint32_t *)p, (uint32_t)x); -#endif -} - -/******************************************************************************/ -/* size_t operations. */ -JEMALLOC_INLINE size_t -atomic_add_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_PTR == 2) - return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE size_t -atomic_sub_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((size_t)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_PTR == 2) - return ((size_t)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); -#endif -} - -JEMALLOC_INLINE bool -atomic_cas_z(size_t *p, size_t c, size_t s) -{ - -#if (LG_SIZEOF_PTR == 3) - return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); -#elif (LG_SIZEOF_PTR == 2) - return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); -#endif -} - -JEMALLOC_INLINE void -atomic_write_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - atomic_write_uint64((uint64_t *)p, (uint64_t)x); -#elif (LG_SIZEOF_PTR == 2) - atomic_write_uint32((uint32_t *)p, (uint32_t)x); -#endif -} - -/******************************************************************************/ -/* unsigned operations. 
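The pointer wrappers above dispatch on LG_SIZEOF_PTR, the base-2 log of sizeof(void *) that configure detects: 3 on 64-bit targets, 2 on 32-bit. The size_t wrappers that follow reuse the same switch, which is sound only where size_t is pointer-sized. A sketch of that assumption (demo names, standalone):

    #include <stdint.h>
    #include <stddef.h>

    #if UINTPTR_MAX == UINT64_MAX
    # define DEMO_LG_SIZEOF_PTR 3   /* sizeof(void *) == 1 << 3 */
    #else
    # define DEMO_LG_SIZEOF_PTR 2   /* sizeof(void *) == 1 << 2 */
    #endif

    /* The atomic_*_z() wrappers reuse the pointer switch, which works
     * only because size_t is pointer-sized on jemalloc's targets. */
    _Static_assert(sizeof(size_t) == sizeof(void *),
        "atomic_*_z() wrappers assume size_t is pointer-sized");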
*/ -JEMALLOC_INLINE unsigned -atomic_add_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_INT == 2) - return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE unsigned -atomic_sub_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - return ((unsigned)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_INT == 2) - return ((unsigned)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); -#endif -} - -JEMALLOC_INLINE bool -atomic_cas_u(unsigned *p, unsigned c, unsigned s) -{ - -#if (LG_SIZEOF_INT == 3) - return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); -#elif (LG_SIZEOF_INT == 2) - return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); -#endif -} - -JEMALLOC_INLINE void -atomic_write_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - atomic_write_uint64((uint64_t *)p, (uint64_t)x); -#elif (LG_SIZEOF_INT == 2) - atomic_write_uint32((uint32_t *)p, (uint32_t)x); -#endif -} - -/******************************************************************************/ -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/bitmap.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/bitmap.h deleted file mode 100644 index fcc6005..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/bitmap.h +++ /dev/null @@ -1,230 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ -#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS -#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) - -typedef struct bitmap_level_s bitmap_level_t; -typedef struct bitmap_info_s bitmap_info_t; -typedef unsigned long bitmap_t; -#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG - -/* Number of bits per group. */ -#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) -#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) -#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) - -/* Number of groups required to store a given number of bits. */ -#define BITMAP_BITS2GROUPS(nbits) \ - ((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) - -/* - * Number of groups required at a particular level for a given number of bits. - */ -#define BITMAP_GROUPS_L0(nbits) \ - BITMAP_BITS2GROUPS(nbits) -#define BITMAP_GROUPS_L1(nbits) \ - BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) -#define BITMAP_GROUPS_L2(nbits) \ - BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) -#define BITMAP_GROUPS_L3(nbits) \ - BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ - BITMAP_BITS2GROUPS((nbits))))) - -/* - * Assuming the number of levels, number of groups required for a given number - * of bits. - */ -#define BITMAP_GROUPS_1_LEVEL(nbits) \ - BITMAP_GROUPS_L0(nbits) -#define BITMAP_GROUPS_2_LEVEL(nbits) \ - (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) -#define BITMAP_GROUPS_3_LEVEL(nbits) \ - (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) -#define BITMAP_GROUPS_4_LEVEL(nbits) \ - (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) - -/* - * Maximum number of groups required to support LG_BITMAP_MAXBITS. 
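To make the group arithmetic above concrete: with a 64-bit bitmap_t, LG_BITMAP_GROUP_NBITS is 6 and BITMAP_BITS2GROUPS() is a ceiling division by 64. A checkable sketch under that assumption (demo names):

    #include <assert.h>
    #include <stddef.h>

    #define DEMO_GROUP_NBITS 64 /* 64-bit bitmap_t => LG_BITMAP_GROUP_NBITS == 6 */
    /* Ceiling division: groups needed to cover nbits bits. */
    #define DEMO_BITS2GROUPS(nbits) \
        (((nbits) + DEMO_GROUP_NBITS - 1) / DEMO_GROUP_NBITS)

    int
    main(void)
    {
        /* 512 bits need 8 level-0 groups... */
        assert(DEMO_BITS2GROUPS(512) == 8);
        /* ...whose 8 summary bits fit in one level-1 group, so a
         * 512-bit bitmap is a 2-level tree of 8 + 1 groups. */
        assert(DEMO_BITS2GROUPS(DEMO_BITS2GROUPS(512)) == 1);
        return (0);
    }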
- */ -#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) -#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) -#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) -#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 -# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) -#else -# error "Unsupported bitmap size" -#endif - -/* Maximum number of levels possible. */ -#define BITMAP_MAX_LEVELS \ - (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ - + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct bitmap_level_s { - /* Offset of this level's groups within the array of groups. */ - size_t group_offset; -}; - -struct bitmap_info_s { - /* Logical number of bits in bitmap (stored at bottom level). */ - size_t nbits; - - /* Number of levels necessary for nbits. */ - unsigned nlevels; - - /* - * Only the first (nlevels+1) elements are used, and levels are ordered - * bottom to top (e.g. the bottom level is stored in levels[0]). - */ - bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); -size_t bitmap_info_ngroups(const bitmap_info_t *binfo); -size_t bitmap_size(size_t nbits); -void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); -bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); -void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) -JEMALLOC_INLINE bool -bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ - unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1; - bitmap_t rg = bitmap[rgoff]; - /* The bitmap is full iff the root group is 0. */ - return (rg == 0); -} - -JEMALLOC_INLINE bool -bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ - size_t goff; - bitmap_t g; - - assert(bit < binfo->nbits); - goff = bit >> LG_BITMAP_GROUP_NBITS; - g = bitmap[goff]; - return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))); -} - -JEMALLOC_INLINE void -bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ - size_t goff; - bitmap_t *gp; - bitmap_t g; - - assert(bit < binfo->nbits); - assert(!bitmap_get(bitmap, binfo, bit)); - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[goff]; - g = *gp; - assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - assert(bitmap_get(bitmap, binfo, bit)); - /* Propagate group state transitions up the tree. 
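The inline operations above rely on an inverted encoding: a 1 bit in a group means the position is still unset (free), which is why the bitmap is full exactly when the root group reads 0 and why bitmap_get() negates the masked bit. A one-group sketch of the encoding (illustrative names):

    #include <assert.h>
    #include <stdbool.h>

    typedef unsigned long demo_bitmap_t;

    /* 1 == still free, 0 == set; hence bitmap_get()'s negation. */
    static bool
    demo_get(demo_bitmap_t g, unsigned bit)
    {
        return (!(g & (1LU << bit)));
    }

    int
    main(void)
    {
        demo_bitmap_t g = ~0LU;     /* freshly initialized: all free */

        assert(!demo_get(g, 5));    /* bit 5 not yet set */
        g ^= 1LU << 5;              /* bitmap_set(): clear the free bit */
        assert(demo_get(g, 5));     /* group reads "full" only at 0 */
        return (0);
    }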
*/ - if (g == 0) { - unsigned i; - for (i = 1; i < binfo->nlevels; i++) { - bit = goff; - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[binfo->levels[i].group_offset + goff]; - g = *gp; - assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - if (g != 0) - break; - } - } -} - -/* sfu: set first unset. */ -JEMALLOC_INLINE size_t -bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ - size_t bit; - bitmap_t g; - unsigned i; - - assert(!bitmap_full(bitmap, binfo)); - - i = binfo->nlevels - 1; - g = bitmap[binfo->levels[i].group_offset]; - bit = jemalloc_ffsl(g) - 1; - while (i > 0) { - i--; - g = bitmap[binfo->levels[i].group_offset + bit]; - bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1); - } - - bitmap_set(bitmap, binfo, bit); - return (bit); -} - -JEMALLOC_INLINE void -bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ - size_t goff; - bitmap_t *gp; - bitmap_t g; - bool propagate; - - assert(bit < binfo->nbits); - assert(bitmap_get(bitmap, binfo, bit)); - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[goff]; - g = *gp; - propagate = (g == 0); - assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - assert(!bitmap_get(bitmap, binfo, bit)); - /* Propagate group state transitions up the tree. */ - if (propagate) { - unsigned i; - for (i = 1; i < binfo->nlevels; i++) { - bit = goff; - goff = bit >> LG_BITMAP_GROUP_NBITS; - gp = &bitmap[binfo->levels[i].group_offset + goff]; - g = *gp; - propagate = (g == 0); - assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) - == 0); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); - *gp = g; - if (!propagate) - break; - } - } -} - -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/ckh.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/ckh.h deleted file mode 100644 index 75c1c97..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/ckh.h +++ /dev/null @@ -1,88 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct ckh_s ckh_t; -typedef struct ckhc_s ckhc_t; - -/* Typedefs to allow easy function pointer passing. */ -typedef void ckh_hash_t (const void *, size_t[2]); -typedef bool ckh_keycomp_t (const void *, const void *); - -/* Maintain counters used to get an idea of performance. */ -/* #define CKH_COUNT */ -/* Print counter values in ckh_delete() (requires CKH_COUNT). */ -/* #define CKH_VERBOSE */ - -/* - * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit - * one bucket per L1 cache line. - */ -#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -/* Hash table cell. */ -struct ckhc_s { - const void *key; - const void *data; -}; - -struct ckh_s { -#ifdef CKH_COUNT - /* Counters used to get an idea of performance. */ - uint64_t ngrows; - uint64_t nshrinks; - uint64_t nshrinkfails; - uint64_t ninserts; - uint64_t nrelocs; -#endif - - /* Used for pseudo-random number generation. */ -#define CKH_A 1103515241 -#define CKH_C 12347 - uint32_t prng_state; - - /* Total number of items. 
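bitmap_sfu() above locates the first free bit with one find-first-set per level, walking from the root group down. On a single group the idea reduces to one ffs; a sketch assuming GCC/Clang, whose __builtin_ffsl() is what JEMALLOC_INTERNAL_FFSL typically resolves to:

    /* Set-first-unset on one group; the caller guarantees the group is
     * not full, as bitmap_sfu() asserts before starting the walk. */
    static int
    demo_sfu(unsigned long *g)
    {
        int bit = __builtin_ffsl((long)*g) - 1; /* lowest 1 (free) bit */

        *g ^= 1UL << bit;                       /* flip it to 0 (set) */
        return (bit);
    }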
*/ - size_t count; - - /* - * Minimum and current number of hash table buckets. There are - * 2^LG_CKH_BUCKET_CELLS cells per bucket. - */ - unsigned lg_minbuckets; - unsigned lg_curbuckets; - - /* Hash and comparison functions. */ - ckh_hash_t *hash; - ckh_keycomp_t *keycomp; - - /* Hash table with 2^lg_curbuckets buckets. */ - ckhc_t *tab; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, - ckh_keycomp_t *keycomp); -void ckh_delete(tsd_t *tsd, ckh_t *ckh); -size_t ckh_count(ckh_t *ckh); -bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); -bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); -bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, - void **data); -bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data); -void ckh_string_hash(const void *key, size_t r_hash[2]); -bool ckh_string_keycomp(const void *k1, const void *k2); -void ckh_pointer_hash(const void *key, size_t r_hash[2]); -bool ckh_pointer_keycomp(const void *k1, const void *k2); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/ctl.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/ctl.h deleted file mode 100644 index 751c14b..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/ctl.h +++ /dev/null @@ -1,111 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct ctl_node_s ctl_node_t; -typedef struct ctl_named_node_s ctl_named_node_t; -typedef struct ctl_indexed_node_s ctl_indexed_node_t; -typedef struct ctl_arena_stats_s ctl_arena_stats_t; -typedef struct ctl_stats_s ctl_stats_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct ctl_node_s { - bool named; -}; - -struct ctl_named_node_s { - struct ctl_node_s node; - const char *name; - /* If (nchildren == 0), this is a terminal node. */ - unsigned nchildren; - const ctl_node_t *children; - int (*ctl)(const size_t *, size_t, void *, size_t *, - void *, size_t); -}; - -struct ctl_indexed_node_s { - struct ctl_node_s node; - const ctl_named_node_t *(*index)(const size_t *, size_t, size_t); -}; - -struct ctl_arena_stats_s { - bool initialized; - unsigned nthreads; - const char *dss; - ssize_t lg_dirty_mult; - size_t pactive; - size_t pdirty; - arena_stats_t astats; - - /* Aggregate stats for small size classes, based on bin stats. */ - size_t allocated_small; - uint64_t nmalloc_small; - uint64_t ndalloc_small; - uint64_t nrequests_small; - - malloc_bin_stats_t bstats[NBINS]; - malloc_large_stats_t *lstats; /* nlclasses elements. */ - malloc_huge_stats_t *hstats; /* nhclasses elements. */ -}; - -struct ctl_stats_s { - size_t allocated; - size_t active; - size_t metadata; - size_t resident; - size_t mapped; - unsigned narenas; - ctl_arena_stats_t *arenas; /* (narenas + 1) elements. 
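The ckh_hash_t/ckh_keycomp_t typedefs above exist so the cuckoo hash can take its hash and comparison as plain function pointers, with ckh_string_hash()/ckh_string_keycomp() as the stock string pair. A standalone sketch of that callback shape, substituting a toy FNV-1a where the real table is built on jemalloc's MurmurHash3-based hash():

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef void demo_hash_t(const void *, size_t[2]);
    typedef bool demo_keycomp_t(const void *, const void *);

    /* Toy FNV-1a filling both lanes; the two outputs drive the two
     * independent bucket choices cuckoo hashing needs. */
    static void
    demo_string_hash(const void *key, size_t r_hash[2])
    {
        uint64_t h = 14695981039346656037ULL;
        const char *s;

        for (s = key; *s != '\0'; s++)
            h = (h ^ (uint64_t)(unsigned char)*s) * 1099511628211ULL;
        r_hash[0] = (size_t)h;
        r_hash[1] = (size_t)(h >> 32);
    }

    static bool
    demo_string_keycomp(const void *k1, const void *k2)
    {
        return (strcmp(k1, k2) == 0);
    }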
*/ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen); -int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp); - -int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen); -bool ctl_boot(void); -void ctl_prefork(void); -void ctl_postfork_parent(void); -void ctl_postfork_child(void); - -#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ - if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ - != 0) { \ - malloc_printf( \ - ": Failure in xmallctl(\"%s\", ...)\n", \ - name); \ - abort(); \ - } \ -} while (0) - -#define xmallctlnametomib(name, mibp, miblenp) do { \ - if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ - malloc_printf(": Failure in " \ - "xmallctlnametomib(\"%s\", ...)\n", name); \ - abort(); \ - } \ -} while (0) - -#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ - if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ - newlen) != 0) { \ - malloc_write( \ - ": Failure in xmallctlbymib()\n"); \ - abort(); \ - } \ -} while (0) - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h deleted file mode 100644 index a08ba77..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for - * functions that are static inline functions if inlining is enabled, and - * single-definition library-private functions if inlining is disabled. - * - * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in - * which case the denoted functions are always static, regardless of whether - * inlining is enabled. - */ -#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE) - /* Disable inlining to make debugging/profiling easier. 
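The xmallctl*() wrappers above are abort-on-failure conveniences over the mallctl entry points, which double as jemalloc's public control interface. For reference, a typical direct call against that documented API (assumes an unprefixed build; prefixed builds expose it as je_mallctl()):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        size_t allocated, sz = sizeof(allocated);

        /* "stats.allocated" is a terminal node in the ctl tree that
         * ctl_byname() walks; its handler fills in the old value. */
        if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0)
            return (1);
        printf("allocated: %zu bytes\n", allocated);
        return (0);
    }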
*/ -# define JEMALLOC_ALWAYS_INLINE -# define JEMALLOC_ALWAYS_INLINE_C static -# define JEMALLOC_INLINE -# define JEMALLOC_INLINE_C static -# define inline -#else -# define JEMALLOC_ENABLE_INLINE -# ifdef JEMALLOC_HAVE_ATTR -# define JEMALLOC_ALWAYS_INLINE \ - static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) -# define JEMALLOC_ALWAYS_INLINE_C \ - static inline JEMALLOC_ATTR(always_inline) -# else -# define JEMALLOC_ALWAYS_INLINE static inline -# define JEMALLOC_ALWAYS_INLINE_C static inline -# endif -# define JEMALLOC_INLINE static inline -# define JEMALLOC_INLINE_C static inline -# ifdef _MSC_VER -# define inline _inline -# endif -#endif - -#ifdef JEMALLOC_CC_SILENCE -# define UNUSED JEMALLOC_ATTR(unused) -#else -# define UNUSED -#endif - -#define ZU(z) ((size_t)z) -#define ZI(z) ((ssize_t)z) -#define QU(q) ((uint64_t)q) -#define QI(q) ((int64_t)q) - -#define KZU(z) ZU(z##ULL) -#define KZI(z) ZI(z##LL) -#define KQU(q) QU(q##ULL) -#define KQI(q) QI(q##LL) - -#ifndef __DECONST -# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) -#endif - -#ifndef JEMALLOC_HAS_RESTRICT -# define restrict -#endif diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/mutex.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/mutex.h deleted file mode 100644 index f051f29..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/mutex.h +++ /dev/null @@ -1,111 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct malloc_mutex_s malloc_mutex_t; - -#ifdef _WIN32 -# define MALLOC_MUTEX_INITIALIZER -#elif (defined(JEMALLOC_OSSPIN)) -# define MALLOC_MUTEX_INITIALIZER {0} -#elif (defined(JEMALLOC_MUTEX_INIT_CB)) -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL} -#else -# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \ - defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) -# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP} -# else -# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER} -# endif -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct malloc_mutex_s { -#ifdef _WIN32 -# if _WIN32_WINNT >= 0x0600 - SRWLOCK lock; -# else - CRITICAL_SECTION lock; -# endif -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLock lock; -#elif (defined(JEMALLOC_MUTEX_INIT_CB)) - pthread_mutex_t lock; - malloc_mutex_t *postponed_next; -#else - pthread_mutex_t lock; -#endif -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#ifdef JEMALLOC_LAZY_LOCK -extern bool isthreaded; -#else -# undef isthreaded /* Undo private_namespace.h definition. 
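The ZU()/KQU() macro family above exists to build size_t and 64-bit constants without scattering casts and suffixes; KZU(1), for instance, expands to (size_t)1ULL. A short sketch of why the forced type matters (assumes a 64-bit size_t):

    #include <stddef.h>

    #define DEMO_ZU(z)  ((size_t)z)
    #define DEMO_KZU(z) DEMO_ZU(z##ULL)

    /* Without forcing the literal through ULL and then size_t, 1 << 40
     * would overflow int; with it, the shift is well-defined wherever
     * size_t is 64 bits wide. */
    static const size_t demo_one_tib = DEMO_KZU(1) << 40;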
*/ -# define isthreaded true -#endif - -bool malloc_mutex_init(malloc_mutex_t *mutex); -void malloc_mutex_prefork(malloc_mutex_t *mutex); -void malloc_mutex_postfork_parent(malloc_mutex_t *mutex); -void malloc_mutex_postfork_child(malloc_mutex_t *mutex); -bool mutex_boot(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void malloc_mutex_lock(malloc_mutex_t *mutex); -void malloc_mutex_unlock(malloc_mutex_t *mutex); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) -JEMALLOC_INLINE void -malloc_mutex_lock(malloc_mutex_t *mutex) -{ - - if (isthreaded) { -#ifdef _WIN32 -# if _WIN32_WINNT >= 0x0600 - AcquireSRWLockExclusive(&mutex->lock); -# else - EnterCriticalSection(&mutex->lock); -# endif -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockLock(&mutex->lock); -#else - pthread_mutex_lock(&mutex->lock); -#endif - } -} - -JEMALLOC_INLINE void -malloc_mutex_unlock(malloc_mutex_t *mutex) -{ - - if (isthreaded) { -#ifdef _WIN32 -# if _WIN32_WINNT >= 0x0600 - ReleaseSRWLockExclusive(&mutex->lock); -# else - LeaveCriticalSection(&mutex->lock); -# endif -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockUnlock(&mutex->lock); -#else - pthread_mutex_unlock(&mutex->lock); -#endif - } -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/pages.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/pages.h deleted file mode 100644 index da7eb96..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/pages.h +++ /dev/null @@ -1,26 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *pages_map(void *addr, size_t size); -void pages_unmap(void *addr, size_t size); -void *pages_trim(void *addr, size_t alloc_size, size_t leadsize, - size_t size); -bool pages_commit(void *addr, size_t size); -bool pages_decommit(void *addr, size_t size); -bool pages_purge(void *addr, size_t size); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/private_namespace.sh b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/private_namespace.sh deleted file mode 100755 index cd25eb3..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/private_namespace.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -for symbol in `cat $1` ; do - echo "#define ${symbol} JEMALLOC_N(${symbol})" -done diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/prng.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/prng.h deleted file mode 100644 index 216d0ef..0000000 --- 
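Both lock paths above are gated on isthreaded: in a JEMALLOC_LAZY_LOCK build the flag stays false, and mutex operations are skipped entirely, until thread creation is first observed; otherwise it is pinned to true and the branch folds away. The gating pattern, standalone (demo names):

    #include <pthread.h>
    #include <stdbool.h>

    /* Flipped to true when a second thread is first observed in the
     * lazy-lock build; pinned to true (and folded away) otherwise. */
    static bool demo_isthreaded = false;

    static void
    demo_mutex_lock(pthread_mutex_t *m)
    {
        if (demo_isthreaded)
            pthread_mutex_lock(m); /* skipped while single-threaded */
    }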
a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/prng.h +++ /dev/null @@ -1,60 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * Simple linear congruential pseudo-random number generator: - * - * prng(y) = (a*x + c) % m - * - * where the following constants ensure maximal period: - * - * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. - * c == Odd number (relatively prime to 2^n). - * m == 2^32 - * - * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. - * - * This choice of m has the disadvantage that the quality of the bits is - * proportional to bit position. For example, the lowest bit has a cycle of 2, - * the next has a cycle of 4, etc. For this reason, we prefer to use the upper - * bits. - * - * Macro parameters: - * uint32_t r : Result. - * unsigned lg_range : (0..32], number of least significant bits to return. - * uint32_t state : Seed value. - * const uint32_t a, c : See above discussion. - */ -#define prng32(r, lg_range, state, a, c) do { \ - assert((lg_range) > 0); \ - assert((lg_range) <= 32); \ - \ - r = (state * (a)) + (c); \ - state = r; \ - r >>= (32 - (lg_range)); \ -} while (false) - -/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */ -#define prng64(r, lg_range, state, a, c) do { \ - assert((lg_range) > 0); \ - assert((lg_range) <= 64); \ - \ - r = (state * (a)) + (c); \ - state = r; \ - r >>= (64 - (lg_range)); \ -} while (false) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/rtree.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/rtree.h deleted file mode 100644 index 28ae9d1..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/rtree.h +++ /dev/null @@ -1,294 +0,0 @@ -/* - * This radix tree implementation is tailored to the singular purpose of - * associating metadata with chunks that are currently owned by jemalloc. - * - ******************************************************************************* - */ -#ifdef JEMALLOC_H_TYPES - -typedef struct rtree_node_elm_s rtree_node_elm_t; -typedef struct rtree_level_s rtree_level_t; -typedef struct rtree_s rtree_t; - -/* - * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the - * machine address width. - */ -#define LG_RTREE_BITS_PER_LEVEL 4 -#define RTREE_BITS_PER_LEVEL (ZU(1) << LG_RTREE_BITS_PER_LEVEL) -#define RTREE_HEIGHT_MAX \ - ((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL) - -/* Used for two-stage lock-free node initialization. */ -#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1) - -/* - * The node allocation callback function's argument is the number of contiguous - * rtree_node_elm_t structures to allocate, and the resulting memory must be - * zeroed. 
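As a worked instance of prng32() above: ckh.h, removed earlier in this patch, seeds it with a = 1103515241 and c = 12347, which satisfy the stated constraints (a odd with a-1 divisible by 4, c odd). Expanded into a function, keeping the upper bits as the comment recommends (illustrative name; lg_range must stay in (0..32] as the macro asserts):

    #include <stdint.h>

    /* prng(y) = (a*x + c) mod 2^32 with a = 1103515241, c = 12347. */
    static uint32_t
    demo_prng32(uint32_t *state, unsigned lg_range)
    {
        uint32_t r = (*state * 1103515241U) + 12347U;

        *state = r;
        return (r >> (32 - lg_range)); /* upper bits cycle longest */
    }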
- */ -typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t); -typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *); - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct rtree_node_elm_s { - union { - void *pun; - rtree_node_elm_t *child; - extent_node_t *val; - }; -}; - -struct rtree_level_s { - /* - * A non-NULL subtree points to a subtree rooted along the hypothetical - * path to the leaf node corresponding to key 0. Depending on what keys - * have been used to store to the tree, an arbitrary combination of - * subtree pointers may remain NULL. - * - * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4. - * This results in a 3-level tree, and the leftmost leaf can be directly - * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding - * 0x00000000) can be accessed via subtrees[1], and the remainder of the - * tree can be accessed via subtrees[0]. - * - * levels[0] : [ | 0x0001******** | 0x0002******** | ...] - * - * levels[1] : [ | 0x00000001**** | 0x00000002**** | ... ] - * - * levels[2] : [val(0x000000000000) | val(0x000000000001) | ...] - * - * This has practical implications on x64, which currently uses only the - * lower 47 bits of virtual address space in userland, thus leaving - * subtrees[0] unused and avoiding a level of tree traversal. - */ - union { - void *subtree_pun; - rtree_node_elm_t *subtree; - }; - /* Number of key bits distinguished by this level. */ - unsigned bits; - /* - * Cumulative number of key bits distinguished by traversing to - * corresponding tree level. - */ - unsigned cumbits; -}; - -struct rtree_s { - rtree_node_alloc_t *alloc; - rtree_node_dalloc_t *dalloc; - unsigned height; - /* - * Precomputed table used to convert from the number of leading 0 key - * bits to which subtree level to start at. 
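Grounding the 48-bit example in the comment above: with RTREE_BITS_PER_LEVEL = 16, rtree_subkey() slices the key into three 16-bit digits, most significant first, and digit i indexes the node array at level i. A checkable sketch of the slicing (demo names):

    #include <assert.h>
    #include <stdint.h>

    /* Slice a 48-bit key into three 16-bit per-level indices. */
    static unsigned
    demo_subkey(uint64_t key, unsigned level)
    {
        unsigned shift = 48 - 16 * (level + 1);

        return ((unsigned)((key >> shift) & 0xffff));
    }

    int
    main(void)
    {
        uint64_t key = 0x23456789abcdULL; /* 48-bit key */

        assert(demo_subkey(key, 0) == 0x2345);
        assert(demo_subkey(key, 1) == 0x6789);
        assert(demo_subkey(key, 2) == 0xabcd);
        return (0);
    }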
- */ - unsigned start_level[RTREE_HEIGHT_MAX]; - rtree_level_t levels[RTREE_HEIGHT_MAX]; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, - rtree_node_dalloc_t *dalloc); -void rtree_delete(rtree_t *rtree); -rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree, - unsigned level); -rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree, - rtree_node_elm_t *elm, unsigned level); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -unsigned rtree_start_level(rtree_t *rtree, uintptr_t key); -uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level); - -bool rtree_node_valid(rtree_node_elm_t *node); -rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm); -rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, - unsigned level); -extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, - bool dependent); -void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, - const extent_node_t *val); -rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level); -rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level); - -extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent); -bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) -JEMALLOC_INLINE unsigned -rtree_start_level(rtree_t *rtree, uintptr_t key) -{ - unsigned start_level; - - if (unlikely(key == 0)) - return (rtree->height - 1); - - start_level = rtree->start_level[lg_floor(key) >> - LG_RTREE_BITS_PER_LEVEL]; - assert(start_level < rtree->height); - return (start_level); -} - -JEMALLOC_INLINE uintptr_t -rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level) -{ - - return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - - rtree->levels[level].cumbits)) & ((ZU(1) << - rtree->levels[level].bits) - 1)); -} - -JEMALLOC_INLINE bool -rtree_node_valid(rtree_node_elm_t *node) -{ - - return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING); -} - -JEMALLOC_INLINE rtree_node_elm_t * -rtree_child_tryread(rtree_node_elm_t *elm) -{ - rtree_node_elm_t *child; - - /* Double-checked read (first read may be stale. */ - child = elm->child; - if (!rtree_node_valid(child)) - child = atomic_read_p(&elm->pun); - return (child); -} - -JEMALLOC_INLINE rtree_node_elm_t * -rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level) -{ - rtree_node_elm_t *child; - - child = rtree_child_tryread(elm); - if (unlikely(!rtree_node_valid(child))) - child = rtree_child_read_hard(rtree, elm, level); - return (child); -} - -JEMALLOC_INLINE extent_node_t * -rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent) -{ - - if (dependent) { - /* - * Reading a val on behalf of a pointer to a valid allocation is - * guaranteed to be a clean read even without synchronization, - * because the rtree update became visible in memory before the - * pointer came into existence. - */ - return (elm->val); - } else { - /* - * An arbitrary read, e.g. on behalf of ivsalloc(), may not be - * dependent on a previous rtree write, which means a stale read - * could result if synchronization were omitted here. 
- */ - return (atomic_read_p(&elm->pun)); - } -} - -JEMALLOC_INLINE void -rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val) -{ - - atomic_write_p(&elm->pun, val); -} - -JEMALLOC_INLINE rtree_node_elm_t * -rtree_subtree_tryread(rtree_t *rtree, unsigned level) -{ - rtree_node_elm_t *subtree; - - /* Double-checked read (first read may be stale. */ - subtree = rtree->levels[level].subtree; - if (!rtree_node_valid(subtree)) - subtree = atomic_read_p(&rtree->levels[level].subtree_pun); - return (subtree); -} - -JEMALLOC_INLINE rtree_node_elm_t * -rtree_subtree_read(rtree_t *rtree, unsigned level) -{ - rtree_node_elm_t *subtree; - - subtree = rtree_subtree_tryread(rtree, level); - if (unlikely(!rtree_node_valid(subtree))) - subtree = rtree_subtree_read_hard(rtree, level); - return (subtree); -} - -JEMALLOC_INLINE extent_node_t * -rtree_get(rtree_t *rtree, uintptr_t key, bool dependent) -{ - uintptr_t subkey; - unsigned i, start_level; - rtree_node_elm_t *node, *child; - - start_level = rtree_start_level(rtree, key); - - for (i = start_level, node = rtree_subtree_tryread(rtree, start_level); - /**/; i++, node = child) { - if (!dependent && unlikely(!rtree_node_valid(node))) - return (NULL); - subkey = rtree_subkey(rtree, key, i); - if (i == rtree->height - 1) { - /* - * node is a leaf, so it contains values rather than - * child pointers. - */ - return (rtree_val_read(rtree, &node[subkey], - dependent)); - } - assert(i < rtree->height - 1); - child = rtree_child_tryread(&node[subkey]); - } - not_reached(); -} - -JEMALLOC_INLINE bool -rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val) -{ - uintptr_t subkey; - unsigned i, start_level; - rtree_node_elm_t *node, *child; - - start_level = rtree_start_level(rtree, key); - - node = rtree_subtree_read(rtree, start_level); - if (node == NULL) - return (true); - for (i = start_level; /**/; i++, node = child) { - subkey = rtree_subkey(rtree, key, i); - if (i == rtree->height - 1) { - /* - * node is a leaf, so it contains values rather than - * child pointers. - */ - rtree_val_write(rtree, &node[subkey], val); - return (false); - } - assert(i + 1 < rtree->height); - child = rtree_child_read(rtree, &node[subkey], i); - if (child == NULL) - return (true); - } - not_reached(); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/stats.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/stats.h deleted file mode 100644 index c91dba9..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/stats.h +++ /dev/null @@ -1,183 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct tcache_bin_stats_s tcache_bin_stats_t; -typedef struct malloc_bin_stats_s malloc_bin_stats_t; -typedef struct malloc_large_stats_s malloc_large_stats_t; -typedef struct malloc_huge_stats_s malloc_huge_stats_t; -typedef struct arena_stats_s arena_stats_t; -typedef struct chunk_stats_s chunk_stats_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct tcache_bin_stats_s { - /* - * Number of allocation requests that corresponded to the size of this - * bin. 
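rtree_child_tryread() and rtree_subtree_tryread() above share one double-checked pattern: a cheap read first, then an atomic re-read only if the result looks invalid, sound because each slot is written at most once and never reverts. The pattern in C11 terms (illustrative name):

    #include <stdatomic.h>
    #include <stddef.h>

    /* Cheap relaxed read first; only the apparently-unset case pays
     * for the stronger re-read. */
    static void *
    demo_tryread(_Atomic(void *) *slot)
    {
        void *p = atomic_load_explicit(slot, memory_order_relaxed);

        if (p == NULL)
            p = atomic_load(slot);
        return (p);
    }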
- */ - uint64_t nrequests; -}; - -struct malloc_bin_stats_s { - /* - * Total number of allocation/deallocation requests served directly by - * the bin. Note that tcache may allocate an object, then recycle it - * many times, resulting many increments to nrequests, but only one - * each to nmalloc and ndalloc. - */ - uint64_t nmalloc; - uint64_t ndalloc; - - /* - * Number of allocation requests that correspond to the size of this - * bin. This includes requests served by tcache, though tcache only - * periodically merges into this counter. - */ - uint64_t nrequests; - - /* - * Current number of regions of this size class, including regions - * currently cached by tcache. - */ - size_t curregs; - - /* Number of tcache fills from this bin. */ - uint64_t nfills; - - /* Number of tcache flushes to this bin. */ - uint64_t nflushes; - - /* Total number of runs created for this bin's size class. */ - uint64_t nruns; - - /* - * Total number of runs reused by extracting them from the runs tree for - * this bin's size class. - */ - uint64_t reruns; - - /* Current number of runs in this bin. */ - size_t curruns; -}; - -struct malloc_large_stats_s { - /* - * Total number of allocation/deallocation requests served directly by - * the arena. Note that tcache may allocate an object, then recycle it - * many times, resulting many increments to nrequests, but only one - * each to nmalloc and ndalloc. - */ - uint64_t nmalloc; - uint64_t ndalloc; - - /* - * Number of allocation requests that correspond to this size class. - * This includes requests served by tcache, though tcache only - * periodically merges into this counter. - */ - uint64_t nrequests; - - /* - * Current number of runs of this size class, including runs currently - * cached by tcache. - */ - size_t curruns; -}; - -struct malloc_huge_stats_s { - /* - * Total number of allocation/deallocation requests served directly by - * the arena. - */ - uint64_t nmalloc; - uint64_t ndalloc; - - /* Current number of (multi-)chunk allocations of this size class. */ - size_t curhchunks; -}; - -struct arena_stats_s { - /* Number of bytes currently mapped. */ - size_t mapped; - - /* - * Total number of purge sweeps, total number of madvise calls made, - * and total pages purged in order to keep dirty unused memory under - * control. - */ - uint64_t npurge; - uint64_t nmadvise; - uint64_t purged; - - /* - * Number of bytes currently mapped purely for metadata purposes, and - * number of bytes currently allocated for internal metadata. - */ - size_t metadata_mapped; - size_t metadata_allocated; /* Protected via atomic_*_z(). */ - - /* Per-size-category statistics. */ - size_t allocated_large; - uint64_t nmalloc_large; - uint64_t ndalloc_large; - uint64_t nrequests_large; - - size_t allocated_huge; - uint64_t nmalloc_huge; - uint64_t ndalloc_huge; - - /* One element for each large size class. */ - malloc_large_stats_t *lstats; - - /* One element for each huge size class. 
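A useful invariant across the malloc_bin_stats_s fields above: every live region was counted once into nmalloc and will be counted once into ndalloc when freed, and curregs is updated in tandem with both, so nmalloc - ndalloc should equal curregs at any merge point. A sketch of that consistency check (demo struct mirrors the fields; assumption, not a documented guarantee):

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    struct demo_bin_stats { /* field meanings mirror malloc_bin_stats_s */
        uint64_t nmalloc, ndalloc;
        size_t curregs;
    };

    static void
    demo_check(const struct demo_bin_stats *s)
    {
        /* Regions allocated minus regions freed == regions live. */
        assert(s->nmalloc - s->ndalloc == (uint64_t)s->curregs);
    }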
*/ - malloc_huge_stats_t *hstats; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_stats_print; - -extern size_t stats_cactive; - -void stats_print(void (*write)(void *, const char *), void *cbopaque, - const char *opts); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -size_t stats_cactive_get(void); -void stats_cactive_add(size_t size); -void stats_cactive_sub(size_t size); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_)) -JEMALLOC_INLINE size_t -stats_cactive_get(void) -{ - - return (atomic_read_z(&stats_cactive)); -} - -JEMALLOC_INLINE void -stats_cactive_add(size_t size) -{ - - atomic_add_z(&stats_cactive, size); -} - -JEMALLOC_INLINE void -stats_cactive_sub(size_t size) -{ - - atomic_sub_z(&stats_cactive, size); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/tsd.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/tsd.h deleted file mode 100644 index eed7aa0..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/tsd.h +++ /dev/null @@ -1,665 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* Maximum number of malloc_tsd users with cleanup functions. */ -#define MALLOC_TSD_CLEANUPS_MAX 2 - -typedef bool (*malloc_tsd_cleanup_t)(void); - -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -typedef struct tsd_init_block_s tsd_init_block_t; -typedef struct tsd_init_head_s tsd_init_head_t; -#endif - -typedef struct tsd_s tsd_t; - -typedef enum { - tsd_state_uninitialized, - tsd_state_nominal, - tsd_state_purgatory, - tsd_state_reincarnated -} tsd_state_t; - -/* - * TLS/TSD-agnostic macro-based implementation of thread-specific data. There - * are five macros that support (at least) three use cases: file-private, - * library-private, and library-private inlined. Following is an example - * library-private tsd variable: - * - * In example.h: - * typedef struct { - * int x; - * int y; - * } example_t; - * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) - * malloc_tsd_types(example_, example_t) - * malloc_tsd_protos(, example_, example_t) - * malloc_tsd_externs(example_, example_t) - * In example.c: - * malloc_tsd_data(, example_, example_t, EX_INITIALIZER) - * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER, - * example_tsd_cleanup) - * - * The result is a set of generated functions, e.g.: - * - * bool example_tsd_boot(void) {...} - * example_t *example_tsd_get() {...} - * void example_tsd_set(example_t *val) {...} - * - * Note that all of the functions deal in terms of (a_type *) rather than - * (a_type) so that it is possible to support non-pointer types (unlike - * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is - * cast to (void *). This means that the cleanup function needs to cast the - * function argument to (a_type *), then dereference the resulting pointer to - * access fields, e.g. - * - * void - * example_tsd_cleanup(void *arg) - * { - * example_t *example = (example_t *)arg; - * - * example->x = 42; - * [...] 
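stats_cactive above is the one global counter in this header, maintained lock-free through the atomic_*_z() wrappers from the atomic.h removed earlier. The same shape in C11, for reference (illustrative names):

    #include <stdatomic.h>
    #include <stddef.h>

    static _Atomic size_t demo_cactive;

    static void
    demo_cactive_add(size_t size)
    {
        atomic_fetch_add(&demo_cactive, size);
    }

    static void
    demo_cactive_sub(size_t size)
    {
        atomic_fetch_sub(&demo_cactive, size);
    }

    static size_t
    demo_cactive_get(void)
    {
        return (atomic_load(&demo_cactive));
    }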
- * if ([want the cleanup function to be called again]) - * example_tsd_set(example); - * } - * - * If example_tsd_set() is called within example_tsd_cleanup(), it will be - * called again. This is similar to how pthreads TSD destruction works, except - * that pthreads only calls the cleanup function again if the value was set to - * non-NULL. - */ - -/* malloc_tsd_types(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_types(a_name, a_type) -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_types(a_name, a_type) -#elif (defined(_WIN32)) -#define malloc_tsd_types(a_name, a_type) \ -typedef struct { \ - bool initialized; \ - a_type val; \ -} a_name##tsd_wrapper_t; -#else -#define malloc_tsd_types(a_name, a_type) \ -typedef struct { \ - bool initialized; \ - a_type val; \ -} a_name##tsd_wrapper_t; -#endif - -/* malloc_tsd_protos(). */ -#define malloc_tsd_protos(a_attr, a_name, a_type) \ -a_attr bool \ -a_name##tsd_boot0(void); \ -a_attr void \ -a_name##tsd_boot1(void); \ -a_attr bool \ -a_name##tsd_boot(void); \ -a_attr a_type * \ -a_name##tsd_get(void); \ -a_attr void \ -a_name##tsd_set(a_type *val); - -/* malloc_tsd_externs(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_externs(a_name, a_type) \ -extern __thread a_type a_name##tsd_tls; \ -extern __thread bool a_name##tsd_initialized; \ -extern bool a_name##tsd_booted; -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_externs(a_name, a_type) \ -extern __thread a_type a_name##tsd_tls; \ -extern pthread_key_t a_name##tsd_tsd; \ -extern bool a_name##tsd_booted; -#elif (defined(_WIN32)) -#define malloc_tsd_externs(a_name, a_type) \ -extern DWORD a_name##tsd_tsd; \ -extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ -extern bool a_name##tsd_booted; -#else -#define malloc_tsd_externs(a_name, a_type) \ -extern pthread_key_t a_name##tsd_tsd; \ -extern tsd_init_head_t a_name##tsd_init_head; \ -extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ -extern bool a_name##tsd_booted; -#endif - -/* malloc_tsd_data(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr __thread a_type JEMALLOC_TLS_MODEL \ - a_name##tsd_tls = a_initializer; \ -a_attr __thread bool JEMALLOC_TLS_MODEL \ - a_name##tsd_initialized = false; \ -a_attr bool a_name##tsd_booted = false; -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr __thread a_type JEMALLOC_TLS_MODEL \ - a_name##tsd_tls = a_initializer; \ -a_attr pthread_key_t a_name##tsd_tsd; \ -a_attr bool a_name##tsd_booted = false; -#elif (defined(_WIN32)) -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr DWORD a_name##tsd_tsd; \ -a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ - false, \ - a_initializer \ -}; \ -a_attr bool a_name##tsd_booted = false; -#else -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr pthread_key_t a_name##tsd_tsd; \ -a_attr tsd_init_head_t a_name##tsd_init_head = { \ - ql_head_initializer(blocks), \ - MALLOC_MUTEX_INITIALIZER \ -}; \ -a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ - false, \ - a_initializer \ -}; \ -a_attr bool a_name##tsd_booted = false; -#endif - -/* malloc_tsd_funcs(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. 
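Stripped of the macro layers, the first backend (JEMALLOC_MALLOC_THREAD_CLEANUP, whose boot/get/set bodies follow, and likewise JEMALLOC_TLS) reduces to a compiler-level thread-local plus a booted flag. A condensed sketch (GCC/Clang __thread assumed; demo names, asserts and cleanup registration elided):

    #include <stdbool.h>

    static __thread int demo_tsd_tls;
    static bool demo_tsd_booted = false;

    static bool
    demo_tsd_boot(void)
    {
        demo_tsd_booted = true;
        return (false); /* false == success, matching *tsd_boot0() */
    }

    static int *
    demo_tsd_get(void)
    {
        /* The generated getter asserts demo_tsd_booted here. */
        return (&demo_tsd_tls);
    }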
*/ \ -a_attr bool \ -a_name##tsd_cleanup_wrapper(void) \ -{ \ - \ - if (a_name##tsd_initialized) { \ - a_name##tsd_initialized = false; \ - a_cleanup(&a_name##tsd_tls); \ - } \ - return (a_name##tsd_initialized); \ -} \ -a_attr bool \ -a_name##tsd_boot0(void) \ -{ \ - \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - malloc_tsd_cleanup_register( \ - &a_name##tsd_cleanup_wrapper); \ - } \ - a_name##tsd_booted = true; \ - return (false); \ -} \ -a_attr void \ -a_name##tsd_boot1(void) \ -{ \ - \ - /* Do nothing. */ \ -} \ -a_attr bool \ -a_name##tsd_boot(void) \ -{ \ - \ - return (a_name##tsd_boot0()); \ -} \ -/* Get/set. */ \ -a_attr a_type * \ -a_name##tsd_get(void) \ -{ \ - \ - assert(a_name##tsd_booted); \ - return (&a_name##tsd_tls); \ -} \ -a_attr void \ -a_name##tsd_set(a_type *val) \ -{ \ - \ - assert(a_name##tsd_booted); \ - a_name##tsd_tls = (*val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - a_name##tsd_initialized = true; \ -} -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##tsd_boot0(void) \ -{ \ - \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \ - 0) \ - return (true); \ - } \ - a_name##tsd_booted = true; \ - return (false); \ -} \ -a_attr void \ -a_name##tsd_boot1(void) \ -{ \ - \ - /* Do nothing. */ \ -} \ -a_attr bool \ -a_name##tsd_boot(void) \ -{ \ - \ - return (a_name##tsd_boot0()); \ -} \ -/* Get/set. */ \ -a_attr a_type * \ -a_name##tsd_get(void) \ -{ \ - \ - assert(a_name##tsd_booted); \ - return (&a_name##tsd_tls); \ -} \ -a_attr void \ -a_name##tsd_set(a_type *val) \ -{ \ - \ - assert(a_name##tsd_booted); \ - a_name##tsd_tls = (*val); \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - if (pthread_setspecific(a_name##tsd_tsd, \ - (void *)(&a_name##tsd_tls))) { \ - malloc_write(": Error" \ - " setting TSD for "#a_name"\n"); \ - if (opt_abort) \ - abort(); \ - } \ - } \ -} -#elif (defined(_WIN32)) -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##tsd_cleanup_wrapper(void) \ -{ \ - DWORD error = GetLastError(); \ - a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ - TlsGetValue(a_name##tsd_tsd); \ - SetLastError(error); \ - \ - if (wrapper == NULL) \ - return (false); \ - if (a_cleanup != malloc_tsd_no_cleanup && \ - wrapper->initialized) { \ - wrapper->initialized = false; \ - a_cleanup(&wrapper->val); \ - if (wrapper->initialized) { \ - /* Trigger another cleanup round. 
*/ \ - return (true); \ - } \ - } \ - malloc_tsd_dalloc(wrapper); \ - return (false); \ -} \ -a_attr void \ -a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \ -{ \ - \ - if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \ - malloc_write(": Error setting" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ -} \ -a_attr a_name##tsd_wrapper_t * \ -a_name##tsd_wrapper_get(void) \ -{ \ - DWORD error = GetLastError(); \ - a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ - TlsGetValue(a_name##tsd_tsd); \ - SetLastError(error); \ - \ - if (unlikely(wrapper == NULL)) { \ - wrapper = (a_name##tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ - if (wrapper == NULL) { \ - malloc_write(": Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } else { \ - wrapper->initialized = false; \ - wrapper->val = a_initializer; \ - } \ - a_name##tsd_wrapper_set(wrapper); \ - } \ - return (wrapper); \ -} \ -a_attr bool \ -a_name##tsd_boot0(void) \ -{ \ - \ - a_name##tsd_tsd = TlsAlloc(); \ - if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \ - return (true); \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - malloc_tsd_cleanup_register( \ - &a_name##tsd_cleanup_wrapper); \ - } \ - a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \ - a_name##tsd_booted = true; \ - return (false); \ -} \ -a_attr void \ -a_name##tsd_boot1(void) \ -{ \ - a_name##tsd_wrapper_t *wrapper; \ - wrapper = (a_name##tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ - if (wrapper == NULL) { \ - malloc_write(": Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ - memcpy(wrapper, &a_name##tsd_boot_wrapper, \ - sizeof(a_name##tsd_wrapper_t)); \ - a_name##tsd_wrapper_set(wrapper); \ -} \ -a_attr bool \ -a_name##tsd_boot(void) \ -{ \ - \ - if (a_name##tsd_boot0()) \ - return (true); \ - a_name##tsd_boot1(); \ - return (false); \ -} \ -/* Get/set. */ \ -a_attr a_type * \ -a_name##tsd_get(void) \ -{ \ - a_name##tsd_wrapper_t *wrapper; \ - \ - assert(a_name##tsd_booted); \ - wrapper = a_name##tsd_wrapper_get(); \ - return (&wrapper->val); \ -} \ -a_attr void \ -a_name##tsd_set(a_type *val) \ -{ \ - a_name##tsd_wrapper_t *wrapper; \ - \ - assert(a_name##tsd_booted); \ - wrapper = a_name##tsd_wrapper_get(); \ - wrapper->val = *(val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - wrapper->initialized = true; \ -} -#else -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr void \ -a_name##tsd_cleanup_wrapper(void *arg) \ -{ \ - a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \ - \ - if (a_cleanup != malloc_tsd_no_cleanup && \ - wrapper->initialized) { \ - wrapper->initialized = false; \ - a_cleanup(&wrapper->val); \ - if (wrapper->initialized) { \ - /* Trigger another cleanup round. 
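The Win32 backend above and the pthread backend that follows share one shape: a heap-allocated wrapper per thread, created on first access, stashed in a TLS slot, and freed by a registered cleanup that may re-arm itself for another round. Condensed to the pthread case (demo names; error handling reduced to the abort jemalloc also performs):

    #include <pthread.h>
    #include <stdlib.h>

    typedef struct {
        int initialized;
        int val;            /* stand-in for the real a_type */
    } demo_wrapper_t;

    static pthread_key_t demo_key;

    /* Key destructor; the real cleanup may re-arm the key for another
     * round before the wrapper is finally freed. */
    static void
    demo_cleanup(void *arg)
    {
        free(arg);
    }

    static demo_wrapper_t *
    demo_wrapper_get(void)
    {
        demo_wrapper_t *w = pthread_getspecific(demo_key);

        if (w == NULL) {    /* first access on this thread */
            w = calloc(1, sizeof(*w));
            if (w == NULL)
                abort();    /* jemalloc aborts here too */
            pthread_setspecific(demo_key, w);
        }
        return (w);
    }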
*/ \ - if (pthread_setspecific(a_name##tsd_tsd, \ - (void *)wrapper)) { \ - malloc_write(": Error" \ - " setting TSD for "#a_name"\n"); \ - if (opt_abort) \ - abort(); \ - } \ - return; \ - } \ - } \ - malloc_tsd_dalloc(wrapper); \ -} \ -a_attr void \ -a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \ -{ \ - \ - if (pthread_setspecific(a_name##tsd_tsd, \ - (void *)wrapper)) { \ - malloc_write(": Error setting" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ -} \ -a_attr a_name##tsd_wrapper_t * \ -a_name##tsd_wrapper_get(void) \ -{ \ - a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ - pthread_getspecific(a_name##tsd_tsd); \ - \ - if (unlikely(wrapper == NULL)) { \ - tsd_init_block_t block; \ - wrapper = tsd_init_check_recursion( \ - &a_name##tsd_init_head, &block); \ - if (wrapper) \ - return (wrapper); \ - wrapper = (a_name##tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ - block.data = wrapper; \ - if (wrapper == NULL) { \ - malloc_write(": Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } else { \ - wrapper->initialized = false; \ - wrapper->val = a_initializer; \ - } \ - a_name##tsd_wrapper_set(wrapper); \ - tsd_init_finish(&a_name##tsd_init_head, &block); \ - } \ - return (wrapper); \ -} \ -a_attr bool \ -a_name##tsd_boot0(void) \ -{ \ - \ - if (pthread_key_create(&a_name##tsd_tsd, \ - a_name##tsd_cleanup_wrapper) != 0) \ - return (true); \ - a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \ - a_name##tsd_booted = true; \ - return (false); \ -} \ -a_attr void \ -a_name##tsd_boot1(void) \ -{ \ - a_name##tsd_wrapper_t *wrapper; \ - wrapper = (a_name##tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ - if (wrapper == NULL) { \ - malloc_write(": Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ - memcpy(wrapper, &a_name##tsd_boot_wrapper, \ - sizeof(a_name##tsd_wrapper_t)); \ - a_name##tsd_wrapper_set(wrapper); \ -} \ -a_attr bool \ -a_name##tsd_boot(void) \ -{ \ - \ - if (a_name##tsd_boot0()) \ - return (true); \ - a_name##tsd_boot1(); \ - return (false); \ -} \ -/* Get/set. 
*/ \ -a_attr a_type * \ -a_name##tsd_get(void) \ -{ \ - a_name##tsd_wrapper_t *wrapper; \ - \ - assert(a_name##tsd_booted); \ - wrapper = a_name##tsd_wrapper_get(); \ - return (&wrapper->val); \ -} \ -a_attr void \ -a_name##tsd_set(a_type *val) \ -{ \ - a_name##tsd_wrapper_t *wrapper; \ - \ - assert(a_name##tsd_booted); \ - wrapper = a_name##tsd_wrapper_get(); \ - wrapper->val = *(val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - wrapper->initialized = true; \ -} -#endif - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -struct tsd_init_block_s { - ql_elm(tsd_init_block_t) link; - pthread_t thread; - void *data; -}; -struct tsd_init_head_s { - ql_head(tsd_init_block_t) blocks; - malloc_mutex_t lock; -}; -#endif - -#define MALLOC_TSD \ -/* O(name, type) */ \ - O(tcache, tcache_t *) \ - O(thread_allocated, uint64_t) \ - O(thread_deallocated, uint64_t) \ - O(prof_tdata, prof_tdata_t *) \ - O(arena, arena_t *) \ - O(arenas_cache, arena_t **) \ - O(narenas_cache, unsigned) \ - O(arenas_cache_bypass, bool) \ - O(tcache_enabled, tcache_enabled_t) \ - O(quarantine, quarantine_t *) \ - -#define TSD_INITIALIZER { \ - tsd_state_uninitialized, \ - NULL, \ - 0, \ - 0, \ - NULL, \ - NULL, \ - NULL, \ - 0, \ - false, \ - tcache_enabled_default, \ - NULL \ -} - -struct tsd_s { - tsd_state_t state; -#define O(n, t) \ - t n; -MALLOC_TSD -#undef O -}; - -static const tsd_t tsd_initializer = TSD_INITIALIZER; - -malloc_tsd_types(, tsd_t) - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *malloc_tsd_malloc(size_t size); -void malloc_tsd_dalloc(void *wrapper); -void malloc_tsd_no_cleanup(void *arg); -void malloc_tsd_cleanup_register(bool (*f)(void)); -bool malloc_tsd_boot0(void); -void malloc_tsd_boot1(void); -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -void *tsd_init_check_recursion(tsd_init_head_t *head, - tsd_init_block_t *block); -void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); -#endif -void tsd_cleanup(void *arg); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t) - -tsd_t *tsd_fetch(void); -bool tsd_nominal(tsd_t *tsd); -#define O(n, t) \ -t *tsd_##n##p_get(tsd_t *tsd); \ -t tsd_##n##_get(tsd_t *tsd); \ -void tsd_##n##_set(tsd_t *tsd, t n); -MALLOC_TSD -#undef O -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_)) -malloc_tsd_externs(, tsd_t) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup) - -JEMALLOC_ALWAYS_INLINE tsd_t * -tsd_fetch(void) -{ - tsd_t *tsd = tsd_get(); - - if (unlikely(tsd->state != tsd_state_nominal)) { - if (tsd->state == tsd_state_uninitialized) { - tsd->state = tsd_state_nominal; - /* Trigger cleanup handler registration. 
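MALLOC_TSD above is a classic X-macro: the field list is written once, and each consumer defines O(n, t) to stamp out struct members or accessors before undefining it, which is how struct tsd_s and the tsd_*_get()/set() family below stay in sync. The technique, self-contained (demo names):

    /* Write the field list once... */
    #define DEMO_FIELDS  \
        O(count, int)    \
        O(bytes, long)

    /* ...expand it into struct members... */
    struct demo_tsd {
    #define O(n, t) t n;
    DEMO_FIELDS
    #undef O
    };

    /* ...and again into one accessor per field. */
    #define O(n, t)                                 \
    static t                                        \
    demo_##n##_get(struct demo_tsd *tsd)            \
    {                                               \
        return (tsd->n);                            \
    }
    DEMO_FIELDS
    #undef O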
*/ - tsd_set(tsd); - } else if (tsd->state == tsd_state_purgatory) { - tsd->state = tsd_state_reincarnated; - tsd_set(tsd); - } else - assert(tsd->state == tsd_state_reincarnated); - } - - return (tsd); -} - -JEMALLOC_INLINE bool -tsd_nominal(tsd_t *tsd) -{ - - return (tsd->state == tsd_state_nominal); -} - -#define O(n, t) \ -JEMALLOC_ALWAYS_INLINE t * \ -tsd_##n##p_get(tsd_t *tsd) \ -{ \ - \ - return (&tsd->n); \ -} \ - \ -JEMALLOC_ALWAYS_INLINE t \ -tsd_##n##_get(tsd_t *tsd) \ -{ \ - \ - return (*tsd_##n##p_get(tsd)); \ -} \ - \ -JEMALLOC_ALWAYS_INLINE void \ -tsd_##n##_set(tsd_t *tsd, t n) \ -{ \ - \ - assert(tsd->state == tsd_state_nominal); \ - tsd->n = n; \ -} -MALLOC_TSD -#undef O -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/util.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/util.h deleted file mode 100644 index b2ea740..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/util.h +++ /dev/null @@ -1,314 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#ifdef _WIN32 -# ifdef _WIN64 -# define FMT64_PREFIX "ll" -# define FMTPTR_PREFIX "ll" -# else -# define FMT64_PREFIX "ll" -# define FMTPTR_PREFIX "" -# endif -# define FMTd32 "d" -# define FMTu32 "u" -# define FMTx32 "x" -# define FMTd64 FMT64_PREFIX "d" -# define FMTu64 FMT64_PREFIX "u" -# define FMTx64 FMT64_PREFIX "x" -# define FMTdPTR FMTPTR_PREFIX "d" -# define FMTuPTR FMTPTR_PREFIX "u" -# define FMTxPTR FMTPTR_PREFIX "x" -#else -# include <inttypes.h> -# define FMTd32 PRId32 -# define FMTu32 PRIu32 -# define FMTx32 PRIx32 -# define FMTd64 PRId64 -# define FMTu64 PRIu64 -# define FMTx64 PRIx64 -# define FMTdPTR PRIdPTR -# define FMTuPTR PRIuPTR -# define FMTxPTR PRIxPTR -#endif - -/* Size of stack-allocated buffer passed to buferror(). */ -#define BUFERROR_BUF 64 - -/* - * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be - * large enough for all possible uses within jemalloc. - */ -#define MALLOC_PRINTF_BUFSIZE 4096 - -/* - * Wrap a cpp argument that contains commas such that it isn't broken up into - * multiple arguments. - */ -#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ - -/* - * Silence compiler warnings due to uninitialized values. This is used - * wherever the compiler fails to recognize that the variable is never used - * uninitialized. - */ -#ifdef JEMALLOC_CC_SILENCE -# define JEMALLOC_CC_SILENCE_INIT(v) = v -#else -# define JEMALLOC_CC_SILENCE_INIT(v) -#endif - -#define JEMALLOC_GNUC_PREREQ(major, minor) \ - (!defined(__clang__) && \ - (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))) -#ifndef __has_builtin -# define __has_builtin(builtin) (0) -#endif -#define JEMALLOC_CLANG_HAS_BUILTIN(builtin) \ - (defined(__clang__) && __has_builtin(builtin)) - -#ifdef __GNUC__ -# define likely(x) __builtin_expect(!!(x), 1) -# define unlikely(x) __builtin_expect(!!(x), 0) -# if JEMALLOC_GNUC_PREREQ(4, 6) || \ - JEMALLOC_CLANG_HAS_BUILTIN(__builtin_unreachable) -# define unreachable() __builtin_unreachable() -# else -# define unreachable() -# endif -#else -# define likely(x) !!(x) -# define unlikely(x) !!(x) -# define unreachable() -#endif - -/* - * Define a custom assert() in order to reduce the chances of deadlock during - * assertion failure. 
- */ -#ifndef assert -#define assert(e) do { \ - if (unlikely(config_debug && !(e))) { \ - malloc_printf( \ - "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ - __FILE__, __LINE__, #e); \ - abort(); \ - } \ -} while (0) -#endif - -#ifndef not_reached -#define not_reached() do { \ - if (config_debug) { \ - malloc_printf( \ - "<jemalloc>: %s:%d: Unreachable code reached\n", \ - __FILE__, __LINE__); \ - abort(); \ - } \ - unreachable(); \ -} while (0) -#endif - -#ifndef not_implemented -#define not_implemented() do { \ - if (config_debug) { \ - malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ - __FILE__, __LINE__); \ - abort(); \ - } \ -} while (0) -#endif - -#ifndef assert_not_implemented -#define assert_not_implemented(e) do { \ - if (unlikely(config_debug && !(e))) \ - not_implemented(); \ -} while (0) -#endif - -/* Use to assert a particular configuration, e.g., cassert(config_debug). */ -#define cassert(c) do { \ - if (unlikely(!(c))) \ - not_reached(); \ -} while (0) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -int buferror(int err, char *buf, size_t buflen); -uintmax_t malloc_strtoumax(const char *restrict nptr, - char **restrict endptr, int base); -void malloc_write(const char *s); - -/* - * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating - * point math. - */ -int malloc_vsnprintf(char *str, size_t size, const char *format, - va_list ap); -int malloc_snprintf(char *str, size_t size, const char *format, ...) - JEMALLOC_FORMAT_PRINTF(3, 4); -void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, va_list ap); -void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, - const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); -void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -int jemalloc_ffsl(long bitmap); -int jemalloc_ffs(int bitmap); -size_t pow2_ceil(size_t x); -size_t lg_floor(size_t x); -void set_errno(int errnum); -int get_errno(void); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) - -/* Sanity check. */ -#if !defined(JEMALLOC_INTERNAL_FFSL) || !defined(JEMALLOC_INTERNAL_FFS) -# error Both JEMALLOC_INTERNAL_FFSL && JEMALLOC_INTERNAL_FFS should have been defined by configure -#endif - -JEMALLOC_ALWAYS_INLINE int -jemalloc_ffsl(long bitmap) -{ - - return (JEMALLOC_INTERNAL_FFSL(bitmap)); -} - -JEMALLOC_ALWAYS_INLINE int -jemalloc_ffs(int bitmap) -{ - - return (JEMALLOC_INTERNAL_FFS(bitmap)); -} - -/* Compute the smallest power of 2 that is >= x. */ -JEMALLOC_INLINE size_t -pow2_ceil(size_t x) -{ - - x--; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; -#if (LG_SIZEOF_PTR == 3) - x |= x >> 32; -#endif - x++; - return (x); -} - -#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE size_t -lg_floor(size_t x) -{ - size_t ret; - - assert(x != 0); - - asm ("bsr %1, %0" - : "=r"(ret) // Outputs. - : "r"(x) // Inputs. 
- ); - return (ret); -} -#elif (defined(_MSC_VER)) -JEMALLOC_INLINE size_t -lg_floor(size_t x) -{ - unsigned long ret; - - assert(x != 0); - -#if (LG_SIZEOF_PTR == 3) - _BitScanReverse64(&ret, x); -#elif (LG_SIZEOF_PTR == 2) - _BitScanReverse(&ret, x); -#else -# error "Unsupported type sizes for lg_floor()" -#endif - return (ret); -} -#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) -JEMALLOC_INLINE size_t -lg_floor(size_t x) -{ - - assert(x != 0); - -#if (LG_SIZEOF_PTR == LG_SIZEOF_INT) - return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x)); -#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) - return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x)); -#else -# error "Unsupported type sizes for lg_floor()" -#endif -} -#else -JEMALLOC_INLINE size_t -lg_floor(size_t x) -{ - - assert(x != 0); - - x |= (x >> 1); - x |= (x >> 2); - x |= (x >> 4); - x |= (x >> 8); - x |= (x >> 16); -#if (LG_SIZEOF_PTR == 3 && LG_SIZEOF_PTR == LG_SIZEOF_LONG) - x |= (x >> 32); - if (x == KZU(0xffffffffffffffff)) - return (63); - x++; - return (jemalloc_ffsl(x) - 2); -#elif (LG_SIZEOF_PTR == 2) - if (x == KZU(0xffffffff)) - return (31); - x++; - return (jemalloc_ffs(x) - 2); -#else -# error "Unsupported type sizes for lg_floor()" -#endif -} -#endif - -/* Set error code. */ -JEMALLOC_INLINE void -set_errno(int errnum) -{ - -#ifdef _WIN32 - SetLastError(errnum); -#else - errno = errnum; -#endif -} - -/* Get last error code. */ -JEMALLOC_INLINE int -get_errno(void) -{ - -#ifdef _WIN32 - return (GetLastError()); -#else - return (errno); -#endif -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in deleted file mode 100644 index fa7b350..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in +++ /dev/null @@ -1,57 +0,0 @@ -/* - * void * - * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero, - * bool *commit, unsigned arena_ind); - */ -typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned); - -/* - * bool - * chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind); - */ -typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned); - -/* - * bool - * chunk_commit(void *chunk, size_t size, size_t offset, size_t length, - * unsigned arena_ind); - */ -typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned); - -/* - * bool - * chunk_decommit(void *chunk, size_t size, size_t offset, size_t length, - * unsigned arena_ind); - */ -typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned); - -/* - * bool - * chunk_purge(void *chunk, size_t size, size_t offset, size_t length, - * unsigned arena_ind); - */ -typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned); - -/* - * bool - * chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b, - * bool committed, unsigned arena_ind); - */ -typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned); - -/* - * bool - * chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, - * bool committed, unsigned arena_ind); - */ -typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned); - -typedef struct { - chunk_alloc_t *alloc; - chunk_dalloc_t *dalloc; - chunk_commit_t *commit; - chunk_decommit_t *decommit; - 
chunk_purge_t *purge; - chunk_split_t *split; - chunk_merge_t *merge; -} chunk_hooks_t; diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/msvc_compat/strings.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/msvc_compat/strings.h deleted file mode 100644 index f01ffdd..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/msvc_compat/strings.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef strings_h -#define strings_h - -/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided - * for both */ -#ifdef _MSC_VER -# include <intrin.h> -# pragma intrinsic(_BitScanForward) -static __forceinline int ffsl(long x) -{ - unsigned long i; - - if (_BitScanForward(&i, x)) - return (i + 1); - return (0); -} - -static __forceinline int ffs(int x) -{ - - return (ffsl(x)); -} - -#else -# define ffsl(x) __builtin_ffsl(x) -# define ffs(x) __builtin_ffs(x) -#endif - -#endif /* strings_h */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/msvc_compat/windows_extra.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/msvc_compat/windows_extra.h deleted file mode 100644 index 0c5e323..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/msvc_compat/windows_extra.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H -#define MSVC_COMPAT_WINDOWS_EXTRA_H - -#ifndef ENOENT -# define ENOENT ERROR_PATH_NOT_FOUND -#endif -#ifndef EINVAL -# define EINVAL ERROR_BAD_ARGUMENTS -#endif -#ifndef EAGAIN -# define EAGAIN ERROR_OUTOFMEMORY -#endif -#ifndef EPERM -# define EPERM ERROR_WRITE_FAULT -#endif -#ifndef EFAULT -# define EFAULT ERROR_INVALID_ADDRESS -#endif -#ifndef ENOMEM -# define ENOMEM ERROR_NOT_ENOUGH_MEMORY -#endif -#ifndef ERANGE -# define ERANGE ERROR_INVALID_DATA -#endif - -#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/arena.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/arena.c deleted file mode 100644 index 3081519..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/arena.c +++ /dev/null @@ -1,3318 +0,0 @@ -#define JEMALLOC_ARENA_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; -static ssize_t lg_dirty_mult_default; -arena_bin_info_t arena_bin_info[NBINS]; - -size_t map_bias; -size_t map_misc_offset; -size_t arena_maxrun; /* Max run size for arenas. */ -size_t large_maxclass; /* Max large size class. */ -static size_t small_maxrun; /* Max run size used for small size classes. */ -static bool *small_run_tab; /* Valid small run page multiples. */ -unsigned nlclasses; /* Number of large size classes. */ -unsigned nhclasses; /* Number of huge size classes. */ - -/******************************************************************************/ -/* - * Function prototypes for static functions that are referenced prior to - * definition. 
- */ - -static void arena_purge(arena_t *arena, bool all); -static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, - bool cleaned, bool decommitted); -static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); -static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); - -/******************************************************************************/ - -#define CHUNK_MAP_KEY ((uintptr_t)0x1U) - -JEMALLOC_INLINE_C arena_chunk_map_misc_t * -arena_miscelm_key_create(size_t size) -{ - - return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) | - CHUNK_MAP_KEY)); -} - -JEMALLOC_INLINE_C bool -arena_miscelm_is_key(const arena_chunk_map_misc_t *miscelm) -{ - - return (((uintptr_t)miscelm & CHUNK_MAP_KEY) != 0); -} - -#undef CHUNK_MAP_KEY - -JEMALLOC_INLINE_C size_t -arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm) -{ - - assert(arena_miscelm_is_key(miscelm)); - - return (arena_mapbits_size_decode((uintptr_t)miscelm)); -} - -JEMALLOC_INLINE_C size_t -arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm) -{ - arena_chunk_t *chunk; - size_t pageind, mapbits; - - assert(!arena_miscelm_is_key(miscelm)); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm); - pageind = arena_miscelm_to_pageind(miscelm); - mapbits = arena_mapbits_get(chunk, pageind); - return (arena_mapbits_size_decode(mapbits)); -} - -JEMALLOC_INLINE_C int -arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) -{ - uintptr_t a_miscelm = (uintptr_t)a; - uintptr_t b_miscelm = (uintptr_t)b; - - assert(a != NULL); - assert(b != NULL); - - return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)); -} - -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t, - rb_link, arena_run_comp) - -static size_t -run_quantize(size_t size) -{ - size_t qsize; - - assert(size != 0); - assert(size == PAGE_CEILING(size)); - - /* Don't change sizes that are valid small run sizes. */ - if (size <= small_maxrun && small_run_tab[size >> LG_PAGE]) - return (size); - - /* - * Round down to the nearest run size that can actually be requested - * during normal large allocation. Add large_pad so that cache index - * randomization can offset the allocation from the page boundary. - */ - qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad; - if (qsize <= SMALL_MAXCLASS + large_pad) - return (run_quantize(size - large_pad)); - assert(qsize <= size); - return (qsize); -} - -static size_t -run_quantize_next(size_t size) -{ - size_t large_run_size_next; - - assert(size != 0); - assert(size == PAGE_CEILING(size)); - - /* - * Return the next quantized size greater than the input size. - * Quantized sizes comprise the union of run sizes that back small - * region runs, and run sizes that back large regions with no explicit - * alignment constraints. 
- */ - - if (size > SMALL_MAXCLASS) { - large_run_size_next = PAGE_CEILING(index2size(size2index(size - - large_pad) + 1) + large_pad); - } else - large_run_size_next = SIZE_T_MAX; - if (size >= small_maxrun) - return (large_run_size_next); - - while (true) { - size += PAGE; - assert(size <= small_maxrun); - if (small_run_tab[size >> LG_PAGE]) { - if (large_run_size_next < size) - return (large_run_size_next); - return (size); - } - } -} - -static size_t -run_quantize_first(size_t size) -{ - size_t qsize = run_quantize(size); - - if (qsize < size) { - /* - * Skip a quantization that may have an adequately large run, - * because under-sized runs may be mixed in. This only happens - * when an unusual size is requested, i.e. for aligned - * allocation, and is just one of several places where linear - * search would potentially find sufficiently aligned available - * memory somewhere lower. - */ - qsize = run_quantize_next(size); - } - return (qsize); -} - -JEMALLOC_INLINE_C int -arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b) -{ - int ret; - uintptr_t a_miscelm = (uintptr_t)a; - size_t a_qsize = run_quantize(arena_miscelm_is_key(a) ? - arena_miscelm_key_size_get(a) : arena_miscelm_size_get(a)); - size_t b_qsize = run_quantize(arena_miscelm_size_get(b)); - - /* - * Compare based on quantized size rather than size, in order to sort - * equally useful runs only by address. - */ - ret = (a_qsize > b_qsize) - (a_qsize < b_qsize); - if (ret == 0) { - if (!arena_miscelm_is_key(a)) { - uintptr_t b_miscelm = (uintptr_t)b; - - ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm); - } else { - /* - * Treat keys as if they are lower than anything else. - */ - ret = -1; - } - } - - return (ret); -} - -/* Generate red-black tree functions. 
*/ -rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, - arena_chunk_map_misc_t, rb_link, arena_avail_comp) - -static void -arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages) -{ - - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk, - pageind)); -} - -static void -arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages) -{ - - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk, - pageind)); -} - -static void -arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages) -{ - arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); - - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); - assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == - CHUNK_MAP_DIRTY); - - qr_new(&miscelm->rd, rd_link); - qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link); - arena->ndirty += npages; -} - -static void -arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages) -{ - arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); - - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY); - assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) == - CHUNK_MAP_DIRTY); - - qr_remove(&miscelm->rd, rd_link); - assert(arena->ndirty >= npages); - arena->ndirty -= npages; -} - -static size_t -arena_chunk_dirty_npages(const extent_node_t *node) -{ - - return (extent_node_size_get(node) >> LG_PAGE); -} - -void -arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache) -{ - - if (cache) { - extent_node_dirty_linkage_init(node); - extent_node_dirty_insert(node, &arena->runs_dirty, - &arena->chunks_cache); - arena->ndirty += arena_chunk_dirty_npages(node); - } -} - -void -arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty) -{ - - if (dirty) { - extent_node_dirty_remove(node); - assert(arena->ndirty >= arena_chunk_dirty_npages(node)); - arena->ndirty -= arena_chunk_dirty_npages(node); - } -} - -JEMALLOC_INLINE_C void * -arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) -{ - void *ret; - unsigned regind; - arena_chunk_map_misc_t *miscelm; - void *rpages; - - assert(run->nfree > 0); - assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info)); - - regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info); - miscelm = arena_run_to_miscelm(run); - rpages = arena_miscelm_to_rpages(miscelm); - ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset + - (uintptr_t)(bin_info->reg_interval * regind)); - run->nfree--; - return (ret); -} - -JEMALLOC_INLINE_C void -arena_run_reg_dalloc(arena_run_t *run, void *ptr) -{ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t mapbits = arena_mapbits_get(chunk, pageind); - szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - unsigned regind = arena_run_regind(run, bin_info, ptr); - - assert(run->nfree < bin_info->nregs); - /* Freeing an interior pointer can cause assertion failure. 
*/ - assert(((uintptr_t)ptr - - ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + - (uintptr_t)bin_info->reg0_offset)) % - (uintptr_t)bin_info->reg_interval == 0); - assert((uintptr_t)ptr >= - (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) + - (uintptr_t)bin_info->reg0_offset); - /* Freeing an unallocated pointer can cause assertion failure. */ - assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind)); - - bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind); - run->nfree++; -} - -JEMALLOC_INLINE_C void -arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) -{ - - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (npages << LG_PAGE)); - memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, - (npages << LG_PAGE)); -} - -JEMALLOC_INLINE_C void -arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) -{ - - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind - << LG_PAGE)), PAGE); -} - -JEMALLOC_INLINE_C void -arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) -{ - size_t i; - UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); - - arena_run_page_mark_zeroed(chunk, run_ind); - for (i = 0; i < PAGE / sizeof(size_t); i++) - assert(p[i] == 0); -} - -static void -arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) -{ - - if (config_stats) { - ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + add_pages - - sub_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << - LG_PAGE); - if (cactive_diff != 0) - stats_cactive_add(cactive_diff); - } -} - -static void -arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, - size_t flag_dirty, size_t flag_decommitted, size_t need_pages) -{ - size_t total_pages, rem_pages; - - assert(flag_dirty == 0 || flag_decommitted == 0); - - total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> - LG_PAGE; - assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == - flag_dirty); - assert(need_pages <= total_pages); - rem_pages = total_pages - need_pages; - - arena_avail_remove(arena, chunk, run_ind, total_pages); - if (flag_dirty != 0) - arena_run_dirty_remove(arena, chunk, run_ind, total_pages); - arena_cactive_update(arena, need_pages, 0); - arena->nactive += need_pages; - - /* Keep track of trailing unused pages for later use. */ - if (rem_pages > 0) { - size_t flags = flag_dirty | flag_decommitted; - size_t flag_unzeroed_mask = (flags == 0) ? 
CHUNK_MAP_UNZEROED : - 0; - - arena_mapbits_unallocated_set(chunk, run_ind+need_pages, - (rem_pages << LG_PAGE), flags | - (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) & - flag_unzeroed_mask)); - arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1, - (rem_pages << LG_PAGE), flags | - (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) & - flag_unzeroed_mask)); - if (flag_dirty != 0) { - arena_run_dirty_insert(arena, chunk, run_ind+need_pages, - rem_pages); - } - arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages); - } -} - -static bool -arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, - bool remove, bool zero) -{ - arena_chunk_t *chunk; - arena_chunk_map_misc_t *miscelm; - size_t flag_dirty, flag_decommitted, run_ind, need_pages; - size_t flag_unzeroed_mask; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - miscelm = arena_run_to_miscelm(run); - run_ind = arena_miscelm_to_pageind(miscelm); - flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); - flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); - need_pages = (size >> LG_PAGE); - assert(need_pages > 0); - - if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, - run_ind << LG_PAGE, size, arena->ind)) - return (true); - - if (remove) { - arena_run_split_remove(arena, chunk, run_ind, flag_dirty, - flag_decommitted, need_pages); - } - - if (zero) { - if (flag_decommitted != 0) { - /* The run is untouched, and therefore zeroed. */ - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void - *)((uintptr_t)chunk + (run_ind << LG_PAGE)), - (need_pages << LG_PAGE)); - } else if (flag_dirty != 0) { - /* The run is dirty, so all pages must be zeroed. */ - arena_run_zero(chunk, run_ind, need_pages); - } else { - /* - * The run is clean, so some pages may be zeroed (i.e. - * never before touched). - */ - size_t i; - for (i = 0; i < need_pages; i++) { - if (arena_mapbits_unzeroed_get(chunk, run_ind+i) - != 0) - arena_run_zero(chunk, run_ind+i, 1); - else if (config_debug) { - arena_run_page_validate_zeroed(chunk, - run_ind+i); - } else { - arena_run_page_mark_zeroed(chunk, - run_ind+i); - } - } - } - } else { - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); - } - - /* - * Set the last element first, in case the run only contains one page - * (i.e. both statements set the same element). - */ - flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? 
- CHUNK_MAP_UNZEROED : 0; - arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - run_ind+need_pages-1))); - arena_mapbits_large_set(chunk, run_ind, size, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind))); - return (false); -} - -static bool -arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) -{ - - return (arena_run_split_large_helper(arena, run, size, true, zero)); -} - -static bool -arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) -{ - - return (arena_run_split_large_helper(arena, run, size, false, zero)); -} - -static bool -arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, - szind_t binind) -{ - arena_chunk_t *chunk; - arena_chunk_map_misc_t *miscelm; - size_t flag_dirty, flag_decommitted, run_ind, need_pages, i; - - assert(binind != BININD_INVALID); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - miscelm = arena_run_to_miscelm(run); - run_ind = arena_miscelm_to_pageind(miscelm); - flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); - flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind); - need_pages = (size >> LG_PAGE); - assert(need_pages > 0); - - if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize, - run_ind << LG_PAGE, size, arena->ind)) - return (true); - - arena_run_split_remove(arena, chunk, run_ind, flag_dirty, - flag_decommitted, need_pages); - - for (i = 0; i < need_pages; i++) { - size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk, - run_ind+i); - arena_mapbits_small_set(chunk, run_ind+i, i, binind, - flag_unzeroed); - if (config_debug && flag_dirty == 0 && flag_unzeroed == 0) - arena_run_page_validate_zeroed(chunk, run_ind+i); - } - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); - return (false); -} - -static arena_chunk_t * -arena_chunk_init_spare(arena_t *arena) -{ - arena_chunk_t *chunk; - - assert(arena->spare != NULL); - - chunk = arena->spare; - arena->spare = NULL; - - assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); - assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); - assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxrun); - assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == - arena_maxrun); - assert(arena_mapbits_dirty_get(chunk, map_bias) == - arena_mapbits_dirty_get(chunk, chunk_npages-1)); - - return (chunk); -} - -static bool -arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero) -{ - - /* - * The extent node notion of "committed" doesn't directly apply to - * arena chunks. Arbitrarily mark them as committed. The commit state - * of runs is tracked individually, and upon chunk deallocation the - * entire chunk is in a consistent commit state. - */ - extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true); - extent_node_achunk_set(&chunk->node, true); - return (chunk_register(chunk, &chunk->node)); -} - -static arena_chunk_t * -arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, - bool *zero, bool *commit) -{ - arena_chunk_t *chunk; - - malloc_mutex_unlock(&arena->lock); - - chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL, - chunksize, chunksize, zero, commit); - if (chunk != NULL && !*commit) { - /* Commit header. 
*/ - if (chunk_hooks->commit(chunk, chunksize, 0, map_bias << - LG_PAGE, arena->ind)) { - chunk_dalloc_wrapper(arena, chunk_hooks, - (void *)chunk, chunksize, *commit); - chunk = NULL; - } - } - if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) { - if (!*commit) { - /* Undo commit of header. */ - chunk_hooks->decommit(chunk, chunksize, 0, map_bias << - LG_PAGE, arena->ind); - } - chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk, - chunksize, *commit); - chunk = NULL; - } - - malloc_mutex_lock(&arena->lock); - return (chunk); -} - -static arena_chunk_t * -arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit) -{ - arena_chunk_t *chunk; - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - - chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize, - chunksize, zero, true); - if (chunk != NULL) { - if (arena_chunk_register(arena, chunk, *zero)) { - chunk_dalloc_cache(arena, &chunk_hooks, chunk, - chunksize, true); - return (NULL); - } - *commit = true; - } - if (chunk == NULL) { - chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks, - zero, commit); - } - - if (config_stats && chunk != NULL) { - arena->stats.mapped += chunksize; - arena->stats.metadata_mapped += (map_bias << LG_PAGE); - } - - return (chunk); -} - -static arena_chunk_t * -arena_chunk_init_hard(arena_t *arena) -{ - arena_chunk_t *chunk; - bool zero, commit; - size_t flag_unzeroed, flag_decommitted, i; - - assert(arena->spare == NULL); - - zero = false; - commit = false; - chunk = arena_chunk_alloc_internal(arena, &zero, &commit); - if (chunk == NULL) - return (NULL); - - /* - * Initialize the map to contain one maximal free untouched run. Mark - * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted - * chunk. - */ - flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED; - flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED; - arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun, - flag_unzeroed | flag_decommitted); - /* - * There is no need to initialize the internal page map entries unless - * the chunk is not zeroed. - */ - if (!zero) { - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( - (void *)arena_bitselm_get(chunk, map_bias+1), - (size_t)((uintptr_t) arena_bitselm_get(chunk, - chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk, - map_bias+1))); - for (i = map_bias+1; i < chunk_npages-1; i++) - arena_mapbits_internal_set(chunk, i, flag_unzeroed); - } else { - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void - *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t) - arena_bitselm_get(chunk, chunk_npages-1) - - (uintptr_t)arena_bitselm_get(chunk, map_bias+1))); - if (config_debug) { - for (i = map_bias+1; i < chunk_npages-1; i++) { - assert(arena_mapbits_unzeroed_get(chunk, i) == - flag_unzeroed); - } - } - } - arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun, - flag_unzeroed); - - return (chunk); -} - -static arena_chunk_t * -arena_chunk_alloc(arena_t *arena) -{ - arena_chunk_t *chunk; - - if (arena->spare != NULL) - chunk = arena_chunk_init_spare(arena); - else { - chunk = arena_chunk_init_hard(arena); - if (chunk == NULL) - return (NULL); - } - - /* Insert the run into the runs_avail tree. 
*/ - arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias); - - return (chunk); -} - -static void -arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) -{ - - assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); - assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); - assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxrun); - assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == - arena_maxrun); - assert(arena_mapbits_dirty_get(chunk, map_bias) == - arena_mapbits_dirty_get(chunk, chunk_npages-1)); - assert(arena_mapbits_decommitted_get(chunk, map_bias) == - arena_mapbits_decommitted_get(chunk, chunk_npages-1)); - - /* - * Remove run from the runs_avail tree, so that the arena does not use - * it. - */ - arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias); - - if (arena->spare != NULL) { - arena_chunk_t *spare = arena->spare; - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - bool committed; - - arena->spare = chunk; - if (arena_mapbits_dirty_get(spare, map_bias) != 0) { - arena_run_dirty_remove(arena, spare, map_bias, - chunk_npages-map_bias); - } - - chunk_deregister(spare, &spare->node); - - committed = (arena_mapbits_decommitted_get(spare, map_bias) == - 0); - if (!committed) { - /* - * Decommit the header. Mark the chunk as decommitted - * even if header decommit fails, since treating a - * partially committed chunk as committed has a high - * potential for causing later access of decommitted - * memory. - */ - chunk_hooks = chunk_hooks_get(arena); - chunk_hooks.decommit(spare, chunksize, 0, map_bias << - LG_PAGE, arena->ind); - } - - chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare, - chunksize, committed); - - if (config_stats) { - arena->stats.mapped -= chunksize; - arena->stats.metadata_mapped -= (map_bias << LG_PAGE); - } - } else - arena->spare = chunk; -} - -static void -arena_huge_malloc_stats_update(arena_t *arena, size_t usize) -{ - szind_t index = size2index(usize) - nlclasses - NBINS; - - cassert(config_stats); - - arena->stats.nmalloc_huge++; - arena->stats.allocated_huge += usize; - arena->stats.hstats[index].nmalloc++; - arena->stats.hstats[index].curhchunks++; -} - -static void -arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize) -{ - szind_t index = size2index(usize) - nlclasses - NBINS; - - cassert(config_stats); - - arena->stats.nmalloc_huge--; - arena->stats.allocated_huge -= usize; - arena->stats.hstats[index].nmalloc--; - arena->stats.hstats[index].curhchunks--; -} - -static void -arena_huge_dalloc_stats_update(arena_t *arena, size_t usize) -{ - szind_t index = size2index(usize) - nlclasses - NBINS; - - cassert(config_stats); - - arena->stats.ndalloc_huge++; - arena->stats.allocated_huge -= usize; - arena->stats.hstats[index].ndalloc++; - arena->stats.hstats[index].curhchunks--; -} - -static void -arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize) -{ - szind_t index = size2index(usize) - nlclasses - NBINS; - - cassert(config_stats); - - arena->stats.ndalloc_huge--; - arena->stats.allocated_huge += usize; - arena->stats.hstats[index].ndalloc--; - arena->stats.hstats[index].curhchunks++; -} - -static void -arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize) -{ - - arena_huge_dalloc_stats_update(arena, oldsize); - arena_huge_malloc_stats_update(arena, usize); -} - -static void -arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize, - size_t usize) -{ - - arena_huge_dalloc_stats_update_undo(arena, oldsize); - 
arena_huge_malloc_stats_update_undo(arena, usize); -} - -extent_node_t * -arena_node_alloc(arena_t *arena) -{ - extent_node_t *node; - - malloc_mutex_lock(&arena->node_cache_mtx); - node = ql_last(&arena->node_cache, ql_link); - if (node == NULL) { - malloc_mutex_unlock(&arena->node_cache_mtx); - return (base_alloc(sizeof(extent_node_t))); - } - ql_tail_remove(&arena->node_cache, extent_node_t, ql_link); - malloc_mutex_unlock(&arena->node_cache_mtx); - return (node); -} - -void -arena_node_dalloc(arena_t *arena, extent_node_t *node) -{ - - malloc_mutex_lock(&arena->node_cache_mtx); - ql_elm_new(node, ql_link); - ql_tail_insert(&arena->node_cache, node, ql_link); - malloc_mutex_unlock(&arena->node_cache_mtx); -} - -static void * -arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, - size_t usize, size_t alignment, bool *zero, size_t csize) -{ - void *ret; - bool commit = true; - - ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment, - zero, &commit); - if (ret == NULL) { - /* Revert optimistic stats updates. */ - malloc_mutex_lock(&arena->lock); - if (config_stats) { - arena_huge_malloc_stats_update_undo(arena, usize); - arena->stats.mapped -= usize; - } - arena->nactive -= (usize >> LG_PAGE); - malloc_mutex_unlock(&arena->lock); - } - - return (ret); -} - -void * -arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment, - bool *zero) -{ - void *ret; - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - size_t csize = CHUNK_CEILING(usize); - - malloc_mutex_lock(&arena->lock); - - /* Optimistically update stats. */ - if (config_stats) { - arena_huge_malloc_stats_update(arena, usize); - arena->stats.mapped += usize; - } - arena->nactive += (usize >> LG_PAGE); - - ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment, - zero, true); - malloc_mutex_unlock(&arena->lock); - if (ret == NULL) { - ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize, - alignment, zero, csize); - } - - if (config_stats && ret != NULL) - stats_cactive_add(usize); - return (ret); -} - -void -arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize) -{ - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - size_t csize; - - csize = CHUNK_CEILING(usize); - malloc_mutex_lock(&arena->lock); - if (config_stats) { - arena_huge_dalloc_stats_update(arena, usize); - arena->stats.mapped -= usize; - stats_cactive_sub(usize); - } - arena->nactive -= (usize >> LG_PAGE); - - chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true); - malloc_mutex_unlock(&arena->lock); -} - -void -arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize, - size_t usize) -{ - - assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); - assert(oldsize != usize); - - malloc_mutex_lock(&arena->lock); - if (config_stats) - arena_huge_ralloc_stats_update(arena, oldsize, usize); - if (oldsize < usize) { - size_t udiff = usize - oldsize; - arena->nactive += udiff >> LG_PAGE; - if (config_stats) - stats_cactive_add(udiff); - } else { - size_t udiff = oldsize - usize; - arena->nactive -= udiff >> LG_PAGE; - if (config_stats) - stats_cactive_sub(udiff); - } - malloc_mutex_unlock(&arena->lock); -} - -void -arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize, - size_t usize) -{ - size_t udiff = oldsize - usize; - size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); - - malloc_mutex_lock(&arena->lock); - if (config_stats) { - arena_huge_ralloc_stats_update(arena, oldsize, usize); - if (cdiff != 0) { - arena->stats.mapped -= 
cdiff; - stats_cactive_sub(udiff); - } - } - arena->nactive -= udiff >> LG_PAGE; - - if (cdiff != 0) { - chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - void *nchunk = (void *)((uintptr_t)chunk + - CHUNK_CEILING(usize)); - - chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true); - } - malloc_mutex_unlock(&arena->lock); -} - -static bool -arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks, - void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk, - size_t udiff, size_t cdiff) -{ - bool err; - bool commit = true; - - err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize, - zero, &commit) == NULL); - if (err) { - /* Revert optimistic stats updates. */ - malloc_mutex_lock(&arena->lock); - if (config_stats) { - arena_huge_ralloc_stats_update_undo(arena, oldsize, - usize); - arena->stats.mapped -= cdiff; - } - arena->nactive -= (udiff >> LG_PAGE); - malloc_mutex_unlock(&arena->lock); - } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, - cdiff, true, arena->ind)) { - chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero, - true); - err = true; - } - return (err); -} - -bool -arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize, - size_t usize, bool *zero) -{ - bool err; - chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); - void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)); - size_t udiff = usize - oldsize; - size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); - - malloc_mutex_lock(&arena->lock); - - /* Optimistically update stats. */ - if (config_stats) { - arena_huge_ralloc_stats_update(arena, oldsize, usize); - arena->stats.mapped += cdiff; - } - arena->nactive += (udiff >> LG_PAGE); - - err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff, - chunksize, zero, true) == NULL); - malloc_mutex_unlock(&arena->lock); - if (err) { - err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks, - chunk, oldsize, usize, zero, nchunk, udiff, - cdiff); - } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, - cdiff, true, arena->ind)) { - chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero, - true); - err = true; - } - - if (config_stats && !err) - stats_cactive_add(udiff); - return (err); -} - -/* - * Do first-best-fit run selection, i.e. select the lowest run that best fits. - * Run sizes are quantized, so not all candidate runs are necessarily exactly - * the same size. - */ -static arena_run_t * -arena_run_first_best_fit(arena_t *arena, size_t size) -{ - size_t search_size = run_quantize_first(size); - arena_chunk_map_misc_t *key = arena_miscelm_key_create(search_size); - arena_chunk_map_misc_t *miscelm = - arena_avail_tree_nsearch(&arena->runs_avail, key); - if (miscelm == NULL) - return (NULL); - return (&miscelm->run); -} - -static arena_run_t * -arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) -{ - arena_run_t *run = arena_run_first_best_fit(arena, s2u(size)); - if (run != NULL) { - if (arena_run_split_large(arena, run, size, zero)) - run = NULL; - } - return (run); -} - -static arena_run_t * -arena_run_alloc_large(arena_t *arena, size_t size, bool zero) -{ - arena_chunk_t *chunk; - arena_run_t *run; - - assert(size <= arena_maxrun); - assert(size == PAGE_CEILING(size)); - - /* Search the arena's chunks for the lowest best fit. */ - run = arena_run_alloc_large_helper(arena, size, zero); - if (run != NULL) - return (run); - - /* - * No usable runs. 
Create a new chunk from which to allocate the run. - */ - chunk = arena_chunk_alloc(arena); - if (chunk != NULL) { - run = &arena_miscelm_get(chunk, map_bias)->run; - if (arena_run_split_large(arena, run, size, zero)) - run = NULL; - return (run); - } - - /* - * arena_chunk_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped arena->lock in - * arena_chunk_alloc(), so search one more time. - */ - return (arena_run_alloc_large_helper(arena, size, zero)); -} - -static arena_run_t * -arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind) -{ - arena_run_t *run = arena_run_first_best_fit(arena, size); - if (run != NULL) { - if (arena_run_split_small(arena, run, size, binind)) - run = NULL; - } - return (run); -} - -static arena_run_t * -arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind) -{ - arena_chunk_t *chunk; - arena_run_t *run; - - assert(size <= arena_maxrun); - assert(size == PAGE_CEILING(size)); - assert(binind != BININD_INVALID); - - /* Search the arena's chunks for the lowest best fit. */ - run = arena_run_alloc_small_helper(arena, size, binind); - if (run != NULL) - return (run); - - /* - * No usable runs. Create a new chunk from which to allocate the run. - */ - chunk = arena_chunk_alloc(arena); - if (chunk != NULL) { - run = &arena_miscelm_get(chunk, map_bias)->run; - if (arena_run_split_small(arena, run, size, binind)) - run = NULL; - return (run); - } - - /* - * arena_chunk_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped arena->lock in - * arena_chunk_alloc(), so search one more time. - */ - return (arena_run_alloc_small_helper(arena, size, binind)); -} - -static bool -arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult) -{ - - return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t) - << 3)); -} - -ssize_t -arena_lg_dirty_mult_get(arena_t *arena) -{ - ssize_t lg_dirty_mult; - - malloc_mutex_lock(&arena->lock); - lg_dirty_mult = arena->lg_dirty_mult; - malloc_mutex_unlock(&arena->lock); - - return (lg_dirty_mult); -} - -bool -arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult) -{ - - if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) - return (true); - - malloc_mutex_lock(&arena->lock); - arena->lg_dirty_mult = lg_dirty_mult; - arena_maybe_purge(arena); - malloc_mutex_unlock(&arena->lock); - - return (false); -} - -void -arena_maybe_purge(arena_t *arena) -{ - - /* Don't purge if the option is disabled. */ - if (arena->lg_dirty_mult < 0) - return; - /* Don't recursively purge. */ - if (arena->purging) - return; - /* - * Iterate, since preventing recursive purging could otherwise leave too - * many dirty pages. - */ - while (true) { - size_t threshold = (arena->nactive >> arena->lg_dirty_mult); - if (threshold < chunk_npages) - threshold = chunk_npages; - /* - * Don't purge unless the number of purgeable pages exceeds the - * threshold. 
- */ - if (arena->ndirty <= threshold) - return; - arena_purge(arena, false); - } -} - -static size_t -arena_dirty_count(arena_t *arena) -{ - size_t ndirty = 0; - arena_runs_dirty_link_t *rdelm; - extent_node_t *chunkselm; - - for (rdelm = qr_next(&arena->runs_dirty, rd_link), - chunkselm = qr_next(&arena->chunks_cache, cc_link); - rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) { - size_t npages; - - if (rdelm == &chunkselm->rd) { - npages = extent_node_size_get(chunkselm) >> LG_PAGE; - chunkselm = qr_next(chunkselm, cc_link); - } else { - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - rdelm); - arena_chunk_map_misc_t *miscelm = - arena_rd_to_miscelm(rdelm); - size_t pageind = arena_miscelm_to_pageind(miscelm); - assert(arena_mapbits_allocated_get(chunk, pageind) == - 0); - assert(arena_mapbits_large_get(chunk, pageind) == 0); - assert(arena_mapbits_dirty_get(chunk, pageind) != 0); - npages = arena_mapbits_unallocated_size_get(chunk, - pageind) >> LG_PAGE; - } - ndirty += npages; - } - - return (ndirty); -} - -static size_t -arena_compute_npurge(arena_t *arena, bool all) -{ - size_t npurge; - - /* - * Compute the minimum number of pages that this thread should try to - * purge. - */ - if (!all) { - size_t threshold = (arena->nactive >> arena->lg_dirty_mult); - threshold = threshold < chunk_npages ? chunk_npages : threshold; - - npurge = arena->ndirty - threshold; - } else - npurge = arena->ndirty; - - return (npurge); -} - -static size_t -arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all, - size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel, - extent_node_t *purge_chunks_sentinel) -{ - arena_runs_dirty_link_t *rdelm, *rdelm_next; - extent_node_t *chunkselm; - size_t nstashed = 0; - - /* Stash at least npurge pages. */ - for (rdelm = qr_next(&arena->runs_dirty, rd_link), - chunkselm = qr_next(&arena->chunks_cache, cc_link); - rdelm != &arena->runs_dirty; rdelm = rdelm_next) { - size_t npages; - rdelm_next = qr_next(rdelm, rd_link); - - if (rdelm == &chunkselm->rd) { - extent_node_t *chunkselm_next; - bool zero; - UNUSED void *chunk; - - chunkselm_next = qr_next(chunkselm, cc_link); - /* - * Allocate. chunkselm remains valid due to the - * dalloc_node=false argument to chunk_alloc_cache(). - */ - zero = false; - chunk = chunk_alloc_cache(arena, chunk_hooks, - extent_node_addr_get(chunkselm), - extent_node_size_get(chunkselm), chunksize, &zero, - false); - assert(chunk == extent_node_addr_get(chunkselm)); - assert(zero == extent_node_zeroed_get(chunkselm)); - extent_node_dirty_insert(chunkselm, purge_runs_sentinel, - purge_chunks_sentinel); - npages = extent_node_size_get(chunkselm) >> LG_PAGE; - chunkselm = chunkselm_next; - } else { - arena_chunk_t *chunk = - (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); - arena_chunk_map_misc_t *miscelm = - arena_rd_to_miscelm(rdelm); - size_t pageind = arena_miscelm_to_pageind(miscelm); - arena_run_t *run = &miscelm->run; - size_t run_size = - arena_mapbits_unallocated_size_get(chunk, pageind); - - npages = run_size >> LG_PAGE; - - assert(pageind + npages <= chunk_npages); - assert(arena_mapbits_dirty_get(chunk, pageind) == - arena_mapbits_dirty_get(chunk, pageind+npages-1)); - - /* - * If purging the spare chunk's run, make it available - * prior to allocation. - */ - if (chunk == arena->spare) - arena_chunk_alloc(arena); - - /* Temporarily allocate the free dirty run. */ - arena_run_split_large(arena, run, run_size, false); - /* Stash. */ - if (false) - qr_new(rdelm, rd_link); /* Redundant. 
*/ - else { - assert(qr_next(rdelm, rd_link) == rdelm); - assert(qr_prev(rdelm, rd_link) == rdelm); - } - qr_meld(purge_runs_sentinel, rdelm, rd_link); - } - - nstashed += npages; - if (!all && nstashed >= npurge) - break; - } - - return (nstashed); -} - -static size_t -arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks, - arena_runs_dirty_link_t *purge_runs_sentinel, - extent_node_t *purge_chunks_sentinel) -{ - size_t npurged, nmadvise; - arena_runs_dirty_link_t *rdelm; - extent_node_t *chunkselm; - - if (config_stats) - nmadvise = 0; - npurged = 0; - - malloc_mutex_unlock(&arena->lock); - for (rdelm = qr_next(purge_runs_sentinel, rd_link), - chunkselm = qr_next(purge_chunks_sentinel, cc_link); - rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) { - size_t npages; - - if (rdelm == &chunkselm->rd) { - /* - * Don't actually purge the chunk here because 1) - * chunkselm is embedded in the chunk and must remain - * valid, and 2) we deallocate the chunk in - * arena_unstash_purged(), where it is destroyed, - * decommitted, or purged, depending on chunk - * deallocation policy. - */ - size_t size = extent_node_size_get(chunkselm); - npages = size >> LG_PAGE; - chunkselm = qr_next(chunkselm, cc_link); - } else { - size_t pageind, run_size, flag_unzeroed, flags, i; - bool decommitted; - arena_chunk_t *chunk = - (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); - arena_chunk_map_misc_t *miscelm = - arena_rd_to_miscelm(rdelm); - pageind = arena_miscelm_to_pageind(miscelm); - run_size = arena_mapbits_large_size_get(chunk, pageind); - npages = run_size >> LG_PAGE; - - assert(pageind + npages <= chunk_npages); - assert(!arena_mapbits_decommitted_get(chunk, pageind)); - assert(!arena_mapbits_decommitted_get(chunk, - pageind+npages-1)); - decommitted = !chunk_hooks->decommit(chunk, chunksize, - pageind << LG_PAGE, npages << LG_PAGE, arena->ind); - if (decommitted) { - flag_unzeroed = 0; - flags = CHUNK_MAP_DECOMMITTED; - } else { - flag_unzeroed = chunk_purge_wrapper(arena, - chunk_hooks, chunk, chunksize, pageind << - LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0; - flags = flag_unzeroed; - } - arena_mapbits_large_set(chunk, pageind+npages-1, 0, - flags); - arena_mapbits_large_set(chunk, pageind, run_size, - flags); - - /* - * Set the unzeroed flag for internal pages, now that - * chunk_purge_wrapper() has returned whether the pages - * were zeroed as a side effect of purging. This chunk - * map modification is safe even though the arena mutex - * isn't currently owned by this thread, because the run - * is marked as allocated, thus protecting it from being - * modified by any other thread. As long as these - * writes don't perturb the first and last elements' - * CHUNK_MAP_ALLOCATED bits, behavior is well defined. - */ - for (i = 1; i < npages-1; i++) { - arena_mapbits_internal_set(chunk, pageind+i, - flag_unzeroed); - } - } - - npurged += npages; - if (config_stats) - nmadvise++; - } - malloc_mutex_lock(&arena->lock); - - if (config_stats) { - arena->stats.nmadvise += nmadvise; - arena->stats.purged += npurged; - } - - return (npurged); -} - -static void -arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks, - arena_runs_dirty_link_t *purge_runs_sentinel, - extent_node_t *purge_chunks_sentinel) -{ - arena_runs_dirty_link_t *rdelm, *rdelm_next; - extent_node_t *chunkselm; - - /* Deallocate chunks/runs. 
*/ - for (rdelm = qr_next(purge_runs_sentinel, rd_link), - chunkselm = qr_next(purge_chunks_sentinel, cc_link); - rdelm != purge_runs_sentinel; rdelm = rdelm_next) { - rdelm_next = qr_next(rdelm, rd_link); - if (rdelm == &chunkselm->rd) { - extent_node_t *chunkselm_next = qr_next(chunkselm, - cc_link); - void *addr = extent_node_addr_get(chunkselm); - size_t size = extent_node_size_get(chunkselm); - bool zeroed = extent_node_zeroed_get(chunkselm); - bool committed = extent_node_committed_get(chunkselm); - extent_node_dirty_remove(chunkselm); - arena_node_dalloc(arena, chunkselm); - chunkselm = chunkselm_next; - chunk_dalloc_arena(arena, chunk_hooks, addr, size, - zeroed, committed); - } else { - arena_chunk_t *chunk = - (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm); - arena_chunk_map_misc_t *miscelm = - arena_rd_to_miscelm(rdelm); - size_t pageind = arena_miscelm_to_pageind(miscelm); - bool decommitted = (arena_mapbits_decommitted_get(chunk, - pageind) != 0); - arena_run_t *run = &miscelm->run; - qr_remove(rdelm, rd_link); - arena_run_dalloc(arena, run, false, true, decommitted); - } - } -} - -static void -arena_purge(arena_t *arena, bool all) -{ - chunk_hooks_t chunk_hooks = chunk_hooks_get(arena); - size_t npurge, npurgeable, npurged; - arena_runs_dirty_link_t purge_runs_sentinel; - extent_node_t purge_chunks_sentinel; - - arena->purging = true; - - /* - * Calls to arena_dirty_count() are disabled even for debug builds - * because overhead grows nonlinearly as memory usage increases. - */ - if (false && config_debug) { - size_t ndirty = arena_dirty_count(arena); - assert(ndirty == arena->ndirty); - } - assert((arena->nactive >> arena->lg_dirty_mult) < arena->ndirty || all); - - if (config_stats) - arena->stats.npurge++; - - npurge = arena_compute_npurge(arena, all); - qr_new(&purge_runs_sentinel, rd_link); - extent_node_dirty_linkage_init(&purge_chunks_sentinel); - - npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge, - &purge_runs_sentinel, &purge_chunks_sentinel); - assert(npurgeable >= npurge); - npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel, - &purge_chunks_sentinel); - assert(npurged == npurgeable); - arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel, - &purge_chunks_sentinel); - - arena->purging = false; -} - -void -arena_purge_all(arena_t *arena) -{ - - malloc_mutex_lock(&arena->lock); - arena_purge(arena, true); - malloc_mutex_unlock(&arena->lock); -} - -static void -arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, - size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty, - size_t flag_decommitted) -{ - size_t size = *p_size; - size_t run_ind = *p_run_ind; - size_t run_pages = *p_run_pages; - - /* Try to coalesce forward. */ - if (run_ind + run_pages < chunk_npages && - arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && - arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty && - arena_mapbits_decommitted_get(chunk, run_ind+run_pages) == - flag_decommitted) { - size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, - run_ind+run_pages); - size_t nrun_pages = nrun_size >> LG_PAGE; - - /* - * Remove successor from runs_avail; the coalesced run is - * inserted later. 
- */ - assert(arena_mapbits_unallocated_size_get(chunk, - run_ind+run_pages+nrun_pages-1) == nrun_size); - assert(arena_mapbits_dirty_get(chunk, - run_ind+run_pages+nrun_pages-1) == flag_dirty); - assert(arena_mapbits_decommitted_get(chunk, - run_ind+run_pages+nrun_pages-1) == flag_decommitted); - arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages); - - /* - * If the successor is dirty, remove it from the set of dirty - * pages. - */ - if (flag_dirty != 0) { - arena_run_dirty_remove(arena, chunk, run_ind+run_pages, - nrun_pages); - } - - size += nrun_size; - run_pages += nrun_pages; - - arena_mapbits_unallocated_size_set(chunk, run_ind, size); - arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, - size); - } - - /* Try to coalesce backward. */ - if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, - run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == - flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) == - flag_decommitted) { - size_t prun_size = arena_mapbits_unallocated_size_get(chunk, - run_ind-1); - size_t prun_pages = prun_size >> LG_PAGE; - - run_ind -= prun_pages; - - /* - * Remove predecessor from runs_avail; the coalesced run is - * inserted later. - */ - assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == - prun_size); - assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); - assert(arena_mapbits_decommitted_get(chunk, run_ind) == - flag_decommitted); - arena_avail_remove(arena, chunk, run_ind, prun_pages); - - /* - * If the predecessor is dirty, remove it from the set of dirty - * pages. - */ - if (flag_dirty != 0) { - arena_run_dirty_remove(arena, chunk, run_ind, - prun_pages); - } - - size += prun_size; - run_pages += prun_pages; - - arena_mapbits_unallocated_size_set(chunk, run_ind, size); - arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, - size); - } - - *p_size = size; - *p_run_ind = run_ind; - *p_run_pages = run_pages; -} - -static size_t -arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t run_ind) -{ - size_t size; - - assert(run_ind >= map_bias); - assert(run_ind < chunk_npages); - - if (arena_mapbits_large_get(chunk, run_ind) != 0) { - size = arena_mapbits_large_size_get(chunk, run_ind); - assert(size == PAGE || arena_mapbits_large_size_get(chunk, - run_ind+(size>>LG_PAGE)-1) == 0); - } else { - arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; - size = bin_info->run_size; - } - - return (size); -} - -static bool -arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - size_t run_ind = arena_miscelm_to_pageind(miscelm); - size_t offset = run_ind << LG_PAGE; - size_t length = arena_run_size_get(arena, chunk, run, run_ind); - - return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length, - arena->ind)); -} - -static void -arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, - bool decommitted) -{ - arena_chunk_t *chunk; - arena_chunk_map_misc_t *miscelm; - size_t size, run_ind, run_pages, flag_dirty, flag_decommitted; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - miscelm = arena_run_to_miscelm(run); - run_ind = arena_miscelm_to_pageind(miscelm); - assert(run_ind >= map_bias); - assert(run_ind < chunk_npages); - size = arena_run_size_get(arena, chunk, run, run_ind); - run_pages = (size >> LG_PAGE); - arena_cactive_update(arena, 0, run_pages); - arena->nactive -= run_pages; - - /* - * The run is dirty if the caller 
claims to have dirtied it, as well as - * if it was already dirty before being allocated and the caller - * doesn't claim to have cleaned it. - */ - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind) - != 0) - dirty = true; - flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; - flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0; - - /* Mark pages as unallocated in the chunk map. */ - if (dirty || decommitted) { - size_t flags = flag_dirty | flag_decommitted; - arena_mapbits_unallocated_set(chunk, run_ind, size, flags); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - flags); - } else { - arena_mapbits_unallocated_set(chunk, run_ind, size, - arena_mapbits_unzeroed_get(chunk, run_ind)); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); - } - - arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, - flag_dirty, flag_decommitted); - - /* Insert into runs_avail, now that coalescing is complete. */ - assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == - arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - assert(arena_mapbits_decommitted_get(chunk, run_ind) == - arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1)); - arena_avail_insert(arena, chunk, run_ind, run_pages); - - if (dirty) - arena_run_dirty_insert(arena, chunk, run_ind, run_pages); - - /* Deallocate chunk if it is now completely unused. */ - if (size == arena_maxrun) { - assert(run_ind == map_bias); - assert(run_pages == (arena_maxrun >> LG_PAGE)); - arena_chunk_dalloc(arena, chunk); - } - - /* - * It is okay to do dirty page processing here even if the chunk was - * deallocated above, since in that case it is the spare. Waiting - * until after possible chunk deallocation to do dirty processing - * allows for an old spare to be fully deallocated, thus decreasing the - * chances of spuriously crossing the dirty page purging threshold. - */ - if (dirty) - arena_maybe_purge(arena); -} - -static void -arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run) -{ - bool committed = arena_run_decommit(arena, chunk, run); - - arena_run_dalloc(arena, run, committed, false, !committed); -} - -static void -arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - size_t pageind = arena_miscelm_to_pageind(miscelm); - size_t head_npages = (oldsize - newsize) >> LG_PAGE; - size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); - size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); - size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? - CHUNK_MAP_UNZEROED : 0; - - assert(oldsize > newsize); - - /* - * Update the chunk map so that arena_run_dalloc() can treat the - * leading run as separately allocated. Set the last element of each - * run first, in case of single-page runs. 
- */ - assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - pageind+head_npages-1))); - arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); - - if (config_debug) { - UNUSED size_t tail_npages = newsize >> LG_PAGE; - assert(arena_mapbits_large_size_get(chunk, - pageind+head_npages+tail_npages-1) == 0); - assert(arena_mapbits_dirty_get(chunk, - pageind+head_npages+tail_npages-1) == flag_dirty); - } - arena_mapbits_large_set(chunk, pageind+head_npages, newsize, - flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - pageind+head_npages))); - - arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0)); -} - -static void -arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize, bool dirty) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - size_t pageind = arena_miscelm_to_pageind(miscelm); - size_t head_npages = newsize >> LG_PAGE; - size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); - size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); - size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? - CHUNK_MAP_UNZEROED : 0; - arena_chunk_map_misc_t *tail_miscelm; - arena_run_t *tail_run; - - assert(oldsize > newsize); - - /* - * Update the chunk map so that arena_run_dalloc() can treat the - * trailing run as separately allocated. Set the last element of each - * run first, in case of single-page runs. - */ - assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - pageind+head_npages-1))); - arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); - - if (config_debug) { - UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; - assert(arena_mapbits_large_size_get(chunk, - pageind+head_npages+tail_npages-1) == 0); - assert(arena_mapbits_dirty_get(chunk, - pageind+head_npages+tail_npages-1) == flag_dirty); - } - arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, - flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - pageind+head_npages))); - - tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages); - tail_run = &tail_miscelm->run; - arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted != - 0)); -} - -static arena_run_t * -arena_bin_runs_first(arena_bin_t *bin) -{ - arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs); - if (miscelm != NULL) - return (&miscelm->run); - - return (NULL); -} - -static void -arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - - assert(arena_run_tree_search(&bin->runs, miscelm) == NULL); - - arena_run_tree_insert(&bin->runs, miscelm); -} - -static void -arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) -{ - arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); - - assert(arena_run_tree_search(&bin->runs, miscelm) != NULL); - - arena_run_tree_remove(&bin->runs, miscelm); -} - -static arena_run_t * -arena_bin_nonfull_run_tryget(arena_bin_t *bin) -{ - arena_run_t *run = arena_bin_runs_first(bin); - if (run != NULL) { - 
arena_bin_runs_remove(bin, run); - if (config_stats) - bin->stats.reruns++; - } - return (run); -} - -static arena_run_t * -arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) -{ - arena_run_t *run; - szind_t binind; - arena_bin_info_t *bin_info; - - /* Look for a usable run. */ - run = arena_bin_nonfull_run_tryget(bin); - if (run != NULL) - return (run); - /* No existing runs have any space available. */ - - binind = arena_bin_index(arena, bin); - bin_info = &arena_bin_info[binind]; - - /* Allocate a new run. */ - malloc_mutex_unlock(&bin->lock); - /******************************/ - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc_small(arena, bin_info->run_size, binind); - if (run != NULL) { - /* Initialize run internals. */ - run->binind = binind; - run->nfree = bin_info->nregs; - bitmap_init(run->bitmap, &bin_info->bitmap_info); - } - malloc_mutex_unlock(&arena->lock); - /********************************/ - malloc_mutex_lock(&bin->lock); - if (run != NULL) { - if (config_stats) { - bin->stats.nruns++; - bin->stats.curruns++; - } - return (run); - } - - /* - * arena_run_alloc_small() failed, but another thread may have made - * sufficient memory available while this one dropped bin->lock above, - * so search one more time. - */ - run = arena_bin_nonfull_run_tryget(bin); - if (run != NULL) - return (run); - - return (NULL); -} - -/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ -static void * -arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) -{ - szind_t binind; - arena_bin_info_t *bin_info; - arena_run_t *run; - - binind = arena_bin_index(arena, bin); - bin_info = &arena_bin_info[binind]; - bin->runcur = NULL; - run = arena_bin_nonfull_run_get(arena, bin); - if (bin->runcur != NULL && bin->runcur->nfree > 0) { - /* - * Another thread updated runcur while this one ran without the - * bin lock in arena_bin_nonfull_run_get(). - */ - void *ret; - assert(bin->runcur->nfree > 0); - ret = arena_run_reg_alloc(bin->runcur, bin_info); - if (run != NULL) { - arena_chunk_t *chunk; - - /* - * arena_run_alloc_small() may have allocated run, or - * it may have pulled run from the bin's run tree. - * Therefore it is unsafe to make any assumptions about - * how run has previously been used, and - * arena_bin_lower_run() must be called, as if a region - * were just deallocated from the run. - */ - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - if (run->nfree == bin_info->nregs) - arena_dalloc_bin_run(arena, chunk, run, bin); - else - arena_bin_lower_run(arena, chunk, run, bin); - } - return (ret); - } - - if (run == NULL) - return (NULL); - - bin->runcur = run; - - assert(bin->runcur->nfree > 0); - - return (arena_run_reg_alloc(bin->runcur, bin_info)); -} - -void -arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind, - uint64_t prof_accumbytes) -{ - unsigned i, nfill; - arena_bin_t *bin; - - assert(tbin->ncached == 0); - - if (config_prof && arena_prof_accum(arena, prof_accumbytes)) - prof_idump(); - bin = &arena->bins[binind]; - malloc_mutex_lock(&bin->lock); - for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> - tbin->lg_fill_div); i < nfill; i++) { - arena_run_t *run; - void *ptr; - if ((run = bin->runcur) != NULL && run->nfree > 0) - ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); - else - ptr = arena_bin_malloc_hard(arena, bin); - if (ptr == NULL) { - /* - * OOM. 
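This fill path writes tbin->avail back-to-front so the lowest region addresses are consumed first, and on a failed allocation it slides the partial fill down to the array base. A compilable sketch of the same pattern; slab_alloc_one() is a stand-in stub, not a jemalloc API:

#include <stddef.h>
#include <string.h>

/* Stub allocation source for the sketch: yields 5 fake pointers, then OOM. */
static void *slab_alloc_one(void)
{
	static char pool[5];
	static size_t used = 0;
	return (used < sizeof(pool)) ? (void *)&pool[used++] : NULL;
}

/* Fill cache[0..nfill) back-to-front; on OOM, move the i successes down
 * to the base of the array, exactly as the memmove() below does. */
static size_t fill_cache(void **cache, size_t nfill)
{
	size_t i;

	for (i = 0; i < nfill; i++) {
		void *ptr = slab_alloc_one();
		if (ptr == NULL) {
			if (i > 0)
				memmove(cache, &cache[nfill - i],
				    i * sizeof(void *));
			break;
		}
		cache[nfill - 1 - i] = ptr;
	}
	return (i);	/* number cached, i.e. tbin->ncached */
}

int main(void)
{
	void *cache[8];
	return (fill_cache(cache, 8) == 5) ? 0 : 1;
}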
tbin->avail isn't yet filled down to its first - * element, so the successful allocations (if any) must - * be moved to the base of tbin->avail before bailing - * out. - */ - if (i > 0) { - memmove(tbin->avail, &tbin->avail[nfill - i], - i * sizeof(void *)); - } - break; - } - if (config_fill && unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ptr, &arena_bin_info[binind], - true); - } - /* Insert such that low regions get used first. */ - tbin->avail[nfill - 1 - i] = ptr; - } - if (config_stats) { - bin->stats.nmalloc += i; - bin->stats.nrequests += tbin->tstats.nrequests; - bin->stats.curregs += i; - bin->stats.nfills++; - tbin->tstats.nrequests = 0; - } - malloc_mutex_unlock(&bin->lock); - tbin->ncached = i; -} - -void -arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) -{ - - if (zero) { - size_t redzone_size = bin_info->redzone_size; - memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, - redzone_size); - memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, - redzone_size); - } else { - memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, - bin_info->reg_interval); - } -} - -#ifdef JEMALLOC_JET -#undef arena_redzone_corruption -#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) -#endif -static void -arena_redzone_corruption(void *ptr, size_t usize, bool after, - size_t offset, uint8_t byte) -{ - - malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " - "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", - after ? "after" : "before", ptr, usize, byte); -} -#ifdef JEMALLOC_JET -#undef arena_redzone_corruption -#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) -arena_redzone_corruption_t *arena_redzone_corruption = - JEMALLOC_N(arena_redzone_corruption_impl); -#endif - -static void -arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) -{ - bool error = false; - - if (opt_junk_alloc) { - size_t size = bin_info->reg_size; - size_t redzone_size = bin_info->redzone_size; - size_t i; - - for (i = 1; i <= redzone_size; i++) { - uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); - if (*byte != 0xa5) { - error = true; - arena_redzone_corruption(ptr, size, false, i, - *byte); - if (reset) - *byte = 0xa5; - } - } - for (i = 0; i < redzone_size; i++) { - uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); - if (*byte != 0xa5) { - error = true; - arena_redzone_corruption(ptr, size, true, i, - *byte); - if (reset) - *byte = 0xa5; - } - } - } - - if (opt_abort && error) - abort(); -} - -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_small -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) -#endif -void -arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) -{ - size_t redzone_size = bin_info->redzone_size; - - arena_redzones_validate(ptr, bin_info, false); - memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, - bin_info->reg_interval); -} -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_small -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) -arena_dalloc_junk_small_t *arena_dalloc_junk_small = - JEMALLOC_N(arena_dalloc_junk_small_impl); -#endif - -void -arena_quarantine_junk_small(void *ptr, size_t usize) -{ - szind_t binind; - arena_bin_info_t *bin_info; - cassert(config_fill); - assert(opt_junk_free); - assert(opt_quarantine); - assert(usize <= SMALL_MAXCLASS); - - binind = size2index(usize); - bin_info = &arena_bin_info[binind]; - arena_redzones_validate(ptr, bin_info, true); -} - -void * -arena_malloc_small(arena_t *arena, size_t 
size, bool zero) -{ - void *ret; - arena_bin_t *bin; - arena_run_t *run; - szind_t binind; - - binind = size2index(size); - assert(binind < NBINS); - bin = &arena->bins[binind]; - size = index2size(binind); - - malloc_mutex_lock(&bin->lock); - if ((run = bin->runcur) != NULL && run->nfree > 0) - ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); - else - ret = arena_bin_malloc_hard(arena, bin); - - if (ret == NULL) { - malloc_mutex_unlock(&bin->lock); - return (NULL); - } - - if (config_stats) { - bin->stats.nmalloc++; - bin->stats.nrequests++; - bin->stats.curregs++; - } - malloc_mutex_unlock(&bin->lock); - if (config_prof && !isthreaded && arena_prof_accum(arena, size)) - prof_idump(); - - if (!zero) { - if (config_fill) { - if (unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ret, - &arena_bin_info[binind], false); - } else if (unlikely(opt_zero)) - memset(ret, 0, size); - } - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - } else { - if (config_fill && unlikely(opt_junk_alloc)) { - arena_alloc_junk_small(ret, &arena_bin_info[binind], - true); - } - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); - } - - return (ret); -} - -void * -arena_malloc_large(arena_t *arena, size_t size, bool zero) -{ - void *ret; - size_t usize; - uintptr_t random_offset; - arena_run_t *run; - arena_chunk_map_misc_t *miscelm; - UNUSED bool idump; - - /* Large allocation. */ - usize = s2u(size); - malloc_mutex_lock(&arena->lock); - if (config_cache_oblivious) { - uint64_t r; - - /* - * Compute a uniformly distributed offset within the first page - * that is a multiple of the cacheline size, e.g. [0 .. 64) * 64 - * for 4 KiB pages and 64-byte cachelines. - */ - prng64(r, LG_PAGE - LG_CACHELINE, arena->offset_state, - UINT64_C(6364136223846793005), - UINT64_C(1442695040888963407)); - random_offset = ((uintptr_t)r) << LG_CACHELINE; - } else - random_offset = 0; - run = arena_run_alloc_large(arena, usize + large_pad, zero); - if (run == NULL) { - malloc_mutex_unlock(&arena->lock); - return (NULL); - } - miscelm = arena_run_to_miscelm(run); - ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + - random_offset); - if (config_stats) { - szind_t index = size2index(usize) - NBINS; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += usize; - arena->stats.lstats[index].nmalloc++; - arena->stats.lstats[index].nrequests++; - arena->stats.lstats[index].curruns++; - } - if (config_prof) - idump = arena_prof_accum_locked(arena, usize); - malloc_mutex_unlock(&arena->lock); - if (config_prof && idump) - prof_idump(); - - if (!zero) { - if (config_fill) { - if (unlikely(opt_junk_alloc)) - memset(ret, 0xa5, usize); - else if (unlikely(opt_zero)) - memset(ret, 0, usize); - } - } - - return (ret); -} - -/* Only handles large allocations that require more than page alignment. 
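The prng64() step above is a plain 64-bit LCG (Knuth's MMIX multiplier and increment), keeping only the high bits, since an LCG's low bits are the least random. A standalone version, assuming 4 KiB pages and 64-byte cachelines:

#include <stdint.h>
#include <stdio.h>

#define LG_PAGE		12	/* 4 KiB pages (assumption for the sketch) */
#define LG_CACHELINE	6	/* 64-byte cachelines */

/* One LCG step; returns a random cacheline-aligned byte offset within a
 * page, i.e. one of the 64 cachelines of the first page. */
static uint64_t lcg_offset(uint64_t *state)
{
	*state = *state * UINT64_C(6364136223846793005) +
	    UINT64_C(1442695040888963407);
	/* Keep LG_PAGE - LG_CACHELINE high bits: a value in [0 .. 64). */
	uint64_t r = *state >> (64 - (LG_PAGE - LG_CACHELINE));
	return (r << LG_CACHELINE);	/* byte offset within the page */
}

int main(void)
{
	uint64_t state = 42;	/* arbitrary seed */
	for (int i = 0; i < 4; i++)
		printf("offset %llu\n", (unsigned long long)lcg_offset(&state));
	return (0);
}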
*/ -static void * -arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, - bool zero) -{ - void *ret; - size_t alloc_size, leadsize, trailsize; - arena_run_t *run; - arena_chunk_t *chunk; - arena_chunk_map_misc_t *miscelm; - void *rpages; - - assert(usize == PAGE_CEILING(usize)); - - arena = arena_choose(tsd, arena); - if (unlikely(arena == NULL)) - return (NULL); - - alignment = PAGE_CEILING(alignment); - alloc_size = usize + large_pad + alignment - PAGE; - - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc_large(arena, alloc_size, false); - if (run == NULL) { - malloc_mutex_unlock(&arena->lock); - return (NULL); - } - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - miscelm = arena_run_to_miscelm(run); - rpages = arena_miscelm_to_rpages(miscelm); - - leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - - (uintptr_t)rpages; - assert(alloc_size >= leadsize + usize); - trailsize = alloc_size - leadsize - usize - large_pad; - if (leadsize != 0) { - arena_chunk_map_misc_t *head_miscelm = miscelm; - arena_run_t *head_run = run; - - miscelm = arena_miscelm_get(chunk, - arena_miscelm_to_pageind(head_miscelm) + (leadsize >> - LG_PAGE)); - run = &miscelm->run; - - arena_run_trim_head(arena, chunk, head_run, alloc_size, - alloc_size - leadsize); - } - if (trailsize != 0) { - arena_run_trim_tail(arena, chunk, run, usize + large_pad + - trailsize, usize + large_pad, false); - } - if (arena_run_init_large(arena, run, usize + large_pad, zero)) { - size_t run_ind = - arena_miscelm_to_pageind(arena_run_to_miscelm(run)); - bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0); - bool decommitted = (arena_mapbits_decommitted_get(chunk, - run_ind) != 0); - - assert(decommitted); /* Cause of OOM. */ - arena_run_dalloc(arena, run, dirty, false, decommitted); - malloc_mutex_unlock(&arena->lock); - return (NULL); - } - ret = arena_miscelm_to_rpages(miscelm); - - if (config_stats) { - szind_t index = size2index(usize) - NBINS; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += usize; - arena->stats.lstats[index].nmalloc++; - arena->stats.lstats[index].nrequests++; - arena->stats.lstats[index].curruns++; - } - malloc_mutex_unlock(&arena->lock); - - if (config_fill && !zero) { - if (unlikely(opt_junk_alloc)) - memset(ret, 0xa5, usize); - else if (unlikely(opt_zero)) - memset(ret, 0, usize); - } - return (ret); -} - -void * -arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, - bool zero, tcache_t *tcache) -{ - void *ret; - - if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE - && (usize & PAGE_MASK) == 0))) { - /* Small; alignment doesn't require special run placement. */ - ret = arena_malloc(tsd, arena, usize, zero, tcache); - } else if (usize <= large_maxclass && alignment <= PAGE) { - /* - * Large; alignment doesn't require special run placement. - * However, the cached pointer may be at a random offset from - * the base of the run, so do some bit manipulation to retrieve - * the base. 
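arena_palloc_large() above gets alignment by over-allocating and trimming the head and tail runs back to the arena. The same address arithmetic with plain malloc, which cannot return the trimmed head, so this sketch only keeps the raw pointer around for free():

#include <stdint.h>
#include <stdlib.h>

/* Round addr up to a multiple of align (a power of two), mirroring
 * ALIGNMENT_CEILING() above. */
static uintptr_t align_ceil(uintptr_t addr, uintptr_t align)
{
	return ((addr + (align - 1)) & ~(align - 1));
}

/* Over-allocate by alignment - 1 bytes so an aligned block of usize bytes
 * must fit somewhere inside, then skip the leading waste. */
static void *aligned_within(size_t usize, size_t alignment, void **raw_out)
{
	void *raw = malloc(usize + alignment - 1);
	if (raw == NULL)
		return (NULL);
	uintptr_t lead = align_ceil((uintptr_t)raw, alignment) -
	    (uintptr_t)raw;
	*raw_out = raw;		/* caller frees the raw pointer */
	return ((void *)((uintptr_t)raw + lead));
}

int main(void)
{
	void *raw = NULL;
	void *p = aligned_within(100, 256, &raw);
	int ok = (p != NULL) && (((uintptr_t)p & 255) == 0);
	free(raw);
	return (ok ? 0 : 1);
}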
- */ - ret = arena_malloc(tsd, arena, usize, zero, tcache); - if (config_cache_oblivious) - ret = (void *)((uintptr_t)ret & ~PAGE_MASK); - } else { - if (likely(usize <= large_maxclass)) { - ret = arena_palloc_large(tsd, arena, usize, alignment, - zero); - } else if (likely(alignment <= chunksize)) - ret = huge_malloc(tsd, arena, usize, zero, tcache); - else { - ret = huge_palloc(tsd, arena, usize, alignment, zero, - tcache); - } - } - return (ret); -} - -void -arena_prof_promoted(const void *ptr, size_t size) -{ - arena_chunk_t *chunk; - size_t pageind; - szind_t binind; - - cassert(config_prof); - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - assert(isalloc(ptr, false) == LARGE_MINCLASS); - assert(isalloc(ptr, true) == LARGE_MINCLASS); - assert(size <= SMALL_MAXCLASS); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - binind = size2index(size); - assert(binind < NBINS); - arena_mapbits_large_binind_set(chunk, pageind, binind); - - assert(isalloc(ptr, false) == LARGE_MINCLASS); - assert(isalloc(ptr, true) == size); -} - -static void -arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - - /* Dissociate run from bin. */ - if (run == bin->runcur) - bin->runcur = NULL; - else { - szind_t binind = arena_bin_index(extent_node_arena_get( - &chunk->node), bin); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - - if (bin_info->nregs != 1) { - /* - * This block's conditional is necessary because if the - * run only contains one region, then it never gets - * inserted into the non-full runs tree. - */ - arena_bin_runs_remove(bin, run); - } - } -} - -static void -arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - - assert(run != bin->runcur); - assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) == - NULL); - - malloc_mutex_unlock(&bin->lock); - /******************************/ - malloc_mutex_lock(&arena->lock); - arena_run_dalloc_decommit(arena, chunk, run); - malloc_mutex_unlock(&arena->lock); - /****************************/ - malloc_mutex_lock(&bin->lock); - if (config_stats) - bin->stats.curruns--; -} - -static void -arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - - /* - * Make sure that if bin->runcur is non-NULL, it refers to the lowest - * non-full run. It is okay to NULL runcur out rather than proactively - * keeping it pointing at the lowest non-full run. - */ - if ((uintptr_t)run < (uintptr_t)bin->runcur) { - /* Switch runcur. 
*/ - if (bin->runcur->nfree > 0) - arena_bin_runs_insert(bin, bin->runcur); - bin->runcur = run; - if (config_stats) - bin->stats.reruns++; - } else - arena_bin_runs_insert(bin, run); -} - -static void -arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_bits_t *bitselm, bool junked) -{ - size_t pageind, rpages_ind; - arena_run_t *run; - arena_bin_t *bin; - arena_bin_info_t *bin_info; - szind_t binind; - - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); - run = &arena_miscelm_get(chunk, rpages_ind)->run; - binind = run->binind; - bin = &arena->bins[binind]; - bin_info = &arena_bin_info[binind]; - - if (!junked && config_fill && unlikely(opt_junk_free)) - arena_dalloc_junk_small(ptr, bin_info); - - arena_run_reg_dalloc(run, ptr); - if (run->nfree == bin_info->nregs) { - arena_dissociate_bin_run(chunk, run, bin); - arena_dalloc_bin_run(arena, chunk, run, bin); - } else if (run->nfree == 1 && run != bin->runcur) - arena_bin_lower_run(arena, chunk, run, bin); - - if (config_stats) { - bin->stats.ndalloc++; - bin->stats.curregs--; - } -} - -void -arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_bits_t *bitselm) -{ - - arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true); -} - -void -arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind, arena_chunk_map_bits_t *bitselm) -{ - arena_run_t *run; - arena_bin_t *bin; - size_t rpages_ind; - - rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); - run = &arena_miscelm_get(chunk, rpages_ind)->run; - bin = &arena->bins[run->binind]; - malloc_mutex_lock(&bin->lock); - arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false); - malloc_mutex_unlock(&bin->lock); -} - -void -arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind) -{ - arena_chunk_map_bits_t *bitselm; - - if (config_debug) { - /* arena_ptr_small_binind_get() does extra sanity checking. 
*/ - assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, - pageind)) != BININD_INVALID); - } - bitselm = arena_bitselm_get(chunk, pageind); - arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm); -} - -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_large -#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) -#endif -void -arena_dalloc_junk_large(void *ptr, size_t usize) -{ - - if (config_fill && unlikely(opt_junk_free)) - memset(ptr, 0x5a, usize); -} -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_large -#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) -arena_dalloc_junk_large_t *arena_dalloc_junk_large = - JEMALLOC_N(arena_dalloc_junk_large_impl); -#endif - -static void -arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, - void *ptr, bool junked) -{ - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); - arena_run_t *run = &miscelm->run; - - if (config_fill || config_stats) { - size_t usize = arena_mapbits_large_size_get(chunk, pageind) - - large_pad; - - if (!junked) - arena_dalloc_junk_large(ptr, usize); - if (config_stats) { - szind_t index = size2index(usize) - NBINS; - - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= usize; - arena->stats.lstats[index].ndalloc++; - arena->stats.lstats[index].curruns--; - } - } - - arena_run_dalloc_decommit(arena, chunk, run); -} - -void -arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, - void *ptr) -{ - - arena_dalloc_large_locked_impl(arena, chunk, ptr, true); -} - -void -arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) -{ - - malloc_mutex_lock(&arena->lock); - arena_dalloc_large_locked_impl(arena, chunk, ptr, false); - malloc_mutex_unlock(&arena->lock); -} - -static void -arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t size) -{ - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); - arena_run_t *run = &miscelm->run; - - assert(size < oldsize); - - /* - * Shrink the run, and make trailing pages available for other - * allocations. - */ - malloc_mutex_lock(&arena->lock); - arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size + - large_pad, true); - if (config_stats) { - szind_t oldindex = size2index(oldsize) - NBINS; - szind_t index = size2index(size) - NBINS; - - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[oldindex].ndalloc++; - arena->stats.lstats[oldindex].curruns--; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[index].nmalloc++; - arena->stats.lstats[index].nrequests++; - arena->stats.lstats[index].curruns++; - } - malloc_mutex_unlock(&arena->lock); -} - -static bool -arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t usize_min, size_t usize_max, bool zero) -{ - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t npages = (oldsize + large_pad) >> LG_PAGE; - size_t followsize; - - assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) - - large_pad); - - /* Try to extend the run. 
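The grow path below claims pages from the free run that immediately follows the allocation, splitting it rather than moving the data. A toy version of that bookkeeping, with a hypothetical next_free counter standing in for jemalloc's chunk map:

#include <stdbool.h>
#include <stddef.h>

static size_t next_free;	/* bytes free immediately after the block */

/* Try to grow *blocksize to want bytes in place; true on success. */
static bool grow_in_place(size_t *blocksize, size_t want)
{
	if (want <= *blocksize)
		return (true);
	size_t need = want - *blocksize;
	if (need > next_free)
		return (false);	/* successor too small: caller must move */
	next_free -= need;	/* split the following free region */
	*blocksize = want;
	return (true);
}

int main(void)
{
	size_t blocksize = 4096;
	next_free = 8192;
	return (grow_in_place(&blocksize, 12288) && blocksize == 12288 &&
	    next_free == 0) ? 0 : 1;
}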
*/ - malloc_mutex_lock(&arena->lock); - if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, - pageind+npages) != 0) - goto label_fail; - followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages); - if (oldsize + followsize >= usize_min) { - /* - * The next run is available and sufficiently large. Split the - * following run, then merge the first part with the existing - * allocation. - */ - arena_run_t *run; - size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask; - - usize = usize_max; - while (oldsize + followsize < usize) - usize = index2size(size2index(usize)-1); - assert(usize >= usize_min); - assert(usize >= oldsize); - splitsize = usize - oldsize; - if (splitsize == 0) - goto label_fail; - - run = &arena_miscelm_get(chunk, pageind+npages)->run; - if (arena_run_split_large(arena, run, splitsize, zero)) - goto label_fail; - - if (config_cache_oblivious && zero) { - /* - * Zero the trailing bytes of the original allocation's - * last page, since they are in an indeterminate state. - */ - assert(PAGE_CEILING(oldsize) == oldsize); - memset((void *)((uintptr_t)ptr + oldsize), 0, - PAGE_CEILING((uintptr_t)ptr) - (uintptr_t)ptr); - } - - size = oldsize + splitsize; - npages = (size + large_pad) >> LG_PAGE; - - /* - * Mark the extended run as dirty if either portion of the run - * was dirty before allocation. This is rather pedantic, - * because there's not actually any sequence of events that - * could cause the resulting run to be passed to - * arena_run_dalloc() with the dirty argument set to false - * (which is when dirty flag consistency would really matter). - */ - flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | - arena_mapbits_dirty_get(chunk, pageind+npages-1); - flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0; - arena_mapbits_large_set(chunk, pageind, size + large_pad, - flag_dirty | (flag_unzeroed_mask & - arena_mapbits_unzeroed_get(chunk, pageind))); - arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty | - (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, - pageind+npages-1))); - - if (config_stats) { - szind_t oldindex = size2index(oldsize) - NBINS; - szind_t index = size2index(size) - NBINS; - - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[oldindex].ndalloc++; - arena->stats.lstats[oldindex].curruns--; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[index].nmalloc++; - arena->stats.lstats[index].nrequests++; - arena->stats.lstats[index].curruns++; - } - malloc_mutex_unlock(&arena->lock); - return (false); - } -label_fail: - malloc_mutex_unlock(&arena->lock); - return (true); -} - -#ifdef JEMALLOC_JET -#undef arena_ralloc_junk_large -#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) -#endif -static void -arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) -{ - - if (config_fill && unlikely(opt_junk_free)) { - memset((void *)((uintptr_t)ptr + usize), 0x5a, - old_usize - usize); - } -} -#ifdef JEMALLOC_JET -#undef arena_ralloc_junk_large -#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) -arena_ralloc_junk_large_t *arena_ralloc_junk_large = - JEMALLOC_N(arena_ralloc_junk_large_impl); -#endif - -/* - * Try to resize a large allocation, in order to avoid copying. This will - * always fail if growing an object, and the following run is already in use. 
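Callers such as arena_ralloc() attempt this in-place resize first and only allocate-copy-free when it fails, copying min(oldsize, newsize) bytes. A minimal sketch of that fallback structure, with resize_no_move() stubbed to always fail (true means failure, matching the convention above):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Stub in-place resize hook for the sketch. */
static bool resize_no_move(void *ptr, size_t oldsize, size_t size)
{
	(void)ptr; (void)oldsize; (void)size;
	return (true);	/* pretend in-place resize never succeeds */
}

/* Grow-in-place-else-move: only copy when the in-place attempt fails. */
static void *toy_ralloc(void *ptr, size_t oldsize, size_t size)
{
	if (!resize_no_move(ptr, oldsize, size))
		return (ptr);
	void *ret = malloc(size);
	if (ret == NULL)
		return (NULL);
	size_t copysize = (size < oldsize) ? size : oldsize;
	memcpy(ret, ptr, copysize);
	free(ptr);
	return (ret);
}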
- */ -static bool -arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min, - size_t usize_max, bool zero) -{ - arena_chunk_t *chunk; - arena_t *arena; - - if (oldsize == usize_max) { - /* Current size class is compatible and maximal. */ - return (false); - } - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = extent_node_arena_get(&chunk->node); - - if (oldsize < usize_max) { - bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize, - usize_min, usize_max, zero); - if (config_fill && !ret && !zero) { - if (unlikely(opt_junk_alloc)) { - memset((void *)((uintptr_t)ptr + oldsize), 0xa5, - isalloc(ptr, config_prof) - oldsize); - } else if (unlikely(opt_zero)) { - memset((void *)((uintptr_t)ptr + oldsize), 0, - isalloc(ptr, config_prof) - oldsize); - } - } - return (ret); - } - - assert(oldsize > usize_max); - /* Fill before shrinking in order to avoid a race. */ - arena_ralloc_junk_large(ptr, oldsize, usize_max); - arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max); - return (false); -} - -bool -arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, - bool zero) -{ - size_t usize_min, usize_max; - - usize_min = s2u(size); - usize_max = s2u(size + extra); - if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { - /* - * Avoid moving the allocation if the size class can be left the - * same. - */ - if (oldsize <= SMALL_MAXCLASS) { - assert(arena_bin_info[size2index(oldsize)].reg_size == - oldsize); - if ((usize_max <= SMALL_MAXCLASS && - size2index(usize_max) == size2index(oldsize)) || - (size <= oldsize && usize_max >= oldsize)) - return (false); - } else { - if (usize_max > SMALL_MAXCLASS) { - if (!arena_ralloc_large(ptr, oldsize, usize_min, - usize_max, zero)) - return (false); - } - } - - /* Reallocation would require a move. */ - return (true); - } else { - return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max, - zero)); - } -} - -static void * -arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, - size_t alignment, bool zero, tcache_t *tcache) -{ - - if (alignment == 0) - return (arena_malloc(tsd, arena, usize, zero, tcache)); - usize = sa2u(usize, alignment); - if (usize == 0) - return (NULL); - return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); -} - -void * -arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, - size_t alignment, bool zero, tcache_t *tcache) -{ - void *ret; - size_t usize; - - usize = s2u(size); - if (usize == 0) - return (NULL); - - if (likely(usize <= large_maxclass)) { - size_t copysize; - - /* Try to avoid moving the allocation. */ - if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero)) - return (ptr); - - /* - * size and oldsize are different enough that we need to move - * the object. In that case, fall back to allocating new space - * and copying. - */ - ret = arena_ralloc_move_helper(tsd, arena, usize, alignment, - zero, tcache); - if (ret == NULL) - return (NULL); - - /* - * Junk/zero-filling were already done by - * ipalloc()/arena_malloc(). - */ - - copysize = (usize < oldsize) ? 
usize : oldsize; - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); - memcpy(ret, ptr, copysize); - isqalloc(tsd, ptr, oldsize, tcache); - } else { - ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, - zero, tcache); - } - return (ret); -} - -dss_prec_t -arena_dss_prec_get(arena_t *arena) -{ - dss_prec_t ret; - - malloc_mutex_lock(&arena->lock); - ret = arena->dss_prec; - malloc_mutex_unlock(&arena->lock); - return (ret); -} - -bool -arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) -{ - - if (!have_dss) - return (dss_prec != dss_prec_disabled); - malloc_mutex_lock(&arena->lock); - arena->dss_prec = dss_prec; - malloc_mutex_unlock(&arena->lock); - return (false); -} - -ssize_t -arena_lg_dirty_mult_default_get(void) -{ - - return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); -} - -bool -arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) -{ - - if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) - return (true); - atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); - return (false); -} - -void -arena_stats_merge(arena_t *arena, const char **dss, ssize_t *lg_dirty_mult, - size_t *nactive, size_t *ndirty, arena_stats_t *astats, - malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, - malloc_huge_stats_t *hstats) -{ - unsigned i; - - malloc_mutex_lock(&arena->lock); - *dss = dss_prec_names[arena->dss_prec]; - *lg_dirty_mult = arena->lg_dirty_mult; - *nactive += arena->nactive; - *ndirty += arena->ndirty; - - astats->mapped += arena->stats.mapped; - astats->npurge += arena->stats.npurge; - astats->nmadvise += arena->stats.nmadvise; - astats->purged += arena->stats.purged; - astats->metadata_mapped += arena->stats.metadata_mapped; - astats->metadata_allocated += arena_metadata_allocated_get(arena); - astats->allocated_large += arena->stats.allocated_large; - astats->nmalloc_large += arena->stats.nmalloc_large; - astats->ndalloc_large += arena->stats.ndalloc_large; - astats->nrequests_large += arena->stats.nrequests_large; - astats->allocated_huge += arena->stats.allocated_huge; - astats->nmalloc_huge += arena->stats.nmalloc_huge; - astats->ndalloc_huge += arena->stats.ndalloc_huge; - - for (i = 0; i < nlclasses; i++) { - lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; - lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; - lstats[i].nrequests += arena->stats.lstats[i].nrequests; - lstats[i].curruns += arena->stats.lstats[i].curruns; - } - - for (i = 0; i < nhclasses; i++) { - hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; - hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; - hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; - } - malloc_mutex_unlock(&arena->lock); - - for (i = 0; i < NBINS; i++) { - arena_bin_t *bin = &arena->bins[i]; - - malloc_mutex_lock(&bin->lock); - bstats[i].nmalloc += bin->stats.nmalloc; - bstats[i].ndalloc += bin->stats.ndalloc; - bstats[i].nrequests += bin->stats.nrequests; - bstats[i].curregs += bin->stats.curregs; - if (config_tcache) { - bstats[i].nfills += bin->stats.nfills; - bstats[i].nflushes += bin->stats.nflushes; - } - bstats[i].nruns += bin->stats.nruns; - bstats[i].reruns += bin->stats.reruns; - bstats[i].curruns += bin->stats.curruns; - malloc_mutex_unlock(&bin->lock); - } -} - -arena_t * -arena_new(unsigned ind) -{ - arena_t *arena; - unsigned i; - arena_bin_t *bin; - - /* - * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly - * because there is no way to clean up if base_alloc() OOMs. 
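arena_new() below makes a single base_alloc() call and carves the stats arrays out of its tail at cacheline-rounded offsets, precisely so a mid-initialization failure never leaves a partial set of allocations to unwind. The same carving with calloc and invented toy types:

#include <stdint.h>
#include <stdlib.h>

#define CACHELINE 64
#define CL_CEIL(s) (((s) + (CACHELINE - 1)) & ~((size_t)CACHELINE - 1))

struct stats_a { uint64_t n; };
struct stats_b { uint64_t n; };
struct toy_arena {
	struct stats_a *a;	/* trailing array #1 */
	struct stats_b *b;	/* trailing array #2 */
};

/* One allocation, carved into header + two arrays at cacheline-rounded
 * offsets; no partial-failure cleanup path is ever needed. */
static struct toy_arena *toy_arena_new(size_t na, size_t nb)
{
	size_t hdr = CL_CEIL(sizeof(struct toy_arena));
	size_t asz = CL_CEIL(na * sizeof(struct stats_a));
	struct toy_arena *arena =
	    calloc(1, hdr + asz + nb * sizeof(struct stats_b));
	if (arena == NULL)
		return (NULL);
	arena->a = (struct stats_a *)((uintptr_t)arena + hdr);
	arena->b = (struct stats_b *)((uintptr_t)arena + hdr + asz);
	return (arena);
}

int main(void)
{
	struct toy_arena *a = toy_arena_new(10, 10);
	if (a == NULL)
		return (1);
	a->a[9].n = a->b[9].n = 1;	/* touch both trailing arrays */
	free(a);
	return (0);
}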
- */ - if (config_stats) { - arena = (arena_t *)base_alloc(CACHELINE_CEILING(sizeof(arena_t)) - + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + - nhclasses) * sizeof(malloc_huge_stats_t)); - } else - arena = (arena_t *)base_alloc(sizeof(arena_t)); - if (arena == NULL) - return (NULL); - - arena->ind = ind; - arena->nthreads = 0; - if (malloc_mutex_init(&arena->lock)) - return (NULL); - - if (config_stats) { - memset(&arena->stats, 0, sizeof(arena_stats_t)); - arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena - + CACHELINE_CEILING(sizeof(arena_t))); - memset(arena->stats.lstats, 0, nlclasses * - sizeof(malloc_large_stats_t)); - arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena - + CACHELINE_CEILING(sizeof(arena_t)) + - QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); - memset(arena->stats.hstats, 0, nhclasses * - sizeof(malloc_huge_stats_t)); - if (config_tcache) - ql_new(&arena->tcache_ql); - } - - if (config_prof) - arena->prof_accumbytes = 0; - - if (config_cache_oblivious) { - /* - * A nondeterministic seed based on the address of arena reduces - * the likelihood of lockstep non-uniform cache index - * utilization among identical concurrent processes, but at the - * cost of test repeatability. For debug builds, instead use a - * deterministic seed. - */ - arena->offset_state = config_debug ? ind : - (uint64_t)(uintptr_t)arena; - } - - arena->dss_prec = chunk_dss_prec_get(); - - arena->spare = NULL; - - arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); - arena->purging = false; - arena->nactive = 0; - arena->ndirty = 0; - - arena_avail_tree_new(&arena->runs_avail); - qr_new(&arena->runs_dirty, rd_link); - qr_new(&arena->chunks_cache, cc_link); - - ql_new(&arena->huge); - if (malloc_mutex_init(&arena->huge_mtx)) - return (NULL); - - extent_tree_szad_new(&arena->chunks_szad_cached); - extent_tree_ad_new(&arena->chunks_ad_cached); - extent_tree_szad_new(&arena->chunks_szad_retained); - extent_tree_ad_new(&arena->chunks_ad_retained); - if (malloc_mutex_init(&arena->chunks_mtx)) - return (NULL); - ql_new(&arena->node_cache); - if (malloc_mutex_init(&arena->node_cache_mtx)) - return (NULL); - - arena->chunk_hooks = chunk_hooks_default; - - /* Initialize bins. */ - for (i = 0; i < NBINS; i++) { - bin = &arena->bins[i]; - if (malloc_mutex_init(&bin->lock)) - return (NULL); - bin->runcur = NULL; - arena_run_tree_new(&bin->runs); - if (config_stats) - memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); - } - - return (arena); -} - -/* - * Calculate bin_info->run_size such that it meets the following constraints: - * - * *) bin_info->run_size <= arena_maxrun - * *) bin_info->nregs <= RUN_MAXREGS - * - * bin_info->nregs and bin_info->reg0_offset are also calculated here, since - * these settings are all interdependent. - */ -static void -bin_info_run_size_calc(arena_bin_info_t *bin_info) -{ - size_t pad_size; - size_t try_run_size, perfect_run_size, actual_run_size; - uint32_t try_nregs, perfect_nregs, actual_nregs; - - /* - * Determine redzone size based on minimum alignment and minimum - * redzone size. Add padding to the end of the run if it is needed to - * align the regions. The padding allows each redzone to be half the - * minimum alignment; without the padding, each redzone would have to - * be twice as large in order to maintain alignment. 
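In numbers: each redzone is half the region's minimum alignment (its lowest set address bit), with matching end-of-run padding, or the fixed minimum for small classes. A standalone version of the computation below, assuming jemalloc's default REDZONE_MINSIZE of 16:

#include <stddef.h>

#define REDZONE_MINSIZE 16	/* assumed default minimum redzone */

/* Redzone sizing rule described above. */
static void redzone_calc(size_t reg_size, size_t *redzone, size_t *pad,
    size_t *interval)
{
	size_t align_min = reg_size & (~reg_size + 1);	/* lowest set bit */
	if (align_min <= REDZONE_MINSIZE) {
		*redzone = REDZONE_MINSIZE;
		*pad = 0;
	} else {
		*redzone = align_min >> 1;
		*pad = *redzone;
	}
	*interval = reg_size + (*redzone << 1);
}

int main(void)
{
	size_t rz, pad, ival;
	redzone_calc(96, &rz, &pad, &ival);	/* align_min = 32 */
	return (rz == 16 && pad == 16 && ival == 128) ? 0 : 1;
}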
- */ - if (config_fill && unlikely(opt_redzone)) { - size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - - 1); - if (align_min <= REDZONE_MINSIZE) { - bin_info->redzone_size = REDZONE_MINSIZE; - pad_size = 0; - } else { - bin_info->redzone_size = align_min >> 1; - pad_size = bin_info->redzone_size; - } - } else { - bin_info->redzone_size = 0; - pad_size = 0; - } - bin_info->reg_interval = bin_info->reg_size + - (bin_info->redzone_size << 1); - - /* - * Compute run size under ideal conditions (no redzones, no limit on run - * size). - */ - try_run_size = PAGE; - try_nregs = try_run_size / bin_info->reg_size; - do { - perfect_run_size = try_run_size; - perfect_nregs = try_nregs; - - try_run_size += PAGE; - try_nregs = try_run_size / bin_info->reg_size; - } while (perfect_run_size != perfect_nregs * bin_info->reg_size); - assert(perfect_nregs <= RUN_MAXREGS); - - actual_run_size = perfect_run_size; - actual_nregs = (actual_run_size - pad_size) / bin_info->reg_interval; - - /* - * Redzones can require enough padding that not even a single region can - * fit within the number of pages that would normally be dedicated to a - * run for this size class. Increase the run size until at least one - * region fits. - */ - while (actual_nregs == 0) { - assert(config_fill && unlikely(opt_redzone)); - - actual_run_size += PAGE; - actual_nregs = (actual_run_size - pad_size) / - bin_info->reg_interval; - } - - /* - * Make sure that the run will fit within an arena chunk. - */ - while (actual_run_size > arena_maxrun) { - actual_run_size -= PAGE; - actual_nregs = (actual_run_size - pad_size) / - bin_info->reg_interval; - } - assert(actual_nregs > 0); - assert(actual_run_size == s2u(actual_run_size)); - - /* Copy final settings. */ - bin_info->run_size = actual_run_size; - bin_info->nregs = actual_nregs; - bin_info->reg0_offset = actual_run_size - (actual_nregs * - bin_info->reg_interval) - pad_size + bin_info->redzone_size; - - if (actual_run_size > small_maxrun) - small_maxrun = actual_run_size; - - assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs - * bin_info->reg_interval) + pad_size == bin_info->run_size); -} - -static void -bin_info_init(void) -{ - arena_bin_info_t *bin_info; - -#define BIN_INFO_INIT_bin_yes(index, size) \ - bin_info = &arena_bin_info[index]; \ - bin_info->reg_size = size; \ - bin_info_run_size_calc(bin_info); \ - bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); -#define BIN_INFO_INIT_bin_no(index, size) -#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ - BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) - SIZE_CLASSES -#undef BIN_INFO_INIT_bin_yes -#undef BIN_INFO_INIT_bin_no -#undef SC -} - -static bool -small_run_size_init(void) -{ - - assert(small_maxrun != 0); - - small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >> - LG_PAGE)); - if (small_run_tab == NULL) - return (true); - -#define TAB_INIT_bin_yes(index, size) { \ - arena_bin_info_t *bin_info = &arena_bin_info[index]; \ - small_run_tab[bin_info->run_size >> LG_PAGE] = true; \ - } -#define TAB_INIT_bin_no(index, size) -#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ - TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) - SIZE_CLASSES -#undef TAB_INIT_bin_yes -#undef TAB_INIT_bin_no -#undef SC - - return (false); -} - -bool -arena_boot(void) -{ - unsigned i; - - arena_lg_dirty_mult_default_set(opt_lg_dirty_mult); - - /* - * Compute the header size such that it is large enough to contain the - * page map. The page map is biased to omit entries for the header - * itself, so some iteration is necessary to compute the map bias. - * - * 1) Compute safe header_size and map_bias values that include enough - * space for an unbiased page map. - * 2) Refine map_bias based on (1) to omit the header pages in the page - * map. The resulting map_bias may be one too small. - * 3) Refine map_bias based on (2). The result will be >= the result - * from (2), and will always be correct. 
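The fixpoint is easy to see in isolation: per-page metadata shrinks as header pages drop out of the map, so the header size stabilizes within three rounds. A runnable toy version with invented sizes:

#include <stddef.h>
#include <stdio.h>

/* Standalone version of the 3-step map_bias iteration below. */
int main(void)
{
	const size_t page = 4096, chunk_npages = 512;	/* 2 MiB chunk */
	const size_t hdr_fixed = 4096, per_page_md = 88;	/* toy sizes */
	size_t map_bias = 0;
	for (int i = 0; i < 3; i++) {
		size_t header_size = hdr_fixed +
		    per_page_md * (chunk_npages - map_bias);
		map_bias = (header_size + page - 1) / page;
		printf("round %d: header=%zu bytes, map_bias=%zu pages\n",
		    i, header_size, map_bias);
	}
	return (0);
}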
- */ - map_bias = 0; - for (i = 0; i < 3; i++) { - size_t header_size = offsetof(arena_chunk_t, map_bits) + - ((sizeof(arena_chunk_map_bits_t) + - sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias)); - map_bias = (header_size + PAGE_MASK) >> LG_PAGE; - } - assert(map_bias > 0); - - map_misc_offset = offsetof(arena_chunk_t, map_bits) + - sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias); - - arena_maxrun = chunksize - (map_bias << LG_PAGE); - assert(arena_maxrun > 0); - large_maxclass = index2size(size2index(chunksize)-1); - if (large_maxclass > arena_maxrun) { - /* - * For small chunk sizes it's possible for there to be fewer - * non-header pages available than are necessary to serve the - * size classes just below chunksize. - */ - large_maxclass = arena_maxrun; - } - assert(large_maxclass > 0); - nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS); - nhclasses = NSIZES - nlclasses - NBINS; - - bin_info_init(); - return (small_run_size_init()); -} - -void -arena_prefork(arena_t *arena) -{ - unsigned i; - - malloc_mutex_prefork(&arena->lock); - malloc_mutex_prefork(&arena->huge_mtx); - malloc_mutex_prefork(&arena->chunks_mtx); - malloc_mutex_prefork(&arena->node_cache_mtx); - for (i = 0; i < NBINS; i++) - malloc_mutex_prefork(&arena->bins[i].lock); -} - -void -arena_postfork_parent(arena_t *arena) -{ - unsigned i; - - for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_parent(&arena->bins[i].lock); - malloc_mutex_postfork_parent(&arena->node_cache_mtx); - malloc_mutex_postfork_parent(&arena->chunks_mtx); - malloc_mutex_postfork_parent(&arena->huge_mtx); - malloc_mutex_postfork_parent(&arena->lock); -} - -void -arena_postfork_child(arena_t *arena) -{ - unsigned i; - - for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_child(&arena->bins[i].lock); - malloc_mutex_postfork_child(&arena->node_cache_mtx); - malloc_mutex_postfork_child(&arena->chunks_mtx); - malloc_mutex_postfork_child(&arena->huge_mtx); - malloc_mutex_postfork_child(&arena->lock); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/base.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/base.c deleted file mode 100644 index 7cdcfed..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/base.c +++ /dev/null @@ -1,174 +0,0 @@ -#define JEMALLOC_BASE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -static malloc_mutex_t base_mtx; -static extent_tree_t base_avail_szad; -static extent_node_t *base_nodes; -static size_t base_allocated; -static size_t base_resident; -static size_t base_mapped; - -/******************************************************************************/ - -/* base_mtx must be held. */ -static extent_node_t * -base_node_try_alloc(void) -{ - extent_node_t *node; - - if (base_nodes == NULL) - return (NULL); - node = base_nodes; - base_nodes = *(extent_node_t **)node; - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); - return (node); -} - -/* base_mtx must be held. */ -static void -base_node_dalloc(extent_node_t *node) -{ - - JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); - *(extent_node_t **)node = base_nodes; - base_nodes = node; -} - -/* base_mtx must be held. 
*/ -static extent_node_t * -base_chunk_alloc(size_t minsize) -{ - extent_node_t *node; - size_t csize, nsize; - void *addr; - - assert(minsize != 0); - node = base_node_try_alloc(); - /* Allocate enough space to also carve a node out if necessary. */ - nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0; - csize = CHUNK_CEILING(minsize + nsize); - addr = chunk_alloc_base(csize); - if (addr == NULL) { - if (node != NULL) - base_node_dalloc(node); - return (NULL); - } - base_mapped += csize; - if (node == NULL) { - node = (extent_node_t *)addr; - addr = (void *)((uintptr_t)addr + nsize); - csize -= nsize; - if (config_stats) { - base_allocated += nsize; - base_resident += PAGE_CEILING(nsize); - } - } - extent_node_init(node, NULL, addr, csize, true, true); - return (node); -} - -/* - * base_alloc() guarantees demand-zeroed memory, in order to make multi-page - * sparse data structures such as radix tree nodes efficient with respect to - * physical memory usage. - */ -void * -base_alloc(size_t size) -{ - void *ret; - size_t csize, usize; - extent_node_t *node; - extent_node_t key; - - /* - * Round size up to nearest multiple of the cacheline size, so that - * there is no chance of false cache line sharing. - */ - csize = CACHELINE_CEILING(size); - - usize = s2u(csize); - extent_node_init(&key, NULL, NULL, usize, false, false); - malloc_mutex_lock(&base_mtx); - node = extent_tree_szad_nsearch(&base_avail_szad, &key); - if (node != NULL) { - /* Use existing space. */ - extent_tree_szad_remove(&base_avail_szad, node); - } else { - /* Try to allocate more space. */ - node = base_chunk_alloc(csize); - } - if (node == NULL) { - ret = NULL; - goto label_return; - } - - ret = extent_node_addr_get(node); - if (extent_node_size_get(node) > csize) { - extent_node_addr_set(node, (void *)((uintptr_t)ret + csize)); - extent_node_size_set(node, extent_node_size_get(node) - csize); - extent_tree_szad_insert(&base_avail_szad, node); - } else - base_node_dalloc(node); - if (config_stats) { - base_allocated += csize; - /* - * Add one PAGE to base_resident for every page boundary that is - * crossed by the new allocation. 
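The resident accounting used further down charges one PAGE per page boundary a new allocation crosses, i.e. a PAGE_CEILING difference, so suballocations within an already-counted page cost nothing. Standalone, assuming 4 KiB pages:

#include <stdint.h>

#define PAGE_SZ 4096
#define PG_CEIL(a) (((a) + (PAGE_SZ - 1)) & ~((uintptr_t)PAGE_SZ - 1))

/* Bytes of new resident memory attributed to csize bytes at addr. */
static uintptr_t resident_delta(uintptr_t addr, uintptr_t csize)
{
	return (PG_CEIL(addr + csize) - PG_CEIL(addr));
}

int main(void)
{
	/* First allocation in a page counts the page; the next is free. */
	return (resident_delta(0x1000, 100) == PAGE_SZ &&
	    resident_delta(0x1064, 100) == 0) ? 0 : 1;
}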
- */ - base_resident += PAGE_CEILING((uintptr_t)ret + csize) - - PAGE_CEILING((uintptr_t)ret); - } - JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize); -label_return: - malloc_mutex_unlock(&base_mtx); - return (ret); -} - -void -base_stats_get(size_t *allocated, size_t *resident, size_t *mapped) -{ - - malloc_mutex_lock(&base_mtx); - assert(base_allocated <= base_resident); - assert(base_resident <= base_mapped); - *allocated = base_allocated; - *resident = base_resident; - *mapped = base_mapped; - malloc_mutex_unlock(&base_mtx); -} - -bool -base_boot(void) -{ - - if (malloc_mutex_init(&base_mtx)) - return (true); - extent_tree_szad_new(&base_avail_szad); - base_nodes = NULL; - - return (false); -} - -void -base_prefork(void) -{ - - malloc_mutex_prefork(&base_mtx); -} - -void -base_postfork_parent(void) -{ - - malloc_mutex_postfork_parent(&base_mtx); -} - -void -base_postfork_child(void) -{ - - malloc_mutex_postfork_child(&base_mtx); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/bitmap.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/bitmap.c deleted file mode 100644 index c733372..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/bitmap.c +++ /dev/null @@ -1,78 +0,0 @@ -#define JEMALLOC_BITMAP_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ - -void -bitmap_info_init(bitmap_info_t *binfo, size_t nbits) -{ - unsigned i; - size_t group_count; - - assert(nbits > 0); - assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); - - /* - * Compute the number of groups necessary to store nbits bits, and - * progressively work upward through the levels until reaching a level - * that requires only one group. - */ - binfo->levels[0].group_offset = 0; - group_count = BITMAP_BITS2GROUPS(nbits); - for (i = 1; group_count > 1; i++) { - assert(i < BITMAP_MAX_LEVELS); - binfo->levels[i].group_offset = binfo->levels[i-1].group_offset - + group_count; - group_count = BITMAP_BITS2GROUPS(group_count); - } - binfo->levels[i].group_offset = binfo->levels[i-1].group_offset - + group_count; - assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX); - binfo->nlevels = i; - binfo->nbits = nbits; -} - -size_t -bitmap_info_ngroups(const bitmap_info_t *binfo) -{ - - return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP); -} - -size_t -bitmap_size(size_t nbits) -{ - bitmap_info_t binfo; - - bitmap_info_init(&binfo, nbits); - return (bitmap_info_ngroups(&binfo)); -} - -void -bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ - size_t extra; - unsigned i; - - /* - * Bits are actually inverted with regard to the external bitmap - * interface, so the bitmap starts out with all 1 bits, except for - * trailing unused bits (if any). Note that each group uses bit 0 to - * correspond to the first logical bit in the group, so extra bits - * are the most significant bits of the last group. 
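Each level of this bitmap needs one bit per group of the level below, until a single root group remains; trailing unused bits start cleared by right-shifting the all-ones group. A runnable sketch with 64-bit groups:

#include <stdint.h>
#include <stdio.h>

#define GROUP_NBITS 64
#define BITS2GROUPS(n) (((n) + GROUP_NBITS - 1) / GROUP_NBITS)

int main(void)
{
	size_t nbits = 10000, level = 0;
	size_t group_count = BITS2GROUPS(nbits);
	while (group_count > 1) {
		printf("level %zu: %zu groups\n", level++, group_count);
		group_count = BITS2GROUPS(group_count);
	}
	printf("level %zu: 1 group (root)\n", level);

	/* Clear the trailing unused bits of the last level-0 group, as
	 * bitmap_init() does: shift the all-ones group right by the excess. */
	size_t extra = (GROUP_NBITS - (nbits % GROUP_NBITS)) % GROUP_NBITS;
	uint64_t last_group = UINT64_MAX >> extra;
	printf("last group mask: %016llx (extra=%zu)\n",
	    (unsigned long long)last_group, extra);
	return (0);
}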
- */ - memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset << - LG_SIZEOF_BITMAP); - extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) - & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) - bitmap[binfo->levels[1].group_offset - 1] >>= extra; - for (i = 1; i < binfo->nlevels; i++) { - size_t group_count = binfo->levels[i].group_offset - - binfo->levels[i-1].group_offset; - extra = (BITMAP_GROUP_NBITS - (group_count & - BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) - bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; - } -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/ctl.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/ctl.c deleted file mode 100644 index 3de8e60..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/ctl.c +++ /dev/null @@ -1,2123 +0,0 @@ -#define JEMALLOC_CTL_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -/* - * ctl_mtx protects the following: - * - ctl_stats.* - */ -static malloc_mutex_t ctl_mtx; -static bool ctl_initialized; -static uint64_t ctl_epoch; -static ctl_stats_t ctl_stats; - -/******************************************************************************/ -/* Helpers for named and indexed nodes. */ - -JEMALLOC_INLINE_C const ctl_named_node_t * -ctl_named_node(const ctl_node_t *node) -{ - - return ((node->named) ? (const ctl_named_node_t *)node : NULL); -} - -JEMALLOC_INLINE_C const ctl_named_node_t * -ctl_named_children(const ctl_named_node_t *node, int index) -{ - const ctl_named_node_t *children = ctl_named_node(node->children); - - return (children ? &children[index] : NULL); -} - -JEMALLOC_INLINE_C const ctl_indexed_node_t * -ctl_indexed_node(const ctl_node_t *node) -{ - - return (!node->named ? (const ctl_indexed_node_t *)node : NULL); -} - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. 
*/ - -#define CTL_PROTO(n) \ -static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen); - -#define INDEX_PROTO(n) \ -static const ctl_named_node_t *n##_index(const size_t *mib, \ - size_t miblen, size_t i); - -static bool ctl_arena_init(ctl_arena_stats_t *astats); -static void ctl_arena_clear(ctl_arena_stats_t *astats); -static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, - arena_t *arena); -static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, - ctl_arena_stats_t *astats); -static void ctl_arena_refresh(arena_t *arena, unsigned i); -static bool ctl_grow(void); -static void ctl_refresh(void); -static bool ctl_init(void); -static int ctl_lookup(const char *name, ctl_node_t const **nodesp, - size_t *mibp, size_t *depthp); - -CTL_PROTO(version) -CTL_PROTO(epoch) -CTL_PROTO(thread_tcache_enabled) -CTL_PROTO(thread_tcache_flush) -CTL_PROTO(thread_prof_name) -CTL_PROTO(thread_prof_active) -CTL_PROTO(thread_arena) -CTL_PROTO(thread_allocated) -CTL_PROTO(thread_allocatedp) -CTL_PROTO(thread_deallocated) -CTL_PROTO(thread_deallocatedp) -CTL_PROTO(config_cache_oblivious) -CTL_PROTO(config_debug) -CTL_PROTO(config_fill) -CTL_PROTO(config_lazy_lock) -CTL_PROTO(config_munmap) -CTL_PROTO(config_prof) -CTL_PROTO(config_prof_libgcc) -CTL_PROTO(config_prof_libunwind) -CTL_PROTO(config_stats) -CTL_PROTO(config_tcache) -CTL_PROTO(config_tls) -CTL_PROTO(config_utrace) -CTL_PROTO(config_valgrind) -CTL_PROTO(config_xmalloc) -CTL_PROTO(opt_abort) -CTL_PROTO(opt_dss) -CTL_PROTO(opt_lg_chunk) -CTL_PROTO(opt_narenas) -CTL_PROTO(opt_lg_dirty_mult) -CTL_PROTO(opt_stats_print) -CTL_PROTO(opt_junk) -CTL_PROTO(opt_zero) -CTL_PROTO(opt_quarantine) -CTL_PROTO(opt_redzone) -CTL_PROTO(opt_utrace) -CTL_PROTO(opt_xmalloc) -CTL_PROTO(opt_tcache) -CTL_PROTO(opt_lg_tcache_max) -CTL_PROTO(opt_prof) -CTL_PROTO(opt_prof_prefix) -CTL_PROTO(opt_prof_active) -CTL_PROTO(opt_prof_thread_active_init) -CTL_PROTO(opt_lg_prof_sample) -CTL_PROTO(opt_lg_prof_interval) -CTL_PROTO(opt_prof_gdump) -CTL_PROTO(opt_prof_final) -CTL_PROTO(opt_prof_leak) -CTL_PROTO(opt_prof_accum) -CTL_PROTO(tcache_create) -CTL_PROTO(tcache_flush) -CTL_PROTO(tcache_destroy) -CTL_PROTO(arena_i_purge) -static void arena_purge(unsigned arena_ind); -CTL_PROTO(arena_i_dss) -CTL_PROTO(arena_i_lg_dirty_mult) -CTL_PROTO(arena_i_chunk_hooks) -INDEX_PROTO(arena_i) -CTL_PROTO(arenas_bin_i_size) -CTL_PROTO(arenas_bin_i_nregs) -CTL_PROTO(arenas_bin_i_run_size) -INDEX_PROTO(arenas_bin_i) -CTL_PROTO(arenas_lrun_i_size) -INDEX_PROTO(arenas_lrun_i) -CTL_PROTO(arenas_hchunk_i_size) -INDEX_PROTO(arenas_hchunk_i) -CTL_PROTO(arenas_narenas) -CTL_PROTO(arenas_initialized) -CTL_PROTO(arenas_lg_dirty_mult) -CTL_PROTO(arenas_quantum) -CTL_PROTO(arenas_page) -CTL_PROTO(arenas_tcache_max) -CTL_PROTO(arenas_nbins) -CTL_PROTO(arenas_nhbins) -CTL_PROTO(arenas_nlruns) -CTL_PROTO(arenas_nhchunks) -CTL_PROTO(arenas_extend) -CTL_PROTO(prof_thread_active_init) -CTL_PROTO(prof_active) -CTL_PROTO(prof_dump) -CTL_PROTO(prof_gdump) -CTL_PROTO(prof_reset) -CTL_PROTO(prof_interval) -CTL_PROTO(lg_prof_sample) -CTL_PROTO(stats_arenas_i_small_allocated) -CTL_PROTO(stats_arenas_i_small_nmalloc) -CTL_PROTO(stats_arenas_i_small_ndalloc) -CTL_PROTO(stats_arenas_i_small_nrequests) -CTL_PROTO(stats_arenas_i_large_allocated) -CTL_PROTO(stats_arenas_i_large_nmalloc) -CTL_PROTO(stats_arenas_i_large_ndalloc) -CTL_PROTO(stats_arenas_i_large_nrequests) -CTL_PROTO(stats_arenas_i_huge_allocated) -CTL_PROTO(stats_arenas_i_huge_nmalloc) 
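/*
 * Illustrative expansion (not part of the deleted source): the
 * CTL_PROTO() and INDEX_PROTO() macros above stamp out one prototype
 * per mallctl node. For example, CTL_PROTO(version) expands to
 *
 *   static int version_ctl(const size_t *mib, size_t miblen,
 *       void *oldp, size_t *oldlenp, void *newp, size_t newlen);
 *
 * and INDEX_PROTO(arena_i) expands to
 *
 *   static const ctl_named_node_t *arena_i_index(const size_t *mib,
 *       size_t miblen, size_t i);
 *
 * Every terminal node shares this one handler signature, which is what
 * lets the ctl tree dispatch through a single function-pointer type.
 */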
-CTL_PROTO(stats_arenas_i_huge_ndalloc) -CTL_PROTO(stats_arenas_i_huge_nrequests) -CTL_PROTO(stats_arenas_i_bins_j_nmalloc) -CTL_PROTO(stats_arenas_i_bins_j_ndalloc) -CTL_PROTO(stats_arenas_i_bins_j_nrequests) -CTL_PROTO(stats_arenas_i_bins_j_curregs) -CTL_PROTO(stats_arenas_i_bins_j_nfills) -CTL_PROTO(stats_arenas_i_bins_j_nflushes) -CTL_PROTO(stats_arenas_i_bins_j_nruns) -CTL_PROTO(stats_arenas_i_bins_j_nreruns) -CTL_PROTO(stats_arenas_i_bins_j_curruns) -INDEX_PROTO(stats_arenas_i_bins_j) -CTL_PROTO(stats_arenas_i_lruns_j_nmalloc) -CTL_PROTO(stats_arenas_i_lruns_j_ndalloc) -CTL_PROTO(stats_arenas_i_lruns_j_nrequests) -CTL_PROTO(stats_arenas_i_lruns_j_curruns) -INDEX_PROTO(stats_arenas_i_lruns_j) -CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc) -CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc) -CTL_PROTO(stats_arenas_i_hchunks_j_nrequests) -CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks) -INDEX_PROTO(stats_arenas_i_hchunks_j) -CTL_PROTO(stats_arenas_i_nthreads) -CTL_PROTO(stats_arenas_i_dss) -CTL_PROTO(stats_arenas_i_lg_dirty_mult) -CTL_PROTO(stats_arenas_i_pactive) -CTL_PROTO(stats_arenas_i_pdirty) -CTL_PROTO(stats_arenas_i_mapped) -CTL_PROTO(stats_arenas_i_npurge) -CTL_PROTO(stats_arenas_i_nmadvise) -CTL_PROTO(stats_arenas_i_purged) -CTL_PROTO(stats_arenas_i_metadata_mapped) -CTL_PROTO(stats_arenas_i_metadata_allocated) -INDEX_PROTO(stats_arenas_i) -CTL_PROTO(stats_cactive) -CTL_PROTO(stats_allocated) -CTL_PROTO(stats_active) -CTL_PROTO(stats_metadata) -CTL_PROTO(stats_resident) -CTL_PROTO(stats_mapped) - -/******************************************************************************/ -/* mallctl tree. */ - -/* Maximum tree depth. */ -#define CTL_MAX_DEPTH 6 - -#define NAME(n) {true}, n -#define CHILD(t, c) \ - sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ - (ctl_node_t *)c##_node, \ - NULL -#define CTL(c) 0, NULL, c##_ctl - -/* - * Only handles internal indexed nodes, since there are currently no external - * ones. 
- */ -#define INDEX(i) {false}, i##_index - -static const ctl_named_node_t thread_tcache_node[] = { - {NAME("enabled"), CTL(thread_tcache_enabled)}, - {NAME("flush"), CTL(thread_tcache_flush)} -}; - -static const ctl_named_node_t thread_prof_node[] = { - {NAME("name"), CTL(thread_prof_name)}, - {NAME("active"), CTL(thread_prof_active)} -}; - -static const ctl_named_node_t thread_node[] = { - {NAME("arena"), CTL(thread_arena)}, - {NAME("allocated"), CTL(thread_allocated)}, - {NAME("allocatedp"), CTL(thread_allocatedp)}, - {NAME("deallocated"), CTL(thread_deallocated)}, - {NAME("deallocatedp"), CTL(thread_deallocatedp)}, - {NAME("tcache"), CHILD(named, thread_tcache)}, - {NAME("prof"), CHILD(named, thread_prof)} -}; - -static const ctl_named_node_t config_node[] = { - {NAME("cache_oblivious"), CTL(config_cache_oblivious)}, - {NAME("debug"), CTL(config_debug)}, - {NAME("fill"), CTL(config_fill)}, - {NAME("lazy_lock"), CTL(config_lazy_lock)}, - {NAME("munmap"), CTL(config_munmap)}, - {NAME("prof"), CTL(config_prof)}, - {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, - {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, - {NAME("stats"), CTL(config_stats)}, - {NAME("tcache"), CTL(config_tcache)}, - {NAME("tls"), CTL(config_tls)}, - {NAME("utrace"), CTL(config_utrace)}, - {NAME("valgrind"), CTL(config_valgrind)}, - {NAME("xmalloc"), CTL(config_xmalloc)} -}; - -static const ctl_named_node_t opt_node[] = { - {NAME("abort"), CTL(opt_abort)}, - {NAME("dss"), CTL(opt_dss)}, - {NAME("lg_chunk"), CTL(opt_lg_chunk)}, - {NAME("narenas"), CTL(opt_narenas)}, - {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, - {NAME("stats_print"), CTL(opt_stats_print)}, - {NAME("junk"), CTL(opt_junk)}, - {NAME("zero"), CTL(opt_zero)}, - {NAME("quarantine"), CTL(opt_quarantine)}, - {NAME("redzone"), CTL(opt_redzone)}, - {NAME("utrace"), CTL(opt_utrace)}, - {NAME("xmalloc"), CTL(opt_xmalloc)}, - {NAME("tcache"), CTL(opt_tcache)}, - {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, - {NAME("prof"), CTL(opt_prof)}, - {NAME("prof_prefix"), CTL(opt_prof_prefix)}, - {NAME("prof_active"), CTL(opt_prof_active)}, - {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)}, - {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, - {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, - {NAME("prof_gdump"), CTL(opt_prof_gdump)}, - {NAME("prof_final"), CTL(opt_prof_final)}, - {NAME("prof_leak"), CTL(opt_prof_leak)}, - {NAME("prof_accum"), CTL(opt_prof_accum)} -}; - -static const ctl_named_node_t tcache_node[] = { - {NAME("create"), CTL(tcache_create)}, - {NAME("flush"), CTL(tcache_flush)}, - {NAME("destroy"), CTL(tcache_destroy)} -}; - -static const ctl_named_node_t arena_i_node[] = { - {NAME("purge"), CTL(arena_i_purge)}, - {NAME("dss"), CTL(arena_i_dss)}, - {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)}, - {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)} -}; -static const ctl_named_node_t super_arena_i_node[] = { - {NAME(""), CHILD(named, arena_i)} -}; - -static const ctl_indexed_node_t arena_node[] = { - {INDEX(arena_i)} -}; - -static const ctl_named_node_t arenas_bin_i_node[] = { - {NAME("size"), CTL(arenas_bin_i_size)}, - {NAME("nregs"), CTL(arenas_bin_i_nregs)}, - {NAME("run_size"), CTL(arenas_bin_i_run_size)} -}; -static const ctl_named_node_t super_arenas_bin_i_node[] = { - {NAME(""), CHILD(named, arenas_bin_i)} -}; - -static const ctl_indexed_node_t arenas_bin_node[] = { - {INDEX(arenas_bin_i)} -}; - -static const ctl_named_node_t arenas_lrun_i_node[] = { - {NAME("size"), CTL(arenas_lrun_i_size)} -}; -static 
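/*
 * Illustrative expansion (not part of the deleted source): given the
 * NAME()/CHILD()/CTL() macros above, a table entry such as
 * {NAME("enabled"), CTL(thread_tcache_enabled)} expands to
 *
 *   {{true}, "enabled", 0, NULL, thread_tcache_enabled_ctl}
 *
 * i.e. a named leaf with no children and a terminal handler, whereas
 * {NAME("tcache"), CHILD(named, thread_tcache)} fills in the child
 * count and child array and leaves the handler NULL.
 */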
const ctl_named_node_t super_arenas_lrun_i_node[] = { - {NAME(""), CHILD(named, arenas_lrun_i)} -}; - -static const ctl_indexed_node_t arenas_lrun_node[] = { - {INDEX(arenas_lrun_i)} -}; - -static const ctl_named_node_t arenas_hchunk_i_node[] = { - {NAME("size"), CTL(arenas_hchunk_i_size)} -}; -static const ctl_named_node_t super_arenas_hchunk_i_node[] = { - {NAME(""), CHILD(named, arenas_hchunk_i)} -}; - -static const ctl_indexed_node_t arenas_hchunk_node[] = { - {INDEX(arenas_hchunk_i)} -}; - -static const ctl_named_node_t arenas_node[] = { - {NAME("narenas"), CTL(arenas_narenas)}, - {NAME("initialized"), CTL(arenas_initialized)}, - {NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)}, - {NAME("quantum"), CTL(arenas_quantum)}, - {NAME("page"), CTL(arenas_page)}, - {NAME("tcache_max"), CTL(arenas_tcache_max)}, - {NAME("nbins"), CTL(arenas_nbins)}, - {NAME("nhbins"), CTL(arenas_nhbins)}, - {NAME("bin"), CHILD(indexed, arenas_bin)}, - {NAME("nlruns"), CTL(arenas_nlruns)}, - {NAME("lrun"), CHILD(indexed, arenas_lrun)}, - {NAME("nhchunks"), CTL(arenas_nhchunks)}, - {NAME("hchunk"), CHILD(indexed, arenas_hchunk)}, - {NAME("extend"), CTL(arenas_extend)} -}; - -static const ctl_named_node_t prof_node[] = { - {NAME("thread_active_init"), CTL(prof_thread_active_init)}, - {NAME("active"), CTL(prof_active)}, - {NAME("dump"), CTL(prof_dump)}, - {NAME("gdump"), CTL(prof_gdump)}, - {NAME("reset"), CTL(prof_reset)}, - {NAME("interval"), CTL(prof_interval)}, - {NAME("lg_sample"), CTL(lg_prof_sample)} -}; - -static const ctl_named_node_t stats_arenas_i_metadata_node[] = { - {NAME("mapped"), CTL(stats_arenas_i_metadata_mapped)}, - {NAME("allocated"), CTL(stats_arenas_i_metadata_allocated)} -}; - -static const ctl_named_node_t stats_arenas_i_small_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} -}; - -static const ctl_named_node_t stats_arenas_i_large_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} -}; - -static const ctl_named_node_t stats_arenas_i_huge_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_huge_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)} -}; - -static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { - {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, - {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)}, - {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, - {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, - {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, - {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)}, - {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)} -}; -static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_bins_j)} -}; - -static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { - {INDEX(stats_arenas_i_bins_j)} -}; - -static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = { - {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)}, - {NAME("ndalloc"), 
CTL(stats_arenas_i_lruns_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)}, - {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)} -}; -static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_lruns_j)} -}; - -static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = { - {INDEX(stats_arenas_i_lruns_j)} -}; - -static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = { - {NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_hchunks_j_nrequests)}, - {NAME("curhchunks"), CTL(stats_arenas_i_hchunks_j_curhchunks)} -}; -static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_hchunks_j)} -}; - -static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = { - {INDEX(stats_arenas_i_hchunks_j)} -}; - -static const ctl_named_node_t stats_arenas_i_node[] = { - {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, - {NAME("dss"), CTL(stats_arenas_i_dss)}, - {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)}, - {NAME("pactive"), CTL(stats_arenas_i_pactive)}, - {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, - {NAME("mapped"), CTL(stats_arenas_i_mapped)}, - {NAME("npurge"), CTL(stats_arenas_i_npurge)}, - {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, - {NAME("purged"), CTL(stats_arenas_i_purged)}, - {NAME("metadata"), CHILD(named, stats_arenas_i_metadata)}, - {NAME("small"), CHILD(named, stats_arenas_i_small)}, - {NAME("large"), CHILD(named, stats_arenas_i_large)}, - {NAME("huge"), CHILD(named, stats_arenas_i_huge)}, - {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, - {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}, - {NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)} -}; -static const ctl_named_node_t super_stats_arenas_i_node[] = { - {NAME(""), CHILD(named, stats_arenas_i)} -}; - -static const ctl_indexed_node_t stats_arenas_node[] = { - {INDEX(stats_arenas_i)} -}; - -static const ctl_named_node_t stats_node[] = { - {NAME("cactive"), CTL(stats_cactive)}, - {NAME("allocated"), CTL(stats_allocated)}, - {NAME("active"), CTL(stats_active)}, - {NAME("metadata"), CTL(stats_metadata)}, - {NAME("resident"), CTL(stats_resident)}, - {NAME("mapped"), CTL(stats_mapped)}, - {NAME("arenas"), CHILD(indexed, stats_arenas)} -}; - -static const ctl_named_node_t root_node[] = { - {NAME("version"), CTL(version)}, - {NAME("epoch"), CTL(epoch)}, - {NAME("thread"), CHILD(named, thread)}, - {NAME("config"), CHILD(named, config)}, - {NAME("opt"), CHILD(named, opt)}, - {NAME("tcache"), CHILD(named, tcache)}, - {NAME("arena"), CHILD(indexed, arena)}, - {NAME("arenas"), CHILD(named, arenas)}, - {NAME("prof"), CHILD(named, prof)}, - {NAME("stats"), CHILD(named, stats)} -}; -static const ctl_named_node_t super_root_node[] = { - {NAME(""), CHILD(named, root)} -}; - -#undef NAME -#undef CHILD -#undef CTL -#undef INDEX - -/******************************************************************************/ - -static bool -ctl_arena_init(ctl_arena_stats_t *astats) -{ - - if (astats->lstats == NULL) { - astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses * - sizeof(malloc_large_stats_t)); - if (astats->lstats == NULL) - return (true); - } - - if (astats->hstats == NULL) { - astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses * - sizeof(malloc_huge_stats_t)); - if (astats->hstats == NULL) - return (true); - } - - return (false); -} - -static void 
-ctl_arena_clear(ctl_arena_stats_t *astats) -{ - - astats->dss = dss_prec_names[dss_prec_limit]; - astats->lg_dirty_mult = -1; - astats->pactive = 0; - astats->pdirty = 0; - if (config_stats) { - memset(&astats->astats, 0, sizeof(arena_stats_t)); - astats->allocated_small = 0; - astats->nmalloc_small = 0; - astats->ndalloc_small = 0; - astats->nrequests_small = 0; - memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); - memset(astats->lstats, 0, nlclasses * - sizeof(malloc_large_stats_t)); - memset(astats->hstats, 0, nhclasses * - sizeof(malloc_huge_stats_t)); - } -} - -static void -ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena) -{ - unsigned i; - - arena_stats_merge(arena, &cstats->dss, &cstats->lg_dirty_mult, - &cstats->pactive, &cstats->pdirty, &cstats->astats, cstats->bstats, - cstats->lstats, cstats->hstats); - - for (i = 0; i < NBINS; i++) { - cstats->allocated_small += cstats->bstats[i].curregs * - index2size(i); - cstats->nmalloc_small += cstats->bstats[i].nmalloc; - cstats->ndalloc_small += cstats->bstats[i].ndalloc; - cstats->nrequests_small += cstats->bstats[i].nrequests; - } -} - -static void -ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) -{ - unsigned i; - - sstats->pactive += astats->pactive; - sstats->pdirty += astats->pdirty; - - sstats->astats.mapped += astats->astats.mapped; - sstats->astats.npurge += astats->astats.npurge; - sstats->astats.nmadvise += astats->astats.nmadvise; - sstats->astats.purged += astats->astats.purged; - - sstats->astats.metadata_mapped += astats->astats.metadata_mapped; - sstats->astats.metadata_allocated += astats->astats.metadata_allocated; - - sstats->allocated_small += astats->allocated_small; - sstats->nmalloc_small += astats->nmalloc_small; - sstats->ndalloc_small += astats->ndalloc_small; - sstats->nrequests_small += astats->nrequests_small; - - sstats->astats.allocated_large += astats->astats.allocated_large; - sstats->astats.nmalloc_large += astats->astats.nmalloc_large; - sstats->astats.ndalloc_large += astats->astats.ndalloc_large; - sstats->astats.nrequests_large += astats->astats.nrequests_large; - - sstats->astats.allocated_huge += astats->astats.allocated_huge; - sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge; - sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge; - - for (i = 0; i < NBINS; i++) { - sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; - sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; - sstats->bstats[i].nrequests += astats->bstats[i].nrequests; - sstats->bstats[i].curregs += astats->bstats[i].curregs; - if (config_tcache) { - sstats->bstats[i].nfills += astats->bstats[i].nfills; - sstats->bstats[i].nflushes += - astats->bstats[i].nflushes; - } - sstats->bstats[i].nruns += astats->bstats[i].nruns; - sstats->bstats[i].reruns += astats->bstats[i].reruns; - sstats->bstats[i].curruns += astats->bstats[i].curruns; - } - - for (i = 0; i < nlclasses; i++) { - sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; - sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; - sstats->lstats[i].nrequests += astats->lstats[i].nrequests; - sstats->lstats[i].curruns += astats->lstats[i].curruns; - } - - for (i = 0; i < nhclasses; i++) { - sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc; - sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc; - sstats->hstats[i].curhchunks += astats->hstats[i].curhchunks; - } -} - -static void -ctl_arena_refresh(arena_t *arena, unsigned i) -{ - ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; - 
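/*
 * Illustrative note (not part of the deleted source): ctl_stats.arenas
 * holds one element per arena plus a trailing summary element at index
 * ctl_stats.narenas; ctl_init() below allocates narenas + 1 elements
 * for exactly this reason. With narenas == 4 the layout is
 *
 *   arenas[0] .. arenas[3]   per-arena stats (astats above)
 *   arenas[4]                merged totals   (sstats below)
 *
 * ctl_arena_refresh() clears the per-arena slot, re-merges it from the
 * live arena, and folds it into the summary slot via
 * ctl_arena_stats_smerge() above.
 */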
ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas]; - - ctl_arena_clear(astats); - - sstats->nthreads += astats->nthreads; - if (config_stats) { - ctl_arena_stats_amerge(astats, arena); - /* Merge into sum stats as well. */ - ctl_arena_stats_smerge(sstats, astats); - } else { - astats->pactive += arena->nactive; - astats->pdirty += arena->ndirty; - /* Merge into sum stats as well. */ - sstats->pactive += arena->nactive; - sstats->pdirty += arena->ndirty; - } -} - -static bool -ctl_grow(void) -{ - ctl_arena_stats_t *astats; - - /* Initialize new arena. */ - if (arena_init(ctl_stats.narenas) == NULL) - return (true); - - /* Allocate extended arena stats. */ - astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) * - sizeof(ctl_arena_stats_t)); - if (astats == NULL) - return (true); - - /* Initialize the new astats element. */ - memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) * - sizeof(ctl_arena_stats_t)); - memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t)); - if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) { - a0dalloc(astats); - return (true); - } - /* Swap merged stats to their new location. */ - { - ctl_arena_stats_t tstats; - memcpy(&tstats, &astats[ctl_stats.narenas], - sizeof(ctl_arena_stats_t)); - memcpy(&astats[ctl_stats.narenas], - &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t)); - memcpy(&astats[ctl_stats.narenas + 1], &tstats, - sizeof(ctl_arena_stats_t)); - } - a0dalloc(ctl_stats.arenas); - ctl_stats.arenas = astats; - ctl_stats.narenas++; - - return (false); -} - -static void -ctl_refresh(void) -{ - tsd_t *tsd; - unsigned i; - bool refreshed; - VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); - - /* - * Clear sum stats, since they will be merged into by - * ctl_arena_refresh(). 
- */ - ctl_stats.arenas[ctl_stats.narenas].nthreads = 0; - ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]); - - tsd = tsd_fetch(); - for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) { - tarenas[i] = arena_get(tsd, i, false, false); - if (tarenas[i] == NULL && !refreshed) { - tarenas[i] = arena_get(tsd, i, false, true); - refreshed = true; - } - } - - for (i = 0; i < ctl_stats.narenas; i++) { - if (tarenas[i] != NULL) - ctl_stats.arenas[i].nthreads = arena_nbound(i); - else - ctl_stats.arenas[i].nthreads = 0; - } - - for (i = 0; i < ctl_stats.narenas; i++) { - bool initialized = (tarenas[i] != NULL); - - ctl_stats.arenas[i].initialized = initialized; - if (initialized) - ctl_arena_refresh(tarenas[i], i); - } - - if (config_stats) { - size_t base_allocated, base_resident, base_mapped; - base_stats_get(&base_allocated, &base_resident, &base_mapped); - ctl_stats.allocated = - ctl_stats.arenas[ctl_stats.narenas].allocated_small + - ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large + - ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge; - ctl_stats.active = - (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE); - ctl_stats.metadata = base_allocated + - ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + - ctl_stats.arenas[ctl_stats.narenas].astats - .metadata_allocated; - ctl_stats.resident = base_resident + - ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + - ((ctl_stats.arenas[ctl_stats.narenas].pactive + - ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE); - ctl_stats.mapped = base_mapped + - ctl_stats.arenas[ctl_stats.narenas].astats.mapped; - } - - ctl_epoch++; -} - -static bool -ctl_init(void) -{ - bool ret; - - malloc_mutex_lock(&ctl_mtx); - if (!ctl_initialized) { - /* - * Allocate space for one extra arena stats element, which - * contains summed stats across all arenas. - */ - ctl_stats.narenas = narenas_total_get(); - ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc( - (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t)); - if (ctl_stats.arenas == NULL) { - ret = true; - goto label_return; - } - memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) * - sizeof(ctl_arena_stats_t)); - - /* - * Initialize all stats structures, regardless of whether they - * ever get used. Lazy initialization would allow errors to - * cause inconsistent state to be viewable by the application. - */ - if (config_stats) { - unsigned i; - for (i = 0; i <= ctl_stats.narenas; i++) { - if (ctl_arena_init(&ctl_stats.arenas[i])) { - unsigned j; - for (j = 0; j < i; j++) { - a0dalloc( - ctl_stats.arenas[j].lstats); - a0dalloc( - ctl_stats.arenas[j].hstats); - } - a0dalloc(ctl_stats.arenas); - ctl_stats.arenas = NULL; - ret = true; - goto label_return; - } - } - } - ctl_stats.arenas[ctl_stats.narenas].initialized = true; - - ctl_epoch = 0; - ctl_refresh(); - ctl_initialized = true; - } - - ret = false; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static int -ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, - size_t *depthp) -{ - int ret; - const char *elm, *tdot, *dot; - size_t elen, i, j; - const ctl_named_node_t *node; - - elm = name; - /* Equivalent to strchrnul(). */ - dot = ((tdot = strchr(elm, '.')) != NULL) ? 
tdot : strchr(elm, '\0'); - elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); - if (elen == 0) { - ret = ENOENT; - goto label_return; - } - node = super_root_node; - for (i = 0; i < *depthp; i++) { - assert(node); - assert(node->nchildren > 0); - if (ctl_named_node(node->children) != NULL) { - const ctl_named_node_t *pnode = node; - - /* Children are named. */ - for (j = 0; j < node->nchildren; j++) { - const ctl_named_node_t *child = - ctl_named_children(node, j); - if (strlen(child->name) == elen && - strncmp(elm, child->name, elen) == 0) { - node = child; - if (nodesp != NULL) - nodesp[i] = - (const ctl_node_t *)node; - mibp[i] = j; - break; - } - } - if (node == pnode) { - ret = ENOENT; - goto label_return; - } - } else { - uintmax_t index; - const ctl_indexed_node_t *inode; - - /* Children are indexed. */ - index = malloc_strtoumax(elm, NULL, 10); - if (index == UINTMAX_MAX || index > SIZE_T_MAX) { - ret = ENOENT; - goto label_return; - } - - inode = ctl_indexed_node(node->children); - node = inode->index(mibp, *depthp, (size_t)index); - if (node == NULL) { - ret = ENOENT; - goto label_return; - } - - if (nodesp != NULL) - nodesp[i] = (const ctl_node_t *)node; - mibp[i] = (size_t)index; - } - - if (node->ctl != NULL) { - /* Terminal node. */ - if (*dot != '\0') { - /* - * The name contains more elements than are - * in this path through the tree. - */ - ret = ENOENT; - goto label_return; - } - /* Complete lookup successful. */ - *depthp = i + 1; - break; - } - - /* Update elm. */ - if (*dot == '\0') { - /* No more elements. */ - ret = ENOENT; - goto label_return; - } - elm = &dot[1]; - dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : - strchr(elm, '\0'); - elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); - } - - ret = 0; -label_return: - return (ret); -} - -int -ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) -{ - int ret; - size_t depth; - ctl_node_t const *nodes[CTL_MAX_DEPTH]; - size_t mib[CTL_MAX_DEPTH]; - const ctl_named_node_t *node; - - if (!ctl_initialized && ctl_init()) { - ret = EAGAIN; - goto label_return; - } - - depth = CTL_MAX_DEPTH; - ret = ctl_lookup(name, nodes, mib, &depth); - if (ret != 0) - goto label_return; - - node = ctl_named_node(nodes[depth-1]); - if (node != NULL && node->ctl) - ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen); - else { - /* The name refers to a partial path through the ctl tree. */ - ret = ENOENT; - } - -label_return: - return(ret); -} - -int -ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) -{ - int ret; - - if (!ctl_initialized && ctl_init()) { - ret = EAGAIN; - goto label_return; - } - - ret = ctl_lookup(name, NULL, mibp, miblenp); -label_return: - return(ret); -} - -int -ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - const ctl_named_node_t *node; - size_t i; - - if (!ctl_initialized && ctl_init()) { - ret = EAGAIN; - goto label_return; - } - - /* Iterate down the tree. */ - node = super_root_node; - for (i = 0; i < miblen; i++) { - assert(node); - assert(node->nchildren > 0); - if (ctl_named_node(node->children) != NULL) { - /* Children are named. */ - if (node->nchildren <= mib[i]) { - ret = ENOENT; - goto label_return; - } - node = ctl_named_children(node, mib[i]); - } else { - const ctl_indexed_node_t *inode; - - /* Indexed element. 
*/ - inode = ctl_indexed_node(node->children); - node = inode->index(mib, miblen, mib[i]); - if (node == NULL) { - ret = ENOENT; - goto label_return; - } - } - } - - /* Call the ctl function. */ - if (node && node->ctl) - ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); - else { - /* Partial MIB. */ - ret = ENOENT; - } - -label_return: - return(ret); -} - -bool -ctl_boot(void) -{ - - if (malloc_mutex_init(&ctl_mtx)) - return (true); - - ctl_initialized = false; - - return (false); -} - -void -ctl_prefork(void) -{ - - malloc_mutex_prefork(&ctl_mtx); -} - -void -ctl_postfork_parent(void) -{ - - malloc_mutex_postfork_parent(&ctl_mtx); -} - -void -ctl_postfork_child(void) -{ - - malloc_mutex_postfork_child(&ctl_mtx); -} - -/******************************************************************************/ -/* *_ctl() functions. */ - -#define READONLY() do { \ - if (newp != NULL || newlen != 0) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) - -#define WRITEONLY() do { \ - if (oldp != NULL || oldlenp != NULL) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) - -#define READ_XOR_WRITE() do { \ - if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \ - newlen != 0)) { \ - ret = EPERM; \ - goto label_return; \ - } \ -} while (0) - -#define READ(v, t) do { \ - if (oldp != NULL && oldlenp != NULL) { \ - if (*oldlenp != sizeof(t)) { \ - size_t copylen = (sizeof(t) <= *oldlenp) \ - ? sizeof(t) : *oldlenp; \ - memcpy(oldp, (void *)&(v), copylen); \ - ret = EINVAL; \ - goto label_return; \ - } \ - *(t *)oldp = (v); \ - } \ -} while (0) - -#define WRITE(v, t) do { \ - if (newp != NULL) { \ - if (newlen != sizeof(t)) { \ - ret = EINVAL; \ - goto label_return; \ - } \ - (v) = *(t *)newp; \ - } \ -} while (0) - -/* - * There's a lot of code duplication in the following macros due to limitations - * in how nested cpp macros are expanded. - */ -#define CTL_RO_CLGEN(c, l, n, v, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - if (!(c)) \ - return (ENOENT); \ - if (l) \ - malloc_mutex_lock(&ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - if (l) \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ -} - -#define CTL_RO_CGEN(c, n, v, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - if (!(c)) \ - return (ENOENT); \ - malloc_mutex_lock(&ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ -} - -#define CTL_RO_GEN(n, v, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - malloc_mutex_lock(&ctl_mtx); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ -} - -/* - * ctl_mtx is not acquired, under the assumption that no pertinent data will - * mutate during the call. 
- */ -#define CTL_RO_NL_CGEN(c, n, v, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - if (!(c)) \ - return (ENOENT); \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - return (ret); \ -} - -#define CTL_RO_NL_GEN(n, v, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - \ - READONLY(); \ - oldval = (v); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - return (ret); \ -} - -#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - t oldval; \ - tsd_t *tsd; \ - \ - if (!(c)) \ - return (ENOENT); \ - READONLY(); \ - tsd = tsd_fetch(); \ - oldval = (m(tsd)); \ - READ(oldval, t); \ - \ - ret = 0; \ -label_return: \ - return (ret); \ -} - -#define CTL_RO_BOOL_CONFIG_GEN(n) \ -static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ - int ret; \ - bool oldval; \ - \ - READONLY(); \ - oldval = n; \ - READ(oldval, bool); \ - \ - ret = 0; \ -label_return: \ - return (ret); \ -} - -/******************************************************************************/ - -CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) - -static int -epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - UNUSED uint64_t newval; - - malloc_mutex_lock(&ctl_mtx); - WRITE(newval, uint64_t); - if (newp != NULL) - ctl_refresh(); - READ(ctl_epoch, uint64_t); - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -/******************************************************************************/ - -CTL_RO_BOOL_CONFIG_GEN(config_cache_oblivious) -CTL_RO_BOOL_CONFIG_GEN(config_debug) -CTL_RO_BOOL_CONFIG_GEN(config_fill) -CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock) -CTL_RO_BOOL_CONFIG_GEN(config_munmap) -CTL_RO_BOOL_CONFIG_GEN(config_prof) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind) -CTL_RO_BOOL_CONFIG_GEN(config_stats) -CTL_RO_BOOL_CONFIG_GEN(config_tcache) -CTL_RO_BOOL_CONFIG_GEN(config_tls) -CTL_RO_BOOL_CONFIG_GEN(config_utrace) -CTL_RO_BOOL_CONFIG_GEN(config_valgrind) -CTL_RO_BOOL_CONFIG_GEN(config_xmalloc) - -/******************************************************************************/ - -CTL_RO_NL_GEN(opt_abort, opt_abort, bool) -CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) -CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) -CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) -CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) -CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) -CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) -CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) -CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool) -CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) -CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) -CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) -CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool) -CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) -CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) 
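/*
 * Illustrative expansion (not part of the deleted source): each
 * generator above produces a complete read-only handler. For example,
 * CTL_RO_NL_GEN(opt_abort, opt_abort, bool) expands to
 *
 *   static int
 *   opt_abort_ctl(const size_t *mib, size_t miblen, void *oldp,
 *       size_t *oldlenp, void *newp, size_t newlen)
 *   {
 *       int ret;
 *       bool oldval;
 *
 *       READONLY();          (reject any write attempt with EPERM)
 *       oldval = (opt_abort);
 *       READ(oldval, bool);  (copy out if oldp/oldlenp are given)
 *
 *       ret = 0;
 *   label_return:
 *       return (ret);
 *   }
 *
 * The NL variants skip ctl_mtx because, as the comment above notes,
 * the underlying value cannot mutate during the call.
 */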
-CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init, - opt_prof_thread_active_init, bool) -CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) -CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) -CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) -CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) -CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) - -/******************************************************************************/ - -static int -thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - tsd_t *tsd; - arena_t *oldarena; - unsigned newind, oldind; - - tsd = tsd_fetch(); - oldarena = arena_choose(tsd, NULL); - if (oldarena == NULL) - return (EAGAIN); - - malloc_mutex_lock(&ctl_mtx); - newind = oldind = oldarena->ind; - WRITE(newind, unsigned); - READ(oldind, unsigned); - if (newind != oldind) { - arena_t *newarena; - - if (newind >= ctl_stats.narenas) { - /* New arena index is out of range. */ - ret = EFAULT; - goto label_return; - } - - /* Initialize arena if necessary. */ - newarena = arena_get(tsd, newind, true, true); - if (newarena == NULL) { - ret = EAGAIN; - goto label_return; - } - /* Set new arena/tcache associations. */ - arena_migrate(tsd, oldind, newind); - if (config_tcache) { - tcache_t *tcache = tsd_tcache_get(tsd); - if (tcache != NULL) { - tcache_arena_reassociate(tcache, oldarena, - newarena); - } - } - } - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, - uint64_t) -CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get, - uint64_t *) -CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get, - uint64_t) -CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, - tsd_thread_deallocatedp_get, uint64_t *) - -static int -thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - bool oldval; - - if (!config_tcache) - return (ENOENT); - - oldval = tcache_enabled_get(); - if (newp != NULL) { - if (newlen != sizeof(bool)) { - ret = EINVAL; - goto label_return; - } - tcache_enabled_set(*(bool *)newp); - } - READ(oldval, bool); - - ret = 0; -label_return: - return (ret); -} - -static int -thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - - if (!config_tcache) - return (ENOENT); - - READONLY(); - WRITEONLY(); - - tcache_flush(); - - ret = 0; -label_return: - return (ret); -} - -static int -thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - - if (!config_prof) - return (ENOENT); - - READ_XOR_WRITE(); - - if (newp != NULL) { - tsd_t *tsd; - - if (newlen != sizeof(const char *)) { - ret = EINVAL; - goto label_return; - } - - tsd = tsd_fetch(); - - if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != - 0) - goto label_return; - } else { - const char *oldname = prof_thread_name_get(); - READ(oldname, const char *); - } - - ret = 0; -label_return: - return (ret); -} - -static int -thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, 
size_t newlen) -{ - int ret; - bool oldval; - - if (!config_prof) - return (ENOENT); - - oldval = prof_thread_active_get(); - if (newp != NULL) { - if (newlen != sizeof(bool)) { - ret = EINVAL; - goto label_return; - } - if (prof_thread_active_set(*(bool *)newp)) { - ret = EAGAIN; - goto label_return; - } - } - READ(oldval, bool); - - ret = 0; -label_return: - return (ret); -} - -/******************************************************************************/ - -static int -tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - tsd_t *tsd; - unsigned tcache_ind; - - if (!config_tcache) - return (ENOENT); - - tsd = tsd_fetch(); - - malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (tcaches_create(tsd, &tcache_ind)) { - ret = EFAULT; - goto label_return; - } - READ(tcache_ind, unsigned); - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static int -tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - tsd_t *tsd; - unsigned tcache_ind; - - if (!config_tcache) - return (ENOENT); - - tsd = tsd_fetch(); - - WRITEONLY(); - tcache_ind = UINT_MAX; - WRITE(tcache_ind, unsigned); - if (tcache_ind == UINT_MAX) { - ret = EFAULT; - goto label_return; - } - tcaches_flush(tsd, tcache_ind); - - ret = 0; -label_return: - return (ret); -} - -static int -tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - tsd_t *tsd; - unsigned tcache_ind; - - if (!config_tcache) - return (ENOENT); - - tsd = tsd_fetch(); - - WRITEONLY(); - tcache_ind = UINT_MAX; - WRITE(tcache_ind, unsigned); - if (tcache_ind == UINT_MAX) { - ret = EFAULT; - goto label_return; - } - tcaches_destroy(tsd, tcache_ind); - - ret = 0; -label_return: - return (ret); -} - -/******************************************************************************/ - -/* ctl_mutex must be held during execution of this function. 
*/ -static void -arena_purge(unsigned arena_ind) -{ - tsd_t *tsd; - unsigned i; - bool refreshed; - VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); - - tsd = tsd_fetch(); - for (i = 0, refreshed = false; i < ctl_stats.narenas; i++) { - tarenas[i] = arena_get(tsd, i, false, false); - if (tarenas[i] == NULL && !refreshed) { - tarenas[i] = arena_get(tsd, i, false, true); - refreshed = true; - } - } - - if (arena_ind == ctl_stats.narenas) { - unsigned i; - for (i = 0; i < ctl_stats.narenas; i++) { - if (tarenas[i] != NULL) - arena_purge_all(tarenas[i]); - } - } else { - assert(arena_ind < ctl_stats.narenas); - if (tarenas[arena_ind] != NULL) - arena_purge_all(tarenas[arena_ind]); - } -} - -static int -arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - - READONLY(); - WRITEONLY(); - malloc_mutex_lock(&ctl_mtx); - arena_purge(mib[1]); - malloc_mutex_unlock(&ctl_mtx); - - ret = 0; -label_return: - return (ret); -} - -static int -arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - const char *dss = NULL; - unsigned arena_ind = mib[1]; - dss_prec_t dss_prec_old = dss_prec_limit; - dss_prec_t dss_prec = dss_prec_limit; - - malloc_mutex_lock(&ctl_mtx); - WRITE(dss, const char *); - if (dss != NULL) { - int i; - bool match = false; - - for (i = 0; i < dss_prec_limit; i++) { - if (strcmp(dss_prec_names[i], dss) == 0) { - dss_prec = i; - match = true; - break; - } - } - - if (!match) { - ret = EINVAL; - goto label_return; - } - } - - if (arena_ind < ctl_stats.narenas) { - arena_t *arena = arena_get(tsd_fetch(), arena_ind, false, true); - if (arena == NULL || (dss_prec != dss_prec_limit && - arena_dss_prec_set(arena, dss_prec))) { - ret = EFAULT; - goto label_return; - } - dss_prec_old = arena_dss_prec_get(arena); - } else { - if (dss_prec != dss_prec_limit && - chunk_dss_prec_set(dss_prec)) { - ret = EFAULT; - goto label_return; - } - dss_prec_old = chunk_dss_prec_get(); - } - - dss = dss_prec_names[dss_prec_old]; - READ(dss, const char *); - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static int -arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned arena_ind = mib[1]; - arena_t *arena; - - arena = arena_get(tsd_fetch(), arena_ind, false, true); - if (arena == NULL) { - ret = EFAULT; - goto label_return; - } - - if (oldp != NULL && oldlenp != NULL) { - size_t oldval = arena_lg_dirty_mult_get(arena); - READ(oldval, ssize_t); - } - if (newp != NULL) { - if (newlen != sizeof(ssize_t)) { - ret = EINVAL; - goto label_return; - } - if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) { - ret = EFAULT; - goto label_return; - } - } - - ret = 0; -label_return: - return (ret); -} - -static int -arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned arena_ind = mib[1]; - arena_t *arena; - - malloc_mutex_lock(&ctl_mtx); - if (arena_ind < narenas_total_get() && (arena = - arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) { - if (newp != NULL) { - chunk_hooks_t old_chunk_hooks, new_chunk_hooks; - WRITE(new_chunk_hooks, chunk_hooks_t); - old_chunk_hooks = chunk_hooks_set(arena, - &new_chunk_hooks); - READ(old_chunk_hooks, chunk_hooks_t); - } else { - chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena); - READ(old_chunk_hooks, chunk_hooks_t); - } - 
} else { - ret = EFAULT; - goto label_return; - } - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static const ctl_named_node_t * -arena_i_index(const size_t *mib, size_t miblen, size_t i) -{ - const ctl_named_node_t * ret; - - malloc_mutex_lock(&ctl_mtx); - if (i > ctl_stats.narenas) { - ret = NULL; - goto label_return; - } - - ret = super_arena_i_node; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -/******************************************************************************/ - -static int -arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned narenas; - - malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (*oldlenp != sizeof(unsigned)) { - ret = EINVAL; - goto label_return; - } - narenas = ctl_stats.narenas; - READ(narenas, unsigned); - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static int -arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - unsigned nread, i; - - malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (*oldlenp != ctl_stats.narenas * sizeof(bool)) { - ret = EINVAL; - nread = (*oldlenp < ctl_stats.narenas * sizeof(bool)) - ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas; - } else { - ret = 0; - nread = ctl_stats.narenas; - } - - for (i = 0; i < nread; i++) - ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; - -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -static int -arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - - if (oldp != NULL && oldlenp != NULL) { - size_t oldval = arena_lg_dirty_mult_default_get(); - READ(oldval, ssize_t); - } - if (newp != NULL) { - if (newlen != sizeof(ssize_t)) { - ret = EINVAL; - goto label_return; - } - if (arena_lg_dirty_mult_default_set(*(ssize_t *)newp)) { - ret = EFAULT; - goto label_return; - } - } - - ret = 0; -label_return: - return (ret); -} - -CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) -CTL_RO_NL_GEN(arenas_page, PAGE, size_t) -CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) -CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) -CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned) -CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) -CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) -CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) -static const ctl_named_node_t * -arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) -{ - - if (i > NBINS) - return (NULL); - return (super_arenas_bin_i_node); -} - -CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned) -CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+mib[2]), size_t) -static const ctl_named_node_t * -arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) -{ - - if (i > nlclasses) - return (NULL); - return (super_arenas_lrun_i_node); -} - -CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned) -CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+mib[2]), size_t) -static const ctl_named_node_t * -arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i) -{ - - if (i > nhclasses) - return (NULL); - return (super_arenas_hchunk_i_node); -} - -static int -arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - unsigned narenas; - - 
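/*
 * Illustrative usage (not part of the deleted source): this handler is
 * reached through the public mallctl entry point, e.g.
 *
 *   unsigned ind;
 *   size_t sz = sizeof(ind);
 *   if (mallctl("arenas.extend", &ind, &sz, NULL, 0) == 0) {
 *       (ind now holds the index of the newly created arena)
 *   }
 *
 * ctl_byname() above resolves the dotted name against super_root_node
 * and dispatches here; the handler grows the arena set via ctl_grow()
 * and reads the new arena's index back out. (Redis builds jemalloc
 * with a je_ symbol prefix, so the exported name is je_mallctl.)
 */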
malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (ctl_grow()) { - ret = EAGAIN; - goto label_return; - } - narenas = ctl_stats.narenas - 1; - READ(narenas, unsigned); - - ret = 0; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} - -/******************************************************************************/ - -static int -prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ - int ret; - bool oldval; - - if (!config_prof) - return (ENOENT); - - if (newp != NULL) { - if (newlen != sizeof(bool)) { - ret = EINVAL; - goto label_return; - } - oldval = prof_thread_active_init_set(*(bool *)newp); - } else - oldval = prof_thread_active_init_get(); - READ(oldval, bool); - - ret = 0; -label_return: - return (ret); -} - -static int -prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - bool oldval; - - if (!config_prof) - return (ENOENT); - - if (newp != NULL) { - if (newlen != sizeof(bool)) { - ret = EINVAL; - goto label_return; - } - oldval = prof_active_set(*(bool *)newp); - } else - oldval = prof_active_get(); - READ(oldval, bool); - - ret = 0; -label_return: - return (ret); -} - -static int -prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - const char *filename = NULL; - - if (!config_prof) - return (ENOENT); - - WRITEONLY(); - WRITE(filename, const char *); - - if (prof_mdump(filename)) { - ret = EFAULT; - goto label_return; - } - - ret = 0; -label_return: - return (ret); -} - -static int -prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - bool oldval; - - if (!config_prof) - return (ENOENT); - - if (newp != NULL) { - if (newlen != sizeof(bool)) { - ret = EINVAL; - goto label_return; - } - oldval = prof_gdump_set(*(bool *)newp); - } else - oldval = prof_gdump_get(); - READ(oldval, bool); - - ret = 0; -label_return: - return (ret); -} - -static int -prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret; - size_t lg_sample = lg_prof_sample; - tsd_t *tsd; - - if (!config_prof) - return (ENOENT); - - WRITEONLY(); - WRITE(lg_sample, size_t); - if (lg_sample >= (sizeof(uint64_t) << 3)) - lg_sample = (sizeof(uint64_t) << 3) - 1; - - tsd = tsd_fetch(); - - prof_reset(tsd, lg_sample); - - ret = 0; -label_return: - return (ret); -} - -CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) -CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t) - -/******************************************************************************/ - -CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) -CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) -CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) -CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t) -CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t) -CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) - -CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) -CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult, - ssize_t) -CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) -CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) -CTL_RO_GEN(stats_arenas_i_pdirty, 
ctl_stats.arenas[mib[2]].pdirty, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, - ctl_stats.arenas[mib[2]].astats.mapped, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, - ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, - ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_purged, - ctl_stats.arenas[mib[2]].astats.purged, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped, - ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated, - ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t) - -CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, - ctl_stats.arenas[mib[2]].allocated_small, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, - ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, - ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, - ctl_stats.arenas[mib[2]].nrequests_small, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, - ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, - ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, - ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, - ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated, - ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc, - ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc, - ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests, - ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. 
*/ - -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, - ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs, - ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t) -CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t) -CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) - -static const ctl_named_node_t * -stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) -{ - - if (j > NBINS) - return (NULL); - return (super_stats_arenas_i_bins_j_node); -} - -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc, - ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc, - ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests, - ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, - ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) - -static const ctl_named_node_t * -stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) -{ - - if (j > nlclasses) - return (NULL); - return (super_stats_arenas_i_lruns_j_node); -} - -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc, - ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc, - ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests, - ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. 
*/ - uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks, - ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t) - -static const ctl_named_node_t * -stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j) -{ - - if (j > nhclasses) - return (NULL); - return (super_stats_arenas_i_hchunks_j_node); -} - -static const ctl_named_node_t * -stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) -{ - const ctl_named_node_t * ret; - - malloc_mutex_lock(&ctl_mtx); - if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) { - ret = NULL; - goto label_return; - } - - ret = super_stats_arenas_i_node; -label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/extent.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/extent.c deleted file mode 100644 index 13f9441..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/extent.c +++ /dev/null @@ -1,53 +0,0 @@ -#define JEMALLOC_EXTENT_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ - -JEMALLOC_INLINE_C size_t -extent_quantize(size_t size) -{ - - /* - * Round down to the nearest chunk size that can actually be requested - * during normal huge allocation. - */ - return (index2size(size2index(size + 1) - 1)); -} - -JEMALLOC_INLINE_C int -extent_szad_comp(extent_node_t *a, extent_node_t *b) -{ - int ret; - size_t a_qsize = extent_quantize(extent_node_size_get(a)); - size_t b_qsize = extent_quantize(extent_node_size_get(b)); - - /* - * Compare based on quantized size rather than size, in order to sort - * equally useful extents only by address. - */ - ret = (a_qsize > b_qsize) - (a_qsize < b_qsize); - if (ret == 0) { - uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a); - uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b); - - ret = (a_addr > b_addr) - (a_addr < b_addr); - } - - return (ret); -} - -/* Generate red-black tree functions. */ -rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link, - extent_szad_comp) - -JEMALLOC_INLINE_C int -extent_ad_comp(extent_node_t *a, extent_node_t *b) -{ - uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a); - uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b); - - return ((a_addr > b_addr) - (a_addr < b_addr)); -} - -/* Generate red-black tree functions. */ -rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp) diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/hash.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/hash.c deleted file mode 100644 index cfa4da0..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/hash.c +++ /dev/null @@ -1,2 +0,0 @@ -#define JEMALLOC_HASH_C_ -#include "jemalloc/internal/jemalloc_internal.h" diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/jemalloc.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/jemalloc.c deleted file mode 100644 index fe77c24..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/jemalloc.c +++ /dev/null @@ -1,2625 +0,0 @@ -#define JEMALLOC_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -/* Runtime configuration options. 
*/ -const char *je_malloc_conf JEMALLOC_ATTR(weak); -bool opt_abort = -#ifdef JEMALLOC_DEBUG - true -#else - false -#endif - ; -const char *opt_junk = -#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) - "true" -#else - "false" -#endif - ; -bool opt_junk_alloc = -#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) - true -#else - false -#endif - ; -bool opt_junk_free = -#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) - true -#else - false -#endif - ; - -size_t opt_quarantine = ZU(0); -bool opt_redzone = false; -bool opt_utrace = false; -bool opt_xmalloc = false; -bool opt_zero = false; -size_t opt_narenas = 0; - -/* Initialized to true if the process is running inside Valgrind. */ -bool in_valgrind; - -unsigned ncpus; - -/* Protects arenas initialization (arenas, narenas_total). */ -static malloc_mutex_t arenas_lock; -/* - * Arenas that are used to service external requests. Not all elements of the - * arenas array are necessarily used; arenas are created lazily as needed. - * - * arenas[0..narenas_auto) are used for automatic multiplexing of threads and - * arenas. arenas[narenas_auto..narenas_total) are only used if the application - * takes some action to create them and allocate from them. - */ -static arena_t **arenas; -static unsigned narenas_total; -static arena_t *a0; /* arenas[0]; read-only after initialization. */ -static unsigned narenas_auto; /* Read-only after initialization. */ - -typedef enum { - malloc_init_uninitialized = 3, - malloc_init_a0_initialized = 2, - malloc_init_recursible = 1, - malloc_init_initialized = 0 /* Common case --> jnz. */ -} malloc_init_t; -static malloc_init_t malloc_init_state = malloc_init_uninitialized; - -JEMALLOC_ALIGNED(CACHELINE) -const size_t index2size_tab[NSIZES] = { -#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ - ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)), - SIZE_CLASSES -#undef SC -}; - -#ifdef JEMALLOC_THREADED_INIT -/* Used to let the initializing thread recursively allocate. */ -# define NO_INITIALIZER ((unsigned long)0) -# define INITIALIZER pthread_self() -# define IS_INITIALIZER (malloc_initializer == pthread_self()) -static pthread_t malloc_initializer = NO_INITIALIZER; -#else -# define NO_INITIALIZER false -# define INITIALIZER true -# define IS_INITIALIZER malloc_initializer -static bool malloc_initializer = NO_INITIALIZER; -#endif - -/* Used to avoid initialization races. */ -#ifdef _WIN32 -#if _WIN32_WINNT >= 0x0600 -static malloc_mutex_t init_lock = SRWLOCK_INIT; -#else -static malloc_mutex_t init_lock; -static bool init_lock_initialized = false; - -JEMALLOC_ATTR(constructor) -static void WINAPI -_init_init_lock(void) -{ - - /* If another constructor in the same binary is using mallctl to - * e.g. set up chunk hooks, it may end up running before this one, - * and malloc_init_hard will crash trying to lock the uninitialized - * lock. So we force an initialization of the lock in - * malloc_init_hard as well. We don't try to care about atomicity - * of the accesses to the init_lock_initialized boolean, since it - * really only matters early in the process creation, before any - * separate thread normally starts doing anything. */ - if (!init_lock_initialized) - malloc_mutex_init(&init_lock); - init_lock_initialized = true; -} - -#ifdef _MSC_VER -# pragma section(".CRT$XCU", read) -JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) -static const void (WINAPI *init_init_lock)(void) = _init_init_lock; -#endif -#endif -#else -static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; -#endif - -typedef struct { - void *p; /* Input pointer (as in realloc(p, s)). */ - size_t s; /* Request size. */ - void *r; /* Result pointer. */ -} malloc_utrace_t;
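Since je_malloc_conf is exported weak at the top of this file, an application can bake option defaults in at link time; this is standard jemalloc usage (assuming the default unprefixed public symbol names, under which je_malloc_conf is visible as malloc_conf):

    /* In application code; overrides the weak definition at link time. */
    const char *malloc_conf = "narenas:4,lg_tcache_max:16";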
- -#ifdef JEMALLOC_UTRACE -# define UTRACE(a, b, c) do { \ - if (unlikely(opt_utrace)) { \ - int utrace_serrno = errno; \ - malloc_utrace_t ut; \ - ut.p = (a); \ - ut.s = (b); \ - ut.r = (c); \ - utrace(&ut, sizeof(ut)); \ - errno = utrace_serrno; \ - } \ -} while (0) -#else -# define UTRACE(a, b, c) -#endif - -/******************************************************************************/ -/* - * Function prototypes for static functions that are referenced prior to - * definition. - */ - -static bool malloc_init_hard_a0(void); -static bool malloc_init_hard(void); - -/******************************************************************************/ -/* - * Begin miscellaneous support functions. - */ - -JEMALLOC_ALWAYS_INLINE_C bool -malloc_initialized(void) -{ - - return (malloc_init_state == malloc_init_initialized); -} - -JEMALLOC_ALWAYS_INLINE_C void -malloc_thread_init(void) -{ - - /* - * TSD initialization can't be safely done as a side effect of - * deallocation, because it is possible for a thread to do nothing but - * deallocate its TLS data via free(), in which case writing to TLS - * would cause write-after-free memory corruption. The quarantine - * facility *only* gets used as a side effect of deallocation, so make - * a best effort attempt at initializing its TSD by hooking all - * allocation events. - */ - if (config_fill && unlikely(opt_quarantine)) - quarantine_alloc_hook(); -} - -JEMALLOC_ALWAYS_INLINE_C bool -malloc_init_a0(void) -{ - - if (unlikely(malloc_init_state == malloc_init_uninitialized)) - return (malloc_init_hard_a0()); - return (false); -} - -JEMALLOC_ALWAYS_INLINE_C bool -malloc_init(void) -{ - - if (unlikely(!malloc_initialized()) && malloc_init_hard()) - return (true); - malloc_thread_init(); - - return (false); -} - -/* - * The a0*() functions are used instead of i[mcd]alloc() in situations that - * cannot tolerate TLS variable access. - */ - -arena_t * -a0get(void) -{ - - assert(a0 != NULL); - return (a0); -} - -static void * -a0ialloc(size_t size, bool zero, bool is_metadata) -{ - - if (unlikely(malloc_init_a0())) - return (NULL); - - return (iallocztm(NULL, size, zero, false, is_metadata, a0get())); -} - -static void -a0idalloc(void *ptr, bool is_metadata) -{ - - idalloctm(NULL, ptr, false, is_metadata); -} - -void * -a0malloc(size_t size) -{ - - return (a0ialloc(size, false, true)); -} - -void -a0dalloc(void *ptr) -{ - - a0idalloc(ptr, true); -} - -/* - * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive - * situations that cannot tolerate TLS variable access (TLS allocation and very - * early internal data structure initialization). - */ - -void * -bootstrap_malloc(size_t size) -{ - - if (unlikely(size == 0)) - size = 1; - - return (a0ialloc(size, false, false)); -} - -void * -bootstrap_calloc(size_t num, size_t size) -{ - size_t num_size; - - num_size = num * size; - if (unlikely(num_size == 0)) { - assert(num == 0 || size == 0); - num_size = 1; - } - - return (a0ialloc(num_size, true, false)); -} - -void -bootstrap_free(void *ptr) -{ - - if (unlikely(ptr == NULL)) - return; - - a0idalloc(ptr, false); -} - -/* Create a new arena and insert it into the arenas array at index ind. */ -static arena_t * -arena_init_locked(unsigned ind) -{ - arena_t *arena; - - /* Expand arenas if necessary.
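The UTRACE() macro above emits one record per public entry point; the (p, s, r) encoding used at the call sites later in this file is:

    /* UTRACE(0,   size, ret) -- malloc(size) returned ret
     * UTRACE(ptr, size, ret) -- realloc(ptr, size) returned ret
     * UTRACE(ptr, 0,   0)    -- free(ptr)
     * Saving and restoring errno around utrace(2) keeps tracing from
     * clobbering the errno value the caller will observe. */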
*/ - assert(ind <= narenas_total); - if (ind > MALLOCX_ARENA_MAX) - return (NULL); - if (ind == narenas_total) { - unsigned narenas_new = narenas_total + 1; - arena_t **arenas_new = - (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new * - sizeof(arena_t *))); - if (arenas_new == NULL) - return (NULL); - memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *)); - arenas_new[ind] = NULL; - /* - * Deallocate only if arenas came from a0malloc() (not - * base_alloc()). - */ - if (narenas_total != narenas_auto) - a0dalloc(arenas); - arenas = arenas_new; - narenas_total = narenas_new; - } - - /* - * Another thread may have already initialized arenas[ind] if it's an - * auto arena. - */ - arena = arenas[ind]; - if (arena != NULL) { - assert(ind < narenas_auto); - return (arena); - } - - /* Actually initialize the arena. */ - arena = arenas[ind] = arena_new(ind); - return (arena); -} - -arena_t * -arena_init(unsigned ind) -{ - arena_t *arena; - - malloc_mutex_lock(&arenas_lock); - arena = arena_init_locked(ind); - malloc_mutex_unlock(&arenas_lock); - return (arena); -} - -unsigned -narenas_total_get(void) -{ - unsigned narenas; - - malloc_mutex_lock(&arenas_lock); - narenas = narenas_total; - malloc_mutex_unlock(&arenas_lock); - - return (narenas); -} - -static void -arena_bind_locked(tsd_t *tsd, unsigned ind) -{ - arena_t *arena; - - arena = arenas[ind]; - arena->nthreads++; - - if (tsd_nominal(tsd)) - tsd_arena_set(tsd, arena); -} - -static void -arena_bind(tsd_t *tsd, unsigned ind) -{ - - malloc_mutex_lock(&arenas_lock); - arena_bind_locked(tsd, ind); - malloc_mutex_unlock(&arenas_lock); -} - -void -arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) -{ - arena_t *oldarena, *newarena; - - malloc_mutex_lock(&arenas_lock); - oldarena = arenas[oldind]; - newarena = arenas[newind]; - oldarena->nthreads--; - newarena->nthreads++; - malloc_mutex_unlock(&arenas_lock); - tsd_arena_set(tsd, newarena); -} - -unsigned -arena_nbound(unsigned ind) -{ - unsigned nthreads; - - malloc_mutex_lock(&arenas_lock); - nthreads = arenas[ind]->nthreads; - malloc_mutex_unlock(&arenas_lock); - return (nthreads); -} - -static void -arena_unbind(tsd_t *tsd, unsigned ind) -{ - arena_t *arena; - - malloc_mutex_lock(&arenas_lock); - arena = arenas[ind]; - arena->nthreads--; - malloc_mutex_unlock(&arenas_lock); - tsd_arena_set(tsd, NULL); -} - -arena_t * -arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing) -{ - arena_t *arena; - arena_t **arenas_cache = tsd_arenas_cache_get(tsd); - unsigned narenas_cache = tsd_narenas_cache_get(tsd); - unsigned narenas_actual = narenas_total_get(); - - /* Deallocate old cache if it's too small. */ - if (arenas_cache != NULL && narenas_cache < narenas_actual) { - a0dalloc(arenas_cache); - arenas_cache = NULL; - narenas_cache = 0; - tsd_arenas_cache_set(tsd, arenas_cache); - tsd_narenas_cache_set(tsd, narenas_cache); - } - - /* Allocate cache if it's missing. */ - if (arenas_cache == NULL) { - bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd); - assert(ind < narenas_actual || !init_if_missing); - narenas_cache = (ind < narenas_actual) ? 
narenas_actual : ind+1; - - if (tsd_nominal(tsd) && !*arenas_cache_bypassp) { - *arenas_cache_bypassp = true; - arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) * - narenas_cache); - *arenas_cache_bypassp = false; - } - if (arenas_cache == NULL) { - /* - * This function must always tell the truth, even if - * it's slow, so don't let OOM, thread cleanup (note - * tsd_nominal check), nor recursive allocation - * avoidance (note arenas_cache_bypass check) get in the - * way. - */ - if (ind >= narenas_actual) - return (NULL); - malloc_mutex_lock(&arenas_lock); - arena = arenas[ind]; - malloc_mutex_unlock(&arenas_lock); - return (arena); - } - assert(tsd_nominal(tsd) && !*arenas_cache_bypassp); - tsd_arenas_cache_set(tsd, arenas_cache); - tsd_narenas_cache_set(tsd, narenas_cache); - } - - /* - * Copy to cache. It's possible that the actual number of arenas has - * increased since narenas_total_get() was called above, but that causes - * no correctness issues unless two threads concurrently execute the - * arenas.extend mallctl, which we trust mallctl synchronization to - * prevent. - */ - malloc_mutex_lock(&arenas_lock); - memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual); - malloc_mutex_unlock(&arenas_lock); - if (narenas_cache > narenas_actual) { - memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) * - (narenas_cache - narenas_actual)); - } - - /* Read the refreshed cache, and init the arena if necessary. */ - arena = arenas_cache[ind]; - if (init_if_missing && arena == NULL) - arena = arenas_cache[ind] = arena_init(ind); - return (arena); -} - -/* Slow path, called only by arena_choose(). */ -arena_t * -arena_choose_hard(tsd_t *tsd) -{ - arena_t *ret; - - if (narenas_auto > 1) { - unsigned i, choose, first_null; - - choose = 0; - first_null = narenas_auto; - malloc_mutex_lock(&arenas_lock); - assert(a0get() != NULL); - for (i = 1; i < narenas_auto; i++) { - if (arenas[i] != NULL) { - /* - * Choose the first arena that has the lowest - * number of threads assigned to it. - */ - if (arenas[i]->nthreads < - arenas[choose]->nthreads) - choose = i; - } else if (first_null == narenas_auto) { - /* - * Record the index of the first uninitialized - * arena, in case all extant arenas are in use. - * - * NB: It is possible for there to be - * discontinuities in terms of initialized - * versus uninitialized arenas, due to the - * "thread.arena" mallctl. - */ - first_null = i; - } - } - - if (arenas[choose]->nthreads == 0 - || first_null == narenas_auto) { - /* - * Use an unloaded arena, or the least loaded arena if - * all arenas are already initialized. - */ - ret = arenas[choose]; - } else { - /* Initialize a new arena. */ - choose = first_null; - ret = arena_init_locked(choose); - if (ret == NULL) { - malloc_mutex_unlock(&arenas_lock); - return (NULL); - } - } - arena_bind_locked(tsd, choose); - malloc_mutex_unlock(&arenas_lock); - } else { - ret = a0get(); - arena_bind(tsd, 0); - } - - return (ret); -} - -void -thread_allocated_cleanup(tsd_t *tsd) -{ - - /* Do nothing. */ -} - -void -thread_deallocated_cleanup(tsd_t *tsd) -{ - - /* Do nothing. */ -} - -void -arena_cleanup(tsd_t *tsd) -{ - arena_t *arena; - - arena = tsd_arena_get(tsd); - if (arena != NULL) - arena_unbind(tsd, arena->ind); -} - -void -arenas_cache_cleanup(tsd_t *tsd) -{ - arena_t **arenas_cache; - - arenas_cache = tsd_arenas_cache_get(tsd); - if (arenas_cache != NULL) { - tsd_arenas_cache_set(tsd, NULL); - a0dalloc(arenas_cache); - } -} - -void -narenas_cache_cleanup(tsd_t *tsd) -{ - - /* Do nothing. 
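The "thread.arena" mallctl mentioned in the comment above is the public route into arena_bind()/arena_migrate(); a minimal usage sketch against the jemalloc 4 API (unprefixed public names assumed):

    #include <jemalloc/jemalloc.h>

    /* Bind the calling thread to arena `ind`; oldp/oldlenp are NULL
     * because the previous binding is not of interest here. */
    static int pin_thread_to_arena(unsigned ind)
    {
        return mallctl("thread.arena", NULL, NULL, &ind, sizeof(ind));
    }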
*/ -} - -void -arenas_cache_bypass_cleanup(tsd_t *tsd) -{ - - /* Do nothing. */ -} - -static void -stats_print_atexit(void) -{ - - if (config_tcache && config_stats) { - unsigned narenas, i; - - /* - * Merge stats from extant threads. This is racy, since - * individual threads do not lock when recording tcache stats - * events. As a consequence, the final stats may be slightly - * out of date by the time they are reported, if other threads - * continue to allocate. - */ - for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { - arena_t *arena = arenas[i]; - if (arena != NULL) { - tcache_t *tcache; - - /* - * tcache_stats_merge() locks bins, so if any - * code is introduced that acquires both arena - * and bin locks in the opposite order, - * deadlocks may result. - */ - malloc_mutex_lock(&arena->lock); - ql_foreach(tcache, &arena->tcache_ql, link) { - tcache_stats_merge(tcache, arena); - } - malloc_mutex_unlock(&arena->lock); - } - } - } - je_malloc_stats_print(NULL, NULL, NULL); -} - -/* - * End miscellaneous support functions. - */ -/******************************************************************************/ -/* - * Begin initialization functions. - */ - -#ifndef JEMALLOC_HAVE_SECURE_GETENV -static char * -secure_getenv(const char *name) -{ - -# ifdef JEMALLOC_HAVE_ISSETUGID - if (issetugid() != 0) - return (NULL); -# endif - return (getenv(name)); -} -#endif - -static unsigned -malloc_ncpus(void) -{ - long result; - -#ifdef _WIN32 - SYSTEM_INFO si; - GetSystemInfo(&si); - result = si.dwNumberOfProcessors; -#else - result = sysconf(_SC_NPROCESSORS_ONLN); -#endif - return ((result == -1) ? 1 : (unsigned)result); -} - -static bool -malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, - char const **v_p, size_t *vlen_p) -{ - bool accept; - const char *opts = *opts_p; - - *k_p = opts; - - for (accept = false; !accept;) { - switch (*opts) { - case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': - case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': - case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': - case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': - case 'Y': case 'Z': - case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': - case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': - case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': - case 's': case 't': case 'u': case 'v': case 'w': case 'x': - case 'y': case 'z': - case '0': case '1': case '2': case '3': case '4': case '5': - case '6': case '7': case '8': case '9': - case '_': - opts++; - break; - case ':': - opts++; - *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; - *v_p = opts; - accept = true; - break; - case '\0': - if (opts != *opts_p) { - malloc_write(": Conf string ends " - "with key\n"); - } - return (true); - default: - malloc_write(": Malformed conf string\n"); - return (true); - } - } - - for (accept = false; !accept;) { - switch (*opts) { - case ',': - opts++; - /* - * Look ahead one character here, because the next time - * this function is called, it will assume that end of - * input has been cleanly reached if no input remains, - * but we have optimistically already consumed the - * comma if one exists. 
- */ - if (*opts == '\0') { - malloc_write(": Conf string ends " - "with comma\n"); - } - *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; - accept = true; - break; - case '\0': - *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; - accept = true; - break; - default: - opts++; - break; - } - } - - *opts_p = opts; - return (false); -} - -static void -malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, - size_t vlen) -{ - - malloc_printf(": %s: %.*s:%.*s\n", msg, (int)klen, k, - (int)vlen, v); -} - -static void -malloc_conf_init(void) -{ - unsigned i; - char buf[PATH_MAX + 1]; - const char *opts, *k, *v; - size_t klen, vlen; - - /* - * Automatically configure valgrind before processing options. The - * valgrind option remains in jemalloc 3.x for compatibility reasons. - */ - if (config_valgrind) { - in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false; - if (config_fill && unlikely(in_valgrind)) { - opt_junk = "false"; - opt_junk_alloc = false; - opt_junk_free = false; - assert(!opt_zero); - opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; - opt_redzone = true; - } - if (config_tcache && unlikely(in_valgrind)) - opt_tcache = false; - } - - for (i = 0; i < 3; i++) { - /* Get runtime configuration. */ - switch (i) { - case 0: - if (je_malloc_conf != NULL) { - /* - * Use options that were compiled into the - * program. - */ - opts = je_malloc_conf; - } else { - /* No configuration specified. */ - buf[0] = '\0'; - opts = buf; - } - break; - case 1: { - int linklen = 0; -#ifndef _WIN32 - int saved_errno = errno; - const char *linkname = -# ifdef JEMALLOC_PREFIX - "/etc/"JEMALLOC_PREFIX"malloc.conf" -# else - "/etc/malloc.conf" -# endif - ; - - /* - * Try to use the contents of the "/etc/malloc.conf" - * symbolic link's name. - */ - linklen = readlink(linkname, buf, sizeof(buf) - 1); - if (linklen == -1) { - /* No configuration specified. */ - linklen = 0; - /* Restore errno. */ - set_errno(saved_errno); - } -#endif - buf[linklen] = '\0'; - opts = buf; - break; - } case 2: { - const char *envname = -#ifdef JEMALLOC_PREFIX - JEMALLOC_CPREFIX"MALLOC_CONF" -#else - "MALLOC_CONF" -#endif - ; - - if ((opts = secure_getenv(envname)) != NULL) { - /* - * Do nothing; opts is already initialized to - * the value of the MALLOC_CONF environment - * variable. - */ - } else { - /* No configuration specified. 
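A hypothetical driver (not in the source) that shows the contract malloc_conf_init() below relies on: malloc_conf_next() keeps returning false while it yields (key, value) slices pointing into the conf string:

    const char *opts = "junk:true,quarantine:4096,narenas:8";
    const char *k, *v;
    size_t klen, vlen;

    while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, &vlen)) {
        /* k and v are not NUL-terminated; they alias the conf string. */
        malloc_printf("key=%.*s value=%.*s\n", (int)klen, k, (int)vlen, v);
    }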
*/ - buf[0] = '\0'; - opts = buf; - } - break; - } default: - not_reached(); - buf[0] = '\0'; - opts = buf; - } - - while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, - &vlen)) { -#define CONF_MATCH(n) \ - (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) -#define CONF_MATCH_VALUE(n) \ - (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) -#define CONF_HANDLE_BOOL(o, n, cont) \ - if (CONF_MATCH(n)) { \ - if (CONF_MATCH_VALUE("true")) \ - o = true; \ - else if (CONF_MATCH_VALUE("false")) \ - o = false; \ - else { \ - malloc_conf_error( \ - "Invalid conf value", \ - k, klen, v, vlen); \ - } \ - if (cont) \ - continue; \ - } -#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \ - if (CONF_MATCH(n)) { \ - uintmax_t um; \ - char *end; \ - \ - set_errno(0); \ - um = malloc_strtoumax(v, &end, 0); \ - if (get_errno() != 0 || (uintptr_t)end -\ - (uintptr_t)v != vlen) { \ - malloc_conf_error( \ - "Invalid conf value", \ - k, klen, v, vlen); \ - } else if (clip) { \ - if ((min) != 0 && um < (min)) \ - o = (min); \ - else if (um > (max)) \ - o = (max); \ - else \ - o = um; \ - } else { \ - if (((min) != 0 && um < (min)) \ - || um > (max)) { \ - malloc_conf_error( \ - "Out-of-range " \ - "conf value", \ - k, klen, v, vlen); \ - } else \ - o = um; \ - } \ - continue; \ - } -#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ - if (CONF_MATCH(n)) { \ - long l; \ - char *end; \ - \ - set_errno(0); \ - l = strtol(v, &end, 0); \ - if (get_errno() != 0 || (uintptr_t)end -\ - (uintptr_t)v != vlen) { \ - malloc_conf_error( \ - "Invalid conf value", \ - k, klen, v, vlen); \ - } else if (l < (ssize_t)(min) || l > \ - (ssize_t)(max)) { \ - malloc_conf_error( \ - "Out-of-range conf value", \ - k, klen, v, vlen); \ - } else \ - o = l; \ - continue; \ - } -#define CONF_HANDLE_CHAR_P(o, n, d) \ - if (CONF_MATCH(n)) { \ - size_t cpylen = (vlen <= \ - sizeof(o)-1) ? vlen : \ - sizeof(o)-1; \ - strncpy(o, v, cpylen); \ - o[cpylen] = '\0'; \ - continue; \ - } - - CONF_HANDLE_BOOL(opt_abort, "abort", true) - /* - * Chunks always require at least one header page, - * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and - * possibly an additional page in the presence of - * redzones. In order to simplify options processing, - * use a conservative bound that accommodates all these - * constraints. - */ - CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + - LG_SIZE_CLASS_GROUP + (config_fill ? 
2 : 1), - (sizeof(size_t) << 3) - 1, true) - if (strncmp("dss", k, klen) == 0) { - int i; - bool match = false; - for (i = 0; i < dss_prec_limit; i++) { - if (strncmp(dss_prec_names[i], v, vlen) - == 0) { - if (chunk_dss_prec_set(i)) { - malloc_conf_error( - "Error setting dss", - k, klen, v, vlen); - } else { - opt_dss = - dss_prec_names[i]; - match = true; - break; - } - } - } - if (!match) { - malloc_conf_error("Invalid conf value", - k, klen, v, vlen); - } - continue; - } - CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1, - SIZE_T_MAX, false) - CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", - -1, (sizeof(size_t) << 3) - 1) - CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true) - if (config_fill) { - if (CONF_MATCH("junk")) { - if (CONF_MATCH_VALUE("true")) { - opt_junk = "true"; - opt_junk_alloc = opt_junk_free = - true; - } else if (CONF_MATCH_VALUE("false")) { - opt_junk = "false"; - opt_junk_alloc = opt_junk_free = - false; - } else if (CONF_MATCH_VALUE("alloc")) { - opt_junk = "alloc"; - opt_junk_alloc = true; - opt_junk_free = false; - } else if (CONF_MATCH_VALUE("free")) { - opt_junk = "free"; - opt_junk_alloc = false; - opt_junk_free = true; - } else { - malloc_conf_error( - "Invalid conf value", k, - klen, v, vlen); - } - continue; - } - CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", - 0, SIZE_T_MAX, false) - CONF_HANDLE_BOOL(opt_redzone, "redzone", true) - CONF_HANDLE_BOOL(opt_zero, "zero", true) - } - if (config_utrace) { - CONF_HANDLE_BOOL(opt_utrace, "utrace", true) - } - if (config_xmalloc) { - CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true) - } - if (config_tcache) { - CONF_HANDLE_BOOL(opt_tcache, "tcache", - !config_valgrind || !in_valgrind) - if (CONF_MATCH("tcache")) { - assert(config_valgrind && in_valgrind); - if (opt_tcache) { - opt_tcache = false; - malloc_conf_error( - "tcache cannot be enabled " - "while running inside Valgrind", - k, klen, v, vlen); - } - continue; - } - CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, - "lg_tcache_max", -1, - (sizeof(size_t) << 3) - 1) - } - if (config_prof) { - CONF_HANDLE_BOOL(opt_prof, "prof", true) - CONF_HANDLE_CHAR_P(opt_prof_prefix, - "prof_prefix", "jeprof") - CONF_HANDLE_BOOL(opt_prof_active, "prof_active", - true) - CONF_HANDLE_BOOL(opt_prof_thread_active_init, - "prof_thread_active_init", true) - CONF_HANDLE_SIZE_T(opt_lg_prof_sample, - "lg_prof_sample", 0, - (sizeof(uint64_t) << 3) - 1, true) - CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum", - true) - CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, - "lg_prof_interval", -1, - (sizeof(uint64_t) << 3) - 1) - CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump", - true) - CONF_HANDLE_BOOL(opt_prof_final, "prof_final", - true) - CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak", - true) - } - malloc_conf_error("Invalid conf pair", k, klen, v, - vlen); -#undef CONF_MATCH -#undef CONF_HANDLE_BOOL -#undef CONF_HANDLE_SIZE_T -#undef CONF_HANDLE_SSIZE_T -#undef CONF_HANDLE_CHAR_P - } - } -} - -/* init_lock must be held. */ -static bool -malloc_init_hard_needed(void) -{ - - if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == - malloc_init_recursible)) { - /* - * Another thread initialized the allocator before this one - * acquired init_lock, or this thread is the initializing - * thread, and it is recursively allocating. - */ - return (false); - } -#ifdef JEMALLOC_THREADED_INIT - if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { - /* Busy-wait until the initializing thread completes. 
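For readers tracing the option machinery: each CONF_HANDLE_* use above expands to an inline match-and-parse block. Roughly, CONF_HANDLE_BOOL(opt_abort, "abort", true) becomes:

    if (sizeof("abort") - 1 == klen && strncmp("abort", k, klen) == 0) {
        if (sizeof("true") - 1 == vlen && strncmp("true", v, vlen) == 0)
            opt_abort = true;
        else if (sizeof("false") - 1 == vlen &&
            strncmp("false", v, vlen) == 0)
            opt_abort = false;
        else
            malloc_conf_error("Invalid conf value", k, klen, v, vlen);
        continue;    /* emitted because the `cont` argument was true */
    }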
*/ - do { - malloc_mutex_unlock(&init_lock); - CPU_SPINWAIT; - malloc_mutex_lock(&init_lock); - } while (!malloc_initialized()); - return (false); - } -#endif - return (true); -} - -/* init_lock must be held. */ -static bool -malloc_init_hard_a0_locked(void) -{ - - malloc_initializer = INITIALIZER; - - if (config_prof) - prof_boot0(); - malloc_conf_init(); - if (opt_stats_print) { - /* Print statistics at exit. */ - if (atexit(stats_print_atexit) != 0) { - malloc_write(": Error in atexit()\n"); - if (opt_abort) - abort(); - } - } - if (base_boot()) - return (true); - if (chunk_boot()) - return (true); - if (ctl_boot()) - return (true); - if (config_prof) - prof_boot1(); - if (arena_boot()) - return (true); - if (config_tcache && tcache_boot()) - return (true); - if (malloc_mutex_init(&arenas_lock)) - return (true); - /* - * Create enough scaffolding to allow recursive allocation in - * malloc_ncpus(). - */ - narenas_total = narenas_auto = 1; - arenas = &a0; - memset(arenas, 0, sizeof(arena_t *) * narenas_auto); - /* - * Initialize one arena here. The rest are lazily created in - * arena_choose_hard(). - */ - if (arena_init(0) == NULL) - return (true); - malloc_init_state = malloc_init_a0_initialized; - return (false); -} - -static bool -malloc_init_hard_a0(void) -{ - bool ret; - - malloc_mutex_lock(&init_lock); - ret = malloc_init_hard_a0_locked(); - malloc_mutex_unlock(&init_lock); - return (ret); -} - -/* - * Initialize data structures which may trigger recursive allocation. - * - * init_lock must be held. - */ -static void -malloc_init_hard_recursible(void) -{ - - malloc_init_state = malloc_init_recursible; - malloc_mutex_unlock(&init_lock); - - ncpus = malloc_ncpus(); - -#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ - && !defined(_WIN32) && !defined(__native_client__)) - /* LinuxThreads's pthread_atfork() allocates. */ - if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, - jemalloc_postfork_child) != 0) { - malloc_write(": Error in pthread_atfork()\n"); - if (opt_abort) - abort(); - } -#endif - malloc_mutex_lock(&init_lock); -} - -/* init_lock must be held. */ -static bool -malloc_init_hard_finish(void) -{ - - if (mutex_boot()) - return (true); - - if (opt_narenas == 0) { - /* - * For SMP systems, create more than one arena per CPU by - * default. - */ - if (ncpus > 1) - opt_narenas = ncpus << 2; - else - opt_narenas = 1; - } - narenas_auto = opt_narenas; - /* - * Make sure that the arenas array can be allocated. In practice, this - * limit is enough to allow the allocator to function, but the ctl - * machinery will fail to allocate memory at far lower limits. - */ - if (narenas_auto > chunksize / sizeof(arena_t *)) { - narenas_auto = chunksize / sizeof(arena_t *); - malloc_printf(": Reducing narenas to limit (%d)\n", - narenas_auto); - } - narenas_total = narenas_auto; - - /* Allocate and initialize arenas. */ - arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total); - if (arenas == NULL) - return (true); - /* - * Zero the array. In practice, this should always be pre-zeroed, - * since it was just mmap()ed, but let's be sure. - */ - memset(arenas, 0, sizeof(arena_t *) * narenas_total); - /* Copy the pointer to the one arena that was already initialized. 
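To make the arena sizing heuristic above concrete (illustrative numbers, assuming LP64 and 2 MiB chunks):

    /* ncpus == 8  ->  opt_narenas defaults to 8 << 2 == 32 arenas.
     * Clamp: chunksize / sizeof(arena_t *) == 2097152 / 8 == 262144,
     * so the limit only bites for extreme narenas settings. */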
*/ - arenas[0] = a0; - - malloc_init_state = malloc_init_initialized; - return (false); -} - -static bool -malloc_init_hard(void) -{ - -#if defined(_WIN32) && _WIN32_WINNT < 0x0600 - _init_init_lock(); -#endif - malloc_mutex_lock(&init_lock); - if (!malloc_init_hard_needed()) { - malloc_mutex_unlock(&init_lock); - return (false); - } - - if (malloc_init_state != malloc_init_a0_initialized && - malloc_init_hard_a0_locked()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - if (malloc_tsd_boot0()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - if (config_prof && prof_boot2()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - malloc_init_hard_recursible(); - - if (malloc_init_hard_finish()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - malloc_mutex_unlock(&init_lock); - malloc_tsd_boot1(); - return (false); -} - -/* - * End initialization functions. - */ -/******************************************************************************/ -/* - * Begin malloc(3)-compatible functions. - */ - -static void * -imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) -{ - void *p; - - if (tctx == NULL) - return (NULL); - if (usize <= SMALL_MAXCLASS) { - p = imalloc(tsd, LARGE_MINCLASS); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = imalloc(tsd, usize); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -imalloc_prof(tsd_t *tsd, size_t usize) -{ - void *p; - prof_tctx_t *tctx; - - tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) - p = imalloc_prof_sample(tsd, usize, tctx); - else - p = imalloc(tsd, usize); - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); - } - prof_malloc(p, usize, tctx); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -imalloc_body(size_t size, tsd_t **tsd, size_t *usize) -{ - - if (unlikely(malloc_init())) - return (NULL); - *tsd = tsd_fetch(); - - if (config_prof && opt_prof) { - *usize = s2u(size); - if (unlikely(*usize == 0)) - return (NULL); - return (imalloc_prof(*tsd, *usize)); - } - - if (config_stats || (config_valgrind && unlikely(in_valgrind))) - *usize = s2u(size); - return (imalloc(*tsd, size)); -} - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) -je_malloc(size_t size) -{ - void *ret; - tsd_t *tsd; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - - if (size == 0) - size = 1; - - ret = imalloc_body(size, &tsd, &usize); - if (unlikely(ret == NULL)) { - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write(": Error in malloc(): " - "out of memory\n"); - abort(); - } - set_errno(ENOMEM); - } - if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(ret, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; - } - UTRACE(0, size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false); - return (ret); -} - -static void * -imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize, - prof_tctx_t *tctx) -{ - void *p; - - if (tctx == NULL) - return (NULL); - if (usize <= SMALL_MAXCLASS) { - assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS); - p = ipalloc(tsd, LARGE_MINCLASS, alignment, false); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = ipalloc(tsd, usize, alignment, false); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize) -{ - void *p; - 
prof_tctx_t *tctx; - - tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) - p = imemalign_prof_sample(tsd, alignment, usize, tctx); - else - p = ipalloc(tsd, usize, alignment, false); - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); - } - prof_malloc(p, usize, tctx); - - return (p); -} - -JEMALLOC_ATTR(nonnull(1)) -static int -imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) -{ - int ret; - tsd_t *tsd; - size_t usize; - void *result; - - assert(min_alignment != 0); - - if (unlikely(malloc_init())) { - result = NULL; - goto label_oom; - } - tsd = tsd_fetch(); - if (size == 0) - size = 1; - - /* Make sure that alignment is a large enough power of 2. */ - if (unlikely(((alignment - 1) & alignment) != 0 - || (alignment < min_alignment))) { - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write(": Error allocating " - "aligned memory: invalid alignment\n"); - abort(); - } - result = NULL; - ret = EINVAL; - goto label_return; - } - - usize = sa2u(size, alignment); - if (unlikely(usize == 0)) { - result = NULL; - goto label_oom; - } - - if (config_prof && opt_prof) - result = imemalign_prof(tsd, alignment, usize); - else - result = ipalloc(tsd, usize, alignment, false); - if (unlikely(result == NULL)) - goto label_oom; - assert(((uintptr_t)result & (alignment - 1)) == ZU(0)); - - *memptr = result; - ret = 0; -label_return: - if (config_stats && likely(result != NULL)) { - assert(usize == isalloc(result, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; - } - UTRACE(0, size, result); - return (ret); -label_oom: - assert(result == NULL); - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write(": Error allocating aligned memory: " - "out of memory\n"); - abort(); - } - ret = ENOMEM; - goto label_return; -} - -JEMALLOC_EXPORT int JEMALLOC_NOTHROW -JEMALLOC_ATTR(nonnull(1)) -je_posix_memalign(void **memptr, size_t alignment, size_t size) -{ - int ret = imemalign(memptr, alignment, size, sizeof(void *)); - JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr, - config_prof), false); - return (ret); -} - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) -je_aligned_alloc(size_t alignment, size_t size) -{ - void *ret; - int err; - - if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) { - ret = NULL; - set_errno(err); - } - JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof), - false); - return (ret); -} - -static void * -icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) -{ - void *p; - - if (tctx == NULL) - return (NULL); - if (usize <= SMALL_MAXCLASS) { - p = icalloc(tsd, LARGE_MINCLASS); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = icalloc(tsd, usize); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -icalloc_prof(tsd_t *tsd, size_t usize) -{ - void *p; - prof_tctx_t *tctx; - - tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) - p = icalloc_prof_sample(tsd, usize, tctx); - else - p = icalloc(tsd, usize); - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); - } - prof_malloc(p, usize, tctx); - - return (p); -} - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) -je_calloc(size_t num, size_t 
size) -{ - void *ret; - tsd_t *tsd; - size_t num_size; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - - if (unlikely(malloc_init())) { - num_size = 0; - ret = NULL; - goto label_return; - } - tsd = tsd_fetch(); - - num_size = num * size; - if (unlikely(num_size == 0)) { - if (num == 0 || size == 0) - num_size = 1; - else { - ret = NULL; - goto label_return; - } - /* - * Try to avoid division here. We know that it isn't possible to - * overflow during multiplication if neither operand uses any of the - * most significant half of the bits in a size_t. - */ - } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) << - 2))) && (num_size / size != num))) { - /* size_t overflow. */ - ret = NULL; - goto label_return; - } - - if (config_prof && opt_prof) { - usize = s2u(num_size); - if (unlikely(usize == 0)) { - ret = NULL; - goto label_return; - } - ret = icalloc_prof(tsd, usize); - } else { - if (config_stats || (config_valgrind && unlikely(in_valgrind))) - usize = s2u(num_size); - ret = icalloc(tsd, num_size); - } - -label_return: - if (unlikely(ret == NULL)) { - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write(": Error in calloc(): out of " - "memory\n"); - abort(); - } - set_errno(ENOMEM); - } - if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(ret, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; - } - UTRACE(0, num_size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true); - return (ret); -} - -static void * -irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, - prof_tctx_t *tctx) -{ - void *p; - - if (tctx == NULL) - return (NULL); - if (usize <= SMALL_MAXCLASS) { - p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) -{ - void *p; - bool prof_active; - prof_tctx_t *old_tctx, *tctx; - - prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(old_ptr); - tctx = prof_alloc_prep(tsd, usize, prof_active, true); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) - p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); - else - p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); - } - prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, - old_tctx); - - return (p); -} - -JEMALLOC_INLINE_C void -ifree(tsd_t *tsd, void *ptr, tcache_t *tcache) -{ - size_t usize; - UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); - - assert(ptr != NULL); - assert(malloc_initialized() || IS_INITIALIZER); - - if (config_prof && opt_prof) { - usize = isalloc(ptr, config_prof); - prof_free(tsd, ptr, usize); - } else if (config_stats || config_valgrind) - usize = isalloc(ptr, config_prof); - if (config_stats) - *tsd_thread_deallocatedp_get(tsd) += usize; - if (config_valgrind && unlikely(in_valgrind)) - rzsize = p2rz(ptr); - iqalloc(tsd, ptr, tcache); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); -} - -JEMALLOC_INLINE_C void -isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache) -{ - UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); - - assert(ptr != NULL); - assert(malloc_initialized() || IS_INITIALIZER); - - if (config_prof && opt_prof) - prof_free(tsd, ptr, usize); - if (config_stats) - *tsd_thread_deallocatedp_get(tsd) += usize; 
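The overflow test in je_calloc() above avoids the division in the common case: when neither operand touches the high half of size_t, the product cannot wrap. A standalone version of the same check (hypothetical helper name):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* True iff num * size overflows size_t. */
    static bool mul_overflows(size_t num, size_t size)
    {
        if (num == 0 || size == 0)
            return false;
        /* High-half mask, e.g. 0xffffffff00000000 on LP64. */
        if (((num | size) & (SIZE_MAX << (sizeof(size_t) << 2))) == 0)
            return false;    /* both fit in the low half: no overflow */
        return (num * size) / size != num;    /* divide only when needed */
    }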
- if (config_valgrind && unlikely(in_valgrind)) - rzsize = p2rz(ptr); - isqalloc(tsd, ptr, usize, tcache); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); -} - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ALLOC_SIZE(2) -je_realloc(void *ptr, size_t size) -{ - void *ret; - tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL); - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - size_t old_usize = 0; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - - if (unlikely(size == 0)) { - if (ptr != NULL) { - /* realloc(ptr, 0) is equivalent to free(ptr). */ - UTRACE(ptr, 0, 0); - tsd = tsd_fetch(); - ifree(tsd, ptr, tcache_get(tsd, false)); - return (NULL); - } - size = 1; - } - - if (likely(ptr != NULL)) { - assert(malloc_initialized() || IS_INITIALIZER); - malloc_thread_init(); - tsd = tsd_fetch(); - - old_usize = isalloc(ptr, config_prof); - if (config_valgrind && unlikely(in_valgrind)) - old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize); - - if (config_prof && opt_prof) { - usize = s2u(size); - ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd, - ptr, old_usize, usize); - } else { - if (config_stats || (config_valgrind && - unlikely(in_valgrind))) - usize = s2u(size); - ret = iralloc(tsd, ptr, old_usize, size, 0, false); - } - } else { - /* realloc(NULL, size) is equivalent to malloc(size). */ - ret = imalloc_body(size, &tsd, &usize); - } - - if (unlikely(ret == NULL)) { - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write(": Error in realloc(): " - "out of memory\n"); - abort(); - } - set_errno(ENOMEM); - } - if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(ret, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; - *tsd_thread_deallocatedp_get(tsd) += old_usize; - } - UTRACE(ptr, size, ret); - JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize, - old_rzsize, true, false); - return (ret); -} - -JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_free(void *ptr) -{ - - UTRACE(ptr, 0, 0); - if (likely(ptr != NULL)) { - tsd_t *tsd = tsd_fetch(); - ifree(tsd, ptr, tcache_get(tsd, false)); - } -} - -/* - * End malloc(3)-compatible functions. - */ -/******************************************************************************/ -/* - * Begin non-standard override functions. 
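In caller terms, the two special cases folded into je_realloc() above (the NULL result of realloc(p, 0) is this implementation's behavior, not a C guarantee):

    #include <stdlib.h>

    int main(void)
    {
        void *p = realloc(NULL, 64);    /* equivalent to malloc(64) */
        p = realloc(p, 0);              /* equivalent to free(p); NULL here */
        return (p == NULL) ? 0 : 1;
    }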
- */ - -#ifdef JEMALLOC_OVERRIDE_MEMALIGN -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) -je_memalign(size_t alignment, size_t size) -{ - void *ret JEMALLOC_CC_SILENCE_INIT(NULL); - if (unlikely(imemalign(&ret, alignment, size, 1) != 0)) - ret = NULL; - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); - return (ret); -} -#endif - -#ifdef JEMALLOC_OVERRIDE_VALLOC -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) -je_valloc(size_t size) -{ - void *ret JEMALLOC_CC_SILENCE_INIT(NULL); - if (unlikely(imemalign(&ret, PAGE, size, 1) != 0)) - ret = NULL; - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); - return (ret); -} -#endif - -/* - * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has - * #define je_malloc malloc - */ -#define malloc_is_malloc 1 -#define is_malloc_(a) malloc_is_ ## a -#define is_malloc(a) is_malloc_(a) - -#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)) -/* - * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible - * to inconsistently reference libc's malloc(3)-compatible functions - * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). - * - * These definitions interpose hooks in glibc. The functions are actually - * passed an extra argument for the caller return address, which will be - * ignored. - */ -JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; -JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; -JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; -# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK -JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = - je_memalign; -# endif -#endif - -/* - * End non-standard override functions. - */ -/******************************************************************************/ -/* - * Begin non-standard functions. 
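The hook exports above exist because of RTLD_DEEPBIND: a library loaded that way resolves malloc/free within its own dependency chain first and can silently end up on libc's allocator while the rest of the process uses this one. An illustrative trigger (real glibc flag, hypothetical library name):

    #define _GNU_SOURCE    /* RTLD_DEEPBIND is a glibc extension */
    #include <dlfcn.h>

    static void *load_plugin(void)
    {
        /* plugin.so now prefers its own/libc symbols; the __malloc_hook
         * family above keeps glibc-internal callers on jemalloc anyway. */
        return dlopen("plugin.so", RTLD_NOW | RTLD_DEEPBIND);
    }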
- */ - -JEMALLOC_ALWAYS_INLINE_C bool -imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize, - size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) -{ - - if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) { - *alignment = 0; - *usize = s2u(size); - } else { - *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); - *usize = sa2u(size, *alignment); - } - assert(*usize != 0); - *zero = MALLOCX_ZERO_GET(flags); - if ((flags & MALLOCX_TCACHE_MASK) != 0) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) - *tcache = NULL; - else - *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } else - *tcache = tcache_get(tsd, true); - if ((flags & MALLOCX_ARENA_MASK) != 0) { - unsigned arena_ind = MALLOCX_ARENA_GET(flags); - *arena = arena_get(tsd, arena_ind, true, true); - if (unlikely(*arena == NULL)) - return (true); - } else - *arena = NULL; - return (false); -} - -JEMALLOC_ALWAYS_INLINE_C bool -imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, - size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena) -{ - - if (likely(flags == 0)) { - *usize = s2u(size); - assert(*usize != 0); - *alignment = 0; - *zero = false; - *tcache = tcache_get(tsd, true); - *arena = NULL; - return (false); - } else { - return (imallocx_flags_decode_hard(tsd, size, flags, usize, - alignment, zero, tcache, arena)); - } -} - -JEMALLOC_ALWAYS_INLINE_C void * -imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena) -{ - - if (unlikely(alignment != 0)) - return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); - if (unlikely(zero)) - return (icalloct(tsd, usize, tcache, arena)); - return (imalloct(tsd, usize, tcache, arena)); -} - -static void * -imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero, - tcache_t *tcache, arena_t *arena) -{ - void *p; - - if (usize <= SMALL_MAXCLASS) { - assert(((alignment == 0) ? 
s2u(LARGE_MINCLASS) : - sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS); - p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache, - arena); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize) -{ - void *p; - size_t alignment; - bool zero; - tcache_t *tcache; - arena_t *arena; - prof_tctx_t *tctx; - - if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment, - &zero, &tcache, &arena))) - return (NULL); - tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true); - if (likely((uintptr_t)tctx == (uintptr_t)1U)) - p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena); - else if ((uintptr_t)tctx > (uintptr_t)1U) { - p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache, - arena); - } else - p = NULL; - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); - } - prof_malloc(p, *usize, tctx); - - assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize) -{ - void *p; - size_t alignment; - bool zero; - tcache_t *tcache; - arena_t *arena; - - if (likely(flags == 0)) { - if (config_stats || (config_valgrind && unlikely(in_valgrind))) - *usize = s2u(size); - return (imalloc(tsd, size)); - } - - if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize, - &alignment, &zero, &tcache, &arena))) - return (NULL); - p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena); - assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); - return (p); -} - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) -je_mallocx(size_t size, int flags) -{ - tsd_t *tsd; - void *p; - size_t usize; - - assert(size != 0); - - if (unlikely(malloc_init())) - goto label_oom; - tsd = tsd_fetch(); - - if (config_prof && opt_prof) - p = imallocx_prof(tsd, size, flags, &usize); - else - p = imallocx_no_prof(tsd, size, flags, &usize); - if (unlikely(p == NULL)) - goto label_oom; - - if (config_stats) { - assert(usize == isalloc(p, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; - } - UTRACE(0, size, p); - JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags)); - return (p); -label_oom: - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write(": Error in mallocx(): out of memory\n"); - abort(); - } - UTRACE(0, size, 0); - return (NULL); -} - -static void * -irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, - size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, - prof_tctx_t *tctx) -{ - void *p; - - if (tctx == NULL) - return (NULL); - if (usize <= SMALL_MAXCLASS) { - p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment, - zero, tcache, arena); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else { - p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero, - tcache, arena); - } - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, - size_t alignment, size_t *usize, bool zero, tcache_t *tcache, - arena_t *arena) -{ - void *p; - bool prof_active; - prof_tctx_t *old_tctx, *tctx; - - prof_active = prof_active_get_unlocked(); - old_tctx = 
prof_tctx_get(old_ptr); - tctx = prof_alloc_prep(tsd, *usize, prof_active, true); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { - p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize, - alignment, zero, tcache, arena, tctx); - } else { - p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero, - tcache, arena); - } - if (unlikely(p == NULL)) { - prof_alloc_rollback(tsd, tctx, true); - return (NULL); - } - - if (p == old_ptr && alignment != 0) { - /* - * The allocation did not move, so it is possible that the size - * class is smaller than would guarantee the requested - * alignment, and that the alignment constraint was - * serendipitously satisfied. Additionally, old_usize may not - * be the same as the current usize because of in-place large - * reallocation. Therefore, query the actual value of usize. - */ - *usize = isalloc(p, config_prof); - } - prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr, - old_usize, old_tctx); - - return (p); -} - -JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN -void JEMALLOC_NOTHROW * -JEMALLOC_ALLOC_SIZE(2) -je_rallocx(void *ptr, size_t size, int flags) -{ - void *p; - tsd_t *tsd; - size_t usize; - size_t old_usize; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - size_t alignment = MALLOCX_ALIGN_GET(flags); - bool zero = flags & MALLOCX_ZERO; - arena_t *arena; - tcache_t *tcache; - - assert(ptr != NULL); - assert(size != 0); - assert(malloc_initialized() || IS_INITIALIZER); - malloc_thread_init(); - tsd = tsd_fetch(); - - if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { - unsigned arena_ind = MALLOCX_ARENA_GET(flags); - arena = arena_get(tsd, arena_ind, true, true); - if (unlikely(arena == NULL)) - goto label_oom; - } else - arena = NULL; - - if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) - tcache = NULL; - else - tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } else - tcache = tcache_get(tsd, true); - - old_usize = isalloc(ptr, config_prof); - if (config_valgrind && unlikely(in_valgrind)) - old_rzsize = u2rz(old_usize); - - if (config_prof && opt_prof) { - usize = (alignment == 0) ? 
s2u(size) : sa2u(size, alignment); - assert(usize != 0); - p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, - zero, tcache, arena); - if (unlikely(p == NULL)) - goto label_oom; - } else { - p = iralloct(tsd, ptr, old_usize, size, alignment, zero, - tcache, arena); - if (unlikely(p == NULL)) - goto label_oom; - if (config_stats || (config_valgrind && unlikely(in_valgrind))) - usize = isalloc(p, config_prof); - } - assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); - - if (config_stats) { - *tsd_thread_allocatedp_get(tsd) += usize; - *tsd_thread_deallocatedp_get(tsd) += old_usize; - } - UTRACE(ptr, size, p); - JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize, - old_rzsize, false, zero); - return (p); -label_oom: - if (config_xmalloc && unlikely(opt_xmalloc)) { - malloc_write(": Error in rallocx(): out of memory\n"); - abort(); - } - UTRACE(ptr, size, 0); - return (NULL); -} - -JEMALLOC_ALWAYS_INLINE_C size_t -ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra, - size_t alignment, bool zero) -{ - size_t usize; - - if (ixalloc(ptr, old_usize, size, extra, alignment, zero)) - return (old_usize); - usize = isalloc(ptr, config_prof); - - return (usize); -} - -static size_t -ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra, - size_t alignment, bool zero, prof_tctx_t *tctx) -{ - size_t usize; - - if (tctx == NULL) - return (old_usize); - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, zero); - - return (usize); -} - -JEMALLOC_ALWAYS_INLINE_C size_t -ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, - size_t extra, size_t alignment, bool zero) -{ - size_t usize_max, usize; - bool prof_active; - prof_tctx_t *old_tctx, *tctx; - - prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(ptr); - /* - * usize isn't knowable before ixalloc() returns when extra is non-zero. - * Therefore, compute its maximum possible value and use that in - * prof_alloc_prep() to decide whether to capture a backtrace. - * prof_realloc() will use the actual usize to decide whether to sample. - */ - usize_max = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra, - alignment); - assert(usize_max != 0); - tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); - if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { - usize = ixallocx_prof_sample(ptr, old_usize, size, extra, - alignment, zero, tctx); - } else { - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, - zero); - } - if (usize == old_usize) { - prof_alloc_rollback(tsd, tctx, false); - return (usize); - } - prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, - old_tctx); - - return (usize); -} - -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW -je_xallocx(void *ptr, size_t size, size_t extra, int flags) -{ - tsd_t *tsd; - size_t usize, old_usize; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - size_t alignment = MALLOCX_ALIGN_GET(flags); - bool zero = flags & MALLOCX_ZERO; - - assert(ptr != NULL); - assert(size != 0); - assert(SIZE_T_MAX - size >= extra); - assert(malloc_initialized() || IS_INITIALIZER); - malloc_thread_init(); - tsd = tsd_fetch(); - - old_usize = isalloc(ptr, config_prof); - - /* Clamp extra if necessary to avoid (size + extra) overflow. */ - if (unlikely(size + extra > HUGE_MAXCLASS)) { - /* Check for size overflow. 
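je_xallocx(), concluded just below, resizes strictly in place and returns the resulting real size, so callers detect failure by comparing against the request. The usual pattern under the jemalloc 4 API (unprefixed public names assumed):

    #include <jemalloc/jemalloc.h>

    static void *grow_in_place_or_move(void *p, size_t want)
    {
        if (xallocx(p, want, 0, 0) >= want)
            return p;                  /* grown in place */
        return rallocx(p, want, 0);    /* moving fallback; NULL on OOM */
    }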
*/ - if (unlikely(size > HUGE_MAXCLASS)) { - usize = old_usize; - goto label_not_resized; - } - extra = HUGE_MAXCLASS - size; - } - - if (config_valgrind && unlikely(in_valgrind)) - old_rzsize = u2rz(old_usize); - - if (config_prof && opt_prof) { - usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, - alignment, zero); - } else { - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, - zero); - } - if (unlikely(usize == old_usize)) - goto label_not_resized; - - if (config_stats) { - *tsd_thread_allocatedp_get(tsd) += usize; - *tsd_thread_deallocatedp_get(tsd) += old_usize; - } - JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize, - old_rzsize, false, zero); -label_not_resized: - UTRACE(ptr, size, ptr); - return (usize); -} - -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW -JEMALLOC_ATTR(pure) -je_sallocx(const void *ptr, int flags) -{ - size_t usize; - - assert(malloc_initialized() || IS_INITIALIZER); - malloc_thread_init(); - - if (config_ivsalloc) - usize = ivsalloc(ptr, config_prof); - else - usize = isalloc(ptr, config_prof); - - return (usize); -} - -JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_dallocx(void *ptr, int flags) -{ - tsd_t *tsd; - tcache_t *tcache; - - assert(ptr != NULL); - assert(malloc_initialized() || IS_INITIALIZER); - - tsd = tsd_fetch(); - if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) - tcache = NULL; - else - tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } else - tcache = tcache_get(tsd, false); - - UTRACE(ptr, 0, 0); - ifree(tsd_fetch(), ptr, tcache); -} - -JEMALLOC_ALWAYS_INLINE_C size_t -inallocx(size_t size, int flags) -{ - size_t usize; - - if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) - usize = s2u(size); - else - usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); - assert(usize != 0); - return (usize); -} - -JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_sdallocx(void *ptr, size_t size, int flags) -{ - tsd_t *tsd; - tcache_t *tcache; - size_t usize; - - assert(ptr != NULL); - assert(malloc_initialized() || IS_INITIALIZER); - usize = inallocx(size, flags); - assert(usize == isalloc(ptr, config_prof)); - - tsd = tsd_fetch(); - if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { - if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) - tcache = NULL; - else - tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); - } else - tcache = tcache_get(tsd, false); - - UTRACE(ptr, 0, 0); - isfree(tsd, ptr, usize, tcache); -} - -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW -JEMALLOC_ATTR(pure) -je_nallocx(size_t size, int flags) -{ - - assert(size != 0); - - if (unlikely(malloc_init())) - return (0); - - return (inallocx(size, flags)); -} - -JEMALLOC_EXPORT int JEMALLOC_NOTHROW -je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) -{ - - if (unlikely(malloc_init())) - return (EAGAIN); - - return (ctl_byname(name, oldp, oldlenp, newp, newlen)); -} - -JEMALLOC_EXPORT int JEMALLOC_NOTHROW -je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) -{ - - if (unlikely(malloc_init())) - return (EAGAIN); - - return (ctl_nametomib(name, mibp, miblenp)); -} - -JEMALLOC_EXPORT int JEMALLOC_NOTHROW -je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - - if (unlikely(malloc_init())) - return (EAGAIN); - - return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); -} - -JEMALLOC_EXPORT void JEMALLOC_NOTHROW -je_malloc_stats_print(void (*write_cb)(void *, const char *), void 
*cbopaque, - const char *opts) -{ - - stats_print(write_cb, cbopaque, opts); -} - -JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW -je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) -{ - size_t ret; - - assert(malloc_initialized() || IS_INITIALIZER); - malloc_thread_init(); - - if (config_ivsalloc) - ret = ivsalloc(ptr, config_prof); - else - ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof); - - return (ret); -} - -/* - * End non-standard functions. - */ -/******************************************************************************/ -/* - * The following functions are used by threading libraries for protection of - * malloc during fork(). - */ - -/* - * If an application creates a thread before doing any allocation in the main - * thread, then calls fork(2) in the main thread followed by memory allocation - * in the child process, a race can occur that results in deadlock within the - * child: the main thread may have forked while the created thread had - * partially initialized the allocator. Ordinarily jemalloc prevents - * fork/malloc races via the following functions it registers during - * initialization using pthread_atfork(), but of course that does no good if - * the allocator isn't fully initialized at fork time. The following library - * constructor is a partial solution to this problem. It may still be possible - * to trigger the deadlock described above, but doing so would involve forking - * via a library constructor that runs before jemalloc's runs. - */ -JEMALLOC_ATTR(constructor) -static void -jemalloc_constructor(void) -{ - - malloc_init(); -} - -#ifndef JEMALLOC_MUTEX_INIT_CB -void -jemalloc_prefork(void) -#else -JEMALLOC_EXPORT void -_malloc_prefork(void) -#endif -{ - unsigned i; - -#ifdef JEMALLOC_MUTEX_INIT_CB - if (!malloc_initialized()) - return; -#endif - assert(malloc_initialized()); - - /* Acquire all mutexes in a safe order. */ - ctl_prefork(); - prof_prefork(); - malloc_mutex_prefork(&arenas_lock); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_prefork(arenas[i]); - } - chunk_prefork(); - base_prefork(); -} - -#ifndef JEMALLOC_MUTEX_INIT_CB -void -jemalloc_postfork_parent(void) -#else -JEMALLOC_EXPORT void -_malloc_postfork(void) -#endif -{ - unsigned i; - -#ifdef JEMALLOC_MUTEX_INIT_CB - if (!malloc_initialized()) - return; -#endif - assert(malloc_initialized()); - - /* Release all mutexes, now that fork() has completed. */ - base_postfork_parent(); - chunk_postfork_parent(); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_postfork_parent(arenas[i]); - } - malloc_mutex_postfork_parent(&arenas_lock); - prof_postfork_parent(); - ctl_postfork_parent(); -} - -void -jemalloc_postfork_child(void) -{ - unsigned i; - - assert(malloc_initialized()); - - /* Release all mutexes, now that fork() has completed. */ - base_postfork_child(); - chunk_postfork_child(); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_postfork_child(arenas[i]); - } - malloc_mutex_postfork_child(&arenas_lock); - prof_postfork_child(); - ctl_postfork_child(); -} - -/******************************************************************************/ - -/* Helps the application decide if a pointer is worth re-allocating in order to reduce fragmentation. - * returns 0 if the allocation is in the currently active run, - * or when it is not causing any frag issue (large or huge bin) - * returns the bin utilization and run utilization both in fixed point 16:16. 
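
A sketch of how a caller can act on the two utilization figures (this roughly mirrors the check Redis's active defragmentation performs; treat the policy as illustrative rather than normative):

/* Returns nonzero if ptr looks worth moving to a denser run. */
static int
worth_moving(void *ptr)
{
	int bin_util, run_util;

	if (!je_get_defrag_hint(ptr, &bin_util, &run_util))
		return (0);
	/*
	 * 16:16 fixed point: 1<<16 means 100% full.  A full run is
	 * never worth breaking up; a run emptier than the bin average
	 * is keeping sparse memory alive, so re-allocating out of it
	 * packs the bin more tightly.
	 */
	return (run_util <= bin_util && run_util < (1 << 16));
}

As the comment goes on to say, such a re-allocation should pass MALLOCX_TCACHE_NONE so the new copy is carved from the allocator's currently preferred run rather than recycled from the thread cache.
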
- * If the application decides to re-allocate it should use MALLOCX_TCACHE_NONE when doing so. */
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW
-je_get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
-	int defrag = 0;
-	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (likely(chunk != ptr)) { /* indication that this is not a HUGE alloc */
-		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-		size_t mapbits = arena_mapbits_get(chunk, pageind);
-		if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { /* indication that this is not a LARGE alloc */
-			arena_t *arena = extent_node_arena_get(&chunk->node);
-			size_t rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
-			arena_run_t *run = &arena_miscelm_get(chunk, rpages_ind)->run;
-			arena_bin_t *bin = &arena->bins[run->binind];
-			malloc_mutex_lock(&bin->lock);
-			/* Runs sharing a chunk with bin->runcur are likely to become the next runcur, so they are not worth moving out of. */
-			if (chunk != (arena_chunk_t *)CHUNK_ADDR2BASE(bin->runcur)) {
-				arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
-				size_t availregs = bin_info->nregs * bin->stats.curruns;
-				*bin_util = (bin->stats.curregs<<16) / availregs;
-				*run_util = ((bin_info->nregs - run->nfree)<<16) / bin_info->nregs;
-				defrag = 1;
-			}
-			malloc_mutex_unlock(&bin->lock);
-		}
-	}
-	return defrag;
-}
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/mutex.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/mutex.c
deleted file mode 100644
index 2d47af9..0000000
--- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/mutex.c
+++ /dev/null
@@ -1,153 +0,0 @@
-#define JEMALLOC_MUTEX_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-#include <dlfcn.h>
-#endif
-
-#ifndef _CRT_SPINCOUNT
-#define _CRT_SPINCOUNT 4000
-#endif
-
-/******************************************************************************/
-/* Data. */
-
-#ifdef JEMALLOC_LAZY_LOCK
-bool isthreaded = false;
-#endif
-#ifdef JEMALLOC_MUTEX_INIT_CB
-static bool postpone_init = true;
-static malloc_mutex_t *postponed_mutexes = NULL;
-#endif
-
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-static void pthread_create_once(void);
-#endif
-
-/******************************************************************************/
-/*
- * We intercept pthread_create() calls in order to toggle isthreaded if the
- * process goes multi-threaded.
- */
-
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
-    void *(*)(void *), void *__restrict);
-
-static void
-pthread_create_once(void)
-{
-
-	pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
-	if (pthread_create_fptr == NULL) {
-		malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
-		    "\"pthread_create\")\n");
-		abort();
-	}
-
-	isthreaded = true;
-}

-JEMALLOC_EXPORT int
-pthread_create(pthread_t *__restrict thread,
-    const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
-    void *__restrict arg)
-{
-	static pthread_once_t once_control = PTHREAD_ONCE_INIT;
-
-	pthread_once(&once_control, pthread_create_once);
-
-	return (pthread_create_fptr(thread, attr, start_routine, arg));
-}
-#endif
-
-/******************************************************************************/
-
-#ifdef JEMALLOC_MUTEX_INIT_CB
-JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
-    void *(calloc_cb)(size_t, size_t));
-#endif
-
-bool
-malloc_mutex_init(malloc_mutex_t *mutex)
-{
-
-#ifdef _WIN32
-#  if _WIN32_WINNT >= 0x0600
-	InitializeSRWLock(&mutex->lock);
-#  else
-	if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
-	    _CRT_SPINCOUNT))
-		return (true);
-#  endif
-#elif (defined(JEMALLOC_OSSPIN))
-	mutex->lock = 0;
-#elif (defined(JEMALLOC_MUTEX_INIT_CB))
-	if (postpone_init) {
-		mutex->postponed_next = postponed_mutexes;
-		postponed_mutexes = mutex;
-	} else {
-		if (_pthread_mutex_init_calloc_cb(&mutex->lock,
-		    bootstrap_calloc) != 0)
-			return (true);
-	}
-#else
-	pthread_mutexattr_t attr;
-
-	if (pthread_mutexattr_init(&attr) != 0)
-		return (true);
-	pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
-	if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
-		pthread_mutexattr_destroy(&attr);
-		return (true);
-	}
-	pthread_mutexattr_destroy(&attr);
-#endif
-	return (false);
-}
-
-void
-malloc_mutex_prefork(malloc_mutex_t *mutex)
-{
-
-	malloc_mutex_lock(mutex);
-}
-
-void
-malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
-{
-
-	malloc_mutex_unlock(mutex);
-}
-
-void
-malloc_mutex_postfork_child(malloc_mutex_t *mutex)
-{
-
-#ifdef JEMALLOC_MUTEX_INIT_CB
-	malloc_mutex_unlock(mutex);
-#else
-	if (malloc_mutex_init(mutex)) {
-		malloc_printf("<jemalloc>: Error re-initializing mutex in "
-		    "child\n");
-		if (opt_abort)
-			abort();
-	}
-#endif
-}
-
-bool
-mutex_boot(void)
-{
-
-#ifdef JEMALLOC_MUTEX_INIT_CB
-	postpone_init = false;
-	while (postponed_mutexes != NULL) {
-		if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
-		    bootstrap_calloc) != 0)
-			return (true);
-		postponed_mutexes = postponed_mutexes->postponed_next;
-	}
-#endif
-	return (false);
-}
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/pages.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/pages.c
deleted file mode 100644
index 83a167f..0000000
--- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/pages.c
+++ /dev/null
@@ -1,173 +0,0 @@
-#define JEMALLOC_PAGES_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-/******************************************************************************/
-
-void *
-pages_map(void *addr, size_t size)
-{
-	void *ret;
-
-	assert(size != 0);
-
-#ifdef _WIN32
-	/*
-	 * If VirtualAlloc can't allocate at the given address when one is
-	 * given, it fails and returns NULL.
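
The non-Windows branch of pages_map() below implements the same all-or-nothing contract by hand, because mmap() treats addr purely as a hint and MAP_FIXED would silently replace existing mappings. The pattern in isolation, as a sketch for a Linux/BSD target:

#include <stddef.h>
#include <sys/mman.h>

static void *
map_at_hint(void *addr, size_t size)
{
	void *p = mmap(addr, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return (NULL);
	if (addr != NULL && p != addr) {
		/* Got memory, but not where we asked; back out. */
		munmap(p, size);
		return (NULL);
	}
	return (p);
}
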
- */
-	ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
-	    PAGE_READWRITE);
-#else
-	/*
-	 * We don't use MAP_FIXED here, because it can cause the *replacement*
-	 * of existing mappings, and we only want to create new mappings.
-	 */
-	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-	    -1, 0);
-	assert(ret != NULL);
-
-	if (ret == MAP_FAILED)
-		ret = NULL;
-	else if (addr != NULL && ret != addr) {
-		/*
-		 * We succeeded in mapping memory, but not in the right place.
-		 */
-		pages_unmap(ret, size);
-		ret = NULL;
-	}
-#endif
-	assert(ret == NULL || (addr == NULL && ret != addr)
-	    || (addr != NULL && ret == addr));
-	return (ret);
-}
-
-void
-pages_unmap(void *addr, size_t size)
-{
-
-#ifdef _WIN32
-	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
-#else
-	if (munmap(addr, size) == -1)
-#endif
-	{
-		char buf[BUFERROR_BUF];
-
-		buferror(get_errno(), buf, sizeof(buf));
-		malloc_printf("<jemalloc>: Error in "
-#ifdef _WIN32
-		    "VirtualFree"
-#else
-		    "munmap"
-#endif
-		    "(): %s\n", buf);
-		if (opt_abort)
-			abort();
-	}
-}
-
-void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
-{
-	void *ret = (void *)((uintptr_t)addr + leadsize);
-
-	assert(alloc_size >= leadsize + size);
-#ifdef _WIN32
-	{
-		void *new_addr;
-
-		pages_unmap(addr, alloc_size);
-		new_addr = pages_map(ret, size);
-		if (new_addr == ret)
-			return (ret);
-		if (new_addr)
-			pages_unmap(new_addr, size);
-		return (NULL);
-	}
-#else
-	{
-		size_t trailsize = alloc_size - leadsize - size;
-
-		if (leadsize != 0)
-			pages_unmap(addr, leadsize);
-		if (trailsize != 0)
-			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
-		return (ret);
-	}
-#endif
-}
-
-static bool
-pages_commit_impl(void *addr, size_t size, bool commit)
-{
-
-#ifndef _WIN32
-	/*
-	 * The following decommit/commit implementation is functional, but
-	 * always disabled because it doesn't add value beyond improved
-	 * debugging (at the cost of extra system calls) on systems that
-	 * overcommit.
-	 */
-	if (false) {
-		int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
-		void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
-		    MAP_FIXED, -1, 0);
-		if (result == MAP_FAILED)
-			return (true);
-		if (result != addr) {
-			/*
-			 * We succeeded in mapping memory, but not in the right
-			 * place.
-			 */
-			pages_unmap(result, size);
-			return (true);
-		}
-		return (false);
-	}
-#endif
-	return (true);
-}
-
-bool
-pages_commit(void *addr, size_t size)
-{
-
-	return (pages_commit_impl(addr, size, true));
-}
-
-bool
-pages_decommit(void *addr, size_t size)
-{
-
-	return (pages_commit_impl(addr, size, false));
-}
-
-bool
-pages_purge(void *addr, size_t size)
-{
-	bool unzeroed;
-
-#ifdef _WIN32
-	VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
-	unzeroed = true;
-#elif defined(JEMALLOC_HAVE_MADVISE)
-#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
-#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
-#    define JEMALLOC_MADV_ZEROS true
-#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
-#    define JEMALLOC_MADV_PURGE MADV_FREE
-#    define JEMALLOC_MADV_ZEROS false
-#  else
-#    error "No madvise(2) flag defined for purging unused dirty pages."
-#  endif
-	int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
-	unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
-#  undef JEMALLOC_MADV_PURGE
-#  undef JEMALLOC_MADV_ZEROS
-#else
-	/* Last resort no-op.
*/ - unzeroed = true; -#endif - return (unzeroed); -} - diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/rtree.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/rtree.c deleted file mode 100644 index af0d97e..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/rtree.c +++ /dev/null @@ -1,127 +0,0 @@ -#define JEMALLOC_RTREE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -static unsigned -hmin(unsigned ha, unsigned hb) -{ - - return (ha < hb ? ha : hb); -} - -/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */ -bool -rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, - rtree_node_dalloc_t *dalloc) -{ - unsigned bits_in_leaf, height, i; - - assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3)); - - bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL - : (bits % RTREE_BITS_PER_LEVEL); - if (bits > bits_in_leaf) { - height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL; - if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits) - height++; - } else - height = 1; - assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits); - - rtree->alloc = alloc; - rtree->dalloc = dalloc; - rtree->height = height; - - /* Root level. */ - rtree->levels[0].subtree = NULL; - rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL : - bits_in_leaf; - rtree->levels[0].cumbits = rtree->levels[0].bits; - /* Interior levels. */ - for (i = 1; i < height-1; i++) { - rtree->levels[i].subtree = NULL; - rtree->levels[i].bits = RTREE_BITS_PER_LEVEL; - rtree->levels[i].cumbits = rtree->levels[i-1].cumbits + - RTREE_BITS_PER_LEVEL; - } - /* Leaf level. */ - if (height > 1) { - rtree->levels[height-1].subtree = NULL; - rtree->levels[height-1].bits = bits_in_leaf; - rtree->levels[height-1].cumbits = bits; - } - - /* Compute lookup table to be used by rtree_start_level(). */ - for (i = 0; i < RTREE_HEIGHT_MAX; i++) { - rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height - - 1); - } - - return (false); -} - -static void -rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level) -{ - - if (level + 1 < rtree->height) { - size_t nchildren, i; - - nchildren = ZU(1) << rtree->levels[level].bits; - for (i = 0; i < nchildren; i++) { - rtree_node_elm_t *child = node[i].child; - if (child != NULL) - rtree_delete_subtree(rtree, child, level + 1); - } - } - rtree->dalloc(node); -} - -void -rtree_delete(rtree_t *rtree) -{ - unsigned i; - - for (i = 0; i < rtree->height; i++) { - rtree_node_elm_t *subtree = rtree->levels[i].subtree; - if (subtree != NULL) - rtree_delete_subtree(rtree, subtree, i); - } -} - -static rtree_node_elm_t * -rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp) -{ - rtree_node_elm_t *node; - - if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) { - /* - * Another thread is already in the process of initializing. - * Spin-wait until initialization is complete. 
- */ - do { - CPU_SPINWAIT; - node = atomic_read_p((void **)elmp); - } while (node == RTREE_NODE_INITIALIZING); - } else { - node = rtree->alloc(ZU(1) << rtree->levels[level].bits); - if (node == NULL) - return (NULL); - atomic_write_p((void **)elmp, node); - } - - return (node); -} - -rtree_node_elm_t * -rtree_subtree_read_hard(rtree_t *rtree, unsigned level) -{ - - return (rtree_node_init(rtree, level, &rtree->levels[level].subtree)); -} - -rtree_node_elm_t * -rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level) -{ - - return (rtree_node_init(rtree, level, &elm->child)); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/stats.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/stats.c deleted file mode 100644 index 154c3e7..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/stats.c +++ /dev/null @@ -1,640 +0,0 @@ -#define JEMALLOC_STATS_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -#define CTL_GET(n, v, t) do { \ - size_t sz = sizeof(t); \ - xmallctl(n, v, &sz, NULL, 0); \ -} while (0) - -#define CTL_M2_GET(n, i, v, t) do { \ - size_t mib[6]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ - size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ - mib[2] = (i); \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ -} while (0) - -#define CTL_M2_M4_GET(n, i, j, v, t) do { \ - size_t mib[6]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ - size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ - mib[2] = (i); \ - mib[4] = (j); \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ -} while (0) - -/******************************************************************************/ -/* Data. */ - -bool opt_stats_print = false; - -size_t stats_cactive = 0; - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static void stats_arena_bins_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_hchunks_print( - void (*write_cb)(void *, const char *), void *cbopaque, unsigned i); -static void stats_arena_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i, bool bins, bool large, bool huge); - -/******************************************************************************/ - -static void -stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i) -{ - size_t page; - bool config_tcache, in_gap; - unsigned nbins, j; - - CTL_GET("arenas.page", &page, size_t); - - CTL_GET("config.tcache", &config_tcache, bool); - if (config_tcache) { - malloc_cprintf(write_cb, cbopaque, - "bins: size ind allocated nmalloc" - " ndalloc nrequests curregs curruns regs" - " pgs util nfills nflushes newruns" - " reruns\n"); - } else { - malloc_cprintf(write_cb, cbopaque, - "bins: size ind allocated nmalloc" - " ndalloc nrequests curregs curruns regs" - " pgs util newruns reruns\n"); - } - CTL_GET("arenas.nbins", &nbins, unsigned); - for (j = 0, in_gap = false; j < nbins; j++) { - uint64_t nruns; - - CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns, - uint64_t); - if (nruns == 0) - in_gap = true; - else { - size_t reg_size, run_size, curregs, availregs, milli; - size_t curruns; - uint32_t nregs; - uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; - uint64_t reruns; - char util[6]; /* "x.yyy". 
*/ - - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - in_gap = false; - } - CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); - CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); - CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, - size_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, - &nmalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, - &ndalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, - &curregs, size_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, - &nrequests, uint64_t); - if (config_tcache) { - CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, - j, &nfills, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", - i, j, &nflushes, uint64_t); - } - CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, - &reruns, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, - &curruns, size_t); - - availregs = nregs * curruns; - milli = (availregs != 0) ? (1000 * curregs) / availregs - : 1000; - assert(milli <= 1000); - if (milli < 10) { - malloc_snprintf(util, sizeof(util), - "0.00%zu", milli); - } else if (milli < 100) { - malloc_snprintf(util, sizeof(util), "0.0%zu", - milli); - } else if (milli < 1000) { - malloc_snprintf(util, sizeof(util), "0.%zu", - milli); - } else - malloc_snprintf(util, sizeof(util), "1"); - - if (config_tcache) { - malloc_cprintf(write_cb, cbopaque, - "%20zu %3u %12zu %12"FMTu64 - " %12"FMTu64" %12"FMTu64" %12zu" - " %12zu %4u %3zu %-5s %12"FMTu64 - " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n", - reg_size, j, curregs * reg_size, nmalloc, - ndalloc, nrequests, curregs, curruns, nregs, - run_size / page, util, nfills, nflushes, - nruns, reruns); - } else { - malloc_cprintf(write_cb, cbopaque, - "%20zu %3u %12zu %12"FMTu64 - " %12"FMTu64" %12"FMTu64" %12zu" - " %12zu %4u %3zu %-5s %12"FMTu64 - " %12"FMTu64"\n", - reg_size, j, curregs * reg_size, nmalloc, - ndalloc, nrequests, curregs, curruns, nregs, - run_size / page, util, nruns, reruns); - } - } - } - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - } -} - -static void -stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i) -{ - unsigned nbins, nlruns, j; - bool in_gap; - - malloc_cprintf(write_cb, cbopaque, - "large: size ind allocated nmalloc ndalloc" - " nrequests curruns\n"); - CTL_GET("arenas.nbins", &nbins, unsigned); - CTL_GET("arenas.nlruns", &nlruns, unsigned); - for (j = 0, in_gap = false; j < nlruns; j++) { - uint64_t nmalloc, ndalloc, nrequests; - size_t run_size, curruns; - - CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc, - uint64_t); - CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc, - uint64_t); - CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j, - &nrequests, uint64_t); - if (nrequests == 0) - in_gap = true; - else { - CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t); - CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, - &curruns, size_t); - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - in_gap = false; - } - malloc_cprintf(write_cb, cbopaque, - "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64" %12zu\n", - run_size, nbins + j, curruns * run_size, nmalloc, - ndalloc, nrequests, curruns); - } - } - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - } -} - -static void -stats_arena_hchunks_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i) -{ - unsigned nbins, nlruns, nhchunks, j; - bool in_gap; - - malloc_cprintf(write_cb, 
cbopaque, - "huge: size ind allocated nmalloc ndalloc" - " nrequests curhchunks\n"); - CTL_GET("arenas.nbins", &nbins, unsigned); - CTL_GET("arenas.nlruns", &nlruns, unsigned); - CTL_GET("arenas.nhchunks", &nhchunks, unsigned); - for (j = 0, in_gap = false; j < nhchunks; j++) { - uint64_t nmalloc, ndalloc, nrequests; - size_t hchunk_size, curhchunks; - - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j, - &nmalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j, - &ndalloc, uint64_t); - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j, - &nrequests, uint64_t); - if (nrequests == 0) - in_gap = true; - else { - CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, - size_t); - CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, - j, &curhchunks, size_t); - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - in_gap = false; - } - malloc_cprintf(write_cb, cbopaque, - "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64" %12zu\n", - hchunk_size, nbins + nlruns + j, - curhchunks * hchunk_size, nmalloc, ndalloc, - nrequests, curhchunks); - } - } - if (in_gap) { - malloc_cprintf(write_cb, cbopaque, - " ---\n"); - } -} - -static void -stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i, bool bins, bool large, bool huge) -{ - unsigned nthreads; - const char *dss; - ssize_t lg_dirty_mult; - size_t page, pactive, pdirty, mapped; - size_t metadata_mapped, metadata_allocated; - uint64_t npurge, nmadvise, purged; - size_t small_allocated; - uint64_t small_nmalloc, small_ndalloc, small_nrequests; - size_t large_allocated; - uint64_t large_nmalloc, large_ndalloc, large_nrequests; - size_t huge_allocated; - uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests; - - CTL_GET("arenas.page", &page, size_t); - - CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); - malloc_cprintf(write_cb, cbopaque, - "assigned threads: %u\n", nthreads); - CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); - malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n", - dss); - CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t); - if (lg_dirty_mult >= 0) { - malloc_cprintf(write_cb, cbopaque, - "min active:dirty page ratio: %u:1\n", - (1U << lg_dirty_mult)); - } else { - malloc_cprintf(write_cb, cbopaque, - "min active:dirty page ratio: N/A\n"); - } - CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); - CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); - CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t); - CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t); - CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "dirty pages: %zu:%zu active:dirty, %"FMTu64" sweep%s, %"FMTu64 - " madvise%s, %"FMTu64" purged\n", pactive, pdirty, npurge, npurge == - 1 ? "" : "s", nmadvise, nmadvise == 1 ? 
"" : "s", purged); - - malloc_cprintf(write_cb, cbopaque, - " allocated nmalloc ndalloc" - " nrequests\n"); - CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated, - size_t); - CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests, - uint64_t); - malloc_cprintf(write_cb, cbopaque, - "small: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - small_allocated, small_nmalloc, small_ndalloc, small_nrequests); - CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated, - size_t); - CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests, - uint64_t); - malloc_cprintf(write_cb, cbopaque, - "large: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - large_allocated, large_nmalloc, large_ndalloc, large_nrequests); - CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t); - CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t); - CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests, - uint64_t); - malloc_cprintf(write_cb, cbopaque, - "huge: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests); - malloc_cprintf(write_cb, cbopaque, - "total: %12zu %12"FMTu64" %12"FMTu64 - " %12"FMTu64"\n", - small_allocated + large_allocated + huge_allocated, - small_nmalloc + large_nmalloc + huge_nmalloc, - small_ndalloc + large_ndalloc + huge_ndalloc, - small_nrequests + large_nrequests + huge_nrequests); - malloc_cprintf(write_cb, cbopaque, - "active: %12zu\n", pactive * page); - CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t); - malloc_cprintf(write_cb, cbopaque, - "mapped: %12zu\n", mapped); - CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped, - size_t); - CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated, - size_t); - malloc_cprintf(write_cb, cbopaque, - "metadata: mapped: %zu, allocated: %zu\n", - metadata_mapped, metadata_allocated); - - if (bins) - stats_arena_bins_print(write_cb, cbopaque, i); - if (large) - stats_arena_lruns_print(write_cb, cbopaque, i); - if (huge) - stats_arena_hchunks_print(write_cb, cbopaque, i); -} - -void -stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ - int err; - uint64_t epoch; - size_t u64sz; - bool general = true; - bool merged = true; - bool unmerged = true; - bool bins = true; - bool large = true; - bool huge = true; - - /* - * Refresh stats, in case mallctl() was called by the application. - * - * Check for OOM here, since refreshing the ctl cache can trigger - * allocation. In practice, none of the subsequent mallctl()-related - * calls in this function will cause OOM if this one succeeds. 
- * */ - epoch = 1; - u64sz = sizeof(uint64_t); - err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t)); - if (err != 0) { - if (err == EAGAIN) { - malloc_write(": Memory allocation failure in " - "mallctl(\"epoch\", ...)\n"); - return; - } - malloc_write(": Failure in mallctl(\"epoch\", " - "...)\n"); - abort(); - } - - if (opts != NULL) { - unsigned i; - - for (i = 0; opts[i] != '\0'; i++) { - switch (opts[i]) { - case 'g': - general = false; - break; - case 'm': - merged = false; - break; - case 'a': - unmerged = false; - break; - case 'b': - bins = false; - break; - case 'l': - large = false; - break; - case 'h': - huge = false; - break; - default:; - } - } - } - - malloc_cprintf(write_cb, cbopaque, - "___ Begin jemalloc statistics ___\n"); - if (general) { - const char *cpv; - bool bv; - unsigned uv; - ssize_t ssv; - size_t sv, bsz, ssz, sssz, cpsz; - - bsz = sizeof(bool); - ssz = sizeof(size_t); - sssz = sizeof(ssize_t); - cpsz = sizeof(const char *); - - CTL_GET("version", &cpv, const char *); - malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); - CTL_GET("config.debug", &bv, bool); - malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", - bv ? "enabled" : "disabled"); - -#define OPT_WRITE_BOOL(n) \ - if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %s\n", bv ? "true" : "false"); \ - } -#define OPT_WRITE_BOOL_MUTABLE(n, m) { \ - bool bv2; \ - if (je_mallctl("opt."#n, &bv, &bsz, NULL, 0) == 0 && \ - je_mallctl(#m, &bv2, &bsz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %s ("#m": %s)\n", bv ? "true" \ - : "false", bv2 ? "true" : "false"); \ - } \ -} -#define OPT_WRITE_SIZE_T(n) \ - if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zu\n", sv); \ - } -#define OPT_WRITE_SSIZE_T(n) \ - if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zd\n", ssv); \ - } -#define OPT_WRITE_SSIZE_T_MUTABLE(n, m) { \ - ssize_t ssv2; \ - if (je_mallctl("opt."#n, &ssv, &sssz, NULL, 0) == 0 && \ - je_mallctl(#m, &ssv2, &sssz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zd ("#m": %zd)\n", \ - ssv, ssv2); \ - } \ -} -#define OPT_WRITE_CHAR_P(n) \ - if (je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0) == 0) { \ - malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": \"%s\"\n", cpv); \ - } - - malloc_cprintf(write_cb, cbopaque, - "Run-time option settings:\n"); - OPT_WRITE_BOOL(abort) - OPT_WRITE_SIZE_T(lg_chunk) - OPT_WRITE_CHAR_P(dss) - OPT_WRITE_SIZE_T(narenas) - OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, arenas.lg_dirty_mult) - OPT_WRITE_BOOL(stats_print) - OPT_WRITE_CHAR_P(junk) - OPT_WRITE_SIZE_T(quarantine) - OPT_WRITE_BOOL(redzone) - OPT_WRITE_BOOL(zero) - OPT_WRITE_BOOL(utrace) - OPT_WRITE_BOOL(valgrind) - OPT_WRITE_BOOL(xmalloc) - OPT_WRITE_BOOL(tcache) - OPT_WRITE_SSIZE_T(lg_tcache_max) - OPT_WRITE_BOOL(prof) - OPT_WRITE_CHAR_P(prof_prefix) - OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active) - OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, - prof.thread_active_init) - OPT_WRITE_SSIZE_T(lg_prof_sample) - OPT_WRITE_BOOL(prof_accum) - OPT_WRITE_SSIZE_T(lg_prof_interval) - OPT_WRITE_BOOL(prof_gdump) - OPT_WRITE_BOOL(prof_final) - OPT_WRITE_BOOL(prof_leak) - -#undef OPT_WRITE_BOOL -#undef OPT_WRITE_BOOL_MUTABLE -#undef OPT_WRITE_SIZE_T -#undef OPT_WRITE_SSIZE_T -#undef OPT_WRITE_CHAR_P - - malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus); - - 
CTL_GET("arenas.narenas", &uv, unsigned); - malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); - - malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n", - sizeof(void *)); - - CTL_GET("arenas.quantum", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", - sv); - - CTL_GET("arenas.page", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); - - CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t); - if (ssv >= 0) { - malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: %u:1\n", - (1U << ssv)); - } else { - malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: N/A\n"); - } - if (je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0) == 0) { - malloc_cprintf(write_cb, cbopaque, - "Maximum thread-cached size class: %zu\n", sv); - } - if (je_mallctl("opt.prof", &bv, &bsz, NULL, 0) == 0 && bv) { - CTL_GET("prof.lg_sample", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, - "Average profile sample interval: %"FMTu64 - " (2^%zu)\n", (((uint64_t)1U) << sv), sv); - - CTL_GET("opt.lg_prof_interval", &ssv, ssize_t); - if (ssv >= 0) { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: %"FMTu64 - " (2^%zd)\n", - (((uint64_t)1U) << ssv), ssv); - } else { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: N/A\n"); - } - } - CTL_GET("opt.lg_chunk", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, - "Chunk size: %zu (2^%zu)\n", (ZU(1) << sv), sv); - } - - if (config_stats) { - size_t *cactive; - size_t allocated, active, metadata, resident, mapped; - - CTL_GET("stats.cactive", &cactive, size_t *); - CTL_GET("stats.allocated", &allocated, size_t); - CTL_GET("stats.active", &active, size_t); - CTL_GET("stats.metadata", &metadata, size_t); - CTL_GET("stats.resident", &resident, size_t); - CTL_GET("stats.mapped", &mapped, size_t); - malloc_cprintf(write_cb, cbopaque, - "Allocated: %zu, active: %zu, metadata: %zu," - " resident: %zu, mapped: %zu\n", - allocated, active, metadata, resident, mapped); - malloc_cprintf(write_cb, cbopaque, - "Current active ceiling: %zu\n", - atomic_read_z(cactive)); - - if (merged) { - unsigned narenas; - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i, ninitialized; - - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); - for (i = ninitialized = 0; i < narenas; i++) { - if (initialized[i]) - ninitialized++; - } - - if (ninitialized > 1 || !unmerged) { - /* Print merged arena stats. */ - malloc_cprintf(write_cb, cbopaque, - "\nMerged arenas stats:\n"); - stats_arena_print(write_cb, cbopaque, - narenas, bins, large, huge); - } - } - } - - if (unmerged) { - unsigned narenas; - - /* Print stats for each arena. 
*/ - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i; - - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); - - for (i = 0; i < narenas; i++) { - if (initialized[i]) { - malloc_cprintf(write_cb, - cbopaque, - "\narenas[%u]:\n", i); - stats_arena_print(write_cb, - cbopaque, i, bins, large, - huge); - } - } - } - } - } - malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n"); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/tcache.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/tcache.c deleted file mode 100644 index fdafd0c..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/tcache.c +++ /dev/null @@ -1,537 +0,0 @@ -#define JEMALLOC_TCACHE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -bool opt_tcache = true; -ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; - -tcache_bin_info_t *tcache_bin_info; -static unsigned stack_nelms; /* Total stack elms per tcache. */ - -size_t nhbins; -size_t tcache_maxclass; - -tcaches_t *tcaches; - -/* Index of first element within tcaches that has never been used. */ -static unsigned tcaches_past; - -/* Head of singly linked list tracking available tcaches elements. */ -static tcaches_t *tcaches_avail; - -/******************************************************************************/ - -size_t tcache_salloc(const void *ptr) -{ - - return (arena_salloc(ptr, false)); -} - -void -tcache_event_hard(tsd_t *tsd, tcache_t *tcache) -{ - szind_t binind = tcache->next_gc_bin; - tcache_bin_t *tbin = &tcache->tbins[binind]; - tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; - - if (tbin->low_water > 0) { - /* - * Flush (ceiling) 3/4 of the objects below the low water mark. - */ - if (binind < NBINS) { - tcache_bin_flush_small(tsd, tcache, tbin, binind, - tbin->ncached - tbin->low_water + (tbin->low_water - >> 2)); - } else { - tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached - - tbin->low_water + (tbin->low_water >> 2), tcache); - } - /* - * Reduce fill count by 2X. Limit lg_fill_div such that the - * fill count is always at least 1. - */ - if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1) - tbin->lg_fill_div++; - } else if (tbin->low_water < 0) { - /* - * Increase fill count by 2X. Make sure lg_fill_div stays - * greater than 0. - */ - if (tbin->lg_fill_div > 1) - tbin->lg_fill_div--; - } - tbin->low_water = tbin->ncached; - - tcache->next_gc_bin++; - if (tcache->next_gc_bin == nhbins) - tcache->next_gc_bin = 0; - tcache->ev_cnt = 0; -} - -void * -tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache, - tcache_bin_t *tbin, szind_t binind) -{ - void *ret; - - arena_tcache_fill_small(arena, tbin, binind, config_prof ? - tcache->prof_accumbytes : 0); - if (config_prof) - tcache->prof_accumbytes = 0; - ret = tcache_alloc_easy(tbin); - - return (ret); -} - -void -tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, - szind_t binind, unsigned rem) -{ - arena_t *arena; - void *ptr; - unsigned i, nflush, ndeferred; - bool merged_stats = false; - - assert(binind < NBINS); - assert(rem <= tbin->ncached); - - arena = arena_choose(tsd, NULL); - assert(arena != NULL); - for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { - /* Lock the arena bin associated with the first object. 
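
To make the flush target in tcache_event_hard() concrete: with ncached = 20 and low_water = 8, the small-bin path passes rem = 20 - 8 + (8 >> 2) = 14, so 6 of the 8 objects that sat unused through the whole GC period are flushed, exactly 3/4; an odd low_water = 7 flushes 7 - 1 = 6, the promised ceiling of 3/4 since the kept quarter rounds down.
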
*/ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - tbin->avail[0]); - arena_t *bin_arena = extent_node_arena_get(&chunk->node); - arena_bin_t *bin = &bin_arena->bins[binind]; - - if (config_prof && bin_arena == arena) { - if (arena_prof_accum(arena, tcache->prof_accumbytes)) - prof_idump(); - tcache->prof_accumbytes = 0; - } - - malloc_mutex_lock(&bin->lock); - if (config_stats && bin_arena == arena) { - assert(!merged_stats); - merged_stats = true; - bin->stats.nflushes++; - bin->stats.nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } - ndeferred = 0; - for (i = 0; i < nflush; i++) { - ptr = tbin->avail[i]; - assert(ptr != NULL); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (extent_node_arena_get(&chunk->node) == bin_arena) { - size_t pageind = ((uintptr_t)ptr - - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_bits_t *bitselm = - arena_bitselm_get(chunk, pageind); - arena_dalloc_bin_junked_locked(bin_arena, chunk, - ptr, bitselm); - } else { - /* - * This object was allocated via a different - * arena bin than the one that is currently - * locked. Stash the object, so that it can be - * handled in a future pass. - */ - tbin->avail[ndeferred] = ptr; - ndeferred++; - } - } - malloc_mutex_unlock(&bin->lock); - } - if (config_stats && !merged_stats) { - /* - * The flush loop didn't happen to flush to this thread's - * arena, so the stats didn't get merged. Manually do so now. - */ - arena_bin_t *bin = &arena->bins[binind]; - malloc_mutex_lock(&bin->lock); - bin->stats.nflushes++; - bin->stats.nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&bin->lock); - } - - memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], - rem * sizeof(void *)); - tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) - tbin->low_water = tbin->ncached; -} - -void -tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, - unsigned rem, tcache_t *tcache) -{ - arena_t *arena; - void *ptr; - unsigned i, nflush, ndeferred; - bool merged_stats = false; - - assert(binind < nhbins); - assert(rem <= tbin->ncached); - - arena = arena_choose(tsd, NULL); - assert(arena != NULL); - for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { - /* Lock the arena associated with the first object. */ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - tbin->avail[0]); - arena_t *locked_arena = extent_node_arena_get(&chunk->node); - UNUSED bool idump; - - if (config_prof) - idump = false; - malloc_mutex_lock(&locked_arena->lock); - if ((config_prof || config_stats) && locked_arena == arena) { - if (config_prof) { - idump = arena_prof_accum_locked(arena, - tcache->prof_accumbytes); - tcache->prof_accumbytes = 0; - } - if (config_stats) { - merged_stats = true; - arena->stats.nrequests_large += - tbin->tstats.nrequests; - arena->stats.lstats[binind - NBINS].nrequests += - tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } - } - ndeferred = 0; - for (i = 0; i < nflush; i++) { - ptr = tbin->avail[i]; - assert(ptr != NULL); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (extent_node_arena_get(&chunk->node) == - locked_arena) { - arena_dalloc_large_junked_locked(locked_arena, - chunk, ptr); - } else { - /* - * This object was allocated via a different - * arena than the one that is currently locked. - * Stash the object, so that it can be handled - * in a future pass. 
- */ - tbin->avail[ndeferred] = ptr; - ndeferred++; - } - } - malloc_mutex_unlock(&locked_arena->lock); - if (config_prof && idump) - prof_idump(); - } - if (config_stats && !merged_stats) { - /* - * The flush loop didn't happen to flush to this thread's - * arena, so the stats didn't get merged. Manually do so now. - */ - malloc_mutex_lock(&arena->lock); - arena->stats.nrequests_large += tbin->tstats.nrequests; - arena->stats.lstats[binind - NBINS].nrequests += - tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&arena->lock); - } - - memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], - rem * sizeof(void *)); - tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) - tbin->low_water = tbin->ncached; -} - -void -tcache_arena_associate(tcache_t *tcache, arena_t *arena) -{ - - if (config_stats) { - /* Link into list of extant tcaches. */ - malloc_mutex_lock(&arena->lock); - ql_elm_new(tcache, link); - ql_tail_insert(&arena->tcache_ql, tcache, link); - malloc_mutex_unlock(&arena->lock); - } -} - -void -tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena) -{ - - tcache_arena_dissociate(tcache, oldarena); - tcache_arena_associate(tcache, newarena); -} - -void -tcache_arena_dissociate(tcache_t *tcache, arena_t *arena) -{ - - if (config_stats) { - /* Unlink from list of extant tcaches. */ - malloc_mutex_lock(&arena->lock); - if (config_debug) { - bool in_ql = false; - tcache_t *iter; - ql_foreach(iter, &arena->tcache_ql, link) { - if (iter == tcache) { - in_ql = true; - break; - } - } - assert(in_ql); - } - ql_remove(&arena->tcache_ql, tcache, link); - tcache_stats_merge(tcache, arena); - malloc_mutex_unlock(&arena->lock); - } -} - -tcache_t * -tcache_get_hard(tsd_t *tsd) -{ - arena_t *arena; - - if (!tcache_enabled_get()) { - if (tsd_nominal(tsd)) - tcache_enabled_set(false); /* Memoize. */ - return (NULL); - } - arena = arena_choose(tsd, NULL); - if (unlikely(arena == NULL)) - return (NULL); - return (tcache_create(tsd, arena)); -} - -tcache_t * -tcache_create(tsd_t *tsd, arena_t *arena) -{ - tcache_t *tcache; - size_t size, stack_offset; - unsigned i; - - size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins); - /* Naturally align the pointer stacks. */ - size = PTR_CEILING(size); - stack_offset = size; - size += stack_nelms * sizeof(void *); - /* Avoid false cacheline sharing. 
*/ - size = sa2u(size, CACHELINE); - - tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get()); - if (tcache == NULL) - return (NULL); - - tcache_arena_associate(tcache, arena); - - assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); - for (i = 0; i < nhbins; i++) { - tcache->tbins[i].lg_fill_div = 1; - tcache->tbins[i].avail = (void **)((uintptr_t)tcache + - (uintptr_t)stack_offset); - stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); - } - - return (tcache); -} - -static void -tcache_destroy(tsd_t *tsd, tcache_t *tcache) -{ - arena_t *arena; - unsigned i; - - arena = arena_choose(tsd, NULL); - tcache_arena_dissociate(tcache, arena); - - for (i = 0; i < NBINS; i++) { - tcache_bin_t *tbin = &tcache->tbins[i]; - tcache_bin_flush_small(tsd, tcache, tbin, i, 0); - - if (config_stats && tbin->tstats.nrequests != 0) { - arena_bin_t *bin = &arena->bins[i]; - malloc_mutex_lock(&bin->lock); - bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); - } - } - - for (; i < nhbins; i++) { - tcache_bin_t *tbin = &tcache->tbins[i]; - tcache_bin_flush_large(tsd, tbin, i, 0, tcache); - - if (config_stats && tbin->tstats.nrequests != 0) { - malloc_mutex_lock(&arena->lock); - arena->stats.nrequests_large += tbin->tstats.nrequests; - arena->stats.lstats[i - NBINS].nrequests += - tbin->tstats.nrequests; - malloc_mutex_unlock(&arena->lock); - } - } - - if (config_prof && tcache->prof_accumbytes > 0 && - arena_prof_accum(arena, tcache->prof_accumbytes)) - prof_idump(); - - idalloctm(tsd, tcache, false, true); -} - -void -tcache_cleanup(tsd_t *tsd) -{ - tcache_t *tcache; - - if (!config_tcache) - return; - - if ((tcache = tsd_tcache_get(tsd)) != NULL) { - tcache_destroy(tsd, tcache); - tsd_tcache_set(tsd, NULL); - } -} - -void -tcache_enabled_cleanup(tsd_t *tsd) -{ - - /* Do nothing. */ -} - -/* Caller must own arena->lock. */ -void -tcache_stats_merge(tcache_t *tcache, arena_t *arena) -{ - unsigned i; - - cassert(config_stats); - - /* Merge and reset tcache stats. 
*/ - for (i = 0; i < NBINS; i++) { - arena_bin_t *bin = &arena->bins[i]; - tcache_bin_t *tbin = &tcache->tbins[i]; - malloc_mutex_lock(&bin->lock); - bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); - tbin->tstats.nrequests = 0; - } - - for (; i < nhbins; i++) { - malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS]; - tcache_bin_t *tbin = &tcache->tbins[i]; - arena->stats.nrequests_large += tbin->tstats.nrequests; - lstats->nrequests += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } -} - -bool -tcaches_create(tsd_t *tsd, unsigned *r_ind) -{ - tcache_t *tcache; - tcaches_t *elm; - - if (tcaches == NULL) { - tcaches = base_alloc(sizeof(tcache_t *) * - (MALLOCX_TCACHE_MAX+1)); - if (tcaches == NULL) - return (true); - } - - if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) - return (true); - tcache = tcache_create(tsd, a0get()); - if (tcache == NULL) - return (true); - - if (tcaches_avail != NULL) { - elm = tcaches_avail; - tcaches_avail = tcaches_avail->next; - elm->tcache = tcache; - *r_ind = elm - tcaches; - } else { - elm = &tcaches[tcaches_past]; - elm->tcache = tcache; - *r_ind = tcaches_past; - tcaches_past++; - } - - return (false); -} - -static void -tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm) -{ - - if (elm->tcache == NULL) - return; - tcache_destroy(tsd, elm->tcache); - elm->tcache = NULL; -} - -void -tcaches_flush(tsd_t *tsd, unsigned ind) -{ - - tcaches_elm_flush(tsd, &tcaches[ind]); -} - -void -tcaches_destroy(tsd_t *tsd, unsigned ind) -{ - tcaches_t *elm = &tcaches[ind]; - tcaches_elm_flush(tsd, elm); - elm->next = tcaches_avail; - tcaches_avail = elm; -} - -bool -tcache_boot(void) -{ - unsigned i; - - /* - * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is - * known. - */ - if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS) - tcache_maxclass = SMALL_MAXCLASS; - else if ((1U << opt_lg_tcache_max) > large_maxclass) - tcache_maxclass = large_maxclass; - else - tcache_maxclass = (1U << opt_lg_tcache_max); - - nhbins = size2index(tcache_maxclass) + 1; - - /* Initialize tcache_bin_info. */ - tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins * - sizeof(tcache_bin_info_t)); - if (tcache_bin_info == NULL) - return (true); - stack_nelms = 0; - for (i = 0; i < NBINS; i++) { - if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { - tcache_bin_info[i].ncached_max = - TCACHE_NSLOTS_SMALL_MIN; - } else if ((arena_bin_info[i].nregs << 1) <= - TCACHE_NSLOTS_SMALL_MAX) { - tcache_bin_info[i].ncached_max = - (arena_bin_info[i].nregs << 1); - } else { - tcache_bin_info[i].ncached_max = - TCACHE_NSLOTS_SMALL_MAX; - } - stack_nelms += tcache_bin_info[i].ncached_max; - } - for (; i < nhbins; i++) { - tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; - stack_nelms += tcache_bin_info[i].ncached_max; - } - - return (false); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/tsd.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/tsd.c deleted file mode 100644 index 9ffe9af..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/tsd.c +++ /dev/null @@ -1,193 +0,0 @@ -#define JEMALLOC_TSD_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. 
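
tcaches_create()/tcaches_flush()/tcaches_destroy() above back the "tcache.*" mallctl namespace; the index handed back is what MALLOCX_TCACHE() later encodes into allocation flags. A caller-side sketch of that lifecycle, assuming the jemalloc 4.x public API:

#include <jemalloc/jemalloc.h>

static void
explicit_tcache_example(void)
{
	unsigned ind;
	size_t sz = sizeof(ind);

	if (mallctl("tcache.create", &ind, &sz, NULL, 0) != 0)
		return;
	void *p = mallocx(128, MALLOCX_TCACHE(ind));
	if (p != NULL)
		dallocx(p, MALLOCX_TCACHE(ind));
	mallctl("tcache.destroy", NULL, NULL, &ind, sizeof(ind));
}
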
*/ - -static unsigned ncleanups; -static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; - -malloc_tsd_data(, , tsd_t, TSD_INITIALIZER) - -/******************************************************************************/ - -void * -malloc_tsd_malloc(size_t size) -{ - - return (a0malloc(CACHELINE_CEILING(size))); -} - -void -malloc_tsd_dalloc(void *wrapper) -{ - - a0dalloc(wrapper); -} - -void -malloc_tsd_no_cleanup(void *arg) -{ - - not_reached(); -} - -#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) -#ifndef _WIN32 -JEMALLOC_EXPORT -#endif -void -_malloc_thread_cleanup(void) -{ - bool pending[MALLOC_TSD_CLEANUPS_MAX], again; - unsigned i; - - for (i = 0; i < ncleanups; i++) - pending[i] = true; - - do { - again = false; - for (i = 0; i < ncleanups; i++) { - if (pending[i]) { - pending[i] = cleanups[i](); - if (pending[i]) - again = true; - } - } - } while (again); -} -#endif - -void -malloc_tsd_cleanup_register(bool (*f)(void)) -{ - - assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); - cleanups[ncleanups] = f; - ncleanups++; -} - -void -tsd_cleanup(void *arg) -{ - tsd_t *tsd = (tsd_t *)arg; - - switch (tsd->state) { - case tsd_state_uninitialized: - /* Do nothing. */ - break; - case tsd_state_nominal: -#define O(n, t) \ - n##_cleanup(tsd); -MALLOC_TSD -#undef O - tsd->state = tsd_state_purgatory; - tsd_set(tsd); - break; - case tsd_state_purgatory: - /* - * The previous time this destructor was called, we set the - * state to tsd_state_purgatory so that other destructors - * wouldn't cause re-creation of the tsd. This time, do - * nothing, and do not request another callback. - */ - break; - case tsd_state_reincarnated: - /* - * Another destructor deallocated memory after this destructor - * was called. Reset state to tsd_state_purgatory and request - * another callback. - */ - tsd->state = tsd_state_purgatory; - tsd_set(tsd); - break; - default: - not_reached(); - } -} - -bool -malloc_tsd_boot0(void) -{ - - ncleanups = 0; - if (tsd_boot0()) - return (true); - *tsd_arenas_cache_bypassp_get(tsd_fetch()) = true; - return (false); -} - -void -malloc_tsd_boot1(void) -{ - - tsd_boot1(); - *tsd_arenas_cache_bypassp_get(tsd_fetch()) = false; -} - -#ifdef _WIN32 -static BOOL WINAPI -_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) -{ - - switch (fdwReason) { -#ifdef JEMALLOC_LAZY_LOCK - case DLL_THREAD_ATTACH: - isthreaded = true; - break; -#endif - case DLL_THREAD_DETACH: - _malloc_thread_cleanup(); - break; - default: - break; - } - return (true); -} - -#ifdef _MSC_VER -# ifdef _M_IX86 -# pragma comment(linker, "/INCLUDE:__tls_used") -# else -# pragma comment(linker, "/INCLUDE:_tls_used") -# endif -# pragma section(".CRT$XLY",long,read) -#endif -JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) -static BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, - DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; -#endif - -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -void * -tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) -{ - pthread_t self = pthread_self(); - tsd_init_block_t *iter; - - /* Check whether this thread has already inserted into the list. */ - malloc_mutex_lock(&head->lock); - ql_foreach(iter, &head->blocks, link) { - if (iter->thread == self) { - malloc_mutex_unlock(&head->lock); - return (iter->data); - } - } - /* Insert block into list. 
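
The purgatory/reincarnated handling in tsd_cleanup() above is a general POSIX TSD idiom: a destructor whose data other destructors may still touch re-arms itself once instead of freeing eagerly. Stripped to a skeleton (the names here are illustrative, not jemalloc's):

#include <pthread.h>

enum tsd_state { NOMINAL, PURGATORY };
struct tsd { enum tsd_state state; /* ... per-thread data ... */ };
static pthread_key_t key;

static void
tsd_dtor(void *arg)
{
	struct tsd *t = arg;

	if (t->state == NOMINAL) {
		/*
		 * Tear down, then request one final callback so any
		 * destructor that runs after us sees a defined state.
		 */
		t->state = PURGATORY;
		pthread_setspecific(key, t);
	}
	/* PURGATORY: nothing left to do; let the value expire. */
}
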
*/
-	ql_elm_new(block, link);
-	block->thread = self;
-	ql_tail_insert(&head->blocks, block, link);
-	malloc_mutex_unlock(&head->lock);
-	return (NULL);
-}
-
-void
-tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
-{
-
-	malloc_mutex_lock(&head->lock);
-	ql_remove(&head->blocks, block, link);
-	malloc_mutex_unlock(&head->lock);
-}
-#endif
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/zone.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/zone.c
deleted file mode 100644
index 12e1734..0000000
--- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/zone.c
+++ /dev/null
@@ -1,274 +0,0 @@
-#include "jemalloc/internal/jemalloc_internal.h"
-#ifndef JEMALLOC_ZONE
-#  error "This source file is for zones on Darwin (OS X)."
-#endif
-
-/*
- * The malloc_default_purgeable_zone function is only available on >= 10.6.
- * We need to check whether it is present at runtime, thus the weak_import.
- */
-extern malloc_zone_t *malloc_default_purgeable_zone(void)
-JEMALLOC_ATTR(weak_import);
-
-/******************************************************************************/
-/* Data. */
-
-static malloc_zone_t zone;
-static struct malloc_introspection_t zone_introspect;
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static size_t	zone_size(malloc_zone_t *zone, void *ptr);
-static void	*zone_malloc(malloc_zone_t *zone, size_t size);
-static void	*zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
-static void	*zone_valloc(malloc_zone_t *zone, size_t size);
-static void	zone_free(malloc_zone_t *zone, void *ptr);
-static void	*zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
-#if (JEMALLOC_ZONE_VERSION >= 5)
-static void	*zone_memalign(malloc_zone_t *zone, size_t alignment,
-    size_t size);
-#endif
-#if (JEMALLOC_ZONE_VERSION >= 6)
-static void	zone_free_definite_size(malloc_zone_t *zone, void *ptr,
-    size_t size);
-#endif
-static void	*zone_destroy(malloc_zone_t *zone);
-static size_t	zone_good_size(malloc_zone_t *zone, size_t size);
-static void	zone_force_lock(malloc_zone_t *zone);
-static void	zone_force_unlock(malloc_zone_t *zone);
-
-/******************************************************************************/
-/*
- * Functions.
- */
-
-static size_t
-zone_size(malloc_zone_t *zone, void *ptr)
-{
-
-	/*
-	 * There appear to be places within Darwin (such as setenv(3)) that
-	 * cause calls to this function with pointers that *no* zone owns.  If
-	 * we knew that all pointers were owned by *some* zone, we could split
-	 * our zone into two parts, and use one as the default allocator and
-	 * the other as the default deallocator/reallocator.  Since that will
-	 * not work in practice, we must check all pointers to assure that they
-	 * reside within a mapped chunk before determining size.
-	 */
-	return (ivsalloc(ptr, config_prof));
-}
-
-static void *
-zone_malloc(malloc_zone_t *zone, size_t size)
-{
-
-	return (je_malloc(size));
-}
-
-static void *
-zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
-{
-
-	return (je_calloc(num, size));
-}
-
-static void *
-zone_valloc(malloc_zone_t *zone, size_t size)
-{
-	void *ret = NULL;	/* Assignment avoids useless compiler warning.
*/ - - je_posix_memalign(&ret, PAGE, size); - - return (ret); -} - -static void -zone_free(malloc_zone_t *zone, void *ptr) -{ - - if (ivsalloc(ptr, config_prof) != 0) { - je_free(ptr); - return; - } - - free(ptr); -} - -static void * -zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) -{ - - if (ivsalloc(ptr, config_prof) != 0) - return (je_realloc(ptr, size)); - - return (realloc(ptr, size)); -} - -#if (JEMALLOC_ZONE_VERSION >= 5) -static void * -zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) -{ - void *ret = NULL; /* Assignment avoids useless compiler warning. */ - - je_posix_memalign(&ret, alignment, size); - - return (ret); -} -#endif - -#if (JEMALLOC_ZONE_VERSION >= 6) -static void -zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) -{ - - if (ivsalloc(ptr, config_prof) != 0) { - assert(ivsalloc(ptr, config_prof) == size); - je_free(ptr); - return; - } - - free(ptr); -} -#endif - -static void * -zone_destroy(malloc_zone_t *zone) -{ - - /* This function should never be called. */ - not_reached(); - return (NULL); -} - -static size_t -zone_good_size(malloc_zone_t *zone, size_t size) -{ - - if (size == 0) - size = 1; - return (s2u(size)); -} - -static void -zone_force_lock(malloc_zone_t *zone) -{ - - if (isthreaded) - jemalloc_prefork(); -} - -static void -zone_force_unlock(malloc_zone_t *zone) -{ - - if (isthreaded) - jemalloc_postfork_parent(); -} - -JEMALLOC_ATTR(constructor) -void -register_zone(void) -{ - - /* - * If something else replaced the system default zone allocator, don't - * register jemalloc's. - */ - malloc_zone_t *default_zone = malloc_default_zone(); - malloc_zone_t *purgeable_zone = NULL; - if (!default_zone->zone_name || - strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) { - return; - } - - zone.size = (void *)zone_size; - zone.malloc = (void *)zone_malloc; - zone.calloc = (void *)zone_calloc; - zone.valloc = (void *)zone_valloc; - zone.free = (void *)zone_free; - zone.realloc = (void *)zone_realloc; - zone.destroy = (void *)zone_destroy; - zone.zone_name = "jemalloc_zone"; - zone.batch_malloc = NULL; - zone.batch_free = NULL; - zone.introspect = &zone_introspect; - zone.version = JEMALLOC_ZONE_VERSION; -#if (JEMALLOC_ZONE_VERSION >= 5) - zone.memalign = zone_memalign; -#endif -#if (JEMALLOC_ZONE_VERSION >= 6) - zone.free_definite_size = zone_free_definite_size; -#endif -#if (JEMALLOC_ZONE_VERSION >= 8) - zone.pressure_relief = NULL; -#endif - - zone_introspect.enumerator = NULL; - zone_introspect.good_size = (void *)zone_good_size; - zone_introspect.check = NULL; - zone_introspect.print = NULL; - zone_introspect.log = NULL; - zone_introspect.force_lock = (void *)zone_force_lock; - zone_introspect.force_unlock = (void *)zone_force_unlock; - zone_introspect.statistics = NULL; -#if (JEMALLOC_ZONE_VERSION >= 6) - zone_introspect.zone_locked = NULL; -#endif -#if (JEMALLOC_ZONE_VERSION >= 7) - zone_introspect.enable_discharge_checking = NULL; - zone_introspect.disable_discharge_checking = NULL; - zone_introspect.discharge = NULL; -#ifdef __BLOCKS__ - zone_introspect.enumerate_discharged_pointers = NULL; -#else - zone_introspect.enumerate_unavailable_without_blocks = NULL; -#endif -#endif - - /* - * The default purgeable zone is created lazily by OSX's libc. It uses - * the default zone when it is created for "small" allocations - * (< 15 KiB), but assumes the default zone is a scalable_zone. 
This - * obviously fails when the default zone is the jemalloc zone, so - * malloc_default_purgeable_zone is called beforehand so that the - * default purgeable zone is created when the default zone is still - * a scalable_zone. As purgeable zones only exist on >= 10.6, we need - * to check for the existence of malloc_default_purgeable_zone() at - * run time. - */ - if (malloc_default_purgeable_zone != NULL) - purgeable_zone = malloc_default_purgeable_zone(); - - /* Register the custom zone. At this point it won't be the default. */ - malloc_zone_register(&zone); - - do { - default_zone = malloc_default_zone(); - /* - * Unregister and reregister the default zone. On OSX >= 10.6, - * unregistering takes the last registered zone and places it - * at the location of the specified zone. Unregistering the - * default zone thus makes the last registered one the default. - * On OSX < 10.6, unregistering shifts all registered zones. - * The first registered zone then becomes the default. - */ - malloc_zone_unregister(default_zone); - malloc_zone_register(default_zone); - /* - * On OSX 10.6, having the default purgeable zone appear before - * the default zone makes some things crash because it thinks it - * owns the default zone allocated pointers. We thus - * unregister/re-register it in order to ensure it's always - * after the default zone. On OSX < 10.6, there is no purgeable - * zone, so this does nothing. On OSX >= 10.6, unregistering - * replaces the purgeable zone with the last registered zone - * above, i.e. the default zone. Registering it again then puts - * it at the end, obviously after the default zone. - */ - if (purgeable_zone) { - malloc_zone_unregister(purgeable_zone); - malloc_zone_register(purgeable_zone); - } - } while (malloc_default_zone() != &zone); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/test.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/test.h deleted file mode 100644 index 3cf901f..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/test.h +++ /dev/null @@ -1,329 +0,0 @@ -#define ASSERT_BUFSIZE 256 - -#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \ - t a_ = (a); \ - t b_ = (b); \ - if (!(a_ cmp b_)) { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Failed assertion: " \ - "(%s) "#cmp" (%s) --> " \ - "%"pri" "#neg_cmp" %"pri": ", \ - __func__, __FILE__, __LINE__, \ - #a, #b, a_, b_); \ - malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ - p_test_fail(prefix, message); \ - } \ -} while (0) - -#define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \ - !=, "p", __VA_ARGS__) -#define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \ - ==, "p", __VA_ARGS__) -#define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \ - !=, "p", __VA_ARGS__) -#define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \ - ==, "p", __VA_ARGS__) - -#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) -#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) -#define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__) -#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__) -#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__) -#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__) - -#define assert_x_eq(a, b, ...) 
assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) -#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) -#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) -#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) -#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) -#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) - -#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) -#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) -#define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__) -#define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__) -#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__) -#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__) - -#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) -#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) -#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__) -#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__) -#define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__) -#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__) - -#define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \ - !=, "ld", __VA_ARGS__) -#define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \ - ==, "ld", __VA_ARGS__) -#define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \ - >=, "ld", __VA_ARGS__) -#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \ - >, "ld", __VA_ARGS__) -#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \ - <, "ld", __VA_ARGS__) -#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \ - <=, "ld", __VA_ARGS__) - -#define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \ - a, b, ==, !=, "lu", __VA_ARGS__) -#define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \ - a, b, !=, ==, "lu", __VA_ARGS__) -#define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \ - a, b, <, >=, "lu", __VA_ARGS__) -#define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \ - a, b, <=, >, "lu", __VA_ARGS__) -#define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \ - a, b, >=, <, "lu", __VA_ARGS__) -#define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \ - a, b, >, <=, "lu", __VA_ARGS__) - -#define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \ - !=, "qd", __VA_ARGS__) -#define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \ - ==, "qd", __VA_ARGS__) -#define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \ - >=, "qd", __VA_ARGS__) -#define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \ - >, "qd", __VA_ARGS__) -#define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \ - <, "qd", __VA_ARGS__) -#define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \ - <=, "qd", __VA_ARGS__) - -#define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \ - a, b, ==, !=, "qu", __VA_ARGS__) -#define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \ - a, b, !=, ==, "qu", __VA_ARGS__) -#define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \ - a, b, <, >=, "qu", __VA_ARGS__) -#define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \ - a, b, <=, >, "qu", __VA_ARGS__) -#define assert_qu_ge(a, b, ...) 
assert_cmp(unsigned long long, \ - a, b, >=, <, "qu", __VA_ARGS__) -#define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \ - a, b, >, <=, "qu", __VA_ARGS__) - -#define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \ - !=, "jd", __VA_ARGS__) -#define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \ - ==, "jd", __VA_ARGS__) -#define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \ - >=, "jd", __VA_ARGS__) -#define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \ - >, "jd", __VA_ARGS__) -#define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \ - <, "jd", __VA_ARGS__) -#define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \ - <=, "jd", __VA_ARGS__) - -#define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \ - !=, "ju", __VA_ARGS__) -#define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \ - ==, "ju", __VA_ARGS__) -#define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \ - >=, "ju", __VA_ARGS__) -#define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \ - >, "ju", __VA_ARGS__) -#define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \ - <, "ju", __VA_ARGS__) -#define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \ - <=, "ju", __VA_ARGS__) - -#define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \ - !=, "zd", __VA_ARGS__) -#define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \ - ==, "zd", __VA_ARGS__) -#define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \ - >=, "zd", __VA_ARGS__) -#define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \ - >, "zd", __VA_ARGS__) -#define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \ - <, "zd", __VA_ARGS__) -#define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \ - <=, "zd", __VA_ARGS__) - -#define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \ - !=, "zu", __VA_ARGS__) -#define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \ - ==, "zu", __VA_ARGS__) -#define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \ - >=, "zu", __VA_ARGS__) -#define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \ - >, "zu", __VA_ARGS__) -#define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \ - <, "zu", __VA_ARGS__) -#define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \ - <=, "zu", __VA_ARGS__) - -#define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \ - !=, FMTd32, __VA_ARGS__) -#define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \ - ==, FMTd32, __VA_ARGS__) -#define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \ - >=, FMTd32, __VA_ARGS__) -#define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \ - >, FMTd32, __VA_ARGS__) -#define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \ - <, FMTd32, __VA_ARGS__) -#define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \ - <=, FMTd32, __VA_ARGS__) - -#define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \ - !=, FMTu32, __VA_ARGS__) -#define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \ - ==, FMTu32, __VA_ARGS__) -#define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \ - >=, FMTu32, __VA_ARGS__) -#define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \ - >, FMTu32, __VA_ARGS__) -#define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \ - <, FMTu32, __VA_ARGS__) -#define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \ - <=, FMTu32, __VA_ARGS__) - -#define assert_d64_eq(a, b, ...) 
assert_cmp(int64_t, a, b, ==, \ - !=, FMTd64, __VA_ARGS__) -#define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \ - ==, FMTd64, __VA_ARGS__) -#define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \ - >=, FMTd64, __VA_ARGS__) -#define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \ - >, FMTd64, __VA_ARGS__) -#define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \ - <, FMTd64, __VA_ARGS__) -#define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \ - <=, FMTd64, __VA_ARGS__) - -#define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \ - !=, FMTu64, __VA_ARGS__) -#define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \ - ==, FMTu64, __VA_ARGS__) -#define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \ - >=, FMTu64, __VA_ARGS__) -#define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \ - >, FMTu64, __VA_ARGS__) -#define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \ - <, FMTu64, __VA_ARGS__) -#define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \ - <=, FMTu64, __VA_ARGS__) - -#define assert_b_eq(a, b, ...) do { \ - bool a_ = (a); \ - bool b_ = (b); \ - if (!(a_ == b_)) { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Failed assertion: " \ - "(%s) == (%s) --> %s != %s: ", \ - __func__, __FILE__, __LINE__, \ - #a, #b, a_ ? "true" : "false", \ - b_ ? "true" : "false"); \ - malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ - p_test_fail(prefix, message); \ - } \ -} while (0) -#define assert_b_ne(a, b, ...) do { \ - bool a_ = (a); \ - bool b_ = (b); \ - if (!(a_ != b_)) { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Failed assertion: " \ - "(%s) != (%s) --> %s == %s: ", \ - __func__, __FILE__, __LINE__, \ - #a, #b, a_ ? "true" : "false", \ - b_ ? "true" : "false"); \ - malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ - p_test_fail(prefix, message); \ - } \ -} while (0) -#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__) -#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__) - -#define assert_str_eq(a, b, ...) do { \ - if (strcmp((a), (b))) { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Failed assertion: " \ - "(%s) same as (%s) --> " \ - "\"%s\" differs from \"%s\": ", \ - __func__, __FILE__, __LINE__, #a, #b, a, b); \ - malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ - p_test_fail(prefix, message); \ - } \ -} while (0) -#define assert_str_ne(a, b, ...) do { \ - if (!strcmp((a), (b))) { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Failed assertion: " \ - "(%s) differs from (%s) --> " \ - "\"%s\" same as \"%s\": ", \ - __func__, __FILE__, __LINE__, #a, #b, a, b); \ - malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ - p_test_fail(prefix, message); \ - } \ -} while (0) - -#define assert_not_reached(...) do { \ - char prefix[ASSERT_BUFSIZE]; \ - char message[ASSERT_BUFSIZE]; \ - malloc_snprintf(prefix, sizeof(prefix), \ - "%s:%s:%d: Unreachable code reached: ", \ - __func__, __FILE__, __LINE__); \ - malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ - p_test_fail(prefix, message); \ -} while (0) - -/* - * If this enum changes, corresponding changes in test/test.sh.in are also - * necessary. 
- */ -typedef enum { - test_status_pass = 0, - test_status_skip = 1, - test_status_fail = 2, - - test_status_count = 3 -} test_status_t; - -typedef void (test_t)(void); - -#define TEST_BEGIN(f) \ -static void \ -f(void) \ -{ \ - p_test_init(#f); - -#define TEST_END \ - goto label_test_end; \ -label_test_end: \ - p_test_fini(); \ -} - -#define test(...) \ - p_test(__VA_ARGS__, NULL) - -#define test_skip_if(e) do { \ - if (e) { \ - test_skip("%s:%s:%d: Test skipped: (%s)", \ - __func__, __FILE__, __LINE__, #e); \ - goto label_test_end; \ - } \ -} while (0) - -void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); -void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); - -/* For private use by macros. */ -test_status_t p_test(test_t *t, ...); -void p_test_init(const char *name); -void p_test_fini(void); -void p_test_fail(const char *prefix, const char *message); diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/timer.h b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/timer.h deleted file mode 100644 index a7fefdf..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/timer.h +++ /dev/null @@ -1,26 +0,0 @@ -/* Simple timer, for use in benchmark reporting. */ - -#include -#include - -#define JEMALLOC_CLOCK_GETTIME defined(_POSIX_MONOTONIC_CLOCK) \ - && _POSIX_MONOTONIC_CLOCK >= 0 - -typedef struct { -#ifdef _WIN32 - FILETIME ft0; - FILETIME ft1; -#elif JEMALLOC_CLOCK_GETTIME - struct timespec ts0; - struct timespec ts1; - int clock_id; -#else - struct timeval tv0; - struct timeval tv1; -#endif -} timedelta_t; - -void timer_start(timedelta_t *timer); -void timer_stop(timedelta_t *timer); -uint64_t timer_usec(const timedelta_t *timer); -void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen); diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/mallocx.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/mallocx.c deleted file mode 100644 index 6253175..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/mallocx.c +++ /dev/null @@ -1,182 +0,0 @@ -#include "test/jemalloc_test.h" - -static unsigned -get_nsizes_impl(const char *cmd) -{ - unsigned ret; - size_t z; - - z = sizeof(unsigned); - assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, - "Unexpected mallctl(\"%s\", ...) failure", cmd); - - return (ret); -} - -static unsigned -get_nhuge(void) -{ - - return (get_nsizes_impl("arenas.nhchunks")); -} - -static size_t -get_size_impl(const char *cmd, size_t ind) -{ - size_t ret; - size_t z; - size_t mib[4]; - size_t miblen = 4; - - z = sizeof(size_t); - assert_d_eq(mallctlnametomib(cmd, mib, &miblen), - 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); - mib[2] = ind; - z = sizeof(size_t); - assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0), - 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); - - return (ret); -} - -static size_t -get_huge_size(size_t ind) -{ - - return (get_size_impl("arenas.hchunk.0.size", ind)); -} - -TEST_BEGIN(test_oom) -{ - size_t hugemax, size, alignment; - - hugemax = get_huge_size(get_nhuge()-1); - - /* - * It should be impossible to allocate two objects that each consume - * more than half the virtual address space. 
- */ - { - void *p; - - p = mallocx(hugemax, 0); - if (p != NULL) { - assert_ptr_null(mallocx(hugemax, 0), - "Expected OOM for mallocx(size=%#zx, 0)", hugemax); - dallocx(p, 0); - } - } - -#if LG_SIZEOF_PTR == 3 - size = ZU(0x8000000000000000); - alignment = ZU(0x8000000000000000); -#else - size = ZU(0x80000000); - alignment = ZU(0x80000000); -#endif - assert_ptr_null(mallocx(size, MALLOCX_ALIGN(alignment)), - "Expected OOM for mallocx(size=%#zx, MALLOCX_ALIGN(%#zx)", size, - alignment); -} -TEST_END - -TEST_BEGIN(test_basic) -{ -#define MAXSZ (((size_t)1) << 26) - size_t sz; - - for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { - size_t nsz, rsz; - void *p; - nsz = nallocx(sz, 0); - assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); - p = mallocx(sz, 0); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - rsz = sallocx(p, 0); - assert_zu_ge(rsz, sz, "Real size smaller than expected"); - assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); - dallocx(p, 0); - - p = mallocx(sz, 0); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - dallocx(p, 0); - - nsz = nallocx(sz, MALLOCX_ZERO); - assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); - p = mallocx(sz, MALLOCX_ZERO); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - rsz = sallocx(p, 0); - assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); - dallocx(p, 0); - } -#undef MAXSZ -} -TEST_END - -TEST_BEGIN(test_alignment_and_size) -{ -#define MAXALIGN (((size_t)1) << 25) -#define NITER 4 - size_t nsz, rsz, sz, alignment, total; - unsigned i; - void *ps[NITER]; - - for (i = 0; i < NITER; i++) - ps[i] = NULL; - - for (alignment = 8; - alignment <= MAXALIGN; - alignment <<= 1) { - total = 0; - for (sz = 1; - sz < 3 * alignment && sz < (1U << 31); - sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { - for (i = 0; i < NITER; i++) { - nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | - MALLOCX_ZERO); - assert_zu_ne(nsz, 0, - "nallocx() error for alignment=%zu, " - "size=%zu (%#zx)", alignment, sz, sz); - ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | - MALLOCX_ZERO); - assert_ptr_not_null(ps[i], - "mallocx() error for alignment=%zu, " - "size=%zu (%#zx)", alignment, sz, sz); - rsz = sallocx(ps[i], 0); - assert_zu_ge(rsz, sz, - "Real size smaller than expected for " - "alignment=%zu, size=%zu", alignment, sz); - assert_zu_eq(nsz, rsz, - "nallocx()/sallocx() size mismatch for " - "alignment=%zu, size=%zu", alignment, sz); - assert_ptr_null( - (void *)((uintptr_t)ps[i] & (alignment-1)), - "%p inadequately aligned for" - " alignment=%zu, size=%zu", ps[i], - alignment, sz); - total += rsz; - if (total >= (MAXALIGN << 1)) - break; - } - for (i = 0; i < NITER; i++) { - if (ps[i] != NULL) { - dallocx(ps[i], 0); - ps[i] = NULL; - } - } - } - } -#undef MAXALIGN -#undef NITER -} -TEST_END - -int -main(void) -{ - - return (test( - test_oom, - test_basic, - test_alignment_and_size)); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/thread_tcache_enabled.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/thread_tcache_enabled.c deleted file mode 100644 index f4e89c6..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/thread_tcache_enabled.c +++ /dev/null @@ -1,113 +0,0 @@ -#include "test/jemalloc_test.h" - -static const bool config_tcache = -#ifdef JEMALLOC_TCACHE - true -#else - false -#endif - ; - -void * -thd_start(void *arg) -{ - int err; - size_t sz; - bool e0, e1; - - sz = sizeof(bool); - if ((err = mallctl("thread.tcache.enabled", 
&e0, &sz, NULL, 0))) { - if (err == ENOENT) { - assert_false(config_tcache, - "ENOENT should only be returned if tcache is " - "disabled"); - } - goto label_ENOENT; - } - - if (e0) { - e1 = false; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), - 0, "Unexpected mallctl() error"); - assert_true(e0, "tcache should be enabled"); - } - - e1 = true; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_false(e0, "tcache should be disabled"); - - e1 = true; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_true(e0, "tcache should be enabled"); - - e1 = false; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_true(e0, "tcache should be enabled"); - - e1 = false; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_false(e0, "tcache should be disabled"); - - free(malloc(1)); - e1 = true; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_false(e0, "tcache should be disabled"); - - free(malloc(1)); - e1 = true; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_true(e0, "tcache should be enabled"); - - free(malloc(1)); - e1 = false; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_true(e0, "tcache should be enabled"); - - free(malloc(1)); - e1 = false; - assert_d_eq(mallctl("thread.tcache.enabled", &e0, &sz, &e1, sz), 0, - "Unexpected mallctl() error"); - assert_false(e0, "tcache should be disabled"); - - free(malloc(1)); - return (NULL); -label_ENOENT: - test_skip("\"thread.tcache.enabled\" mallctl not available"); - return (NULL); -} - -TEST_BEGIN(test_main_thread) -{ - - thd_start(NULL); -} -TEST_END - -TEST_BEGIN(test_subthread) -{ - thd_t thd; - - thd_create(&thd, thd_start, NULL); - thd_join(thd, NULL); -} -TEST_END - -int -main(void) -{ - - /* Run tests multiple times to check for bad interactions. */ - return (test( - test_main_thread, - test_subthread, - test_main_thread, - test_subthread, - test_main_thread)); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/btalloc.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/btalloc.c deleted file mode 100644 index 9a253d9..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/btalloc.c +++ /dev/null @@ -1,8 +0,0 @@ -#include "test/jemalloc_test.h" - -void * -btalloc(size_t size, unsigned bits) -{ - - return (btalloc_0(size, bits)); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/test.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/test.c deleted file mode 100644 index 8173614..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/test.c +++ /dev/null @@ -1,107 +0,0 @@ -#include "test/jemalloc_test.h" - -static unsigned test_count = 0; -static test_status_t test_counts[test_status_count] = {0, 0, 0}; -static test_status_t test_status = test_status_pass; -static const char * test_name = ""; - -JEMALLOC_FORMAT_PRINTF(1, 2) -void -test_skip(const char *format, ...) 
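/*
 * The thread_tcache_enabled test deleted above exercises mallctl()'s
 * combined read/write form: the old value and the new value travel in
 * one call. A minimal sketch of that idiom, using the jemalloc 4.x
 * signature visible throughout these tests (values are illustrative):
 *
 *     bool e0, e1 = false;
 *     size_t sz = sizeof(e0);
 *     // Disable this thread's tcache while capturing the prior state;
 *     // returns ENOENT when the build has no tcache support.
 *     if (mallctl("thread.tcache.enabled", &e0, &sz, &e1, sizeof(e1)) == 0)
 *         assert(sz == sizeof(e0));  // e0 now holds the old setting
 *
 * The malloc(1)/free pairs interleaved with the toggles check that the
 * setting survives actual allocation traffic.
 */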
-{ - va_list ap; - - va_start(ap, format); - malloc_vcprintf(NULL, NULL, format, ap); - va_end(ap); - malloc_printf("\n"); - test_status = test_status_skip; -} - -JEMALLOC_FORMAT_PRINTF(1, 2) -void -test_fail(const char *format, ...) -{ - va_list ap; - - va_start(ap, format); - malloc_vcprintf(NULL, NULL, format, ap); - va_end(ap); - malloc_printf("\n"); - test_status = test_status_fail; -} - -static const char * -test_status_string(test_status_t test_status) -{ - - switch (test_status) { - case test_status_pass: return "pass"; - case test_status_skip: return "skip"; - case test_status_fail: return "fail"; - default: not_reached(); - } -} - -void -p_test_init(const char *name) -{ - - test_count++; - test_status = test_status_pass; - test_name = name; -} - -void -p_test_fini(void) -{ - - test_counts[test_status]++; - malloc_printf("%s: %s\n", test_name, test_status_string(test_status)); -} - -test_status_t -p_test(test_t *t, ...) -{ - test_status_t ret; - va_list ap; - - /* - * Make sure initialization occurs prior to running tests. Tests are - * special because they may use internal facilities prior to triggering - * initialization as a side effect of calling into the public API. This - * is a final safety that works even if jemalloc_constructor() doesn't - * run, as for MSVC builds. - */ - if (nallocx(1, 0) == 0) { - malloc_printf("Initialization error"); - return (test_status_fail); - } - - ret = test_status_pass; - va_start(ap, t); - for (; t != NULL; t = va_arg(ap, test_t *)) { - t(); - if (test_status > ret) - ret = test_status; - } - va_end(ap); - - malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n", - test_status_string(test_status_pass), - test_counts[test_status_pass], test_count, - test_status_string(test_status_skip), - test_counts[test_status_skip], test_count, - test_status_string(test_status_fail), - test_counts[test_status_fail], test_count); - - return (ret); -} - -void -p_test_fail(const char *prefix, const char *message) -{ - - malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message); - test_status = test_status_fail; -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/timer.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/timer.c deleted file mode 100644 index 0c93aba..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/timer.c +++ /dev/null @@ -1,85 +0,0 @@ -#include "test/jemalloc_test.h" - -void -timer_start(timedelta_t *timer) -{ - -#ifdef _WIN32 - GetSystemTimeAsFileTime(&timer->ft0); -#elif JEMALLOC_CLOCK_GETTIME - if (sysconf(_SC_MONOTONIC_CLOCK) <= 0) - timer->clock_id = CLOCK_REALTIME; - else - timer->clock_id = CLOCK_MONOTONIC; - clock_gettime(timer->clock_id, &timer->ts0); -#else - gettimeofday(&timer->tv0, NULL); -#endif -} - -void -timer_stop(timedelta_t *timer) -{ - -#ifdef _WIN32 - GetSystemTimeAsFileTime(&timer->ft0); -#elif JEMALLOC_CLOCK_GETTIME - clock_gettime(timer->clock_id, &timer->ts1); -#else - gettimeofday(&timer->tv1, NULL); -#endif -} - -uint64_t -timer_usec(const timedelta_t *timer) -{ - -#ifdef _WIN32 - uint64_t t0, t1; - t0 = (((uint64_t)timer->ft0.dwHighDateTime) << 32) | - timer->ft0.dwLowDateTime; - t1 = (((uint64_t)timer->ft1.dwHighDateTime) << 32) | - timer->ft1.dwLowDateTime; - return ((t1 - t0) / 10); -#elif JEMALLOC_CLOCK_GETTIME - return (((timer->ts1.tv_sec - timer->ts0.tv_sec) * 1000000) + - (timer->ts1.tv_nsec - timer->ts0.tv_nsec) / 1000); -#else - return (((timer->tv1.tv_sec - timer->tv0.tv_sec) * 1000000) + - timer->tv1.tv_usec - timer->tv0.tv_usec); 
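/*
 * All three timer branches above normalize to microseconds: FILETIME
 * counts 100 ns ticks (hence the trailing /10), timespec carries
 * nanoseconds (/1000), and timeval already carries microseconds. A
 * worked example with hypothetical clock_gettime() readings:
 *
 *     ts0 = {2, 500000000}, ts1 = {3, 250000000}
 *     usec = (3 - 2) * 1000000 + (250000000 - 500000000) / 1000
 *          = 1000000 - 250000
 *          = 750000          // i.e. 0.75 s
 *
 * Note that timer_stop() in the deleted code stores into ft0 on _WIN32
 * even though timer_usec() reads ft1 for the stop value; that looks
 * like an upstream quirk, preserved verbatim by this deletion.
 */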
-#endif -} - -void -timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) -{ - uint64_t t0 = timer_usec(a); - uint64_t t1 = timer_usec(b); - uint64_t mult; - unsigned i = 0; - unsigned j; - int n; - - /* Whole. */ - n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1); - i += n; - if (i >= buflen) - return; - mult = 1; - for (j = 0; j < n; j++) - mult *= 10; - - /* Decimal. */ - n = malloc_snprintf(&buf[i], buflen-i, "."); - i += n; - - /* Fraction. */ - while (i < buflen-1) { - uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10 - >= 5)) ? 1 : 0; - n = malloc_snprintf(&buf[i], buflen-i, - "%"FMTu64, (t0 * mult / t1) % 10 + round); - i += n; - mult *= 10; - } -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/test.sh.in b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/test.sh.in deleted file mode 100644 index a39f99f..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/test.sh.in +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/sh - -case @abi@ in - macho) - export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib" - ;; - pecoff) - export PATH="${PATH}:@objroot@lib" - ;; - *) - ;; -esac - -# Corresponds to test_status_t. -pass_code=0 -skip_code=1 -fail_code=2 - -pass_count=0 -skip_count=0 -fail_count=0 -for t in $@; do - if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then - echo - fi - echo "=== ${t} ===" - ${t}@exe@ @abs_srcroot@ @abs_objroot@ - result_code=$? - case ${result_code} in - ${pass_code}) - pass_count=$((pass_count+1)) - ;; - ${skip_code}) - skip_count=$((skip_count+1)) - ;; - ${fail_code}) - fail_count=$((fail_count+1)) - ;; - *) - echo "Test harness error" 1>&2 - exit 1 - esac -done - -total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}` -echo -echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}" - -if [ ${fail_count} -eq 0 ] ; then - exit 0 -else - exit 1 -fi diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/atomic.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/atomic.c deleted file mode 100644 index bdd74f6..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/atomic.c +++ /dev/null @@ -1,122 +0,0 @@ -#include "test/jemalloc_test.h" - -#define TEST_STRUCT(p, t) \ -struct p##_test_s { \ - t accum0; \ - t x; \ - t s; \ -}; \ -typedef struct p##_test_s p##_test_t; - -#define TEST_BODY(p, t, tc, ta, FMT) do { \ - const p##_test_t tests[] = { \ - {(t)-1, (t)-1, (t)-2}, \ - {(t)-1, (t) 0, (t)-2}, \ - {(t)-1, (t) 1, (t)-2}, \ - \ - {(t) 0, (t)-1, (t)-2}, \ - {(t) 0, (t) 0, (t)-2}, \ - {(t) 0, (t) 1, (t)-2}, \ - \ - {(t) 1, (t)-1, (t)-2}, \ - {(t) 1, (t) 0, (t)-2}, \ - {(t) 1, (t) 1, (t)-2}, \ - \ - {(t)0, (t)-(1 << 22), (t)-2}, \ - {(t)0, (t)(1 << 22), (t)-2}, \ - {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \ - {(t)(1 << 22), (t)(1 << 22), (t)-2} \ - }; \ - unsigned i; \ - \ - for (i = 0; i < sizeof(tests)/sizeof(p##_test_t); i++) { \ - bool err; \ - t accum = tests[i].accum0; \ - assert_##ta##_eq(atomic_read_##p(&accum), \ - tests[i].accum0, \ - "Erroneous read, i=%u", i); \ - \ - assert_##ta##_eq(atomic_add_##p(&accum, tests[i].x), \ - (t)((tc)tests[i].accum0 + (tc)tests[i].x), \ - "i=%u, accum=%"FMT", x=%"FMT, \ - i, tests[i].accum0, tests[i].x); \ - assert_##ta##_eq(atomic_read_##p(&accum), accum, \ - "Erroneous add, i=%u", i); \ - \ - accum = tests[i].accum0; \ - assert_##ta##_eq(atomic_sub_##p(&accum, tests[i].x), \ - 
(t)((tc)tests[i].accum0 - (tc)tests[i].x), \ - "i=%u, accum=%"FMT", x=%"FMT, \ - i, tests[i].accum0, tests[i].x); \ - assert_##ta##_eq(atomic_read_##p(&accum), accum, \ - "Erroneous sub, i=%u", i); \ - \ - accum = tests[i].accum0; \ - err = atomic_cas_##p(&accum, tests[i].x, tests[i].s); \ - assert_b_eq(err, tests[i].accum0 != tests[i].x, \ - "Erroneous cas success/failure result"); \ - assert_##ta##_eq(accum, err ? tests[i].accum0 : \ - tests[i].s, "Erroneous cas effect, i=%u", i); \ - \ - accum = tests[i].accum0; \ - atomic_write_##p(&accum, tests[i].s); \ - assert_##ta##_eq(accum, tests[i].s, \ - "Erroneous write, i=%u", i); \ - } \ -} while (0) - -TEST_STRUCT(uint64, uint64_t) -TEST_BEGIN(test_atomic_uint64) -{ - -#if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) - test_skip("64-bit atomic operations not supported"); -#else - TEST_BODY(uint64, uint64_t, uint64_t, u64, FMTx64); -#endif -} -TEST_END - -TEST_STRUCT(uint32, uint32_t) -TEST_BEGIN(test_atomic_uint32) -{ - - TEST_BODY(uint32, uint32_t, uint32_t, u32, "#"FMTx32); -} -TEST_END - -TEST_STRUCT(p, void *) -TEST_BEGIN(test_atomic_p) -{ - - TEST_BODY(p, void *, uintptr_t, ptr, "p"); -} -TEST_END - -TEST_STRUCT(z, size_t) -TEST_BEGIN(test_atomic_z) -{ - - TEST_BODY(z, size_t, size_t, zu, "#zx"); -} -TEST_END - -TEST_STRUCT(u, unsigned) -TEST_BEGIN(test_atomic_u) -{ - - TEST_BODY(u, unsigned, unsigned, u, "#x"); -} -TEST_END - -int -main(void) -{ - - return (test( - test_atomic_uint64, - test_atomic_uint32, - test_atomic_p, - test_atomic_z, - test_atomic_u)); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/bitmap.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/bitmap.c deleted file mode 100644 index 7da583d..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/bitmap.c +++ /dev/null @@ -1,159 +0,0 @@ -#include "test/jemalloc_test.h" - -TEST_BEGIN(test_bitmap_size) -{ - size_t i, prev_size; - - prev_size = 0; - for (i = 1; i <= BITMAP_MAXBITS; i++) { - size_t size = bitmap_size(i); - assert_true(size >= prev_size, - "Bitmap size is smaller than expected"); - prev_size = size; - } -} -TEST_END - -TEST_BEGIN(test_bitmap_init) -{ - size_t i; - - for (i = 1; i <= BITMAP_MAXBITS; i++) { - bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - size_t j; - bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) * - bitmap_info_ngroups(&binfo)); - bitmap_init(bitmap, &binfo); - - for (j = 0; j < i; j++) { - assert_false(bitmap_get(bitmap, &binfo, j), - "Bit should be unset"); - } - free(bitmap); - } - } -} -TEST_END - -TEST_BEGIN(test_bitmap_set) -{ - size_t i; - - for (i = 1; i <= BITMAP_MAXBITS; i++) { - bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - size_t j; - bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) * - bitmap_info_ngroups(&binfo)); - bitmap_init(bitmap, &binfo); - - for (j = 0; j < i; j++) - bitmap_set(bitmap, &binfo, j); - assert_true(bitmap_full(bitmap, &binfo), - "All bits should be set"); - free(bitmap); - } - } -} -TEST_END - -TEST_BEGIN(test_bitmap_unset) -{ - size_t i; - - for (i = 1; i <= BITMAP_MAXBITS; i++) { - bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - size_t j; - bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) * - bitmap_info_ngroups(&binfo)); - bitmap_init(bitmap, &binfo); - - for (j = 0; j < i; j++) - bitmap_set(bitmap, &binfo, j); - assert_true(bitmap_full(bitmap, &binfo), - "All bits should be set"); - for (j = 0; j < i; j++) - bitmap_unset(bitmap, &binfo, j); - for (j = 0; j < i; j++) - 
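/*
 * The CAS leg of TEST_BODY above depends on jemalloc's convention that
 * atomic_cas_*() returns false on success and true on failure. A
 * minimal sketch with concrete (hypothetical) values, using the size_t
 * variant generated as atomic_cas_z:
 *
 *     size_t accum = 1;
 *     bool err = atomic_cas_z(&accum, 1, 7);  // expected == current
 *     // err == false, accum == 7
 *     err = atomic_cas_z(&accum, 1, 9);       // expected != current
 *     // err == true, accum still 7
 *
 * which is exactly the relation assert_b_eq(err, accum0 != x) encodes
 * for every row of the tests[] table.
 */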
bitmap_set(bitmap, &binfo, j); - assert_true(bitmap_full(bitmap, &binfo), - "All bits should be set"); - free(bitmap); - } - } -} -TEST_END - -TEST_BEGIN(test_bitmap_sfu) -{ - size_t i; - - for (i = 1; i <= BITMAP_MAXBITS; i++) { - bitmap_info_t binfo; - bitmap_info_init(&binfo, i); - { - ssize_t j; - bitmap_t *bitmap = (bitmap_t *)malloc(sizeof(bitmap_t) * - bitmap_info_ngroups(&binfo)); - bitmap_init(bitmap, &binfo); - - /* Iteratively set bits starting at the beginning. */ - for (j = 0; j < i; j++) { - assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, - "First unset bit should be just after " - "previous first unset bit"); - } - assert_true(bitmap_full(bitmap, &binfo), - "All bits should be set"); - - /* - * Iteratively unset bits starting at the end, and - * verify that bitmap_sfu() reaches the unset bits. - */ - for (j = i - 1; j >= 0; j--) { - bitmap_unset(bitmap, &binfo, j); - assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, - "First unset bit should the bit previously " - "unset"); - bitmap_unset(bitmap, &binfo, j); - } - assert_false(bitmap_get(bitmap, &binfo, 0), - "Bit should be unset"); - - /* - * Iteratively set bits starting at the beginning, and - * verify that bitmap_sfu() looks past them. - */ - for (j = 1; j < i; j++) { - bitmap_set(bitmap, &binfo, j - 1); - assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, - "First unset bit should be just after the " - "bit previously set"); - bitmap_unset(bitmap, &binfo, j); - } - assert_zd_eq(bitmap_sfu(bitmap, &binfo), i - 1, - "First unset bit should be the last bit"); - assert_true(bitmap_full(bitmap, &binfo), - "All bits should be set"); - free(bitmap); - } - } -} -TEST_END - -int -main(void) -{ - - return (test( - test_bitmap_size, - test_bitmap_init, - test_bitmap_set, - test_bitmap_unset, - test_bitmap_sfu)); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/junk.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/junk.c deleted file mode 100644 index b23dd1e..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/junk.c +++ /dev/null @@ -1,254 +0,0 @@ -#include "test/jemalloc_test.h" - -#ifdef JEMALLOC_FILL -# ifndef JEMALLOC_TEST_JUNK_OPT -# define JEMALLOC_TEST_JUNK_OPT "junk:true" -# endif -const char *malloc_conf = - "abort:false,zero:false,redzone:true,quarantine:0," JEMALLOC_TEST_JUNK_OPT; -#endif - -static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig; -static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig; -static huge_dalloc_junk_t *huge_dalloc_junk_orig; -static void *watch_for_junking; -static bool saw_junking; - -static void -watch_junking(void *p) -{ - - watch_for_junking = p; - saw_junking = false; -} - -static void -arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info) -{ - size_t i; - - arena_dalloc_junk_small_orig(ptr, bin_info); - for (i = 0; i < bin_info->reg_size; i++) { - assert_c_eq(((char *)ptr)[i], 0x5a, - "Missing junk fill for byte %zu/%zu of deallocated region", - i, bin_info->reg_size); - } - if (ptr == watch_for_junking) - saw_junking = true; -} - -static void -arena_dalloc_junk_large_intercept(void *ptr, size_t usize) -{ - size_t i; - - arena_dalloc_junk_large_orig(ptr, usize); - for (i = 0; i < usize; i++) { - assert_c_eq(((char *)ptr)[i], 0x5a, - "Missing junk fill for byte %zu/%zu of deallocated region", - i, usize); - } - if (ptr == watch_for_junking) - saw_junking = true; -} - -static void -huge_dalloc_junk_intercept(void *ptr, size_t usize) -{ - - huge_dalloc_junk_orig(ptr, usize); - /* - * The 
conditions under which junk filling actually occurs are nuanced - * enough that it doesn't make sense to duplicate the decision logic in - * test code, so don't actually check that the region is junk-filled. - */ - if (ptr == watch_for_junking) - saw_junking = true; -} - -static void -test_junk(size_t sz_min, size_t sz_max) -{ - char *s; - size_t sz_prev, sz, i; - - if (opt_junk_free) { - arena_dalloc_junk_small_orig = arena_dalloc_junk_small; - arena_dalloc_junk_small = arena_dalloc_junk_small_intercept; - arena_dalloc_junk_large_orig = arena_dalloc_junk_large; - arena_dalloc_junk_large = arena_dalloc_junk_large_intercept; - huge_dalloc_junk_orig = huge_dalloc_junk; - huge_dalloc_junk = huge_dalloc_junk_intercept; - } - - sz_prev = 0; - s = (char *)mallocx(sz_min, 0); - assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); - - for (sz = sallocx(s, 0); sz <= sz_max; - sz_prev = sz, sz = sallocx(s, 0)) { - if (sz_prev > 0) { - assert_c_eq(s[0], 'a', - "Previously allocated byte %zu/%zu is corrupted", - ZU(0), sz_prev); - assert_c_eq(s[sz_prev-1], 'a', - "Previously allocated byte %zu/%zu is corrupted", - sz_prev-1, sz_prev); - } - - for (i = sz_prev; i < sz; i++) { - if (opt_junk_alloc) { - assert_c_eq(s[i], 0xa5, - "Newly allocated byte %zu/%zu isn't " - "junk-filled", i, sz); - } - s[i] = 'a'; - } - - if (xallocx(s, sz+1, 0, 0) == sz) { - watch_junking(s); - s = (char *)rallocx(s, sz+1, 0); - assert_ptr_not_null((void *)s, - "Unexpected rallocx() failure"); - assert_true(!opt_junk_free || saw_junking, - "Expected region of size %zu to be junk-filled", - sz); - } - } - - watch_junking(s); - dallocx(s, 0); - assert_true(!opt_junk_free || saw_junking, - "Expected region of size %zu to be junk-filled", sz); - - if (opt_junk_free) { - arena_dalloc_junk_small = arena_dalloc_junk_small_orig; - arena_dalloc_junk_large = arena_dalloc_junk_large_orig; - huge_dalloc_junk = huge_dalloc_junk_orig; - } -} - -TEST_BEGIN(test_junk_small) -{ - - test_skip_if(!config_fill); - test_junk(1, SMALL_MAXCLASS-1); -} -TEST_END - -TEST_BEGIN(test_junk_large) -{ - - test_skip_if(!config_fill); - test_junk(SMALL_MAXCLASS+1, large_maxclass); -} -TEST_END - -TEST_BEGIN(test_junk_huge) -{ - - test_skip_if(!config_fill); - test_junk(large_maxclass+1, chunksize*2); -} -TEST_END - -arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig; -static void *most_recently_trimmed; - -static size_t -shrink_size(size_t size) -{ - size_t shrink_size; - - for (shrink_size = size - 1; nallocx(shrink_size, 0) == size; - shrink_size--) - ; /* Do nothing. 
*/ - - return (shrink_size); -} - -static void -arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize) -{ - - arena_ralloc_junk_large_orig(ptr, old_usize, usize); - assert_zu_eq(old_usize, large_maxclass, "Unexpected old_usize"); - assert_zu_eq(usize, shrink_size(large_maxclass), "Unexpected usize"); - most_recently_trimmed = ptr; -} - -TEST_BEGIN(test_junk_large_ralloc_shrink) -{ - void *p1, *p2; - - p1 = mallocx(large_maxclass, 0); - assert_ptr_not_null(p1, "Unexpected mallocx() failure"); - - arena_ralloc_junk_large_orig = arena_ralloc_junk_large; - arena_ralloc_junk_large = arena_ralloc_junk_large_intercept; - - p2 = rallocx(p1, shrink_size(large_maxclass), 0); - assert_ptr_eq(p1, p2, "Unexpected move during shrink"); - - arena_ralloc_junk_large = arena_ralloc_junk_large_orig; - - assert_ptr_eq(most_recently_trimmed, p1, - "Expected trimmed portion of region to be junk-filled"); -} -TEST_END - -static bool detected_redzone_corruption; - -static void -arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after, - size_t offset, uint8_t byte) -{ - - detected_redzone_corruption = true; -} - -TEST_BEGIN(test_junk_redzone) -{ - char *s; - arena_redzone_corruption_t *arena_redzone_corruption_orig; - - test_skip_if(!config_fill); - test_skip_if(!opt_junk_alloc || !opt_junk_free); - - arena_redzone_corruption_orig = arena_redzone_corruption; - arena_redzone_corruption = arena_redzone_corruption_replacement; - - /* Test underflow. */ - detected_redzone_corruption = false; - s = (char *)mallocx(1, 0); - assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); - s[-1] = 0xbb; - dallocx(s, 0); - assert_true(detected_redzone_corruption, - "Did not detect redzone corruption"); - - /* Test overflow. */ - detected_redzone_corruption = false; - s = (char *)mallocx(1, 0); - assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); - s[sallocx(s, 0)] = 0xbb; - dallocx(s, 0); - assert_true(detected_redzone_corruption, - "Did not detect redzone corruption"); - - arena_redzone_corruption = arena_redzone_corruption_orig; -} -TEST_END - -int -main(void) -{ - - assert(!config_fill || opt_junk_alloc || opt_junk_free); - return (test( - test_junk_small, - test_junk_large, - test_junk_huge, - test_junk_large_ralloc_shrink, - test_junk_redzone)); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/junk_alloc.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/junk_alloc.c deleted file mode 100644 index 8db3331..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/junk_alloc.c +++ /dev/null @@ -1,3 +0,0 @@ -#define JEMALLOC_TEST_JUNK_OPT "junk:alloc" -#include "junk.c" -#undef JEMALLOC_TEST_JUNK_OPT diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/junk_free.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/junk_free.c deleted file mode 100644 index 482a61d..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/junk_free.c +++ /dev/null @@ -1,3 +0,0 @@ -#define JEMALLOC_TEST_JUNK_OPT "junk:free" -#include "junk.c" -#undef JEMALLOC_TEST_JUNK_OPT diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/mallctl.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/mallctl.c deleted file mode 100644 index 31e354c..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/mallctl.c +++ /dev/null @@ -1,633 +0,0 @@ -#include "test/jemalloc_test.h" - -TEST_BEGIN(test_mallctl_errors) -{ - 
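/*
 * The junk tests deleted above pin down jemalloc's two fill patterns:
 * newly allocated memory is junked with 0xa5 and freed memory with
 * 0x5a (nibble-swapped magic bytes). A condensed sketch of what
 * test_junk() asserts, assuming a junk:true configuration:
 *
 *     char *s = mallocx(8, 0);
 *     // opt_junk_alloc: every byte reads 0xa5 before the first write
 *     assert((unsigned char)s[0] == (unsigned char)0xa5);
 *     dallocx(s, 0);
 *     // opt_junk_free: the region is filled with 0x5a on free, which
 *     // the tests observe via the arena_dalloc_junk_* interceptors
 *     // rather than by reading freed memory directly.
 */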
uint64_t epoch; - size_t sz; - - assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT, - "mallctl() should return ENOENT for non-existent names"); - - assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")), - EPERM, "mallctl() should return EPERM on attempt to write " - "read-only value"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)-1), - EINVAL, "mallctl() should return EINVAL for input size mismatch"); - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)+1), - EINVAL, "mallctl() should return EINVAL for input size mismatch"); - - sz = sizeof(epoch)-1; - assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL, - "mallctl() should return EINVAL for output size mismatch"); - sz = sizeof(epoch)+1; - assert_d_eq(mallctl("epoch", &epoch, &sz, NULL, 0), EINVAL, - "mallctl() should return EINVAL for output size mismatch"); -} -TEST_END - -TEST_BEGIN(test_mallctlnametomib_errors) -{ - size_t mib[1]; - size_t miblen; - - miblen = sizeof(mib)/sizeof(size_t); - assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT, - "mallctlnametomib() should return ENOENT for non-existent names"); -} -TEST_END - -TEST_BEGIN(test_mallctlbymib_errors) -{ - uint64_t epoch; - size_t sz; - size_t mib[1]; - size_t miblen; - - miblen = sizeof(mib)/sizeof(size_t); - assert_d_eq(mallctlnametomib("version", mib, &miblen), 0, - "Unexpected mallctlnametomib() failure"); - - assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0", - strlen("0.0.0")), EPERM, "mallctl() should return EPERM on " - "attempt to write read-only value"); - - miblen = sizeof(mib)/sizeof(size_t); - assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0, - "Unexpected mallctlnametomib() failure"); - - assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch, - sizeof(epoch)-1), EINVAL, - "mallctlbymib() should return EINVAL for input size mismatch"); - assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &epoch, - sizeof(epoch)+1), EINVAL, - "mallctlbymib() should return EINVAL for input size mismatch"); - - sz = sizeof(epoch)-1; - assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL, - "mallctlbymib() should return EINVAL for output size mismatch"); - sz = sizeof(epoch)+1; - assert_d_eq(mallctlbymib(mib, miblen, &epoch, &sz, NULL, 0), EINVAL, - "mallctlbymib() should return EINVAL for output size mismatch"); -} -TEST_END - -TEST_BEGIN(test_mallctl_read_write) -{ - uint64_t old_epoch, new_epoch; - size_t sz = sizeof(old_epoch); - - /* Blind. */ - assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); - - /* Read. */ - assert_d_eq(mallctl("epoch", &old_epoch, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); - - /* Write. */ - assert_d_eq(mallctl("epoch", NULL, NULL, &new_epoch, sizeof(new_epoch)), - 0, "Unexpected mallctl() failure"); - assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); - - /* Read+write. 
*/ - assert_d_eq(mallctl("epoch", &old_epoch, &sz, &new_epoch, - sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); - assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); -} -TEST_END - -TEST_BEGIN(test_mallctlnametomib_short_mib) -{ - size_t mib[4]; - size_t miblen; - - miblen = 3; - mib[3] = 42; - assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, - "Unexpected mallctlnametomib() failure"); - assert_zu_eq(miblen, 3, "Unexpected mib output length"); - assert_zu_eq(mib[3], 42, - "mallctlnametomib() wrote past the end of the input mib"); -} -TEST_END - -TEST_BEGIN(test_mallctl_config) -{ - -#define TEST_MALLCTL_CONFIG(config) do { \ - bool oldval; \ - size_t sz = sizeof(oldval); \ - assert_d_eq(mallctl("config."#config, &oldval, &sz, NULL, 0), \ - 0, "Unexpected mallctl() failure"); \ - assert_b_eq(oldval, config_##config, "Incorrect config value"); \ - assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ -} while (0) - - TEST_MALLCTL_CONFIG(cache_oblivious); - TEST_MALLCTL_CONFIG(debug); - TEST_MALLCTL_CONFIG(fill); - TEST_MALLCTL_CONFIG(lazy_lock); - TEST_MALLCTL_CONFIG(munmap); - TEST_MALLCTL_CONFIG(prof); - TEST_MALLCTL_CONFIG(prof_libgcc); - TEST_MALLCTL_CONFIG(prof_libunwind); - TEST_MALLCTL_CONFIG(stats); - TEST_MALLCTL_CONFIG(tcache); - TEST_MALLCTL_CONFIG(tls); - TEST_MALLCTL_CONFIG(utrace); - TEST_MALLCTL_CONFIG(valgrind); - TEST_MALLCTL_CONFIG(xmalloc); - -#undef TEST_MALLCTL_CONFIG -} -TEST_END - -TEST_BEGIN(test_mallctl_opt) -{ - bool config_always = true; - -#define TEST_MALLCTL_OPT(t, opt, config) do { \ - t oldval; \ - size_t sz = sizeof(oldval); \ - int expected = config_##config ? 0 : ENOENT; \ - int result = mallctl("opt."#opt, &oldval, &sz, NULL, 0); \ - assert_d_eq(result, expected, \ - "Unexpected mallctl() result for opt."#opt); \ - assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ -} while (0) - - TEST_MALLCTL_OPT(bool, abort, always); - TEST_MALLCTL_OPT(size_t, lg_chunk, always); - TEST_MALLCTL_OPT(const char *, dss, always); - TEST_MALLCTL_OPT(size_t, narenas, always); - TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always); - TEST_MALLCTL_OPT(bool, stats_print, always); - TEST_MALLCTL_OPT(const char *, junk, fill); - TEST_MALLCTL_OPT(size_t, quarantine, fill); - TEST_MALLCTL_OPT(bool, redzone, fill); - TEST_MALLCTL_OPT(bool, zero, fill); - TEST_MALLCTL_OPT(bool, utrace, utrace); - TEST_MALLCTL_OPT(bool, xmalloc, xmalloc); - TEST_MALLCTL_OPT(bool, tcache, tcache); - TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache); - TEST_MALLCTL_OPT(bool, prof, prof); - TEST_MALLCTL_OPT(const char *, prof_prefix, prof); - TEST_MALLCTL_OPT(bool, prof_active, prof); - TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof); - TEST_MALLCTL_OPT(bool, prof_accum, prof); - TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof); - TEST_MALLCTL_OPT(bool, prof_gdump, prof); - TEST_MALLCTL_OPT(bool, prof_final, prof); - TEST_MALLCTL_OPT(bool, prof_leak, prof); - -#undef TEST_MALLCTL_OPT -} -TEST_END - -TEST_BEGIN(test_manpage_example) -{ - unsigned nbins, i; - size_t mib[4]; - size_t len, miblen; - - len = sizeof(nbins); - assert_d_eq(mallctl("arenas.nbins", &nbins, &len, NULL, 0), 0, - "Unexpected mallctl() failure"); - - miblen = 4; - assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0, - "Unexpected mallctlnametomib() failure"); - for (i = 0; i < nbins; i++) { - size_t bin_size; - - mib[2] = i; - len = sizeof(bin_size); - assert_d_eq(mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0), - 0, "Unexpected mallctlbymib() failure"); - /* Do 
something with bin_size... */ - } -} -TEST_END - -TEST_BEGIN(test_tcache_none) -{ - void *p0, *q, *p1; - - test_skip_if(!config_tcache); - - /* Allocate p and q. */ - p0 = mallocx(42, 0); - assert_ptr_not_null(p0, "Unexpected mallocx() failure"); - q = mallocx(42, 0); - assert_ptr_not_null(q, "Unexpected mallocx() failure"); - - /* Deallocate p and q, but bypass the tcache for q. */ - dallocx(p0, 0); - dallocx(q, MALLOCX_TCACHE_NONE); - - /* Make sure that tcache-based allocation returns p, not q. */ - p1 = mallocx(42, 0); - assert_ptr_not_null(p1, "Unexpected mallocx() failure"); - assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region"); - - /* Clean up. */ - dallocx(p1, MALLOCX_TCACHE_NONE); -} -TEST_END - -TEST_BEGIN(test_tcache) -{ -#define NTCACHES 10 - unsigned tis[NTCACHES]; - void *ps[NTCACHES]; - void *qs[NTCACHES]; - unsigned i; - size_t sz, psz, qsz; - - test_skip_if(!config_tcache); - - psz = 42; - qsz = nallocx(psz, 0) + 1; - - /* Create tcaches. */ - for (i = 0; i < NTCACHES; i++) { - sz = sizeof(unsigned); - assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0, - "Unexpected mallctl() failure, i=%u", i); - } - - /* Exercise tcache ID recycling. */ - for (i = 0; i < NTCACHES; i++) { - assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i], - sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", - i); - } - for (i = 0; i < NTCACHES; i++) { - sz = sizeof(unsigned); - assert_d_eq(mallctl("tcache.create", &tis[i], &sz, NULL, 0), 0, - "Unexpected mallctl() failure, i=%u", i); - } - - /* Flush empty tcaches. */ - for (i = 0; i < NTCACHES; i++) { - assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i], - sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", - i); - } - - /* Cache some allocations. */ - for (i = 0; i < NTCACHES; i++) { - ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); - assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", - i); - dallocx(ps[i], MALLOCX_TCACHE(tis[i])); - - qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i])); - assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u", - i); - dallocx(qs[i], MALLOCX_TCACHE(tis[i])); - } - - /* Verify that tcaches allocate cached regions. */ - for (i = 0; i < NTCACHES; i++) { - void *p0 = ps[i]; - ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); - assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", - i); - assert_ptr_eq(ps[i], p0, - "Expected mallocx() to allocate cached region, i=%u", i); - } - - /* Verify that reallocation uses cached regions. */ - for (i = 0; i < NTCACHES; i++) { - void *q0 = qs[i]; - qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i])); - assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u", - i); - assert_ptr_eq(qs[i], q0, - "Expected rallocx() to allocate cached region, i=%u", i); - /* Avoid undefined behavior in case of test failure. */ - if (qs[i] == NULL) - qs[i] = ps[i]; - } - for (i = 0; i < NTCACHES; i++) - dallocx(qs[i], MALLOCX_TCACHE(tis[i])); - - /* Flush some non-empty tcaches. */ - for (i = 0; i < NTCACHES/2; i++) { - assert_d_eq(mallctl("tcache.flush", NULL, NULL, &tis[i], - sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", - i); - } - - /* Destroy tcaches. 
*/ - for (i = 0; i < NTCACHES; i++) { - assert_d_eq(mallctl("tcache.destroy", NULL, NULL, &tis[i], - sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", - i); - } -} -TEST_END - -TEST_BEGIN(test_thread_arena) -{ - unsigned arena_old, arena_new, narenas; - size_t sz = sizeof(unsigned); - - assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect"); - arena_new = narenas - 1; - assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new, - sizeof(unsigned)), 0, "Unexpected mallctl() failure"); - arena_new = 0; - assert_d_eq(mallctl("thread.arena", &arena_old, &sz, &arena_new, - sizeof(unsigned)), 0, "Unexpected mallctl() failure"); -} -TEST_END - -TEST_BEGIN(test_arena_i_lg_dirty_mult) -{ - ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult; - size_t sz = sizeof(ssize_t); - - assert_d_eq(mallctl("arena.0.lg_dirty_mult", &orig_lg_dirty_mult, &sz, - NULL, 0), 0, "Unexpected mallctl() failure"); - - lg_dirty_mult = -2; - assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL, - &lg_dirty_mult, sizeof(ssize_t)), EFAULT, - "Unexpected mallctl() success"); - - lg_dirty_mult = (sizeof(size_t) << 3); - assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL, - &lg_dirty_mult, sizeof(ssize_t)), EFAULT, - "Unexpected mallctl() success"); - - for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1; - lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult - = lg_dirty_mult, lg_dirty_mult++) { - ssize_t old_lg_dirty_mult; - - assert_d_eq(mallctl("arena.0.lg_dirty_mult", &old_lg_dirty_mult, - &sz, &lg_dirty_mult, sizeof(ssize_t)), 0, - "Unexpected mallctl() failure"); - assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult, - "Unexpected old arena.0.lg_dirty_mult"); - } -} -TEST_END - -TEST_BEGIN(test_arena_i_purge) -{ - unsigned narenas; - size_t sz = sizeof(unsigned); - size_t mib[3]; - size_t miblen = 3; - - assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, - "Unexpected mallctl() failure"); - - assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, - "Unexpected mallctlnametomib() failure"); - mib[1] = narenas; - assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, - "Unexpected mallctlbymib() failure"); -} -TEST_END - -TEST_BEGIN(test_arena_i_dss) -{ - const char *dss_prec_old, *dss_prec_new; - size_t sz = sizeof(dss_prec_old); - size_t mib[3]; - size_t miblen; - - miblen = sizeof(mib)/sizeof(size_t); - assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, - "Unexpected mallctlnametomib() error"); - - dss_prec_new = "disabled"; - assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new, - sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); - assert_str_ne(dss_prec_old, "primary", - "Unexpected default for dss precedence"); - - assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old, - sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure"); - - assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_str_ne(dss_prec_old, "primary", - "Unexpected value for dss precedence"); - - mib[1] = narenas_total_get(); - dss_prec_new = "disabled"; - assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new, - sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); - assert_str_ne(dss_prec_old, "primary", - 
"Unexpected default for dss precedence"); - - assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old, - sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); - - assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_str_ne(dss_prec_old, "primary", - "Unexpected value for dss precedence"); -} -TEST_END - -TEST_BEGIN(test_arenas_initialized) -{ - unsigned narenas; - size_t sz = sizeof(narenas); - - assert_d_eq(mallctl("arenas.narenas", &narenas, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - { - VARIABLE_ARRAY(bool, initialized, narenas); - - sz = narenas * sizeof(bool); - assert_d_eq(mallctl("arenas.initialized", initialized, &sz, - NULL, 0), 0, "Unexpected mallctl() failure"); - } -} -TEST_END - -TEST_BEGIN(test_arenas_lg_dirty_mult) -{ - ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult; - size_t sz = sizeof(ssize_t); - - assert_d_eq(mallctl("arenas.lg_dirty_mult", &orig_lg_dirty_mult, &sz, - NULL, 0), 0, "Unexpected mallctl() failure"); - - lg_dirty_mult = -2; - assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL, - &lg_dirty_mult, sizeof(ssize_t)), EFAULT, - "Unexpected mallctl() success"); - - lg_dirty_mult = (sizeof(size_t) << 3); - assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL, - &lg_dirty_mult, sizeof(ssize_t)), EFAULT, - "Unexpected mallctl() success"); - - for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1; - lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult = - lg_dirty_mult, lg_dirty_mult++) { - ssize_t old_lg_dirty_mult; - - assert_d_eq(mallctl("arenas.lg_dirty_mult", &old_lg_dirty_mult, - &sz, &lg_dirty_mult, sizeof(ssize_t)), 0, - "Unexpected mallctl() failure"); - assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult, - "Unexpected old arenas.lg_dirty_mult"); - } -} -TEST_END - -TEST_BEGIN(test_arenas_constants) -{ - -#define TEST_ARENAS_CONSTANT(t, name, expected) do { \ - t name; \ - size_t sz = sizeof(t); \ - assert_d_eq(mallctl("arenas."#name, &name, &sz, NULL, 0), 0, \ - "Unexpected mallctl() failure"); \ - assert_zu_eq(name, expected, "Incorrect "#name" size"); \ -} while (0) - - TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM); - TEST_ARENAS_CONSTANT(size_t, page, PAGE); - TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS); - TEST_ARENAS_CONSTANT(unsigned, nlruns, nlclasses); - TEST_ARENAS_CONSTANT(unsigned, nhchunks, nhclasses); - -#undef TEST_ARENAS_CONSTANT -} -TEST_END - -TEST_BEGIN(test_arenas_bin_constants) -{ - -#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \ - t name; \ - size_t sz = sizeof(t); \ - assert_d_eq(mallctl("arenas.bin.0."#name, &name, &sz, NULL, 0), \ - 0, "Unexpected mallctl() failure"); \ - assert_zu_eq(name, expected, "Incorrect "#name" size"); \ -} while (0) - - TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size); - TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs); - TEST_ARENAS_BIN_CONSTANT(size_t, run_size, arena_bin_info[0].run_size); - -#undef TEST_ARENAS_BIN_CONSTANT -} -TEST_END - -TEST_BEGIN(test_arenas_lrun_constants) -{ - -#define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \ - t name; \ - size_t sz = sizeof(t); \ - assert_d_eq(mallctl("arenas.lrun.0."#name, &name, &sz, NULL, \ - 0), 0, "Unexpected mallctl() failure"); \ - assert_zu_eq(name, expected, "Incorrect "#name" size"); \ -} while (0) - - TEST_ARENAS_LRUN_CONSTANT(size_t, size, LARGE_MINCLASS); - -#undef TEST_ARENAS_LRUN_CONSTANT -} -TEST_END - -TEST_BEGIN(test_arenas_hchunk_constants) -{ - 
-#define TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do { \ - t name; \ - size_t sz = sizeof(t); \ - assert_d_eq(mallctl("arenas.hchunk.0."#name, &name, &sz, NULL, \ - 0), 0, "Unexpected mallctl() failure"); \ - assert_zu_eq(name, expected, "Incorrect "#name" size"); \ -} while (0) - - TEST_ARENAS_HCHUNK_CONSTANT(size_t, size, chunksize); - -#undef TEST_ARENAS_HCHUNK_CONSTANT -} -TEST_END - -TEST_BEGIN(test_arenas_extend) -{ - unsigned narenas_before, arena, narenas_after; - size_t sz = sizeof(unsigned); - - assert_d_eq(mallctl("arenas.narenas", &narenas_before, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_d_eq(mallctl("arenas.extend", &arena, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - assert_d_eq(mallctl("arenas.narenas", &narenas_after, &sz, NULL, 0), 0, - "Unexpected mallctl() failure"); - - assert_u_eq(narenas_before+1, narenas_after, - "Unexpected number of arenas before versus after extension"); - assert_u_eq(arena, narenas_after-1, "Unexpected arena index"); -} -TEST_END - -TEST_BEGIN(test_stats_arenas) -{ - -#define TEST_STATS_ARENAS(t, name) do { \ - t name; \ - size_t sz = sizeof(t); \ - assert_d_eq(mallctl("stats.arenas.0."#name, &name, &sz, NULL, \ - 0), 0, "Unexpected mallctl() failure"); \ -} while (0) - - TEST_STATS_ARENAS(const char *, dss); - TEST_STATS_ARENAS(unsigned, nthreads); - TEST_STATS_ARENAS(size_t, pactive); - TEST_STATS_ARENAS(size_t, pdirty); - -#undef TEST_STATS_ARENAS -} -TEST_END - -int -main(void) -{ - - return (test( - test_mallctl_errors, - test_mallctlnametomib_errors, - test_mallctlbymib_errors, - test_mallctl_read_write, - test_mallctlnametomib_short_mib, - test_mallctl_config, - test_mallctl_opt, - test_manpage_example, - test_tcache_none, - test_tcache, - test_thread_arena, - test_arena_i_lg_dirty_mult, - test_arena_i_purge, - test_arena_i_dss, - test_arenas_initialized, - test_arenas_lg_dirty_mult, - test_arenas_constants, - test_arenas_bin_constants, - test_arenas_lrun_constants, - test_arenas_hchunk_constants, - test_arenas_extend, - test_stats_arenas)); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/rtree.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/rtree.c deleted file mode 100644 index b54b3e8..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/rtree.c +++ /dev/null @@ -1,151 +0,0 @@ -#include "test/jemalloc_test.h" - -static rtree_node_elm_t * -node_alloc(size_t nelms) -{ - - return ((rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t))); -} - -static void -node_dalloc(rtree_node_elm_t *node) -{ - - free(node); -} - -TEST_BEGIN(test_rtree_get_empty) -{ - unsigned i; - - for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { - rtree_t rtree; - assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), - "Unexpected rtree_new() failure"); - assert_ptr_null(rtree_get(&rtree, 0, false), - "rtree_get() should return NULL for empty tree"); - rtree_delete(&rtree); - } -} -TEST_END - -TEST_BEGIN(test_rtree_extrema) -{ - unsigned i; - extent_node_t node_a, node_b; - - for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { - rtree_t rtree; - assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), - "Unexpected rtree_new() failure"); - - assert_false(rtree_set(&rtree, 0, &node_a), - "Unexpected rtree_set() failure"); - assert_ptr_eq(rtree_get(&rtree, 0, true), &node_a, - "rtree_get() should return previously set value"); - - assert_false(rtree_set(&rtree, ~((uintptr_t)0), &node_b), - "Unexpected rtree_set() failure"); - 
assert_ptr_eq(rtree_get(&rtree, ~((uintptr_t)0), true), &node_b, - "rtree_get() should return previously set value"); - - rtree_delete(&rtree); - } -} -TEST_END - -TEST_BEGIN(test_rtree_bits) -{ - unsigned i, j, k; - - for (i = 1; i < (sizeof(uintptr_t) << 3); i++) { - uintptr_t keys[] = {0, 1, - (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1}; - extent_node_t node; - rtree_t rtree; - - assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), - "Unexpected rtree_new() failure"); - - for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { - assert_false(rtree_set(&rtree, keys[j], &node), - "Unexpected rtree_set() failure"); - for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) { - assert_ptr_eq(rtree_get(&rtree, keys[k], true), - &node, "rtree_get() should return " - "previously set value and ignore " - "insignificant key bits; i=%u, j=%u, k=%u, " - "set key=%#"FMTxPTR", get key=%#"FMTxPTR, i, - j, k, keys[j], keys[k]); - } - assert_ptr_null(rtree_get(&rtree, - (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)), false), - "Only leftmost rtree leaf should be set; " - "i=%u, j=%u", i, j); - assert_false(rtree_set(&rtree, keys[j], NULL), - "Unexpected rtree_set() failure"); - } - - rtree_delete(&rtree); - } -} -TEST_END - -TEST_BEGIN(test_rtree_random) -{ - unsigned i; - sfmt_t *sfmt; -#define NSET 16 -#define SEED 42 - - sfmt = init_gen_rand(SEED); - for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { - uintptr_t keys[NSET]; - extent_node_t node; - unsigned j; - rtree_t rtree; - - assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), - "Unexpected rtree_new() failure"); - - for (j = 0; j < NSET; j++) { - keys[j] = (uintptr_t)gen_rand64(sfmt); - assert_false(rtree_set(&rtree, keys[j], &node), - "Unexpected rtree_set() failure"); - assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node, - "rtree_get() should return previously set value"); - } - for (j = 0; j < NSET; j++) { - assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node, - "rtree_get() should return previously set value"); - } - - for (j = 0; j < NSET; j++) { - assert_false(rtree_set(&rtree, keys[j], NULL), - "Unexpected rtree_set() failure"); - assert_ptr_null(rtree_get(&rtree, keys[j], true), - "rtree_get() should return NULL for cleared key"); - } - for (j = 0; j < NSET; j++) { - assert_ptr_null(rtree_get(&rtree, keys[j], true), - "rtree_get() should return NULL for cleared key"); - } - - rtree_delete(&rtree); - } - fini_gen_rand(sfmt); -#undef NSET -#undef SEED -} -TEST_END - -int -main(void) -{ - - return (test( - test_rtree_get_empty, - test_rtree_extrema, - test_rtree_bits, - test_rtree_random)); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/size_classes.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/size_classes.c deleted file mode 100644 index d3aaebd..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/size_classes.c +++ /dev/null @@ -1,89 +0,0 @@ -#include "test/jemalloc_test.h" - -static size_t -get_max_size_class(void) -{ - unsigned nhchunks; - size_t mib[4]; - size_t sz, miblen, max_size_class; - - sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0, - "Unexpected mallctl() error"); - - miblen = sizeof(mib) / sizeof(size_t); - assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, - "Unexpected mallctlnametomib() error"); - mib[2] = nhchunks - 1; - - sz = sizeof(size_t); - assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0, - "Unexpected mallctlbymib() error"); - - 
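/* max_size_class now holds the size of the last, i.e. largest, huge size class. */ -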
return (max_size_class); -} - -TEST_BEGIN(test_size_classes) -{ - size_t size_class, max_size_class; - szind_t index, max_index; - - max_size_class = get_max_size_class(); - max_index = size2index(max_size_class); - - for (index = 0, size_class = index2size(index); index < max_index || - size_class < max_size_class; index++, size_class = - index2size(index)) { - assert_true(index < max_index, - "Loop conditionals should be equivalent; index=%u, " - "size_class=%zu (%#zx)", index, size_class, size_class); - assert_true(size_class < max_size_class, - "Loop conditionals should be equivalent; index=%u, " - "size_class=%zu (%#zx)", index, size_class, size_class); - - assert_u_eq(index, size2index(size_class), - "size2index() does not reverse index2size(): index=%u -->" - " size_class=%zu --> index=%u --> size_class=%zu", index, - size_class, size2index(size_class), - index2size(size2index(size_class))); - assert_zu_eq(size_class, index2size(size2index(size_class)), - "index2size() does not reverse size2index(): index=%u -->" - " size_class=%zu --> index=%u --> size_class=%zu", index, - size_class, size2index(size_class), - index2size(size2index(size_class))); - - assert_u_eq(index+1, size2index(size_class+1), - "Next size_class does not round up properly"); - - assert_zu_eq(size_class, (index > 0) ? - s2u(index2size(index-1)+1) : s2u(1), - "s2u() does not round up to size class"); - assert_zu_eq(size_class, s2u(size_class-1), - "s2u() does not round up to size class"); - assert_zu_eq(size_class, s2u(size_class), - "s2u() does not compute same size class"); - assert_zu_eq(s2u(size_class+1), index2size(index+1), - "s2u() does not round up to next size class"); - } - - assert_u_eq(index, size2index(index2size(index)), - "size2index() does not reverse index2size()"); - assert_zu_eq(max_size_class, index2size(size2index(max_size_class)), - "index2size() does not reverse size2index()"); - - assert_zu_eq(size_class, s2u(index2size(index-1)+1), - "s2u() does not round up to size class"); - assert_zu_eq(size_class, s2u(size_class-1), - "s2u() does not round up to size class"); - assert_zu_eq(size_class, s2u(size_class), - "s2u() does not compute same size class"); -} -TEST_END - -int -main(void) -{ - - return (test( - test_size_classes)); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/stats.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/stats.c deleted file mode 100644 index 8e4bc63..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/stats.c +++ /dev/null @@ -1,447 +0,0 @@ -#include "test/jemalloc_test.h" - -TEST_BEGIN(test_stats_summary) -{ - size_t *cactive; - size_t sz, allocated, active, resident, mapped; - int expected = config_stats ? 
0 : ENOENT; - - sz = sizeof(cactive); - assert_d_eq(mallctl("stats.cactive", &cactive, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); - - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.allocated", &allocated, &sz, NULL, 0), - expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.active", &active, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.resident", &resident, &sz, NULL, 0), - expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.mapped", &mapped, &sz, NULL, 0), expected, - "Unexpected mallctl() result"); - - if (config_stats) { - assert_zu_le(active, *cactive, - "active should be no larger than cactive"); - assert_zu_le(allocated, active, - "allocated should be no larger than active"); - assert_zu_lt(active, resident, - "active should be less than resident"); - assert_zu_lt(active, mapped, - "active should be less than mapped"); - } -} -TEST_END - -TEST_BEGIN(test_stats_huge) -{ - void *p; - uint64_t epoch; - size_t allocated; - uint64_t nmalloc, ndalloc, nrequests; - size_t sz; - int expected = config_stats ? 0 : ENOENT; - - p = mallocx(large_maxclass+1, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); - - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL, - 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL, - 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - - if (config_stats) { - assert_zu_gt(allocated, 0, - "allocated should be greater than zero"); - assert_u64_ge(nmalloc, ndalloc, - "nmalloc should be at least as large as ndalloc"); - assert_u64_le(nmalloc, nrequests, - "nmalloc should be no larger than nrequests"); - } - - dallocx(p, 0); -} -TEST_END - -TEST_BEGIN(test_stats_arenas_summary) -{ - unsigned arena; - void *little, *large, *huge; - uint64_t epoch; - size_t sz; - int expected = config_stats ? 
0 : ENOENT; - size_t mapped; - uint64_t npurge, nmadvise, purged; - - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); - - little = mallocx(SMALL_MAXCLASS, 0); - assert_ptr_not_null(little, "Unexpected mallocx() failure"); - large = mallocx(large_maxclass, 0); - assert_ptr_not_null(large, "Unexpected mallocx() failure"); - huge = mallocx(chunksize, 0); - assert_ptr_not_null(huge, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, - "Unexpected mallctl() failure"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); - - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.mapped", &mapped, &sz, NULL, 0), - expected, "Unexpected mallctl() result"); - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.npurge", &npurge, &sz, NULL, 0), - expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.nmadvise", &nmadvise, &sz, NULL, 0), - expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.purged", &purged, &sz, NULL, 0), - expected, "Unexpected mallctl() result"); - - if (config_stats) { - assert_u64_gt(npurge, 0, - "At least one purge should have occurred"); - assert_u64_le(nmadvise, purged, - "nmadvise should be no greater than purged"); - } - - dallocx(little, 0); - dallocx(large, 0); - dallocx(huge, 0); -} -TEST_END - -void * -thd_start(void *arg) -{ - - return (NULL); -} - -static void -no_lazy_lock(void) -{ - thd_t thd; - - thd_create(&thd, thd_start, NULL); - thd_join(thd, NULL); -} - -TEST_BEGIN(test_stats_arenas_small) -{ - unsigned arena; - void *p; - size_t sz, allocated; - uint64_t epoch, nmalloc, ndalloc, nrequests; - int expected = config_stats ? 0 : ENOENT; - - no_lazy_lock(); /* Lazy locking would dodge tcache testing. */ - - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); - - p = mallocx(SMALL_MAXCLASS, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), - config_tcache ? 0 : ENOENT, "Unexpected mallctl() result"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); - - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.small.allocated", &allocated, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.small.nrequests", &nrequests, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - - if (config_stats) { - assert_zu_gt(allocated, 0, - "allocated should be greater than zero"); - assert_u64_gt(nmalloc, 0, - "nmalloc should be greater than zero"); - assert_u64_ge(nmalloc, ndalloc, - "nmalloc should be at least as large as ndalloc"); - assert_u64_gt(nrequests, 0, - "nrequests should be greater than zero"); - } - - dallocx(p, 0); -} -TEST_END - -TEST_BEGIN(test_stats_arenas_large) -{ - unsigned arena; - void *p; - size_t sz, allocated; - uint64_t epoch, nmalloc, ndalloc, nrequests; - int expected = config_stats ? 
0 : ENOENT; - - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); - - p = mallocx(large_maxclass, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); - - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.large.allocated", &allocated, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.large.nrequests", &nrequests, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - - if (config_stats) { - assert_zu_gt(allocated, 0, - "allocated should be greater than zero"); - assert_zu_gt(nmalloc, 0, - "nmalloc should be greater than zero"); - assert_zu_ge(nmalloc, ndalloc, - "nmalloc should be at least as large as ndalloc"); - assert_zu_gt(nrequests, 0, - "nrequests should be greater than zero"); - } - - dallocx(p, 0); -} -TEST_END - -TEST_BEGIN(test_stats_arenas_huge) -{ - unsigned arena; - void *p; - size_t sz, allocated; - uint64_t epoch, nmalloc, ndalloc; - int expected = config_stats ? 0 : ENOENT; - - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); - - p = mallocx(chunksize, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); - - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - - if (config_stats) { - assert_zu_gt(allocated, 0, - "allocated should be greater than zero"); - assert_zu_gt(nmalloc, 0, - "nmalloc should be greater than zero"); - assert_zu_ge(nmalloc, ndalloc, - "nmalloc should be at least as large as ndalloc"); - } - - dallocx(p, 0); -} -TEST_END - -TEST_BEGIN(test_stats_arenas_bins) -{ - unsigned arena; - void *p; - size_t sz, curruns, curregs; - uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes; - uint64_t nruns, nreruns; - int expected = config_stats ? 0 : ENOENT; - - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); - - p = mallocx(arena_bin_info[0].reg_size, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), - config_tcache ? 
0 : ENOENT, "Unexpected mallctl() result"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); - - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", &curregs, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", &nfills, &sz, - NULL, 0), config_tcache ? expected : ENOENT, - "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", &nflushes, &sz, - NULL, 0), config_tcache ? expected : ENOENT, - "Unexpected mallctl() result"); - - assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", &nruns, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", &nreruns, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", &curruns, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - - if (config_stats) { - assert_u64_gt(nmalloc, 0, - "nmalloc should be greater than zero"); - assert_u64_ge(nmalloc, ndalloc, - "nmalloc should be at least as large as ndalloc"); - assert_u64_gt(nrequests, 0, - "nrequests should be greater than zero"); - assert_zu_gt(curregs, 0, - "allocated should be greater than zero"); - if (config_tcache) { - assert_u64_gt(nfills, 0, - "At least one fill should have occurred"); - assert_u64_gt(nflushes, 0, - "At least one flush should have occurred"); - } - assert_u64_gt(nruns, 0, - "At least one run should have been allocated"); - assert_zu_gt(curruns, 0, - "At least one run should be currently allocated"); - } - - dallocx(p, 0); -} -TEST_END - -TEST_BEGIN(test_stats_arenas_lruns) -{ - unsigned arena; - void *p; - uint64_t epoch, nmalloc, ndalloc, nrequests; - size_t curruns, sz; - int expected = config_stats ? 
0 : ENOENT; - - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); - - p = mallocx(LARGE_MINCLASS, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); - - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", &nrequests, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", &curruns, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - - if (config_stats) { - assert_u64_gt(nmalloc, 0, - "nmalloc should be greater than zero"); - assert_u64_ge(nmalloc, ndalloc, - "nmalloc should be at least as large as ndalloc"); - assert_u64_gt(nrequests, 0, - "nrequests should be greater than zero"); - assert_u64_gt(curruns, 0, - "At least one run should be currently allocated"); - } - - dallocx(p, 0); -} -TEST_END - -TEST_BEGIN(test_stats_arenas_hchunks) -{ - unsigned arena; - void *p; - uint64_t epoch, nmalloc, ndalloc; - size_t curhchunks, sz; - int expected = config_stats ? 0 : ENOENT; - - arena = 0; - assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)), - 0, "Unexpected mallctl() failure"); - - p = mallocx(chunksize, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - - assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0, - "Unexpected mallctl() failure"); - - sz = sizeof(uint64_t); - assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", &nmalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", &ndalloc, &sz, - NULL, 0), expected, "Unexpected mallctl() result"); - sz = sizeof(size_t); - assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", &curhchunks, - &sz, NULL, 0), expected, "Unexpected mallctl() result"); - - if (config_stats) { - assert_u64_gt(nmalloc, 0, - "nmalloc should be greater than zero"); - assert_u64_ge(nmalloc, ndalloc, - "nmalloc should be at least as large as ndalloc"); - assert_u64_gt(curhchunks, 0, - "At least one chunk should be currently allocated"); - } - - dallocx(p, 0); -} -TEST_END - -int -main(void) -{ - - return (test( - test_stats_summary, - test_stats_huge, - test_stats_arenas_summary, - test_stats_arenas_small, - test_stats_arenas_large, - test_stats_arenas_huge, - test_stats_arenas_bins, - test_stats_arenas_lruns, - test_stats_arenas_hchunks)); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/tsd.c b/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/tsd.c deleted file mode 100644 index 8be787f..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/tsd.c +++ /dev/null @@ -1,107 +0,0 @@ -#include "test/jemalloc_test.h" - -#define THREAD_DATA 0x72b65c10 - -typedef unsigned int data_t; - -static bool data_cleanup_executed; - -malloc_tsd_types(data_, data_t) -malloc_tsd_protos(, data_, data_t) - -void -data_cleanup(void *arg) -{ - data_t *data = (data_t *)arg; - - if (!data_cleanup_executed) { - assert_x_eq(*data, THREAD_DATA, - "Argument passed into cleanup function should match tsd " - "value"); - } - data_cleanup_executed = true; - - /* 
- * Allocate during cleanup for two rounds, in order to ensure that - * jemalloc's internal tsd reinitialization happens. - */ - switch (*data) { - case THREAD_DATA: - *data = 1; - data_tsd_set(data); - break; - case 1: - *data = 2; - data_tsd_set(data); - break; - case 2: - return; - default: - not_reached(); - } - - { - void *p = mallocx(1, 0); - assert_ptr_not_null(p, "Unexpected mallocx() failure"); - dallocx(p, 0); - } -} - -malloc_tsd_externs(data_, data_t) -#define DATA_INIT 0x12345678 -malloc_tsd_data(, data_, data_t, DATA_INIT) -malloc_tsd_funcs(, data_, data_t, DATA_INIT, data_cleanup) - -static void * -thd_start(void *arg) -{ - data_t d = (data_t)(uintptr_t)arg; - void *p; - - assert_x_eq(*data_tsd_get(), DATA_INIT, - "Initial tsd get should return initialization value"); - - p = malloc(1); - assert_ptr_not_null(p, "Unexpected malloc() failure"); - - data_tsd_set(&d); - assert_x_eq(*data_tsd_get(), d, - "After tsd set, tsd get should return value that was set"); - - d = 0; - assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg, - "Resetting local data should have no effect on tsd"); - - free(p); - return (NULL); -} - -TEST_BEGIN(test_tsd_main_thread) -{ - - thd_start((void *) 0xa5f3e329); -} -TEST_END - -TEST_BEGIN(test_tsd_sub_thread) -{ - thd_t thd; - - data_cleanup_executed = false; - thd_create(&thd, thd_start, (void *)THREAD_DATA); - thd_join(thd, NULL); - assert_true(data_cleanup_executed, - "Cleanup function should have executed"); -} -TEST_END - -int -main(void) -{ - - data_tsd_boot(); - - return (test( - test_tsd_main_thread, - test_tsd_sub_thread)); -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/.gitignore b/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/.gitignore deleted file mode 100644 index 7ab7825..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -linenoise_example -*.dSYM -history.txt diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/Makefile b/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/Makefile deleted file mode 100644 index 1dd894b..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -STD= -WARN= -Wall -OPT= -Os - -R_CFLAGS= $(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) -R_LDFLAGS= $(LDFLAGS) -DEBUG= -g - -R_CC=$(CC) $(R_CFLAGS) -R_LD=$(CC) $(R_LDFLAGS) - -linenoise.o: linenoise.h linenoise.c - -linenoise_example: linenoise.o example.o - $(R_LD) -o $@ $^ - -.c.o: - $(R_CC) -c $< - -clean: - rm -f linenoise_example *.o diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/README.markdown b/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/README.markdown deleted file mode 100644 index e01642c..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/README.markdown +++ /dev/null @@ -1,224 +0,0 @@ -# Linenoise - -A minimal, zero-config, BSD licensed, readline replacement used in Redis, -MongoDB, and Android. - -* Single and multi line editing mode with the usual key bindings implemented. -* History handling. -* Completion. -* Hints (suggestions at the right of the prompt as you type). -* About 1,100 lines of BSD license source code. -* Only uses a subset of VT100 escapes (ANSI.SYS compatible). - -## Can a line editing library be 20k lines of code? - -Line editing with some support for history is a really important feature for command line utilities. 
Instead of retyping almost the same stuff again and again it's just much better to hit the up arrow and edit on syntax errors, or in order to try a slightly different command. But apparently code dealing with terminals is some sort of Black Magic: readline is 30k lines of code, libedit 20k. Is it reasonable to link small utilities to huge libraries just to get minimal support for line editing? - -So what usually happens is either: - - * Large programs with configure scripts disabling line editing if readline is not present in the system, or not supporting it at all since readline is GPL licensed and libedit (the BSD clone) is not as known and available as readline is (Real world example of this problem: Tclsh). - * Smaller programs not using a configure script not supporting line editing at all (A problem we had with Redis-cli for instance). - -The result is a pollution of binaries without line editing support. - -So I spent more or less two hours doing a reality check resulting in this little library: is it *really* needed for a line editing library to be 20k lines of code? Apparently not, it is possible to get a very small, zero configuration, trivial to embed library, that solves the problem. Smaller programs will just include this, supporting line editing out of the box. Larger programs may use this little library or just check with configure if readline/libedit is available and resort to Linenoise if not. - -## Terminals, in 2010. - -Apparently almost every terminal you can happen to use today has some kind of support for basic VT100 escape sequences. So I tried to write a lib using just very basic VT100 features. The resulting library appears to work everywhere I tried to use it, and now can work even on ANSI.SYS compatible terminals, since no -VT220 specific sequences are used anymore. - -The library is currently about 1100 lines of code. In order to use it in your project just look at the *example.c* file in the source distribution, it is trivial. Linenoise is BSD code, so you can use it both in free software and commercial software. - -## Tested with... - - * Linux text only console ($TERM = linux) - * Linux KDE terminal application ($TERM = xterm) - * Linux xterm ($TERM = xterm) - * Linux Buildroot ($TERM = vt100) - * Mac OS X iTerm ($TERM = xterm) - * Mac OS X default Terminal.app ($TERM = xterm) - * OpenBSD 4.5 through an OSX Terminal.app ($TERM = screen) - * IBM AIX 6.1 - * FreeBSD xterm ($TERM = xterm) - * ANSI.SYS - * Emacs comint mode ($TERM = dumb) - -Please test it everywhere you can and report back! - -## Let's push this forward! - -Patches should be provided in keeping with Linenoise's sensibility for small, -easy to understand code. - -Send feedback to antirez at gmail - -# The API - -Linenoise is very easy to use, and reading the example shipped with the -library should get you up to speed ASAP. Here is a list of API calls -and how to use them. - - char *linenoise(const char *prompt); - -This is the main Linenoise call: it shows the user a prompt with line editing -and history capabilities. The prompt you specify is used as a prompt, that is, -it will be printed to the left of the cursor. The library returns a buffer -with the line composed by the user, or NULL on end of file or when there -is an out of memory condition. - -When a tty is detected (the user is actually typing into a terminal session) -the maximum editable line length is `LINENOISE_MAX_LINE`. 
When instead the -standard input is not a tty, which happens every time you redirect a file -to a program, or use it in a Unix pipeline, there are no limits to the -length of the line that can be returned. - -The returned line should be freed with the `free()` standard system call. -However sometimes it could happen that your program uses a different dynamic -allocation library, so you may also use `linenoiseFree` to make sure the -line is freed with the same allocator it was created with. - -The canonical loop used by a program using Linenoise will be something like -this: - - while((line = linenoise("hello> ")) != NULL) { - printf("You wrote: %s\n", line); - linenoiseFree(line); /* Or just free(line) if you use libc malloc. */ - } - -## Single line VS multi line editing - -By default, Linenoise uses single line editing, that is, a single row on the -screen will be used, and as the user types more, the text will scroll towards the -left to make room. This works if your program is one where the user is -unlikely to write a lot of text, otherwise multi line editing, where multiple -screen rows are used, can be a lot more comfortable. - -In order to enable multi line editing use the following API call: - - linenoiseSetMultiLine(1); - -You can disable it using `0` as argument. - -## History - -Linenoise supports history, so that the user does not have to retype -again and again the same things, but can use the down and up arrows in order -to search and re-edit already inserted lines of text. - -The following are the history API calls: - - int linenoiseHistoryAdd(const char *line); - int linenoiseHistorySetMaxLen(int len); - int linenoiseHistorySave(const char *filename); - int linenoiseHistoryLoad(const char *filename); - -Use `linenoiseHistoryAdd` every time you want to add a new element -to the top of the history (it will be the first the user will see when -using the up arrow). - -Note that for history to work, you have to set a length for the history -(which is zero by default, so history will be disabled if you don't set -a proper one). This is accomplished using the `linenoiseHistorySetMaxLen` -function. - -Linenoise has direct support for persisting the history into a history -file. The functions `linenoiseHistorySave` and `linenoiseHistoryLoad` do -just that. Both functions return -1 on error and 0 on success. - -## Completion - -Linenoise supports completion, which is the ability to complete the user -input when she or he presses the `<TAB>` key. - -In order to use completion, you need to register a completion callback, which -is called every time the user presses `<TAB>`. Your callback will return a -list of items that are completions for the current string. - -The following is an example of registering a completion callback: - - linenoiseSetCompletionCallback(completion); - -The completion must be a function returning `void` and getting as input -a `const char` pointer, which is the line the user has typed so far, and -a `linenoiseCompletions` object pointer, which is used as argument of -`linenoiseAddCompletion` in order to add completions inside the callback. -An example will make it more clear: - - void completion(const char *buf, linenoiseCompletions *lc) { - if (buf[0] == 'h') { - linenoiseAddCompletion(lc,"hello"); - linenoiseAddCompletion(lc,"hello there"); - } - } - -Basically in your completion callback, you inspect the input, and return -a list of items that are good completions by using `linenoiseAddCompletion`. 
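Putting the history and completion APIs together, the following is a minimal sketch of a complete program (the prompt string, the candidate commands and the history file name here are arbitrary placeholders):

    #include <stdio.h>
    #include "linenoise.h"

    static void completion(const char *buf, linenoiseCompletions *lc) {
        /* Offer candidates that extend what the user typed so far. */
        if (buf[0] == 's') {
            linenoiseAddCompletion(lc,"start");
            linenoiseAddCompletion(lc,"status");
        }
    }

    int main(void) {
        char *line;

        linenoiseSetCompletionCallback(completion);
        linenoiseHistorySetMaxLen(100);      /* History is disabled until a length is set. */
        linenoiseHistoryLoad("history.txt"); /* May fail if the file does not exist yet. */
        while ((line = linenoise("app> ")) != NULL) {
            if (line[0] != '\0') {
                printf("You wrote: %s\n", line);
                linenoiseHistoryAdd(line);
                linenoiseHistorySave("history.txt");
            }
            linenoiseFree(line); /* Or free(line) if you use the libc allocator. */
        }
        return 0;
    }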
- -If you want to test the completion feature, compile the example program -with `make`, run it, type `h` and press `<TAB>`. - -## Hints - -Linenoise has a feature called *hints* which is very useful when you -use Linenoise in order to implement a REPL (Read Eval Print Loop) for -a program that accepts commands and arguments, but may also be useful in -other conditions. - -The feature shows, on the right of the cursor, as the user types, hints that -may be useful. The hints can be displayed using a different color compared -to the color the user is typing, and can also be bold. - -For example as the user starts to type `"git remote add"`, with hints it's -possible to show on the right of the prompt a string ` <name> <url>`. - -The feature works similarly to the completion feature, using a callback. -To register the callback we use: - - linenoiseSetHintsCallback(hints); - -The callback itself is implemented like this: - - char *hints(const char *buf, int *color, int *bold) { - if (!strcasecmp(buf,"git remote add")) { - *color = 35; - *bold = 0; - return " <name> <url>"; - } - return NULL; - } - -The callback function returns the string that should be displayed or NULL -if no hint is available for the text the user currently typed. The returned -string will be trimmed as needed depending on the number of columns available -on the screen. - -It is possible to return a string allocated in a dynamic way, by also registering -a function to deallocate the hint string once used: - - void linenoiseSetFreeHintsCallback(linenoiseFreeHintsCallback *); - -The free hint callback will just receive the pointer and free the string -as needed (depending on how the hints callback allocated it). - -As you can see in the example above, a `color` (in xterm color terminal codes) -can be provided together with a `bold` attribute. If no color is set, the -current terminal foreground color is used. If no bold attribute is set, -non-bold text is printed. - -Color codes are: - - red = 31 - green = 32 - yellow = 33 - blue = 34 - magenta = 35 - cyan = 36 - white = 37; - -## Screen handling - -Sometimes you may want to clear the screen as a result of something the -user typed. You can do this by calling the following function: - - void linenoiseClearScreen(void); diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/example.c b/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/example.c deleted file mode 100644 index 3a544d3..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/example.c +++ /dev/null @@ -1,74 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include "linenoise.h" - - -void completion(const char *buf, linenoiseCompletions *lc) { - if (buf[0] == 'h') { - linenoiseAddCompletion(lc,"hello"); - linenoiseAddCompletion(lc,"hello there"); - } -} - -char *hints(const char *buf, int *color, int *bold) { - if (!strcasecmp(buf,"hello")) { - *color = 35; - *bold = 0; - return " World"; - } - return NULL; -} - -int main(int argc, char **argv) { - char *line; - char *prgname = argv[0]; - - /* Parse options, with --multiline we enable multi line editing. */ - while(argc > 1) { - argc--; - argv++; - if (!strcmp(*argv,"--multiline")) { - linenoiseSetMultiLine(1); - printf("Multi-line mode enabled.\n"); - } else if (!strcmp(*argv,"--keycodes")) { - linenoisePrintKeyCodes(); - exit(0); - } else { - fprintf(stderr, "Usage: %s [--multiline] [--keycodes]\n", prgname); - exit(1); - } - } - - /* Set the completion callback. This will be called every time the - * user uses the <tab> key. 
*/ - linenoiseSetCompletionCallback(completion); - linenoiseSetHintsCallback(hints); - - /* Load history from file. The history file is just a plain text file - * where entries are separated by newlines. */ - linenoiseHistoryLoad("history.txt"); /* Load the history at startup */ - - /* Now this is the main loop of the typical linenoise-based application. - * The call to linenoise() will block as long as the user types something - * and presses enter. - * - * The typed string is returned as a malloc() allocated string by - * linenoise, so the user needs to free() it. */ - while((line = linenoise("hello> ")) != NULL) { - /* Do something with the string. */ - if (line[0] != '\0' && line[0] != '/') { - printf("echo: '%s'\n", line); - linenoiseHistoryAdd(line); /* Add to the history. */ - linenoiseHistorySave("history.txt"); /* Save the history on disk. */ - } else if (!strncmp(line,"/historylen",11)) { - /* The "/historylen" command will change the history len. */ - int len = atoi(line+11); - linenoiseHistorySetMaxLen(len); - } else if (line[0] == '/') { - printf("Unrecognized command: %s\n", line); - } - free(line); - } - return 0; -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/linenoise.c b/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/linenoise.c deleted file mode 100644 index fce14a7..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/linenoise.c +++ /dev/null @@ -1,1199 +0,0 @@ -/* linenoise.c -- guerrilla line editing library against the idea that a - * line editing lib needs to be 20,000 lines of C code. - * - * You can find the latest source code at: - * - * http://github.com/antirez/linenoise - * - * Makes a number of crazy assumptions that happen to be true in 99.9999% of - * the 2010 UNIX computers around. - * - * ------------------------------------------------------------------------ - * - * Copyright (c) 2010-2016, Salvatore Sanfilippo - * Copyright (c) 2010-2013, Pieter Noordhuis - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * ------------------------------------------------------------------------ - * - * References: - * - http://invisible-island.net/xterm/ctlseqs/ctlseqs.html - * - http://www.3waylabs.com/nw/WWW/products/wizcon/vt220.html - * - * Todo list: - * - Filter bogus Ctrl+<char> combinations. - * - Win32 support - * - * Bloat: - * - History search like Ctrl+r in readline? - * - * List of escape sequences used by this program, we do everything just - * with three sequences. In order to be so cheap we may have some - * flickering effect with some slow terminal, but the fewer the sequences - * the more compatible. - * - * EL (Erase Line) - * Sequence: ESC [ n K - * Effect: if n is 0 or missing, clear from cursor to end of line - * Effect: if n is 1, clear from beginning of line to cursor - * Effect: if n is 2, clear entire line - * - * CUF (CUrsor Forward) - * Sequence: ESC [ n C - * Effect: moves cursor forward n chars - * - * CUB (CUrsor Backward) - * Sequence: ESC [ n D - * Effect: moves cursor backward n chars - * - * The following is used to get the terminal width if getting - * the width with the TIOCGWINSZ ioctl fails - * - * DSR (Device Status Report) - * Sequence: ESC [ 6 n - * Effect: reports the current cursor position as ESC [ n ; m R - * where n is the row and m is the column - * - * When multi line mode is enabled, we also use additional escape - * sequences. However multi line editing is disabled by default. - * - * CUU (Cursor Up) - * Sequence: ESC [ n A - * Effect: moves cursor up n chars. - * - * CUD (Cursor Down) - * Sequence: ESC [ n B - * Effect: moves cursor down n chars. - * - * When linenoiseClearScreen() is called, two additional escape sequences - * are used in order to clear the screen and position the cursor at home - * position. - * - * CUP (Cursor position) - * Sequence: ESC [ H - * Effect: moves the cursor to upper left corner - * - * ED (Erase display) - * Sequence: ESC [ 2 J - * Effect: clear the whole screen - * - */ - -#include <termios.h> -#include <unistd.h> -#include <stdlib.h> -#include <stdio.h> -#include <errno.h> -#include <string.h> -#include <stdlib.h> -#include <ctype.h> -#include <sys/stat.h> -#include <sys/types.h> -#include <sys/ioctl.h> -#include <unistd.h> -#include "linenoise.h" - -#define LINENOISE_DEFAULT_HISTORY_MAX_LEN 100 -#define LINENOISE_MAX_LINE 4096 -static char *unsupported_term[] = {"dumb","cons25","emacs",NULL}; -static linenoiseCompletionCallback *completionCallback = NULL; -static linenoiseHintsCallback *hintsCallback = NULL; -static linenoiseFreeHintsCallback *freeHintsCallback = NULL; - -static struct termios orig_termios; /* In order to restore at exit.*/ -static int rawmode = 0; /* For atexit() function to check if restore is needed*/ -static int mlmode = 0; /* Multi line mode. Default is single line. */ -static int atexit_registered = 0; /* Register atexit just 1 time. */ -static int history_max_len = LINENOISE_DEFAULT_HISTORY_MAX_LEN; -static int history_len = 0; -static char **history = NULL; - -/* The linenoiseState structure represents the state during line editing. - * We pass this state to functions implementing specific editing - * functionalities. */ -struct linenoiseState { - int ifd; /* Terminal stdin file descriptor. */ - int ofd; /* Terminal stdout file descriptor. */ - char *buf; /* Edited line buffer. */ - size_t buflen; /* Edited line buffer size. */ - const char *prompt; /* Prompt to display. */ - size_t plen; /* Prompt length. */ - size_t pos; /* Current cursor position. */ - size_t oldpos; /* Previous refresh cursor position. */ - size_t len; /* Current edited line length. */ - size_t cols; /* Number of columns in terminal. 
*/ - size_t maxrows; /* Maximum num of rows used so far (multiline mode) */ - int history_index; /* The history index we are currently editing. */ -}; - -enum KEY_ACTION{ - KEY_NULL = 0, /* NULL */ - CTRL_A = 1, /* Ctrl+a */ - CTRL_B = 2, /* Ctrl-b */ - CTRL_C = 3, /* Ctrl-c */ - CTRL_D = 4, /* Ctrl-d */ - CTRL_E = 5, /* Ctrl-e */ - CTRL_F = 6, /* Ctrl-f */ - CTRL_H = 8, /* Ctrl-h */ - TAB = 9, /* Tab */ - CTRL_K = 11, /* Ctrl+k */ - CTRL_L = 12, /* Ctrl+l */ - ENTER = 13, /* Enter */ - CTRL_N = 14, /* Ctrl-n */ - CTRL_P = 16, /* Ctrl-p */ - CTRL_T = 20, /* Ctrl-t */ - CTRL_U = 21, /* Ctrl+u */ - CTRL_W = 23, /* Ctrl+w */ - ESC = 27, /* Escape */ - BACKSPACE = 127 /* Backspace */ -}; - -static void linenoiseAtExit(void); -int linenoiseHistoryAdd(const char *line); -static void refreshLine(struct linenoiseState *l); - -/* Debugging macro. */ -#if 0 -FILE *lndebug_fp = NULL; -#define lndebug(...) \ - do { \ - if (lndebug_fp == NULL) { \ - lndebug_fp = fopen("/tmp/lndebug.txt","a"); \ - fprintf(lndebug_fp, \ - "[%d %d %d] p: %d, rows: %d, rpos: %d, max: %d, oldmax: %d\n", \ - (int)l->len,(int)l->pos,(int)l->oldpos,plen,rows,rpos, \ - (int)l->maxrows,old_rows); \ - } \ - fprintf(lndebug_fp, ", " __VA_ARGS__); \ - fflush(lndebug_fp); \ - } while (0) -#else -#define lndebug(fmt, ...) -#endif - -/* ======================= Low level terminal handling ====================== */ - -/* Set whether or not to use multi line mode. */ -void linenoiseSetMultiLine(int ml) { - mlmode = ml; -} - -/* Return true if the terminal name is in the list of terminals we know are - * not able to understand basic escape sequences. */ -static int isUnsupportedTerm(void) { - char *term = getenv("TERM"); - int j; - - if (term == NULL) return 0; - for (j = 0; unsupported_term[j]; j++) - if (!strcasecmp(term,unsupported_term[j])) return 1; - return 0; -} - -/* Raw mode: 1960 magic shit. */ -static int enableRawMode(int fd) { - struct termios raw; - - if (!isatty(STDIN_FILENO)) goto fatal; - if (!atexit_registered) { - atexit(linenoiseAtExit); - atexit_registered = 1; - } - if (tcgetattr(fd,&orig_termios) == -1) goto fatal; - - raw = orig_termios; /* modify the original mode */ - /* input modes: no break, no CR to NL, no parity check, no strip char, - * no start/stop output control. */ - raw.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON); - /* output modes - disable post processing */ - raw.c_oflag &= ~(OPOST); - /* control modes - set 8 bit chars */ - raw.c_cflag |= (CS8); - /* local modes - echoing off, canonical off, no extended functions, - * no signal chars (^Z,^C) */ - raw.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG); - /* control chars - set return condition: min number of bytes and timer. - * We want read to return every single byte, without timeout. */ - raw.c_cc[VMIN] = 1; raw.c_cc[VTIME] = 0; /* 1 byte, no timer */ - - /* put terminal in raw mode after flushing */ - if (tcsetattr(fd,TCSAFLUSH,&raw) < 0) goto fatal; - rawmode = 1; - return 0; - -fatal: - errno = ENOTTY; - return -1; -} - -static void disableRawMode(int fd) { - /* Don't even check the return value as it's too late. */ - if (rawmode && tcsetattr(fd,TCSAFLUSH,&orig_termios) != -1) - rawmode = 0; -} - -/* Use the ESC [6n escape sequence to query the horizontal cursor position - * and return it. On error -1 is returned, on success the position of the - * cursor. 
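- * (The terminal answers the DSR query with ESC [ rows ; cols R; the code - * below parses out and returns only the column.)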
*/ -static int getCursorPosition(int ifd, int ofd) { - char buf[32]; - int cols, rows; - unsigned int i = 0; - - /* Report cursor location */ - if (write(ofd, "\x1b[6n", 4) != 4) return -1; - - /* Read the response: ESC [ rows ; cols R */ - while (i < sizeof(buf)-1) { - if (read(ifd,buf+i,1) != 1) break; - if (buf[i] == 'R') break; - i++; - } - buf[i] = '\0'; - - /* Parse it. */ - if (buf[0] != ESC || buf[1] != '[') return -1; - if (sscanf(buf+2,"%d;%d",&rows,&cols) != 2) return -1; - return cols; -} - -/* Try to get the number of columns in the current terminal, or assume 80 - * if it fails. */ -static int getColumns(int ifd, int ofd) { - struct winsize ws; - - if (ioctl(1, TIOCGWINSZ, &ws) == -1 || ws.ws_col == 0) { - /* ioctl() failed. Try to query the terminal itself. */ - int start, cols; - - /* Get the initial position so we can restore it later. */ - start = getCursorPosition(ifd,ofd); - if (start == -1) goto failed; - - /* Go to right margin and get position. */ - if (write(ofd,"\x1b[999C",6) != 6) goto failed; - cols = getCursorPosition(ifd,ofd); - if (cols == -1) goto failed; - - /* Restore position. */ - if (cols > start) { - char seq[32]; - snprintf(seq,32,"\x1b[%dD",cols-start); - if (write(ofd,seq,strlen(seq)) == -1) { - /* Can't recover... */ - } - } - return cols; - } else { - return ws.ws_col; - } - -failed: - return 80; -} - -/* Clear the screen. Used to handle ctrl+l */ -void linenoiseClearScreen(void) { - if (write(STDOUT_FILENO,"\x1b[H\x1b[2J",7) <= 0) { - /* nothing to do, just to avoid warning. */ - } -} - -/* Beep, used for completion when there is nothing to complete or when all - * the choices were already shown. */ -static void linenoiseBeep(void) { - fprintf(stderr, "\x7"); - fflush(stderr); -} - -/* ============================== Completion ================================ */ - -/* Free a list of completion options populated by linenoiseAddCompletion(). */ -static void freeCompletions(linenoiseCompletions *lc) { - size_t i; - for (i = 0; i < lc->len; i++) - free(lc->cvec[i]); - if (lc->cvec != NULL) - free(lc->cvec); -} - -/* This is a helper function for linenoiseEdit() and is called when the - * user types the <tab> key in order to complete the string currently in the - * input. - * - * The state of the editing is encapsulated into the pointed linenoiseState - * structure as described in the structure definition. 
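- * - * While cycling through completions, <tab> advances to the next candidate - * (with a beep past the last one), ESC restores the original buffer, and any - * other key accepts the candidate currently shown and is returned to the - * caller for normal processing.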
*/ -static int completeLine(struct linenoiseState *ls) { - linenoiseCompletions lc = { 0, NULL }; - int nread, nwritten; - char c = 0; - - completionCallback(ls->buf,&lc); - if (lc.len == 0) { - linenoiseBeep(); - } else { - size_t stop = 0, i = 0; - - while(!stop) { - /* Show completion or original buffer */ - if (i < lc.len) { - struct linenoiseState saved = *ls; - - ls->len = ls->pos = strlen(lc.cvec[i]); - ls->buf = lc.cvec[i]; - refreshLine(ls); - ls->len = saved.len; - ls->pos = saved.pos; - ls->buf = saved.buf; - } else { - refreshLine(ls); - } - - nread = read(ls->ifd,&c,1); - if (nread <= 0) { - freeCompletions(&lc); - return -1; - } - - switch(c) { - case 9: /* tab */ - i = (i+1) % (lc.len+1); - if (i == lc.len) linenoiseBeep(); - break; - case 27: /* escape */ - /* Re-show original buffer */ - if (i < lc.len) refreshLine(ls); - stop = 1; - break; - default: - /* Update buffer and return */ - if (i < lc.len) { - nwritten = snprintf(ls->buf,ls->buflen,"%s",lc.cvec[i]); - ls->len = ls->pos = nwritten; - } - stop = 1; - break; - } - } - } - - freeCompletions(&lc); - return c; /* Return last read character */ -} - -/* Register a callback function to be called for tab-completion. */ -void linenoiseSetCompletionCallback(linenoiseCompletionCallback *fn) { - completionCallback = fn; -} - -/* Register a hints function to be called to show hints to the user at the - * right of the prompt. */ -void linenoiseSetHintsCallback(linenoiseHintsCallback *fn) { - hintsCallback = fn; -} - -/* Register a function to free the hints returned by the hints callback - * registered with linenoiseSetHintsCallback(). */ -void linenoiseSetFreeHintsCallback(linenoiseFreeHintsCallback *fn) { - freeHintsCallback = fn; -} - -/* This function is used by the callback function registered by the user - * in order to add completion options given the input string when the - * user typed <tab>. See the example.c source code for a very easy to - * understand example. */ -void linenoiseAddCompletion(linenoiseCompletions *lc, const char *str) { - size_t len = strlen(str); - char *copy, **cvec; - - copy = malloc(len+1); - if (copy == NULL) return; - memcpy(copy,str,len+1); - cvec = realloc(lc->cvec,sizeof(char*)*(lc->len+1)); - if (cvec == NULL) { - free(copy); - return; - } - lc->cvec = cvec; - lc->cvec[lc->len++] = copy; -} - -/* =========================== Line editing ================================= */ - -/* We define a very simple "append buffer" structure, that is a heap - * allocated string we can append to. This is useful in order to - * write all the escape sequences in a buffer and flush them to the standard - * output in a single call, to avoid flickering effects. */ -struct abuf { - char *b; - int len; -}; - -static void abInit(struct abuf *ab) { - ab->b = NULL; - ab->len = 0; -} - -static void abAppend(struct abuf *ab, const char *s, int len) { - char *new = realloc(ab->b,ab->len+len); - - if (new == NULL) return; - memcpy(new+ab->len,s,len); - ab->b = new; - ab->len += len; -} - -static void abFree(struct abuf *ab) { - free(ab->b); -} - -/* Helper of refreshSingleLine() and refreshMultiLine() to show hints - * to the right of the prompt. 
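- * - * The hint is clipped to the columns remaining after the prompt and the - * current line, and is wrapped in an SGR color/bold escape when the callback - * requests one.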
 */
-void refreshShowHints(struct abuf *ab, struct linenoiseState *l, int plen) {
-    char seq[64];
-    if (hintsCallback && plen+l->len < l->cols) {
-        int color = -1, bold = 0;
-        char *hint = hintsCallback(l->buf,&color,&bold);
-        if (hint) {
-            int hintlen = strlen(hint);
-            int hintmaxlen = l->cols-(plen+l->len);
-            if (hintlen > hintmaxlen) hintlen = hintmaxlen;
-            if (bold == 1 && color == -1) color = 37;
-            if (color != -1 || bold != 0)
-                snprintf(seq,64,"\033[%d;%d;49m",bold,color);
-            abAppend(ab,seq,strlen(seq));
-            abAppend(ab,hint,hintlen);
-            if (color != -1 || bold != 0)
-                abAppend(ab,"\033[0m",4);
-            /* Call the function to free the hint returned. */
-            if (freeHintsCallback) freeHintsCallback(hint);
-        }
-    }
-}
-
-/* Single line low level line refresh.
- *
- * Rewrite the currently edited line according to the buffer content,
- * cursor position, and number of columns of the terminal. */
-static void refreshSingleLine(struct linenoiseState *l) {
-    char seq[64];
-    size_t plen = strlen(l->prompt);
-    int fd = l->ofd;
-    char *buf = l->buf;
-    size_t len = l->len;
-    size_t pos = l->pos;
-    struct abuf ab;
-
-    while((plen+pos) >= l->cols) {
-        buf++;
-        len--;
-        pos--;
-    }
-    while (plen+len > l->cols) {
-        len--;
-    }
-
-    abInit(&ab);
-    /* Cursor to left edge */
-    snprintf(seq,64,"\r");
-    abAppend(&ab,seq,strlen(seq));
-    /* Write the prompt and the current buffer content */
-    abAppend(&ab,l->prompt,strlen(l->prompt));
-    abAppend(&ab,buf,len);
-    /* Show hints if any. */
-    refreshShowHints(&ab,l,plen);
-    /* Erase to right */
-    snprintf(seq,64,"\x1b[0K");
-    abAppend(&ab,seq,strlen(seq));
-    /* Move cursor to original position. */
-    snprintf(seq,64,"\r\x1b[%dC", (int)(pos+plen));
-    abAppend(&ab,seq,strlen(seq));
-    if (write(fd,ab.b,ab.len) == -1) {} /* Can't recover from write error. */
-    abFree(&ab);
-}
-
-/* Multi line low level line refresh.
- *
- * Rewrite the currently edited line according to the buffer content,
- * cursor position, and number of columns of the terminal. */
-static void refreshMultiLine(struct linenoiseState *l) {
-    char seq[64];
-    int plen = strlen(l->prompt);
-    int rows = (plen+l->len+l->cols-1)/l->cols; /* rows used by current buf. */
-    int rpos = (plen+l->oldpos+l->cols)/l->cols; /* cursor relative row. */
-    int rpos2; /* rpos after refresh. */
-    int col; /* column position, zero-based. */
-    int old_rows = l->maxrows;
-    int fd = l->ofd, j;
-    struct abuf ab;
-
-    /* Update maxrows if needed. */
-    if (rows > (int)l->maxrows) l->maxrows = rows;
-
-    /* First step: clear all the lines used before. To do so start by
-     * going to the last row. */
-    abInit(&ab);
-    if (old_rows-rpos > 0) {
-        lndebug("go down %d", old_rows-rpos);
-        snprintf(seq,64,"\x1b[%dB", old_rows-rpos);
-        abAppend(&ab,seq,strlen(seq));
-    }
-
-    /* Now for every row clear it, go up. */
-    for (j = 0; j < old_rows-1; j++) {
-        lndebug("clear+up");
-        snprintf(seq,64,"\r\x1b[0K\x1b[1A");
-        abAppend(&ab,seq,strlen(seq));
-    }
-
-    /* Clean the top line. */
-    lndebug("clear");
-    snprintf(seq,64,"\r\x1b[0K");
-    abAppend(&ab,seq,strlen(seq));
-
-    /* Write the prompt and the current buffer content */
-    abAppend(&ab,l->prompt,strlen(l->prompt));
-    abAppend(&ab,l->buf,l->len);
-
-    /* Show hints if any. */
-    refreshShowHints(&ab,l,plen);
-
-    /* If we are at the very end of the screen with our prompt, we need to
-     * emit a newline and move the prompt to the first column.
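The hints path is the read-only counterpart of completion: refreshShowHints() asks the registered callback for a suggestion and paints it after the cursor without touching the edit buffer. A minimal sketch (hypothetical command; 35 is the ANSI magenta color code):

    #include <strings.h>  /* strcasecmp */
    #include "linenoise.h"

    /* Suggest the remaining arguments of a known command. */
    static char *myHints(const char *buf, int *color, int *bold) {
        if (!strcasecmp(buf, "git remote add")) {
            *color = 35;
            *bold = 0;
            return " <name> <url>"; /* static string: no free callback needed */
        }
        return NULL;
    }

    linenoiseSetHintsCallback(myHints);

A callback that heap-allocates its hint would also register a matching deallocator via linenoiseSetFreeHintsCallback(), which refreshShowHints() invokes after appending the hint.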
*/ - if (l->pos && - l->pos == l->len && - (l->pos+plen) % l->cols == 0) - { - lndebug(""); - abAppend(&ab,"\n",1); - snprintf(seq,64,"\r"); - abAppend(&ab,seq,strlen(seq)); - rows++; - if (rows > (int)l->maxrows) l->maxrows = rows; - } - - /* Move cursor to right position. */ - rpos2 = (plen+l->pos+l->cols)/l->cols; /* current cursor relative row. */ - lndebug("rpos2 %d", rpos2); - - /* Go up till we reach the expected positon. */ - if (rows-rpos2 > 0) { - lndebug("go-up %d", rows-rpos2); - snprintf(seq,64,"\x1b[%dA", rows-rpos2); - abAppend(&ab,seq,strlen(seq)); - } - - /* Set column. */ - col = (plen+(int)l->pos) % (int)l->cols; - lndebug("set col %d", 1+col); - if (col) - snprintf(seq,64,"\r\x1b[%dC", col); - else - snprintf(seq,64,"\r"); - abAppend(&ab,seq,strlen(seq)); - - lndebug("\n"); - l->oldpos = l->pos; - - if (write(fd,ab.b,ab.len) == -1) {} /* Can't recover from write error. */ - abFree(&ab); -} - -/* Calls the two low level functions refreshSingleLine() or - * refreshMultiLine() according to the selected mode. */ -static void refreshLine(struct linenoiseState *l) { - if (mlmode) - refreshMultiLine(l); - else - refreshSingleLine(l); -} - -/* Insert the character 'c' at cursor current position. - * - * On error writing to the terminal -1 is returned, otherwise 0. */ -int linenoiseEditInsert(struct linenoiseState *l, char c) { - if (l->len < l->buflen) { - if (l->len == l->pos) { - l->buf[l->pos] = c; - l->pos++; - l->len++; - l->buf[l->len] = '\0'; - if ((!mlmode && l->plen+l->len < l->cols && !hintsCallback)) { - /* Avoid a full update of the line in the - * trivial case. */ - if (write(l->ofd,&c,1) == -1) return -1; - } else { - refreshLine(l); - } - } else { - memmove(l->buf+l->pos+1,l->buf+l->pos,l->len-l->pos); - l->buf[l->pos] = c; - l->len++; - l->pos++; - l->buf[l->len] = '\0'; - refreshLine(l); - } - } - return 0; -} - -/* Move cursor on the left. */ -void linenoiseEditMoveLeft(struct linenoiseState *l) { - if (l->pos > 0) { - l->pos--; - refreshLine(l); - } -} - -/* Move cursor on the right. */ -void linenoiseEditMoveRight(struct linenoiseState *l) { - if (l->pos != l->len) { - l->pos++; - refreshLine(l); - } -} - -/* Move cursor to the start of the line. */ -void linenoiseEditMoveHome(struct linenoiseState *l) { - if (l->pos != 0) { - l->pos = 0; - refreshLine(l); - } -} - -/* Move cursor to the end of the line. */ -void linenoiseEditMoveEnd(struct linenoiseState *l) { - if (l->pos != l->len) { - l->pos = l->len; - refreshLine(l); - } -} - -/* Substitute the currently edited line with the next or previous history - * entry as specified by 'dir'. */ -#define LINENOISE_HISTORY_NEXT 0 -#define LINENOISE_HISTORY_PREV 1 -void linenoiseEditHistoryNext(struct linenoiseState *l, int dir) { - if (history_len > 1) { - /* Update the current history entry before to - * overwrite it with the next one. */ - free(history[history_len - 1 - l->history_index]); - history[history_len - 1 - l->history_index] = strdup(l->buf); - /* Show the new entry */ - l->history_index += (dir == LINENOISE_HISTORY_PREV) ? 1 : -1; - if (l->history_index < 0) { - l->history_index = 0; - return; - } else if (l->history_index >= history_len) { - l->history_index = history_len-1; - return; - } - strncpy(l->buf,history[history_len - 1 - l->history_index],l->buflen); - l->buf[l->buflen-1] = '\0'; - l->len = l->pos = strlen(l->buf); - refreshLine(l); - } -} - -/* Delete the character at the right of the cursor without altering the cursor - * position. 
Basically this is what happens with the "Delete" keyboard key. */
-void linenoiseEditDelete(struct linenoiseState *l) {
-    if (l->len > 0 && l->pos < l->len) {
-        memmove(l->buf+l->pos,l->buf+l->pos+1,l->len-l->pos-1);
-        l->len--;
-        l->buf[l->len] = '\0';
-        refreshLine(l);
-    }
-}
-
-/* Backspace implementation. */
-void linenoiseEditBackspace(struct linenoiseState *l) {
-    if (l->pos > 0 && l->len > 0) {
-        memmove(l->buf+l->pos-1,l->buf+l->pos,l->len-l->pos);
-        l->pos--;
-        l->len--;
-        l->buf[l->len] = '\0';
-        refreshLine(l);
-    }
-}
-
-/* Delete the previous word, maintaining the cursor at the start of the
- * current word. */
-void linenoiseEditDeletePrevWord(struct linenoiseState *l) {
-    size_t old_pos = l->pos;
-    size_t diff;
-
-    while (l->pos > 0 && l->buf[l->pos-1] == ' ')
-        l->pos--;
-    while (l->pos > 0 && l->buf[l->pos-1] != ' ')
-        l->pos--;
-    diff = old_pos - l->pos;
-    memmove(l->buf+l->pos,l->buf+old_pos,l->len-old_pos+1);
-    l->len -= diff;
-    refreshLine(l);
-}
-
-/* This function is the core of the line editing capability of linenoise.
- * It expects 'fd' to be already in "raw mode" so that every key pressed
- * will be returned ASAP to read().
- *
- * The resulting string is put into 'buf' when the user types enter, or
- * when ctrl+d is typed.
- *
- * The function returns the length of the current buffer. */
-static int linenoiseEdit(int stdin_fd, int stdout_fd, char *buf, size_t buflen, const char *prompt)
-{
-    struct linenoiseState l;
-
-    /* Populate the linenoise state that we pass to functions implementing
-     * specific editing functionalities. */
-    l.ifd = stdin_fd;
-    l.ofd = stdout_fd;
-    l.buf = buf;
-    l.buflen = buflen;
-    l.prompt = prompt;
-    l.plen = strlen(prompt);
-    l.oldpos = l.pos = 0;
-    l.len = 0;
-    l.cols = getColumns(stdin_fd, stdout_fd);
-    l.maxrows = 0;
-    l.history_index = 0;
-
-    /* Buffer starts empty. */
-    l.buf[0] = '\0';
-    l.buflen--; /* Make sure there is always space for the nulterm */
-
-    /* The latest history entry is always our current buffer, that
-     * initially is just an empty string. */
-    linenoiseHistoryAdd("");
-
-    if (write(l.ofd,prompt,l.plen) == -1) return -1;
-    while(1) {
-        char c;
-        int nread;
-        char seq[3];
-
-        nread = read(l.ifd,&c,1);
-        if (nread <= 0) return l.len;
-
-        /* Only autocomplete when the callback is set. It returns < 0 when
-         * there was an error reading from fd. Otherwise it will return the
-         * character that should be handled next. */
-        if (c == 9 && completionCallback != NULL) {
-            c = completeLine(&l);
-            /* Return on errors */
-            if (c < 0) return l.len;
-            /* Read next character when 0 */
-            if (c == 0) continue;
-        }
-
-        switch(c) {
-        case ENTER:    /* enter */
-            history_len--;
-            free(history[history_len]);
-            if (mlmode) linenoiseEditMoveEnd(&l);
-            if (hintsCallback) {
-                /* Force a refresh without hints to leave the previous
-                 * line as the user typed it after a newline. */
-                linenoiseHintsCallback *hc = hintsCallback;
-                hintsCallback = NULL;
-                refreshLine(&l);
-                hintsCallback = hc;
-            }
-            return (int)l.len;
-        case CTRL_C:     /* ctrl-c */
-            errno = EAGAIN;
-            return -1;
-        case BACKSPACE:   /* backspace */
-        case 8:     /* ctrl-h */
-            linenoiseEditBackspace(&l);
-            break;
-        case CTRL_D:     /* ctrl-d, remove char at right of cursor, or if the
-                            line is empty, act as end-of-file. */
-            if (l.len > 0) {
-                linenoiseEditDelete(&l);
-            } else {
-                history_len--;
-                free(history[history_len]);
-                return -1;
-            }
-            break;
-        case CTRL_T:    /* ctrl-t, swaps current character with previous.
*/ - if (l.pos > 0 && l.pos < l.len) { - int aux = buf[l.pos-1]; - buf[l.pos-1] = buf[l.pos]; - buf[l.pos] = aux; - if (l.pos != l.len-1) l.pos++; - refreshLine(&l); - } - break; - case CTRL_B: /* ctrl-b */ - linenoiseEditMoveLeft(&l); - break; - case CTRL_F: /* ctrl-f */ - linenoiseEditMoveRight(&l); - break; - case CTRL_P: /* ctrl-p */ - linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_PREV); - break; - case CTRL_N: /* ctrl-n */ - linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_NEXT); - break; - case ESC: /* escape sequence */ - /* Read the next two bytes representing the escape sequence. - * Use two calls to handle slow terminals returning the two - * chars at different times. */ - if (read(l.ifd,seq,1) == -1) break; - if (read(l.ifd,seq+1,1) == -1) break; - - /* ESC [ sequences. */ - if (seq[0] == '[') { - if (seq[1] >= '0' && seq[1] <= '9') { - /* Extended escape, read additional byte. */ - if (read(l.ifd,seq+2,1) == -1) break; - if (seq[2] == '~') { - switch(seq[1]) { - case '3': /* Delete key. */ - linenoiseEditDelete(&l); - break; - } - } - } else { - switch(seq[1]) { - case 'A': /* Up */ - linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_PREV); - break; - case 'B': /* Down */ - linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_NEXT); - break; - case 'C': /* Right */ - linenoiseEditMoveRight(&l); - break; - case 'D': /* Left */ - linenoiseEditMoveLeft(&l); - break; - case 'H': /* Home */ - linenoiseEditMoveHome(&l); - break; - case 'F': /* End*/ - linenoiseEditMoveEnd(&l); - break; - } - } - } - - /* ESC O sequences. */ - else if (seq[0] == 'O') { - switch(seq[1]) { - case 'H': /* Home */ - linenoiseEditMoveHome(&l); - break; - case 'F': /* End*/ - linenoiseEditMoveEnd(&l); - break; - } - } - break; - default: - if (linenoiseEditInsert(&l,c)) return -1; - break; - case CTRL_U: /* Ctrl+u, delete the whole line. */ - buf[0] = '\0'; - l.pos = l.len = 0; - refreshLine(&l); - break; - case CTRL_K: /* Ctrl+k, delete from current to end of line. */ - buf[l.pos] = '\0'; - l.len = l.pos; - refreshLine(&l); - break; - case CTRL_A: /* Ctrl+a, go to the start of the line */ - linenoiseEditMoveHome(&l); - break; - case CTRL_E: /* ctrl+e, go to the end of the line */ - linenoiseEditMoveEnd(&l); - break; - case CTRL_L: /* ctrl+l, clear screen */ - linenoiseClearScreen(); - refreshLine(&l); - break; - case CTRL_W: /* ctrl+w, delete previous word */ - linenoiseEditDeletePrevWord(&l); - break; - } - } - return l.len; -} - -/* This special mode is used by linenoise in order to print scan codes - * on screen for debugging / development purposes. It is implemented - * by the linenoise_example program using the --keycodes option. */ -void linenoisePrintKeyCodes(void) { - char quit[4]; - - printf("Linenoise key codes debugging mode.\n" - "Press keys to see scan codes. Type 'quit' at any time to exit.\n"); - if (enableRawMode(STDIN_FILENO) == -1) return; - memset(quit,' ',4); - while(1) { - char c; - int nread; - - nread = read(STDIN_FILENO,&c,1); - if (nread <= 0) continue; - memmove(quit,quit+1,sizeof(quit)-1); /* shift string to left. */ - quit[sizeof(quit)-1] = c; /* Insert current char on the right. */ - if (memcmp(quit,"quit",sizeof(quit)) == 0) break; - - printf("'%c' %02x (%d) (type quit to exit)\n", - isprint(c) ? c : '?', (int)c, (int)c); - printf("\r"); /* Go left edge manually, we are in raw mode. */ - fflush(stdout); - } - disableRawMode(STDIN_FILENO); -} - -/* This function calls the line editing function linenoiseEdit() using - * the STDIN file descriptor set in raw mode. 
*/ -static int linenoiseRaw(char *buf, size_t buflen, const char *prompt) { - int count; - - if (buflen == 0) { - errno = EINVAL; - return -1; - } - - if (enableRawMode(STDIN_FILENO) == -1) return -1; - count = linenoiseEdit(STDIN_FILENO, STDOUT_FILENO, buf, buflen, prompt); - disableRawMode(STDIN_FILENO); - printf("\n"); - return count; -} - -/* This function is called when linenoise() is called with the standard - * input file descriptor not attached to a TTY. So for example when the - * program using linenoise is called in pipe or with a file redirected - * to its standard input. In this case, we want to be able to return the - * line regardless of its length (by default we are limited to 4k). */ -static char *linenoiseNoTTY(void) { - char *line = NULL; - size_t len = 0, maxlen = 0; - - while(1) { - if (len == maxlen) { - if (maxlen == 0) maxlen = 16; - maxlen *= 2; - char *oldval = line; - line = realloc(line,maxlen); - if (line == NULL) { - if (oldval) free(oldval); - return NULL; - } - } - int c = fgetc(stdin); - if (c == EOF || c == '\n') { - if (c == EOF && len == 0) { - free(line); - return NULL; - } else { - line[len] = '\0'; - return line; - } - } else { - line[len] = c; - len++; - } - } -} - -/* The high level function that is the main API of the linenoise library. - * This function checks if the terminal has basic capabilities, just checking - * for a blacklist of stupid terminals, and later either calls the line - * editing function or uses dummy fgets() so that you will be able to type - * something even in the most desperate of the conditions. */ -char *linenoise(const char *prompt) { - char buf[LINENOISE_MAX_LINE]; - int count; - - if (!isatty(STDIN_FILENO)) { - /* Not a tty: read from file / pipe. In this mode we don't want any - * limit to the line size, so we call a function to handle that. */ - return linenoiseNoTTY(); - } else if (isUnsupportedTerm()) { - size_t len; - - printf("%s",prompt); - fflush(stdout); - if (fgets(buf,LINENOISE_MAX_LINE,stdin) == NULL) return NULL; - len = strlen(buf); - while(len && (buf[len-1] == '\n' || buf[len-1] == '\r')) { - len--; - buf[len] = '\0'; - } - return strdup(buf); - } else { - count = linenoiseRaw(buf,LINENOISE_MAX_LINE,prompt); - if (count == -1) return NULL; - return strdup(buf); - } -} - -/* This is just a wrapper the user may want to call in order to make sure - * the linenoise returned buffer is freed with the same allocator it was - * created with. Useful when the main program is using an alternative - * allocator. */ -void linenoiseFree(void *ptr) { - free(ptr); -} - -/* ================================ History ================================= */ - -/* Free the history, but does not reset it. Only used when we have to - * exit() to avoid memory leaks are reported by valgrind & co. */ -static void freeHistory(void) { - if (history) { - int j; - - for (j = 0; j < history_len; j++) - free(history[j]); - free(history); - } -} - -/* At exit we'll try to fix the terminal to the initial conditions. */ -static void linenoiseAtExit(void) { - disableRawMode(STDIN_FILENO); - freeHistory(); -} - -/* This is the API call to add a new entry in the linenoise history. - * It uses a fixed array of char pointers that are shifted (memmoved) - * when the history max length is reached in order to remove the older - * entry and make room for the new one, so it is not exactly suitable for huge - * histories, but will work well for a few hundred of entries. - * - * Using a circular buffer is smarter, but a bit more complex to handle. 
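Together with the history functions defined next, linenoise() is essentially the whole user-facing surface: call it in a loop, record non-empty lines, and release each buffer with linenoiseFree(). A minimal REPL sketch (hypothetical history file name):

    #include <stdio.h>
    #include "linenoise.h"

    int main(void) {
        char *line;

        linenoiseHistoryLoad("history.txt"); /* harmless if the file is absent */
        while ((line = linenoise("prompt> ")) != NULL) {
            if (line[0] != '\0') {
                printf("echo: '%s'\n", line);
                linenoiseHistoryAdd(line);          /* duplicates of the last
                                                       entry are skipped */
                linenoiseHistorySave("history.txt");
            }
            linenoiseFree(line); /* free with the library's own allocator */
        }
        return 0;
    }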
*/ -int linenoiseHistoryAdd(const char *line) { - char *linecopy; - - if (history_max_len == 0) return 0; - - /* Initialization on first call. */ - if (history == NULL) { - history = malloc(sizeof(char*)*history_max_len); - if (history == NULL) return 0; - memset(history,0,(sizeof(char*)*history_max_len)); - } - - /* Don't add duplicated lines. */ - if (history_len && !strcmp(history[history_len-1], line)) return 0; - - /* Add an heap allocated copy of the line in the history. - * If we reached the max length, remove the older line. */ - linecopy = strdup(line); - if (!linecopy) return 0; - if (history_len == history_max_len) { - free(history[0]); - memmove(history,history+1,sizeof(char*)*(history_max_len-1)); - history_len--; - } - history[history_len] = linecopy; - history_len++; - return 1; -} - -/* Set the maximum length for the history. This function can be called even - * if there is already some history, the function will make sure to retain - * just the latest 'len' elements if the new history length value is smaller - * than the amount of items already inside the history. */ -int linenoiseHistorySetMaxLen(int len) { - char **new; - - if (len < 1) return 0; - if (history) { - int tocopy = history_len; - - new = malloc(sizeof(char*)*len); - if (new == NULL) return 0; - - /* If we can't copy everything, free the elements we'll not use. */ - if (len < tocopy) { - int j; - - for (j = 0; j < tocopy-len; j++) free(history[j]); - tocopy = len; - } - memset(new,0,sizeof(char*)*len); - memcpy(new,history+(history_len-tocopy), sizeof(char*)*tocopy); - free(history); - history = new; - } - history_max_len = len; - if (history_len > history_max_len) - history_len = history_max_len; - return 1; -} - -/* Save the history in the specified file. On success 0 is returned - * otherwise -1 is returned. */ -int linenoiseHistorySave(const char *filename) { - mode_t old_umask = umask(S_IXUSR|S_IRWXG|S_IRWXO); - FILE *fp; - int j; - - fp = fopen(filename,"w"); - umask(old_umask); - if (fp == NULL) return -1; - chmod(filename,S_IRUSR|S_IWUSR); - for (j = 0; j < history_len; j++) - fprintf(fp,"%s\n",history[j]); - fclose(fp); - return 0; -} - -/* Load the history from the specified file. If the file does not exist - * zero is returned and no operation is performed. - * - * If the file exists and the operation succeeded 0 is returned, otherwise - * on error -1 is returned. */ -int linenoiseHistoryLoad(const char *filename) { - FILE *fp = fopen(filename,"r"); - char buf[LINENOISE_MAX_LINE]; - - if (fp == NULL) return -1; - - while (fgets(buf,LINENOISE_MAX_LINE,fp) != NULL) { - char *p; - - p = strchr(buf,'\r'); - if (!p) p = strchr(buf,'\n'); - if (p) *p = '\0'; - linenoiseHistoryAdd(buf); - } - fclose(fp); - return 0; -} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/linenoise.h b/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/linenoise.h deleted file mode 100644 index ed20232..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/deps/linenoise/linenoise.h +++ /dev/null @@ -1,73 +0,0 @@ -/* linenoise.h -- VERSION 1.0 - * - * Guerrilla line editing library against the idea that a line editing lib - * needs to be 20,000 lines of C code. - * - * See linenoise.c for more information. - * - * ------------------------------------------------------------------------ - * - * Copyright (c) 2010-2014, Salvatore Sanfilippo - * Copyright (c) 2010-2013, Pieter Noordhuis - * - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __LINENOISE_H -#define __LINENOISE_H - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct linenoiseCompletions { - size_t len; - char **cvec; -} linenoiseCompletions; - -typedef void(linenoiseCompletionCallback)(const char *, linenoiseCompletions *); -typedef char*(linenoiseHintsCallback)(const char *, int *color, int *bold); -typedef void(linenoiseFreeHintsCallback)(void *); -void linenoiseSetCompletionCallback(linenoiseCompletionCallback *); -void linenoiseSetHintsCallback(linenoiseHintsCallback *); -void linenoiseSetFreeHintsCallback(linenoiseFreeHintsCallback *); -void linenoiseAddCompletion(linenoiseCompletions *, const char *); - -char *linenoise(const char *prompt); -void linenoiseFree(void *ptr); -int linenoiseHistoryAdd(const char *line); -int linenoiseHistorySetMaxLen(int len); -int linenoiseHistorySave(const char *filename); -int linenoiseHistoryLoad(const char *filename); -void linenoiseClearScreen(void); -void linenoiseSetMultiLine(int ml); -void linenoisePrintKeyCodes(void); - -#ifdef __cplusplus -} -#endif - -#endif /* __LINENOISE_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/src/blocked.c b/redis-android/src/main/jni/redis-4.0.11/src/blocked.c deleted file mode 100644 index 54b26b7..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/src/blocked.c +++ /dev/null @@ -1,195 +0,0 @@ -/* blocked.c - generic support for blocking operations like BLPOP & WAIT. - * - * Copyright (c) 2009-2012, Salvatore Sanfilippo - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. 
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- * ---------------------------------------------------------------------------
- *
- * API:
- *
- * getTimeoutFromObjectOrReply() is just a utility function to parse a
- * timeout argument since blocking operations usually require a timeout.
- *
- * blockClient() sets the CLIENT_BLOCKED flag in the client, and sets the
- * specified block type 'btype' field to one of the BLOCKED_* macros.
- *
- * unblockClient() unblocks the client doing the following:
- * 1) It calls the btype-specific function to cleanup the state.
- * 2) It unblocks the client by unsetting the CLIENT_BLOCKED flag.
- * 3) It puts the client into a list of just unblocked clients that are
- *    processed ASAP in the beforeSleep() event loop callback, so that
- *    if there is some query buffer to process, we do it. This is also
- *    required because otherwise there is no 'readable' event fired, since we
- *    already read the pending commands. We also set the CLIENT_UNBLOCKED
- *    flag to remember the client is in the unblocked_clients list.
- *
- * processUnblockedClients() is called inside the beforeSleep() function
- * to process the query buffer from unblocked clients and remove the clients
- * from the blocked_clients queue.
- *
- * replyToBlockedClientTimedOut() is called by the cron function when
- * a blocked client reaches the specified timeout (if the timeout is set
- * to 0, no timeout is processed).
- * It usually just needs to send a reply to the client.
- *
- * When implementing a new type of blocking operation, the implementation
- * should modify unblockClient() and replyToBlockedClientTimedOut() in order
- * to handle the btype-specific behavior of these two functions.
- * If the blocking operation waits for certain keys to change state, the
- * clusterRedirectBlockedClientIfNeeded() function should also be updated.
- */
-
-#include "server.h"
-
-/* Get a timeout value from an object and store it into 'timeout'.
- * The final timeout is always stored as milliseconds as a time where the
- * timeout will expire, however the parsing is performed according to
- * the 'unit' that can be seconds or milliseconds.
- *
- * Note that if the timeout is zero (usually from the point of view of
- * commands API this means no timeout) the value stored into 'timeout'
- * is zero.
*/ -int getTimeoutFromObjectOrReply(client *c, robj *object, mstime_t *timeout, int unit) { - long long tval; - - if (getLongLongFromObjectOrReply(c,object,&tval, - "timeout is not an integer or out of range") != C_OK) - return C_ERR; - - if (tval < 0) { - addReplyError(c,"timeout is negative"); - return C_ERR; - } - - if (tval > 0) { - if (unit == UNIT_SECONDS) tval *= 1000; - tval += mstime(); - } - *timeout = tval; - - return C_OK; -} - -/* Block a client for the specific operation type. Once the CLIENT_BLOCKED - * flag is set client query buffer is not longer processed, but accumulated, - * and will be processed when the client is unblocked. */ -void blockClient(client *c, int btype) { - c->flags |= CLIENT_BLOCKED; - c->btype = btype; - server.bpop_blocked_clients++; -} - -/* This function is called in the beforeSleep() function of the event loop - * in order to process the pending input buffer of clients that were - * unblocked after a blocking operation. */ -void processUnblockedClients(void) { - listNode *ln; - client *c; - - while (listLength(server.unblocked_clients)) { - ln = listFirst(server.unblocked_clients); - serverAssert(ln != NULL); - c = ln->value; - listDelNode(server.unblocked_clients,ln); - c->flags &= ~CLIENT_UNBLOCKED; - - /* Process remaining data in the input buffer, unless the client - * is blocked again. Actually processInputBuffer() checks that the - * client is not blocked before to proceed, but things may change and - * the code is conceptually more correct this way. */ - if (!(c->flags & CLIENT_BLOCKED)) { - if (c->querybuf && sdslen(c->querybuf) > 0) { - processInputBuffer(c); - } - } - } -} - -/* Unblock a client calling the right function depending on the kind - * of operation the client is blocking for. */ -void unblockClient(client *c) { - if (c->btype == BLOCKED_LIST) { - unblockClientWaitingData(c); - } else if (c->btype == BLOCKED_WAIT) { - unblockClientWaitingReplicas(c); - } else if (c->btype == BLOCKED_MODULE) { - unblockClientFromModule(c); - } else { - serverPanic("Unknown btype in unblockClient()."); - } - /* Clear the flags, and put the client in the unblocked list so that - * we'll process new commands in its query buffer ASAP. */ - c->flags &= ~CLIENT_BLOCKED; - c->btype = BLOCKED_NONE; - server.bpop_blocked_clients--; - /* The client may already be into the unblocked list because of a previous - * blocking operation, don't add back it into the list multiple times. */ - if (!(c->flags & CLIENT_UNBLOCKED)) { - c->flags |= CLIENT_UNBLOCKED; - listAddNodeTail(server.unblocked_clients,c); - } -} - -/* This function gets called when a blocked client timed out in order to - * send it a reply of some kind. After this function is called, - * unblockClient() will be called with the same client as argument. */ -void replyToBlockedClientTimedOut(client *c) { - if (c->btype == BLOCKED_LIST) { - addReply(c,shared.nullmultibulk); - } else if (c->btype == BLOCKED_WAIT) { - addReplyLongLong(c,replicationCountAcksByOffset(c->bpop.reploffset)); - } else if (c->btype == BLOCKED_MODULE) { - moduleBlockedClientTimedOut(c); - } else { - serverPanic("Unknown btype in replyToBlockedClientTimedOut()."); - } -} - -/* Mass-unblock clients because something changed in the instance that makes - * blocking no longer safe. For example clients blocked in list operations - * in an instance which turns from master to slave is unsafe, so this function - * is called when a master turns into a slave. 
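Note that getTimeoutFromObjectOrReply() stores an absolute deadline in milliseconds, not a duration. The arithmetic, restated as a self-contained sketch (the UNIT_SECONDS convention and mstime(), the server's millisecond clock, are the assumptions here):

    #include <sys/time.h>

    static long long now_ms(void) { /* stand-in for the server's mstime() */
        struct timeval tv;
        gettimeofday(&tv, NULL);
        return ((long long)tv.tv_sec) * 1000 + tv.tv_usec / 1000;
    }

    /* BLPOP-style timeout: 0 means "block forever", otherwise compute the
     * absolute expiry time, e.g. a 5 second timeout -> now_ms() + 5000. */
    static long long deadline_ms(long long tval, int unit_is_seconds) {
        if (tval == 0) return 0;
        if (unit_is_seconds) tval *= 1000;
        return now_ms() + tval;
    }

The cron then only has to compare this stored deadline against the current clock to decide when replyToBlockedClientTimedOut() fires.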
- * - * The semantics is to send an -UNBLOCKED error to the client, disconnecting - * it at the same time. */ -void disconnectAllBlockedClients(void) { - listNode *ln; - listIter li; - - listRewind(server.clients,&li); - while((ln = listNext(&li))) { - client *c = listNodeValue(ln); - - if (c->flags & CLIENT_BLOCKED) { - addReplySds(c,sdsnew( - "-UNBLOCKED force unblock from blocking operation, " - "instance state changed (master -> slave?)\r\n")); - unblockClient(c); - c->flags |= CLIENT_CLOSE_AFTER_REPLY; - } - } -} diff --git a/redis-android/src/main/jni/redis-4.0.11/src/defrag.c b/redis-android/src/main/jni/redis-4.0.11/src/defrag.c deleted file mode 100644 index 3f0e662..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/src/defrag.c +++ /dev/null @@ -1,579 +0,0 @@ -/* - * Active memory defragmentation - * Try to find key / value allocations that need to be re-allocated in order - * to reduce external fragmentation. - * We do that by scanning the keyspace and for each pointer we have, we can try to - * ask the allocator if moving it to a new address will help reduce fragmentation. - * - * Copyright (c) 2017, Oran Agra - * Copyright (c) 2017, Redis Labs, Inc - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include "server.h" -#include -#include -#include - -#ifdef HAVE_DEFRAG - -/* this method was added to jemalloc in order to help us understand which - * pointers are worthwhile moving and which aren't */ -int je_get_defrag_hint(void* ptr, int *bin_util, int *run_util); - -/* Defrag helper for generic allocations. - * - * returns NULL in case the allocatoin wasn't moved. - * when it returns a non-null value, the old pointer was already released - * and should NOT be accessed. */ -void* activeDefragAlloc(void *ptr) { - int bin_util, run_util; - size_t size; - void *newptr; - if(!je_get_defrag_hint(ptr, &bin_util, &run_util)) { - server.stat_active_defrag_misses++; - return NULL; - } - /* if this run is more utilized than the average utilization in this bin - * (or it is full), skip it. 
This will eventually move all the allocations
- * from relatively empty runs into relatively full runs. */
-    if (run_util > bin_util || run_util == 1<<16) {
-        server.stat_active_defrag_misses++;
-        return NULL;
-    }
-    /* move this allocation to a new allocation.
-     * make sure not to use the thread cache. so that we don't get back the same
-     * pointers we try to free */
-    size = zmalloc_size(ptr);
-    newptr = zmalloc_no_tcache(size);
-    memcpy(newptr, ptr, size);
-    zfree_no_tcache(ptr);
-    return newptr;
-}
-
-/* Defrag helper for sds strings
- *
- * returns NULL in case the allocation wasn't moved.
- * when it returns a non-null value, the old pointer was already released
- * and should NOT be accessed. */
-sds activeDefragSds(sds sdsptr) {
-    void* ptr = sdsAllocPtr(sdsptr);
-    void* newptr = activeDefragAlloc(ptr);
-    if (newptr) {
-        size_t offset = sdsptr - (char*)ptr;
-        sdsptr = (char*)newptr + offset;
-        return sdsptr;
-    }
-    return NULL;
-}
-
-/* Defrag helper for robj and/or string objects
- *
- * returns NULL in case the allocation wasn't moved.
- * when it returns a non-null value, the old pointer was already released
- * and should NOT be accessed. */
-robj *activeDefragStringOb(robj* ob, int *defragged) {
-    robj *ret = NULL;
-    if (ob->refcount!=1)
-        return NULL;
-
-    /* try to defrag robj (only if not an EMBSTR type, which is handled below). */
-    if (ob->type!=OBJ_STRING || ob->encoding!=OBJ_ENCODING_EMBSTR) {
-        if ((ret = activeDefragAlloc(ob))) {
-            ob = ret;
-            (*defragged)++;
-        }
-    }
-
-    /* try to defrag string object */
-    if (ob->type == OBJ_STRING) {
-        if(ob->encoding==OBJ_ENCODING_RAW) {
-            sds newsds = activeDefragSds((sds)ob->ptr);
-            if (newsds) {
-                ob->ptr = newsds;
-                (*defragged)++;
-            }
-        } else if (ob->encoding==OBJ_ENCODING_EMBSTR) {
-            /* The sds is embedded in the object allocation, calculate the
-             * offset and update the pointer in the new allocation. */
-            long ofs = (intptr_t)ob->ptr - (intptr_t)ob;
-            if ((ret = activeDefragAlloc(ob))) {
-                ret->ptr = (void*)((intptr_t)ret + ofs);
-                (*defragged)++;
-            }
-        } else if (ob->encoding!=OBJ_ENCODING_INT) {
-            serverPanic("Unknown string encoding");
-        }
-    }
-    return ret;
-}
-
-/* Defrag helper for dictEntries to be used during dict iteration (called on
- * each step). Returns a stat of how many pointers were moved. */
-int dictIterDefragEntry(dictIterator *iter) {
-    /* This function is a little bit dirty since it messes with the internals
-     * of the dict and its iterator, but the benefit is that it is very easy
-     * to use, and requires no other changes in the dict. */
-    int defragged = 0;
-    dictht *ht;
-    /* Handle the next entry (if there is one), and update the pointer in the
-     * current entry. */
-    if (iter->nextEntry) {
-        dictEntry *newde = activeDefragAlloc(iter->nextEntry);
-        if (newde) {
-            defragged++;
-            iter->nextEntry = newde;
-            iter->entry->next = newde;
-        }
-    }
-    /* handle the case of the first entry in the hash bucket. */
-    ht = &iter->d->ht[iter->table];
-    if (ht->table[iter->index] == iter->entry) {
-        dictEntry *newde = activeDefragAlloc(iter->entry);
-        if (newde) {
-            iter->entry = newde;
-            ht->table[iter->index] = newde;
-            defragged++;
-        }
-    }
-    return defragged;
-}
-
-/* Defrag helper for dict main allocations (dict struct, and hash tables).
- * receives a pointer to the dict* and implicitly updates it when the dict
- * struct itself was moved. Returns a stat of how many pointers were moved.
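All of these helpers share one contract: NULL means "not moved", while a non-NULL return is the new address and the old pointer has already been freed. Every caller therefore applies the same replace-if-moved pattern; schematically (hypothetical owner struct and field names):

    /* Sketch of the caller-side contract for activeDefragAlloc(). */
    static int defragField(struct owner *o) {
        void *newptr = activeDefragAlloc(o->field);
        if (newptr == NULL) return 0;  /* not moved: keep the old pointer  */
        o->field = newptr;             /* moved: old pointer already freed */
        return 1;                      /* count the relocation in stats    */
    }

dictDefragTables() below is exactly this pattern applied three times: to the dict struct itself and to its two hash tables.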
 */
-int dictDefragTables(dict** dictRef) {
-    dict *d = *dictRef;
-    dictEntry **newtable;
-    int defragged = 0;
-    /* handle the dict struct */
-    dict *newd = activeDefragAlloc(d);
-    if (newd)
-        defragged++, *dictRef = d = newd;
-    /* handle the first hash table */
-    newtable = activeDefragAlloc(d->ht[0].table);
-    if (newtable)
-        defragged++, d->ht[0].table = newtable;
-    /* handle the second hash table */
-    if (d->ht[1].table) {
-        newtable = activeDefragAlloc(d->ht[1].table);
-        if (newtable)
-            defragged++, d->ht[1].table = newtable;
-    }
-    return defragged;
-}
-
-/* Internal function used by zslDefrag */
-void zslUpdateNode(zskiplist *zsl, zskiplistNode *oldnode, zskiplistNode *newnode, zskiplistNode **update) {
-    int i;
-    for (i = 0; i < zsl->level; i++) {
-        if (update[i]->level[i].forward == oldnode)
-            update[i]->level[i].forward = newnode;
-    }
-    serverAssert(zsl->header!=oldnode);
-    if (newnode->level[0].forward) {
-        serverAssert(newnode->level[0].forward->backward==oldnode);
-        newnode->level[0].forward->backward = newnode;
-    } else {
-        serverAssert(zsl->tail==oldnode);
-        zsl->tail = newnode;
-    }
-}
-
-/* Defrag helper for sorted set.
- * Update the robj pointer, defrag the skiplist struct and return the new score
- * reference. We may not access the oldele pointer (not even the pointer stored
- * in the skiplist), as it was already freed. Newele may be null, in which case
- * we only need to defrag the skiplist, but not update the obj pointer.
- * When the return value is non-NULL, it is the score reference that must be
- * updated in the dict record. */
-double *zslDefrag(zskiplist *zsl, double score, sds oldele, sds newele) {
-    zskiplistNode *update[ZSKIPLIST_MAXLEVEL], *x, *newx;
-    int i;
-    sds ele = newele? newele: oldele;
-
-    /* find the skiplist node referring to the object that was moved,
-     * and all pointers that need to be updated if we'll end up moving the skiplist node. */
-    x = zsl->header;
-    for (i = zsl->level-1; i >= 0; i--) {
-        while (x->level[i].forward &&
-            x->level[i].forward->ele != oldele && /* make sure not to access the
-                                                     ->obj pointer if it matches
-                                                     oldele */
-            (x->level[i].forward->score < score ||
-                (x->level[i].forward->score == score &&
-                sdscmp(x->level[i].forward->ele,ele) < 0)))
-            x = x->level[i].forward;
-        update[i] = x;
-    }
-
-    /* update the robj pointer inside the skip list record. */
-    x = x->level[0].forward;
-    serverAssert(x && score == x->score && x->ele==oldele);
-    if (newele)
-        x->ele = newele;
-
-    /* try to defrag the skiplist record itself */
-    newx = activeDefragAlloc(x);
-    if (newx) {
-        zslUpdateNode(zsl, x, newx, update);
-        return &newx->score;
-    }
-    return NULL;
-}
-
-/* Utility function that replaces an old key pointer in the dictionary with a
- * new pointer. Additionally, we try to defrag the dictEntry in that dict.
- * Oldkey may be a dead pointer and should not be accessed (we get a
- * pre-calculated hash value). Newkey may be null if the key pointer wasn't
- * moved. Return value is the dictEntry if found, or NULL if not found.
- * NOTE: this is very ugly code, but it lets us avoid the complication of
- * doing a scan on another dict.
*/ -dictEntry* replaceSateliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, unsigned int hash, int *defragged) { - dictEntry **deref = dictFindEntryRefByPtrAndHash(d, oldkey, hash); - if (deref) { - dictEntry *de = *deref; - dictEntry *newde = activeDefragAlloc(de); - if (newde) { - de = *deref = newde; - (*defragged)++; - } - if (newkey) - de->key = newkey; - return de; - } - return NULL; -} - -/* for each key we scan in the main dict, this function will attempt to defrag - * all the various pointers it has. Returns a stat of how many pointers were - * moved. */ -int defragKey(redisDb *db, dictEntry *de) { - sds keysds = dictGetKey(de); - robj *newob, *ob; - unsigned char *newzl; - dict *d; - dictIterator *di; - int defragged = 0; - sds newsds; - - /* Try to defrag the key name. */ - newsds = activeDefragSds(keysds); - if (newsds) - defragged++, de->key = newsds; - if (dictSize(db->expires)) { - /* Dirty code: - * I can't search in db->expires for that key after i already released - * the pointer it holds it won't be able to do the string compare */ - uint64_t hash = dictGetHash(db->dict, de->key); - replaceSateliteDictKeyPtrAndOrDefragDictEntry(db->expires, keysds, newsds, hash, &defragged); - } - - /* Try to defrag robj and / or string value. */ - ob = dictGetVal(de); - if ((newob = activeDefragStringOb(ob, &defragged))) { - de->v.val = newob; - ob = newob; - } - - if (ob->type == OBJ_STRING) { - /* Already handled in activeDefragStringOb. */ - } else if (ob->type == OBJ_LIST) { - if (ob->encoding == OBJ_ENCODING_QUICKLIST) { - quicklist *ql = ob->ptr, *newql; - quicklistNode *node = ql->head, *newnode; - if ((newql = activeDefragAlloc(ql))) - defragged++, ob->ptr = ql = newql; - while (node) { - if ((newnode = activeDefragAlloc(node))) { - if (newnode->prev) - newnode->prev->next = newnode; - else - ql->head = newnode; - if (newnode->next) - newnode->next->prev = newnode; - else - ql->tail = newnode; - node = newnode; - defragged++; - } - if ((newzl = activeDefragAlloc(node->zl))) - defragged++, node->zl = newzl; - node = node->next; - } - } else if (ob->encoding == OBJ_ENCODING_ZIPLIST) { - if ((newzl = activeDefragAlloc(ob->ptr))) - defragged++, ob->ptr = newzl; - } else { - serverPanic("Unknown list encoding"); - } - } else if (ob->type == OBJ_SET) { - if (ob->encoding == OBJ_ENCODING_HT) { - d = ob->ptr; - di = dictGetIterator(d); - while((de = dictNext(di)) != NULL) { - sds sdsele = dictGetKey(de); - if ((newsds = activeDefragSds(sdsele))) - defragged++, de->key = newsds; - defragged += dictIterDefragEntry(di); - } - dictReleaseIterator(di); - dictDefragTables((dict**)&ob->ptr); - } else if (ob->encoding == OBJ_ENCODING_INTSET) { - intset *is = ob->ptr; - intset *newis = activeDefragAlloc(is); - if (newis) - defragged++, ob->ptr = newis; - } else { - serverPanic("Unknown set encoding"); - } - } else if (ob->type == OBJ_ZSET) { - if (ob->encoding == OBJ_ENCODING_ZIPLIST) { - if ((newzl = activeDefragAlloc(ob->ptr))) - defragged++, ob->ptr = newzl; - } else if (ob->encoding == OBJ_ENCODING_SKIPLIST) { - zset *zs = (zset*)ob->ptr; - zset *newzs; - zskiplist *newzsl; - struct zskiplistNode *newheader; - if ((newzs = activeDefragAlloc(zs))) - defragged++, ob->ptr = zs = newzs; - if ((newzsl = activeDefragAlloc(zs->zsl))) - defragged++, zs->zsl = newzsl; - if ((newheader = activeDefragAlloc(zs->zsl->header))) - defragged++, zs->zsl->header = newheader; - d = zs->dict; - di = dictGetIterator(d); - while((de = dictNext(di)) != NULL) { - double* newscore; - sds sdsele = 
dictGetKey(de);
-                if ((newsds = activeDefragSds(sdsele)))
-                    defragged++, de->key = newsds;
-                newscore = zslDefrag(zs->zsl, *(double*)dictGetVal(de), sdsele, newsds);
-                if (newscore) {
-                    dictSetVal(d, de, newscore);
-                    defragged++;
-                }
-                defragged += dictIterDefragEntry(di);
-            }
-            dictReleaseIterator(di);
-            dictDefragTables(&zs->dict);
-        } else {
-            serverPanic("Unknown sorted set encoding");
-        }
-    } else if (ob->type == OBJ_HASH) {
-        if (ob->encoding == OBJ_ENCODING_ZIPLIST) {
-            if ((newzl = activeDefragAlloc(ob->ptr)))
-                defragged++, ob->ptr = newzl;
-        } else if (ob->encoding == OBJ_ENCODING_HT) {
-            d = ob->ptr;
-            di = dictGetIterator(d);
-            while((de = dictNext(di)) != NULL) {
-                sds sdsele = dictGetKey(de);
-                if ((newsds = activeDefragSds(sdsele)))
-                    defragged++, de->key = newsds;
-                sdsele = dictGetVal(de);
-                if ((newsds = activeDefragSds(sdsele)))
-                    defragged++, de->v.val = newsds;
-                defragged += dictIterDefragEntry(di);
-            }
-            dictReleaseIterator(di);
-            dictDefragTables((dict**)&ob->ptr);
-        } else {
-            serverPanic("Unknown hash encoding");
-        }
-    } else if (ob->type == OBJ_MODULE) {
-        /* Currently defragmenting modules private data types
-         * is not supported. */
-    } else {
-        serverPanic("Unknown object type");
-    }
-    return defragged;
-}
-
-/* Defrag scan callback for the main db dictionary. */
-void defragScanCallback(void *privdata, const dictEntry *de) {
-    int defragged = defragKey((redisDb*)privdata, (dictEntry*)de);
-    server.stat_active_defrag_hits += defragged;
-    if(defragged)
-        server.stat_active_defrag_key_hits++;
-    else
-        server.stat_active_defrag_key_misses++;
-}
-
-/* Defrag scan callback for each hash table bucket,
- * used in order to defrag the dictEntry allocations. */
-void defragDictBucketCallback(void *privdata, dictEntry **bucketref) {
-    UNUSED(privdata);
-    while(*bucketref) {
-        dictEntry *de = *bucketref, *newde;
-        if ((newde = activeDefragAlloc(de))) {
-            *bucketref = newde;
-        }
-        bucketref = &(*bucketref)->next;
-    }
-}
-
-/* Utility function to get the fragmentation ratio from jemalloc.
- * It is critical to do that by comparing only heap maps that belong to
- * jemalloc, and skip the ones jemalloc keeps as spare. Since we use this
- * fragmentation ratio in order to decide if a defrag action should be taken
- * or not, a false detection can cause the defragmenter to waste a lot of CPU
- * without the possibility of getting any results. */
-float getAllocatorFragmentation(size_t *out_frag_bytes) {
-    size_t epoch = 1, allocated = 0, resident = 0, active = 0, sz = sizeof(size_t);
-    /* Update the statistics cached by mallctl. */
-    je_mallctl("epoch", &epoch, &sz, &epoch, sz);
-    /* Unlike RSS, this does not include RSS from shared libraries and other non
-     * heap mappings. */
-    je_mallctl("stats.resident", &resident, &sz, NULL, 0);
-    /* Unlike resident, this doesn't include the pages jemalloc reserves
-     * for re-use (purge will clean that). */
-    je_mallctl("stats.active", &active, &sz, NULL, 0);
-    /* Unlike zmalloc_used_memory, this matches the stats.resident by taking
-     * into account all allocations done by this process (not only zmalloc).
*/ - je_mallctl("stats.allocated", &allocated, &sz, NULL, 0); - float frag_pct = ((float)active / allocated)*100 - 100; - size_t frag_bytes = active - allocated; - float rss_pct = ((float)resident / allocated)*100 - 100; - size_t rss_bytes = resident - allocated; - if(out_frag_bytes) - *out_frag_bytes = frag_bytes; - serverLog(LL_DEBUG, - "allocated=%zu, active=%zu, resident=%zu, frag=%.0f%% (%.0f%% rss), frag_bytes=%zu (%zu%% rss)", - allocated, active, resident, frag_pct, rss_pct, frag_bytes, rss_bytes); - return frag_pct; -} - -#define INTERPOLATE(x, x1, x2, y1, y2) ( (y1) + ((x)-(x1)) * ((y2)-(y1)) / ((x2)-(x1)) ) -#define LIMIT(y, min, max) ((y)<(min)? min: ((y)>(max)? max: (y))) - -/* Perform incremental defragmentation work from the serverCron. - * This works in a similar way to activeExpireCycle, in the sense that - * we do incremental work across calls. */ -void activeDefragCycle(void) { - static int current_db = -1; - static unsigned long cursor = 0; - static redisDb *db = NULL; - static long long start_scan, start_stat; - unsigned int iterations = 0; - unsigned long long defragged = server.stat_active_defrag_hits; - long long start, timelimit; - - if (server.aof_child_pid!=-1 || server.rdb_child_pid!=-1) - return; /* Defragging memory while there's a fork will just do damage. */ - - /* Once a second, check if we the fragmentation justfies starting a scan - * or making it more aggressive. */ - run_with_period(1000) { - size_t frag_bytes; - float frag_pct = getAllocatorFragmentation(&frag_bytes); - /* If we're not already running, and below the threshold, exit. */ - if (!server.active_defrag_running) { - if(frag_pct < server.active_defrag_threshold_lower || frag_bytes < server.active_defrag_ignore_bytes) - return; - } - - /* Calculate the adaptive aggressiveness of the defrag */ - int cpu_pct = INTERPOLATE(frag_pct, - server.active_defrag_threshold_lower, - server.active_defrag_threshold_upper, - server.active_defrag_cycle_min, - server.active_defrag_cycle_max); - cpu_pct = LIMIT(cpu_pct, - server.active_defrag_cycle_min, - server.active_defrag_cycle_max); - /* We allow increasing the aggressiveness during a scan, but don't - * reduce it. */ - if (!server.active_defrag_running || - cpu_pct > server.active_defrag_running) - { - server.active_defrag_running = cpu_pct; - serverLog(LL_VERBOSE, - "Starting active defrag, frag=%.0f%%, frag_bytes=%zu, cpu=%d%%", - frag_pct, frag_bytes, cpu_pct); - } - } - if (!server.active_defrag_running) - return; - - /* See activeExpireCycle for how timelimit is handled. */ - start = ustime(); - timelimit = 1000000*server.active_defrag_running/server.hz/100; - if (timelimit <= 0) timelimit = 1; - - do { - if (!cursor) { - /* Move on to next database, and stop if we reached the last one. */ - if (++current_db >= server.dbnum) { - long long now = ustime(); - size_t frag_bytes; - float frag_pct = getAllocatorFragmentation(&frag_bytes); - serverLog(LL_VERBOSE, - "Active defrag done in %dms, reallocated=%d, frag=%.0f%%, frag_bytes=%zu", - (int)((now - start_scan)/1000), (int)(server.stat_active_defrag_hits - start_stat), frag_pct, frag_bytes); - - start_scan = now; - current_db = -1; - cursor = 0; - db = NULL; - server.active_defrag_running = 0; - return; - } - else if (current_db==0) { - /* Start a scan from the first database. 
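The INTERPOLATE and LIMIT macros above do all the pacing work: the measured fragmentation percentage is mapped linearly between the two thresholds onto the configured CPU range, then clamped. A worked example under assumed settings (threshold lower=10, upper=100, cycle_min=25, cycle_max=75):

    /* frag_pct = 40  ->  25 + (40-10) * (75-25) / (100-10) = 25 + 16 = 41 */
    int cpu_pct = INTERPOLATE(40, 10, 100, 25, 75);
    cpu_pct = LIMIT(cpu_pct, 25, 75); /* already in range: stays 41 */

So a moderately fragmented instance would get roughly a 41% CPU budget for defrag, and as the code notes, the budget may only ratchet up, never down, while a scan is in progress.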
*/ - start_scan = ustime(); - start_stat = server.stat_active_defrag_hits; - } - - db = &server.db[current_db]; - cursor = 0; - } - - do { - cursor = dictScan(db->dict, cursor, defragScanCallback, defragDictBucketCallback, db); - /* Once in 16 scan iterations, or 1000 pointer reallocations - * (if we have a lot of pointers in one hash bucket), check if we - * reached the tiem limit. */ - if (cursor && (++iterations > 16 || server.stat_active_defrag_hits - defragged > 1000)) { - if ((ustime() - start) > timelimit) { - return; - } - iterations = 0; - defragged = server.stat_active_defrag_hits; - } - } while(cursor); - } while(1); -} - -#else /* HAVE_DEFRAG */ - -void activeDefragCycle(void) { - /* Not implemented yet. */ -} - -#endif diff --git a/redis-android/src/main/jni/redis-4.0.11/src/redis-cli.c b/redis-android/src/main/jni/redis-4.0.11/src/redis-cli.c deleted file mode 100644 index cd3f47d..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/src/redis-cli.c +++ /dev/null @@ -1,2985 +0,0 @@ -/* Redis CLI (command line interface) - * - * Copyright (c) 2009-2012, Salvatore Sanfilippo - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include "fmacros.h" -#include "version.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include /* use sds.h from hiredis, so that only one set of sds functions will be present in the binary */ -#include "zmalloc.h" -#include "linenoise.h" -#include "help.h" -#include "anet.h" -#include "ae.h" - -#define UNUSED(V) ((void) V) - -#define OUTPUT_STANDARD 0 -#define OUTPUT_RAW 1 -#define OUTPUT_CSV 2 -#define REDIS_CLI_KEEPALIVE_INTERVAL 15 /* seconds */ -#define REDIS_CLI_DEFAULT_PIPE_TIMEOUT 30 /* seconds */ -#define REDIS_CLI_HISTFILE_ENV "REDISCLI_HISTFILE" -#define REDIS_CLI_HISTFILE_DEFAULT ".rediscli_history" -#define REDIS_CLI_RCFILE_ENV "REDISCLI_RCFILE" -#define REDIS_CLI_RCFILE_DEFAULT ".redisclirc" - -/* --latency-dist palettes. 
*/ -int spectrum_palette_color_size = 19; -int spectrum_palette_color[] = {0,233,234,235,237,239,241,243,245,247,144,143,142,184,226,214,208,202,196}; - -int spectrum_palette_mono_size = 13; -int spectrum_palette_mono[] = {0,233,234,235,237,239,241,243,245,247,249,251,253}; - -/* The actual palette in use. */ -int *spectrum_palette; -int spectrum_palette_size; - -static redisContext *context; -static struct config { - char *hostip; - int hostport; - char *hostsocket; - long repeat; - long interval; - int dbnum; - int interactive; - int shutdown; - int monitor_mode; - int pubsub_mode; - int latency_mode; - int latency_dist_mode; - int latency_history; - int lru_test_mode; - long long lru_test_sample_size; - int cluster_mode; - int cluster_reissue_command; - int slave_mode; - int pipe_mode; - int pipe_timeout; - int getrdb_mode; - int stat_mode; - int scan_mode; - int intrinsic_latency_mode; - int intrinsic_latency_duration; - char *pattern; - char *rdb_filename; - int bigkeys; - int hotkeys; - int stdinarg; /* get last arg from stdin. (-x option) */ - char *auth; - int output; /* output mode, see OUTPUT_* defines */ - sds mb_delim; - char prompt[128]; - char *eval; - int eval_ldb; - int eval_ldb_sync; /* Ask for synchronous mode of the Lua debugger. */ - int eval_ldb_end; /* Lua debugging session ended. */ - int enable_ldb_on_eval; /* Handle manual SCRIPT DEBUG + EVAL commands. */ - int last_cmd_type; -} config; - -/* User preferences. */ -static struct pref { - int hints; -} pref; - -static volatile sig_atomic_t force_cancel_loop = 0; -static void usage(void); -static void slaveMode(void); -char *redisGitSHA1(void); -char *redisGitDirty(void); -static int cliConnect(int force); - -/*------------------------------------------------------------------------------ - * Utility functions - *--------------------------------------------------------------------------- */ - -static long long ustime(void) { - struct timeval tv; - long long ust; - - gettimeofday(&tv, NULL); - ust = ((long long)tv.tv_sec)*1000000; - ust += tv.tv_usec; - return ust; -} - -static long long mstime(void) { - return ustime()/1000; -} - -static void cliRefreshPrompt(void) { - if (config.eval_ldb) return; - - sds prompt = sdsempty(); - if (config.hostsocket != NULL) { - prompt = sdscatfmt(prompt,"redis %s",config.hostsocket); - } else { - char addr[256]; - anetFormatAddr(addr, sizeof(addr), config.hostip, config.hostport); - prompt = sdscatlen(prompt,addr,strlen(addr)); - } - - /* Add [dbnum] if needed */ - if (config.dbnum != 0) - prompt = sdscatfmt(prompt,"[%i]",config.dbnum); - - /* Copy the prompt in the static buffer. */ - prompt = sdscatlen(prompt,"> ",2); - snprintf(config.prompt,sizeof(config.prompt),"%s",prompt); - sdsfree(prompt); -} - -/* Return the name of the dotfile for the specified 'dotfilename'. - * Normally it just concatenates user $HOME to the file specified - * in 'dotfilename'. However if the environment varialbe 'envoverride' - * is set, its value is taken as the path. - * - * The function returns NULL (if the file is /dev/null or cannot be - * obtained for some error), or an SDS string that must be freed by - * the user. */ -static sds getDotfilePath(char *envoverride, char *dotfilename) { - char *path = NULL; - sds dotPath = NULL; - - /* Check the env for a dotfile override. */ - path = getenv(envoverride); - if (path != NULL && *path != '\0') { - if (!strcmp("/dev/null", path)) { - return NULL; - } - - /* If the env is set, return it. 
-/* Return the name of the dotfile for the specified 'dotfilename'. - * Normally it just concatenates user $HOME to the file specified - * in 'dotfilename'. However if the environment variable 'envoverride' - * is set, its value is taken as the path. - * - * The function returns NULL (if the file is /dev/null or cannot be - * obtained for some error), or an SDS string that must be freed by - * the user. */ -static sds getDotfilePath(char *envoverride, char *dotfilename) { - char *path = NULL; - sds dotPath = NULL; - - /* Check the env for a dotfile override. */ - path = getenv(envoverride); - if (path != NULL && *path != '\0') { - if (!strcmp("/dev/null", path)) { - return NULL; - } - - /* If the env is set, return it. */ - dotPath = sdsnew(path); - } else { - char *home = getenv("HOME"); - if (home != NULL && *home != '\0') { - /* If no override is set use $HOME/<dotfilename>. */ - dotPath = sdscatprintf(sdsempty(), "%s/%s", home, dotfilename); - } - } - return dotPath; -} - -/* URL-style percent decoding. */ -#define isHexChar(c) (isdigit(c) || (c >= 'a' && c <= 'f')) -#define decodeHexChar(c) (isdigit(c) ? c - '0' : c - 'a' + 10) -#define decodeHex(h, l) ((decodeHexChar(h) << 4) + decodeHexChar(l)) - -static sds percentDecode(const char *pe, size_t len) { - const char *end = pe + len; - sds ret = sdsempty(); - const char *curr = pe; - - while (curr < end) { - if (*curr == '%') { - if ((end - curr) < 2) { - fprintf(stderr, "Incomplete URI encoding\n"); - exit(1); - } - - char h = tolower(*(++curr)); - char l = tolower(*(++curr)); - if (!isHexChar(h) || !isHexChar(l)) { - fprintf(stderr, "Illegal character in URI encoding\n"); - exit(1); - } - char c = decodeHex(h, l); - ret = sdscatlen(ret, &c, 1); - curr++; - } else { - ret = sdscatlen(ret, curr++, 1); - } - } - - return ret; -} - -/* Parse a URI and extract the server connection information. - * URI scheme is based on the provisional specification[1] excluding support - * for query parameters. Valid URIs are: - * scheme: "redis://" - * authority: [[<username> ":"] <password> "@"] [<hostname> [":" <port>]] - * path: ["/" [<db>]] - * - * [1]: https://www.iana.org/assignments/uri-schemes/prov/redis */ -static void parseRedisUri(const char *uri) { - - const char *scheme = "redis://"; - const char *curr = uri; - const char *end = uri + strlen(uri); - const char *userinfo, *username, *port, *host, *path; - - /* URI must start with a valid scheme. */ - if (strncasecmp(scheme, curr, strlen(scheme))) { - fprintf(stderr,"Invalid URI scheme\n"); - exit(1); - } - curr += strlen(scheme); - if (curr == end) return; - - /* Extract user info. */ - if ((userinfo = strchr(curr,'@'))) { - if ((username = strchr(curr, ':')) && username < userinfo) { - /* If provided, username is ignored. */ - curr = username + 1; - } - - config.auth = percentDecode(curr, userinfo - curr); - curr = userinfo + 1; - } - if (curr == end) return; - - /* Extract host and port. */ - path = strchr(curr, '/'); - if (*curr != '/') { - host = path ? path - 1 : end; - if ((port = strchr(curr, ':'))) { - config.hostport = atoi(port + 1); - host = port - 1; - } - config.hostip = sdsnewlen(curr, host - curr + 1); - } - curr = path ? path + 1 : end; - if (curr == end) return; - - /* Extract database number. */ - config.dbnum = atoi(curr); -} -
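With the grammar above, a URI such as redis://:p%40ss@example.com:6380/2 yields the password "p@ss" (via percentDecode()), host example.com, port 6380 and database 2. The hex decoding reduces to the two macros already defined; a worked check:

    #include <assert.h>
    #include <ctype.h>

    #define isHexChar(c) (isdigit(c) || (c >= 'a' && c <= 'f'))
    #define decodeHexChar(c) (isdigit(c) ? c - '0' : c - 'a' + 10)
    #define decodeHex(h, l) ((decodeHexChar(h) << 4) + decodeHexChar(l))

    int main(void) {
        /* "%40" -> h='4', l='0' -> (4<<4)+0 = 64 = '@' */
        assert(decodeHex('4', '0') == '@');
        return 0;
    }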
-/*------------------------------------------------------------------------------ - * Help functions - *--------------------------------------------------------------------------- */ - -#define CLI_HELP_COMMAND 1 -#define CLI_HELP_GROUP 2 - -typedef struct { - int type; - int argc; - sds *argv; - sds full; - - /* Only used for help on commands */ - struct commandHelp *org; -} helpEntry; - -static helpEntry *helpEntries; -static int helpEntriesLen; - -static sds cliVersion(void) { - sds version; - version = sdscatprintf(sdsempty(), "%s", REDIS_VERSION); - - /* Add git commit and working tree status when available */ - if (strtoll(redisGitSHA1(),NULL,16)) { - version = sdscatprintf(version, " (git:%s", redisGitSHA1()); - if (strtoll(redisGitDirty(),NULL,10)) - version = sdscatprintf(version, "-dirty"); - version = sdscat(version, ")"); - } - return version; -} - -static void cliInitHelp(void) { - int commandslen = sizeof(commandHelp)/sizeof(struct commandHelp); - int groupslen = sizeof(commandGroups)/sizeof(char*); - int i, len, pos = 0; - helpEntry tmp; - - helpEntriesLen = len = commandslen+groupslen; - helpEntries = zmalloc(sizeof(helpEntry)*len); - - for (i = 0; i < groupslen; i++) { - tmp.argc = 1; - tmp.argv = zmalloc(sizeof(sds)); - tmp.argv[0] = sdscatprintf(sdsempty(),"@%s",commandGroups[i]); - tmp.full = tmp.argv[0]; - tmp.type = CLI_HELP_GROUP; - tmp.org = NULL; - helpEntries[pos++] = tmp; - } - - for (i = 0; i < commandslen; i++) { - tmp.argv = sdssplitargs(commandHelp[i].name,&tmp.argc); - tmp.full = sdsnew(commandHelp[i].name); - tmp.type = CLI_HELP_COMMAND; - tmp.org = &commandHelp[i]; - helpEntries[pos++] = tmp; - } -} -
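cliIntegrateHelp() below merges in commands reported by the server's COMMAND reply, whose per-command arrays carry the name and arity used to synthesize help entries. A hedged standalone hiredis sketch of that query (host and port are placeholders):

    #include <stdio.h>
    #include <hiredis.h>

    int main(void) {
        redisContext *c = redisConnect("127.0.0.1", 6379);
        if (c == NULL || c->err) return 1;

        redisReply *r = redisCommand(c, "COMMAND");
        if (r && r->type == REDIS_REPLY_ARRAY) {
            for (size_t j = 0; j < r->elements && j < 5; j++) {
                redisReply *e = r->element[j];
                /* element[0] = name, element[1] = arity (negative = variadic) */
                if (e->type == REDIS_REPLY_ARRAY && e->elements >= 2)
                    printf("%s (arity %lld)\n",
                           e->element[0]->str, e->element[1]->integer);
            }
        }
        freeReplyObject(r);
        redisFree(c);
        return 0;
    }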
-/* cliInitHelp() sets up the helpEntries array with the command and group - * names from the help.h file. However the Redis instance we are connecting - * to may support more commands, so this function integrates the previous - * entries with additional entries obtained using the COMMAND command - * available in recent versions of Redis. */ -static void cliIntegrateHelp(void) { - if (cliConnect(0) == REDIS_ERR) return; - - redisReply *reply = redisCommand(context, "COMMAND"); - if(reply == NULL || reply->type != REDIS_REPLY_ARRAY) return; - - /* Scan the array reported by COMMAND and fill only the entries that - * don't already match what we have. */ - for (size_t j = 0; j < reply->elements; j++) { - redisReply *entry = reply->element[j]; - if (entry->type != REDIS_REPLY_ARRAY || entry->elements < 4 || - entry->element[0]->type != REDIS_REPLY_STRING || - entry->element[1]->type != REDIS_REPLY_INTEGER || - entry->element[3]->type != REDIS_REPLY_INTEGER) return; - char *cmdname = entry->element[0]->str; - int i; - - for (i = 0; i < helpEntriesLen; i++) { - helpEntry *he = helpEntries+i; - if (!strcasecmp(he->argv[0],cmdname)) - break; - } - if (i != helpEntriesLen) continue; - - helpEntriesLen++; - helpEntries = zrealloc(helpEntries,sizeof(helpEntry)*helpEntriesLen); - helpEntry *new = helpEntries+(helpEntriesLen-1); - - new->argc = 1; - new->argv = zmalloc(sizeof(sds)); - new->argv[0] = sdsnew(cmdname); - new->full = new->argv[0]; - new->type = CLI_HELP_COMMAND; - sdstoupper(new->argv[0]); - - struct commandHelp *ch = zmalloc(sizeof(*ch)); - ch->name = new->argv[0]; - ch->params = sdsempty(); - int args = llabs(entry->element[1]->integer); - if (entry->element[3]->integer == 1) { - ch->params = sdscat(ch->params,"key "); - args--; - } - while(args--) ch->params = sdscat(ch->params,"arg "); - if (entry->element[1]->integer < 0) - ch->params = sdscat(ch->params,"...options..."); - ch->summary = "Help not available"; - ch->group = 0; - ch->since = "not known"; - new->org = ch; - } - freeReplyObject(reply); -} - -/* Output command help to stdout. */ -static void cliOutputCommandHelp(struct commandHelp *help, int group) { - printf("\r\n \x1b[1m%s\x1b[0m \x1b[90m%s\x1b[0m\r\n", help->name, help->params); - printf(" \x1b[33msummary:\x1b[0m %s\r\n", help->summary); - printf(" \x1b[33msince:\x1b[0m %s\r\n", help->since); - if (group) { - printf(" \x1b[33mgroup:\x1b[0m %s\r\n", commandGroups[help->group]); - } -} - -/* Print generic help. */ -static void cliOutputGenericHelp(void) { - sds version = cliVersion(); - printf( - "redis-cli %s\n" - "To get help about Redis commands type:\n" - " \"help @<group>\" to get a list of commands in <group>\n" - " \"help <command>\" for help on <command>\n" - " \"help <tab>\" to get a list of possible help topics\n" - " \"quit\" to exit\n" - "\n" - "To set redis-cli preferences:\n" - " \":set hints\" enable online hints\n" - " \":set nohints\" disable online hints\n" - "Set your preferences in ~/.redisclirc\n", - version - ); - sdsfree(version); -} - -/* Output all command help, filtering by group or command name. */ -static void cliOutputHelp(int argc, char **argv) { - int i, j, len; - int group = -1; - helpEntry *entry; - struct commandHelp *help; - - if (argc == 0) { - cliOutputGenericHelp(); - return; - } else if (argc > 0 && argv[0][0] == '@') { - len = sizeof(commandGroups)/sizeof(char*); - for (i = 0; i < len; i++) { - if (strcasecmp(argv[0]+1,commandGroups[i]) == 0) { - group = i; - break; - } - } - } - - assert(argc > 0); - for (i = 0; i < helpEntriesLen; i++) { - entry = &helpEntries[i]; - if (entry->type != CLI_HELP_COMMAND) continue; - - help = entry->org; - if (group == -1) { - /* Compare all arguments */ - if (argc == entry->argc) { - for (j = 0; j < argc; j++) { - if (strcasecmp(argv[j],entry->argv[j]) != 0) break; - } - if (j == argc) { - cliOutputCommandHelp(help,1); - } - } - } else { - if (group == help->group) { - cliOutputCommandHelp(help,0); - } - } - } - printf("\r\n"); -} -
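The linenoise completion callback that follows is, at its core, a case-insensitive prefix match over the helpEntries table. Stripped of the linenoise plumbing, the matching logic amounts to this sketch (the entries table is illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    /* Illustrative stand-in for the helpEntries 'full' strings. */
    static const char *entries[] = {"GET", "GETRANGE", "GETSET", "SET"};

    static void complete(const char *typed) {
        size_t matchlen = strlen(typed);
        for (size_t i = 0; i < sizeof(entries)/sizeof(*entries); i++) {
            if (strncasecmp(typed, entries[i], matchlen) == 0)
                printf("candidate: %s\n", entries[i]); /* linenoiseAddCompletion() in the real code */
        }
    }

    int main(void) { complete("get"); return 0; }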
-/* Linenoise completion callback. */ -static void completionCallback(const char *buf, linenoiseCompletions *lc) { - size_t startpos = 0; - int mask; - int i; - size_t matchlen; - sds tmp; - - if (strncasecmp(buf,"help ",5) == 0) { - startpos = 5; - while (isspace(buf[startpos])) startpos++; - mask = CLI_HELP_COMMAND | CLI_HELP_GROUP; - } else { - mask = CLI_HELP_COMMAND; - } - - for (i = 0; i < helpEntriesLen; i++) { - if (!(helpEntries[i].type & mask)) continue; - - matchlen = strlen(buf+startpos); - if (strncasecmp(buf+startpos,helpEntries[i].full,matchlen) == 0) { - tmp = sdsnewlen(buf,startpos); - tmp = sdscat(tmp,helpEntries[i].full); - linenoiseAddCompletion(lc,tmp); - sdsfree(tmp); - } - } -} - -/* Linenoise hints callback. */ -static char *hintsCallback(const char *buf, int *color, int *bold) { - if (!pref.hints) return NULL; - - int i, argc, buflen = strlen(buf); - sds *argv = sdssplitargs(buf,&argc); - int endspace = buflen && isspace(buf[buflen-1]); - - /* Check if the argument list is empty and return ASAP. */ - if (argc == 0) { - sdsfreesplitres(argv,argc); - return NULL; - } - - for (i = 0; i < helpEntriesLen; i++) { - if (!(helpEntries[i].type & CLI_HELP_COMMAND)) continue; - - if (strcasecmp(argv[0],helpEntries[i].full) == 0) - { - *color = 90; - *bold = 0; - sds hint = sdsnew(helpEntries[i].org->params); - - /* Remove arguments from the returned hint to show only the - * ones the user has not yet typed. */ - int toremove = argc-1; - while(toremove > 0 && sdslen(hint)) { - if (hint[0] == '[') break; - if (hint[0] == ' ') toremove--; - sdsrange(hint,1,-1); - } - - /* Add an initial space if needed. */ - if (!endspace) { - sds newhint = sdsnewlen(" ",1); - newhint = sdscatsds(newhint,hint); - sdsfree(hint); - hint = newhint; - } - - sdsfreesplitres(argv,argc); - return hint; - } - } - sdsfreesplitres(argv,argc); - return NULL; -} - -static void freeHintsCallback(void *ptr) { - sdsfree(ptr); -} - -/*------------------------------------------------------------------------------ - * Networking / parsing - *--------------------------------------------------------------------------- */ - -/* Send AUTH command to the server */ -static int cliAuth(void) { - redisReply *reply; - if (config.auth == NULL) return REDIS_OK; - - reply = redisCommand(context,"AUTH %s",config.auth); - if (reply != NULL) { - freeReplyObject(reply); - return REDIS_OK; - } - return REDIS_ERR; -} - -/* Send SELECT dbnum to the server */ -static int cliSelect(void) { - redisReply *reply; - if (config.dbnum == 0) return REDIS_OK; - - reply = redisCommand(context,"SELECT %d",config.dbnum); - if (reply != NULL) { - int result = REDIS_OK; - if (reply->type == REDIS_REPLY_ERROR) result = REDIS_ERR; - freeReplyObject(reply); - return result; - } - return REDIS_ERR; -} - -/* Connect to the server. If force is not zero the connection is performed - * even if there is already a connected socket. 
*/ -static int cliConnect(int force) { - if (context == NULL || force) { - if (context != NULL) { - redisFree(context); - } - - if (config.hostsocket == NULL) { - context = redisConnect(config.hostip,config.hostport); - } else { - context = redisConnectUnix(config.hostsocket); - } - - if (context->err) { - fprintf(stderr,"Could not connect to Redis at "); - if (config.hostsocket == NULL) - fprintf(stderr,"%s:%d: %s\n",config.hostip,config.hostport,context->errstr); - else - fprintf(stderr,"%s: %s\n",config.hostsocket,context->errstr); - redisFree(context); - context = NULL; - return REDIS_ERR; - } - - /* Set aggressive KEEP_ALIVE socket option in the Redis context socket - * in order to prevent timeouts caused by the execution of long - * commands. At the same time this improves the detection of real - * errors. */ - anetKeepAlive(NULL, context->fd, REDIS_CLI_KEEPALIVE_INTERVAL); - - /* Do AUTH and select the right DB. */ - if (cliAuth() != REDIS_OK) - return REDIS_ERR; - if (cliSelect() != REDIS_OK) - return REDIS_ERR; - } - return REDIS_OK; -} - -static void cliPrintContextError(void) { - if (context == NULL) return; - fprintf(stderr,"Error: %s\n",context->errstr); -} - -static sds cliFormatReplyTTY(redisReply *r, char *prefix) { - sds out = sdsempty(); - switch (r->type) { - case REDIS_REPLY_ERROR: - out = sdscatprintf(out,"(error) %s\n", r->str); - break; - case REDIS_REPLY_STATUS: - out = sdscat(out,r->str); - out = sdscat(out,"\n"); - break; - case REDIS_REPLY_INTEGER: - out = sdscatprintf(out,"(integer) %lld\n",r->integer); - break; - case REDIS_REPLY_STRING: - /* If you are producing output for the standard output we want - * a more interesting output with quoted characters and so forth */ - out = sdscatrepr(out,r->str,r->len); - out = sdscat(out,"\n"); - break; - case REDIS_REPLY_NIL: - out = sdscat(out,"(nil)\n"); - break; - case REDIS_REPLY_ARRAY: - if (r->elements == 0) { - out = sdscat(out,"(empty list or set)\n"); - } else { - unsigned int i, idxlen = 0; - char _prefixlen[16]; - char _prefixfmt[16]; - sds _prefix; - sds tmp; - - /* Calculate chars needed to represent the largest index */ - i = r->elements; - do { - idxlen++; - i /= 10; - } while(i); - - /* Prefix for nested multi bulks should grow with idxlen+2 spaces */ - memset(_prefixlen,' ',idxlen+2); - _prefixlen[idxlen+2] = '\0'; - _prefix = sdscat(sdsnew(prefix),_prefixlen); - - /* Setup prefix format for every entry */ - snprintf(_prefixfmt,sizeof(_prefixfmt),"%%s%%%ud) ",idxlen); - - for (i = 0; i < r->elements; i++) { - /* Don't use the prefix for the first element, as the parent - * caller already prepended the index number. */ - out = sdscatprintf(out,_prefixfmt,i == 0 ? "" : prefix,i+1); - - /* Format the multi bulk entry */ - tmp = cliFormatReplyTTY(r->element[i],_prefix); - out = sdscatlen(out,tmp,sdslen(tmp)); - sdsfree(tmp); - } - sdsfree(_prefix); - } - break; - default: - fprintf(stderr,"Unknown reply type: %d\n", r->type); - exit(1); - } - return out; -} - -int isColorTerm(void) { - char *t = getenv("TERM"); - return t != NULL && strstr(t,"xterm") != NULL; -} - -/* Helper function for sdsCatColorizedLdbReply() appending colorize strings - * to an SDS string. */ -sds sdscatcolor(sds o, char *s, size_t len, char *color) { - if (!isColorTerm()) return sdscatlen(o,s,len); - - int bold = strstr(color,"bold") != NULL; - int ccode = 37; /* Defaults to white. 
*/ - if (strstr(color,"red")) ccode = 31; - else if (strstr(color,"green")) ccode = 32; - else if (strstr(color,"yellow")) ccode = 33; - else if (strstr(color,"blue")) ccode = 34; - else if (strstr(color,"magenta")) ccode = 35; - else if (strstr(color,"cyan")) ccode = 36; - else if (strstr(color,"white")) ccode = 37; - - o = sdscatfmt(o,"\033[%i;%i;49m",bold,ccode); - o = sdscatlen(o,s,len); - o = sdscat(o,"\033[0m"); - return o; -} - -/* Colorize Lua debugger status replies according to the prefix they - * have. */ -sds sdsCatColorizedLdbReply(sds o, char *s, size_t len) { - char *color = "white"; - - if (strstr(s,"<debug>")) color = "bold"; - if (strstr(s,"<redis>")) color = "green"; - if (strstr(s,"<reply>")) color = "cyan"; - if (strstr(s,"<error>")) color = "red"; - if (strstr(s,"<hint>")) color = "bold"; - if (strstr(s,"<value>") || strstr(s,"<retval>")) color = "magenta"; - if (len > 4 && isdigit(s[3])) { - if (s[1] == '>') color = "yellow"; /* Current line. */ - else if (s[2] == '#') color = "bold"; /* Break point. */ - } - return sdscatcolor(o,s,len,color); -} - -static sds cliFormatReplyRaw(redisReply *r) { - sds out = sdsempty(), tmp; - size_t i; - - switch (r->type) { - case REDIS_REPLY_NIL: - /* Nothing... */ - break; - case REDIS_REPLY_ERROR: - out = sdscatlen(out,r->str,r->len); - out = sdscatlen(out,"\n",1); - break; - case REDIS_REPLY_STATUS: - case REDIS_REPLY_STRING: - if (r->type == REDIS_REPLY_STATUS && config.eval_ldb) { - /* The Lua debugger replies with arrays of simple (status) - * strings. We colorize the output for more fun if this - * is a debugging session. */ - - /* Detect the end of a debugging session. */ - if (strstr(r->str,"<endsession>") == r->str) { - config.enable_ldb_on_eval = 0; - config.eval_ldb = 0; - config.eval_ldb_end = 1; /* Signal the caller session ended. */ - config.output = OUTPUT_STANDARD; - cliRefreshPrompt(); - } else { - out = sdsCatColorizedLdbReply(out,r->str,r->len); - } - } else { - out = sdscatlen(out,r->str,r->len); - } - break; - case REDIS_REPLY_INTEGER: - out = sdscatprintf(out,"%lld",r->integer); - break; - case REDIS_REPLY_ARRAY: - for (i = 0; i < r->elements; i++) { - if (i > 0) out = sdscat(out,config.mb_delim); - tmp = cliFormatReplyRaw(r->element[i]); - out = sdscatlen(out,tmp,sdslen(tmp)); - sdsfree(tmp); - } - break; - default: - fprintf(stderr,"Unknown reply type: %d\n", r->type); - exit(1); - } - return out; -} - -static sds cliFormatReplyCSV(redisReply *r) { - unsigned int i; - - sds out = sdsempty(); - switch (r->type) { - case REDIS_REPLY_ERROR: - out = sdscat(out,"ERROR,"); - out = sdscatrepr(out,r->str,strlen(r->str)); - break; - case REDIS_REPLY_STATUS: - out = sdscatrepr(out,r->str,r->len); - break; - case REDIS_REPLY_INTEGER: - out = sdscatprintf(out,"%lld",r->integer); - break; - case REDIS_REPLY_STRING: - out = sdscatrepr(out,r->str,r->len); - break; - case REDIS_REPLY_NIL: - out = sdscat(out,"NIL"); - break; - case REDIS_REPLY_ARRAY: - for (i = 0; i < r->elements; i++) { - sds tmp = cliFormatReplyCSV(r->element[i]); - out = sdscatlen(out,tmp,sdslen(tmp)); - if (i != r->elements-1) out = sdscat(out,","); - sdsfree(tmp); - } - break; - default: - fprintf(stderr,"Unknown reply type: %d\n", r->type); - exit(1); - } - return out; -} - -static int cliReadReply(int output_raw_strings) { - void *_reply; - redisReply *reply; - sds out = NULL; - int output = 1; - - if (redisGetReply(context,&_reply) != REDIS_OK) { - if (config.shutdown) { - redisFree(context); - context = NULL; - return REDIS_OK; - } - if (config.interactive) { - /* Filter cases where we should reconnect */ - 
if (context->err == REDIS_ERR_IO && - (errno == ECONNRESET || errno == EPIPE)) - return REDIS_ERR; - if (context->err == REDIS_ERR_EOF) - return REDIS_ERR; - } - cliPrintContextError(); - exit(1); - return REDIS_ERR; /* avoid compiler warning */ - } - - reply = (redisReply*)_reply; - - config.last_cmd_type = reply->type; - - /* Check if we need to connect to a different node and reissue the - * request. */ - if (config.cluster_mode && reply->type == REDIS_REPLY_ERROR && - (!strncmp(reply->str,"MOVED",5) || !strcmp(reply->str,"ASK"))) - { - char *p = reply->str, *s; - int slot; - - output = 0; - /* Comments show the position of the pointer as: - * - * [S] for pointer 's' - * [P] for pointer 'p' - */ - s = strchr(p,' '); /* MOVED[S]3999 127.0.0.1:6381 */ - p = strchr(s+1,' '); /* MOVED[S]3999[P]127.0.0.1:6381 */ - *p = '\0'; - slot = atoi(s+1); - s = strrchr(p+1,':'); /* MOVED 3999[P]127.0.0.1[S]6381 */ - *s = '\0'; - sdsfree(config.hostip); - config.hostip = sdsnew(p+1); - config.hostport = atoi(s+1); - if (config.interactive) - printf("-> Redirected to slot [%d] located at %s:%d\n", - slot, config.hostip, config.hostport); - config.cluster_reissue_command = 1; - cliRefreshPrompt(); - } - - if (output) { - if (output_raw_strings) { - out = cliFormatReplyRaw(reply); - } else { - if (config.output == OUTPUT_RAW) { - out = cliFormatReplyRaw(reply); - out = sdscat(out,"\n"); - } else if (config.output == OUTPUT_STANDARD) { - out = cliFormatReplyTTY(reply,""); - } else if (config.output == OUTPUT_CSV) { - out = cliFormatReplyCSV(reply); - out = sdscat(out,"\n"); - } - } - fwrite(out,sdslen(out),1,stdout); - sdsfree(out); - } - freeReplyObject(reply); - return REDIS_OK; -} - -static int cliSendCommand(int argc, char **argv, long repeat) { - char *command = argv[0]; - size_t *argvlen; - int j, output_raw; - - if (!config.eval_ldb && /* In debugging mode, let's pass "help" to Redis. */ - (!strcasecmp(command,"help") || !strcasecmp(command,"?"))) { - cliOutputHelp(--argc, ++argv); - return REDIS_OK; - } - - if (context == NULL) return REDIS_ERR; - - output_raw = 0; - if (!strcasecmp(command,"info") || - (argc >= 2 && !strcasecmp(command,"debug") && - !strcasecmp(argv[1],"htstats")) || - (argc >= 2 && !strcasecmp(command,"memory") && - (!strcasecmp(argv[1],"malloc-stats") || - !strcasecmp(argv[1],"doctor"))) || - (argc == 2 && !strcasecmp(command,"cluster") && - (!strcasecmp(argv[1],"nodes") || - !strcasecmp(argv[1],"info"))) || - (argc == 2 && !strcasecmp(command,"client") && - !strcasecmp(argv[1],"list")) || - (argc == 3 && !strcasecmp(command,"latency") && - !strcasecmp(argv[1],"graph")) || - (argc == 2 && !strcasecmp(command,"latency") && - !strcasecmp(argv[1],"doctor"))) - { - output_raw = 1; - } - - if (!strcasecmp(command,"shutdown")) config.shutdown = 1; - if (!strcasecmp(command,"monitor")) config.monitor_mode = 1; - if (!strcasecmp(command,"subscribe") || - !strcasecmp(command,"psubscribe")) config.pubsub_mode = 1; - if (!strcasecmp(command,"sync") || - !strcasecmp(command,"psync")) config.slave_mode = 1; - - /* When the user manually calls SCRIPT DEBUG, setup the activation of - * debugging mode on the next eval if needed. */ - if (argc == 3 && !strcasecmp(argv[0],"script") && - !strcasecmp(argv[1],"debug")) - { - if (!strcasecmp(argv[2],"yes") || !strcasecmp(argv[2],"sync")) { - config.enable_ldb_on_eval = 1; - } else { - config.enable_ldb_on_eval = 0; - } - } - - /* Actually activate LDB on EVAL if needed. 
*/ - if (!strcasecmp(command,"eval") && config.enable_ldb_on_eval) { - config.eval_ldb = 1; - config.output = OUTPUT_RAW; - } - - /* Setup argument length */ - argvlen = zmalloc(argc*sizeof(size_t)); - for (j = 0; j < argc; j++) - argvlen[j] = sdslen(argv[j]); - - while(repeat-- > 0) { - redisAppendCommandArgv(context,argc,(const char**)argv,argvlen); - while (config.monitor_mode) { - if (cliReadReply(output_raw) != REDIS_OK) exit(1); - fflush(stdout); - } - - if (config.pubsub_mode) { - if (config.output != OUTPUT_RAW) - printf("Reading messages... (press Ctrl-C to quit)\n"); - while (1) { - if (cliReadReply(output_raw) != REDIS_OK) exit(1); - } - } - - if (config.slave_mode) { - printf("Entering slave output mode... (press Ctrl-C to quit)\n"); - slaveMode(); - config.slave_mode = 0; - zfree(argvlen); - return REDIS_ERR; /* Error = slaveMode lost connection to master */ - } - - if (cliReadReply(output_raw) != REDIS_OK) { - zfree(argvlen); - return REDIS_ERR; - } else { - /* Store database number when SELECT was successfully executed. */ - if (!strcasecmp(command,"select") && argc == 2 && config.last_cmd_type != REDIS_REPLY_ERROR) { - config.dbnum = atoi(argv[1]); - cliRefreshPrompt(); - } else if (!strcasecmp(command,"auth") && argc == 2) { - cliSelect(); - } - } - if (config.interval) usleep(config.interval); - fflush(stdout); /* Make it grep friendly */ - } - - zfree(argvlen); - return REDIS_OK; -} - -/* Send a command reconnecting the link if needed. */ -static redisReply *reconnectingRedisCommand(redisContext *c, const char *fmt, ...) { - redisReply *reply = NULL; - int tries = 0; - va_list ap; - - assert(!c->err); - while(reply == NULL) { - while (c->err & (REDIS_ERR_IO | REDIS_ERR_EOF)) { - printf("\r\x1b[0K"); /* Cursor to left edge + clear line. */ - printf("Reconnecting... %d\r", ++tries); - fflush(stdout); - - redisFree(c); - c = redisConnect(config.hostip,config.hostport); - usleep(1000000); - } - - va_start(ap,fmt); - reply = redisvCommand(c,fmt,ap); - va_end(ap); - - if (c->err && !(c->err & (REDIS_ERR_IO | REDIS_ERR_EOF))) { - fprintf(stderr, "Error: %s\n", c->errstr); - exit(1); - } else if (tries > 0) { - printf("\r\x1b[0K"); /* Cursor to left edge + clear line. 
*/ - } - } - - context = c; - return reply; -} - -/*------------------------------------------------------------------------------ - * User interface - *--------------------------------------------------------------------------- */ - -static int parseOptions(int argc, char **argv) { - int i; - - for (i = 1; i < argc; i++) { - int lastarg = i==argc-1; - - if (!strcmp(argv[i],"-h") && !lastarg) { - sdsfree(config.hostip); - config.hostip = sdsnew(argv[++i]); - } else if (!strcmp(argv[i],"-h") && lastarg) { - usage(); - } else if (!strcmp(argv[i],"--help")) { - usage(); - } else if (!strcmp(argv[i],"-x")) { - config.stdinarg = 1; - } else if (!strcmp(argv[i],"-p") && !lastarg) { - config.hostport = atoi(argv[++i]); - } else if (!strcmp(argv[i],"-s") && !lastarg) { - config.hostsocket = argv[++i]; - } else if (!strcmp(argv[i],"-r") && !lastarg) { - config.repeat = strtoll(argv[++i],NULL,10); - } else if (!strcmp(argv[i],"-i") && !lastarg) { - double seconds = atof(argv[++i]); - config.interval = seconds*1000000; - } else if (!strcmp(argv[i],"-n") && !lastarg) { - config.dbnum = atoi(argv[++i]); - } else if (!strcmp(argv[i],"-a") && !lastarg) { - fputs("Warning: Using a password with '-a' option on the command line interface may not be safe.\n", stderr); - config.auth = argv[++i]; - } else if (!strcmp(argv[i],"-u") && !lastarg) { - parseRedisUri(argv[++i]); - } else if (!strcmp(argv[i],"--raw")) { - config.output = OUTPUT_RAW; - } else if (!strcmp(argv[i],"--no-raw")) { - config.output = OUTPUT_STANDARD; - } else if (!strcmp(argv[i],"--csv")) { - config.output = OUTPUT_CSV; - } else if (!strcmp(argv[i],"--latency")) { - config.latency_mode = 1; - } else if (!strcmp(argv[i],"--latency-dist")) { - config.latency_dist_mode = 1; - } else if (!strcmp(argv[i],"--mono")) { - spectrum_palette = spectrum_palette_mono; - spectrum_palette_size = spectrum_palette_mono_size; - } else if (!strcmp(argv[i],"--latency-history")) { - config.latency_mode = 1; - config.latency_history = 1; - } else if (!strcmp(argv[i],"--lru-test") && !lastarg) { - config.lru_test_mode = 1; - config.lru_test_sample_size = strtoll(argv[++i],NULL,10); - } else if (!strcmp(argv[i],"--slave")) { - config.slave_mode = 1; - } else if (!strcmp(argv[i],"--stat")) { - config.stat_mode = 1; - } else if (!strcmp(argv[i],"--scan")) { - config.scan_mode = 1; - } else if (!strcmp(argv[i],"--pattern") && !lastarg) { - config.pattern = argv[++i]; - } else if (!strcmp(argv[i],"--intrinsic-latency") && !lastarg) { - config.intrinsic_latency_mode = 1; - config.intrinsic_latency_duration = atoi(argv[++i]); - } else if (!strcmp(argv[i],"--rdb") && !lastarg) { - config.getrdb_mode = 1; - config.rdb_filename = argv[++i]; - } else if (!strcmp(argv[i],"--pipe")) { - config.pipe_mode = 1; - } else if (!strcmp(argv[i],"--pipe-timeout") && !lastarg) { - config.pipe_timeout = atoi(argv[++i]); - } else if (!strcmp(argv[i],"--bigkeys")) { - config.bigkeys = 1; - } else if (!strcmp(argv[i],"--hotkeys")) { - config.hotkeys = 1; - } else if (!strcmp(argv[i],"--eval") && !lastarg) { - config.eval = argv[++i]; - } else if (!strcmp(argv[i],"--ldb")) { - config.eval_ldb = 1; - config.output = OUTPUT_RAW; - } else if (!strcmp(argv[i],"--ldb-sync-mode")) { - config.eval_ldb = 1; - config.eval_ldb_sync = 1; - config.output = OUTPUT_RAW; - } else if (!strcmp(argv[i],"-c")) { - config.cluster_mode = 1; - } else if (!strcmp(argv[i],"-d") && !lastarg) { - sdsfree(config.mb_delim); - config.mb_delim = sdsnew(argv[++i]); - } else if (!strcmp(argv[i],"-v") || !strcmp(argv[i], 
"--version")) { - sds version = cliVersion(); - printf("redis-cli %s\n", version); - sdsfree(version); - exit(0); - } else { - if (argv[i][0] == '-') { - fprintf(stderr, - "Unrecognized option or bad number of args for: '%s'\n", - argv[i]); - exit(1); - } else { - /* Likely the command name, stop here. */ - break; - } - } - } - - /* --ldb requires --eval. */ - if (config.eval_ldb && config.eval == NULL) { - fprintf(stderr,"Options --ldb and --ldb-sync-mode require --eval.\n"); - fprintf(stderr,"Try %s --help for more information.\n", argv[0]); - exit(1); - } - return i; -} - -static sds readArgFromStdin(void) { - char buf[1024]; - sds arg = sdsempty(); - - while(1) { - int nread = read(fileno(stdin),buf,1024); - - if (nread == 0) break; - else if (nread == -1) { - perror("Reading from standard input"); - exit(1); - } - arg = sdscatlen(arg,buf,nread); - } - return arg; -} - -static void usage(void) { - sds version = cliVersion(); - fprintf(stderr, -"redis-cli %s\n" -"\n" -"Usage: redis-cli [OPTIONS] [cmd [arg [arg ...]]]\n" -" -h Server hostname (default: 127.0.0.1).\n" -" -p Server port (default: 6379).\n" -" -s Server socket (overrides hostname and port).\n" -" -a Password to use when connecting to the server.\n" -" -u Server URI.\n" -" -r Execute specified command N times.\n" -" -i When -r is used, waits seconds per command.\n" -" It is possible to specify sub-second times like -i 0.1.\n" -" -n Database number.\n" -" -x Read last argument from STDIN.\n" -" -d Multi-bulk delimiter in for raw formatting (default: \\n).\n" -" -c Enable cluster mode (follow -ASK and -MOVED redirections).\n" -" --raw Use raw formatting for replies (default when STDOUT is\n" -" not a tty).\n" -" --no-raw Force formatted output even when STDOUT is not a tty.\n" -" --csv Output in CSV format.\n" -" --stat Print rolling stats about server: mem, clients, ...\n" -" --latency Enter a special mode continuously sampling latency.\n" -" If you use this mode in an interactive session it runs\n" -" forever displaying real-time stats. Otherwise if --raw or\n" -" --csv is specified, or if you redirect the output to a non\n" -" TTY, it samples the latency for 1 second (you can use\n" -" -i to change the interval), then produces a single output\n" -" and exits.\n" -" --latency-history Like --latency but tracking latency changes over time.\n" -" Default time interval is 15 sec. Change it using -i.\n" -" --latency-dist Shows latency as a spectrum, requires xterm 256 colors.\n" -" Default time interval is 1 sec. Change it using -i.\n" -" --lru-test Simulate a cache workload with an 80-20 distribution.\n" -" --slave Simulate a slave showing commands received from the master.\n" -" --rdb Transfer an RDB dump from remote server to local file.\n" -" --pipe Transfer raw Redis protocol from stdin to server.\n" -" --pipe-timeout In --pipe mode, abort with error if after sending all data.\n" -" no reply is received within seconds.\n" -" Default timeout: %d. 
-/* Turn the plain C strings into Sds strings */ -static char **convertToSds(int count, char** args) { - int j; - char **sds = zmalloc(sizeof(char*)*count); - - for(j = 0; j < count; j++) - sds[j] = sdsnew(args[j]); - - return sds; -} - -static int issueCommandRepeat(int argc, char **argv, long repeat) { - while (1) { - config.cluster_reissue_command = 0; - if (cliSendCommand(argc,argv,repeat) != REDIS_OK) { - cliConnect(1); - - /* If we still cannot send the command print error. - * We'll try to reconnect the next time. */ - if (cliSendCommand(argc,argv,repeat) != REDIS_OK) { - cliPrintContextError(); - return REDIS_ERR; - } - } - /* Issue the command again if we got redirected in cluster mode */ - if (config.cluster_mode && config.cluster_reissue_command) { - cliConnect(1); - } else { - break; - } - } - return REDIS_OK; -} - -static int issueCommand(int argc, char **argv) { - return issueCommandRepeat(argc, argv, config.repeat); -} - -/* Split the user provided command into multiple SDS arguments. - * This function normally uses sdssplitargs() from sds.c which is able - * to understand "quoted strings", escapes and so forth. However when - * we are in Lua debugging mode and the "eval" command is used, we want - * the remaining Lua script (after "e " or "eval ") to be passed verbatim - * as a single big argument. */ -static sds *cliSplitArgs(char *line, int *argc) { - if (config.eval_ldb && (strstr(line,"eval ") == line || - strstr(line,"e ") == line)) - { - sds *argv = sds_malloc(sizeof(sds)*2); - *argc = 2; - int len = strlen(line); - int elen = line[1] == ' ' ? 2 : 5; /* "e " or "eval "? */ - argv[0] = sdsnewlen(line,elen-1); - argv[1] = sdsnewlen(line+elen,len-elen); - return argv; - } else { - return sdssplitargs(line,argc); - } -} - -/* Set the CLI preferences. This function is invoked when an interactive - * ":command" is called, or when reading ~/.redisclirc file, in order to - * set user preferences. 
*/ -void cliSetPreferences(char **argv, int argc, int interactive) { - if (!strcasecmp(argv[0],":set") && argc >= 2) { - if (!strcasecmp(argv[1],"hints")) pref.hints = 1; - else if (!strcasecmp(argv[1],"nohints")) pref.hints = 0; - else { - printf("%sunknown redis-cli preference '%s'\n", - interactive ? "" : ".redisclirc: ", - argv[1]); - } - } else { - printf("%sunknown redis-cli internal command '%s'\n", - interactive ? "" : ".redisclirc: ", - argv[0]); - } -} - -/* Load the ~/.redisclirc file if any. */ -void cliLoadPreferences(void) { - sds rcfile = getDotfilePath(REDIS_CLI_RCFILE_ENV,REDIS_CLI_RCFILE_DEFAULT); - if (rcfile == NULL) return; - FILE *fp = fopen(rcfile,"r"); - char buf[1024]; - - if (fp) { - while(fgets(buf,sizeof(buf),fp) != NULL) { - sds *argv; - int argc; - - argv = sdssplitargs(buf,&argc); - if (argc > 0) cliSetPreferences(argv,argc,0); - sdsfreesplitres(argv,argc); - } - fclose(fp); - } - sdsfree(rcfile); -} - -static void repl(void) { - sds historyfile = NULL; - int history = 0; - char *line; - int argc; - sds *argv; - - /* Initialize the help and, if possible, use the COMMAND command in order - * to retrieve missing entries. */ - cliInitHelp(); - cliIntegrateHelp(); - - config.interactive = 1; - linenoiseSetMultiLine(1); - linenoiseSetCompletionCallback(completionCallback); - linenoiseSetHintsCallback(hintsCallback); - linenoiseSetFreeHintsCallback(freeHintsCallback); - - /* Only use history and load the rc file when stdin is a tty. */ - if (isatty(fileno(stdin))) { - historyfile = getDotfilePath(REDIS_CLI_HISTFILE_ENV,REDIS_CLI_HISTFILE_DEFAULT); - //keep in-memory history always regardless if history file can be determined - history = 1; - if (historyfile != NULL) { - linenoiseHistoryLoad(historyfile); - } - cliLoadPreferences(); - } - - cliRefreshPrompt(); - while((line = linenoise(context ? config.prompt : "not connected> ")) != NULL) { - if (line[0] != '\0') { - long repeat = 1; - int skipargs = 0; - char *endptr = NULL; - - argv = cliSplitArgs(line,&argc); - - /* check if we have a repeat command option and - * need to skip the first arg */ - if (argv && argc > 0) { - errno = 0; - repeat = strtol(argv[0], &endptr, 10); - if (argc > 1 && *endptr == '\0') { - if (errno == ERANGE || errno == EINVAL || repeat <= 0) { - fputs("Invalid redis-cli repeat command option value.\n", stdout); - sdsfreesplitres(argv, argc); - linenoiseFree(line); - continue; - } - skipargs = 1; - } else { - repeat = 1; - } - } - - /* Won't save auth command in history file */ - if (!(argv && argc > 0 && !strcasecmp(argv[0+skipargs], "auth"))) { - if (history) linenoiseHistoryAdd(line); - if (historyfile) linenoiseHistorySave(historyfile); - } - - if (argv == NULL) { - printf("Invalid argument(s)\n"); - linenoiseFree(line); - continue; - } else if (argc > 0) { - if (strcasecmp(argv[0],"quit") == 0 || - strcasecmp(argv[0],"exit") == 0) - { - exit(0); - } else if (argv[0][0] == ':') { - cliSetPreferences(argv,argc,1); - sdsfreesplitres(argv,argc); - linenoiseFree(line); - continue; - } else if (strcasecmp(argv[0],"restart") == 0) { - if (config.eval) { - config.eval_ldb = 1; - config.output = OUTPUT_RAW; - return; /* Return to evalMode to restart the session. 
*/ - } else { - printf("Use 'restart' only in Lua debugging mode."); - } - } else if (argc == 3 && !strcasecmp(argv[0],"connect")) { - sdsfree(config.hostip); - config.hostip = sdsnew(argv[1]); - config.hostport = atoi(argv[2]); - cliRefreshPrompt(); - cliConnect(1); - } else if (argc == 1 && !strcasecmp(argv[0],"clear")) { - linenoiseClearScreen(); - } else { - long long start_time = mstime(), elapsed; - - issueCommandRepeat(argc-skipargs, argv+skipargs, repeat); - - /* If our debugging session ended, show the EVAL final - * reply. */ - if (config.eval_ldb_end) { - config.eval_ldb_end = 0; - cliReadReply(0); - printf("\n(Lua debugging session ended%s)\n\n", - config.eval_ldb_sync ? "" : - " -- dataset changes rolled back"); - } - - elapsed = mstime()-start_time; - if (elapsed >= 500 && - config.output == OUTPUT_STANDARD) - { - printf("(%.2fs)\n",(double)elapsed/1000); - } - } - } - /* Free the argument vector */ - sdsfreesplitres(argv,argc); - } - /* linenoise() returns malloc-ed lines like readline() */ - linenoiseFree(line); - } - exit(0); -} - -static int noninteractive(int argc, char **argv) { - int retval = 0; - if (config.stdinarg) { - argv = zrealloc(argv, (argc+1)*sizeof(char*)); - argv[argc] = readArgFromStdin(); - retval = issueCommand(argc+1, argv); - } else { - retval = issueCommand(argc, argv); - } - return retval; -} -
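evalMode() in the section below turns "redis-cli --eval script.lua key1 key2 , arg1" into EVAL script 2 key1 key2 arg1, using the lone comma to separate KEYS[] from ARGV[]. A hedged hiredis equivalent of the command it ends up sending (the script body is inlined here purely for illustration):

    #include <stdio.h>
    #include <hiredis.h>

    int main(void) {
        redisContext *c = redisConnect("127.0.0.1", 6379);
        if (c == NULL || c->err) return 1;

        /* EVAL <script> <numkeys> <keys...> <args...> */
        redisReply *r = redisCommand(c,
            "EVAL %s %d %s %s %s",
            "return {KEYS[1], KEYS[2], ARGV[1]}", 2, "key1", "key2", "arg1");
        if (r && r->type == REDIS_REPLY_ARRAY)
            printf("script returned %zu elements\n", r->elements);
        freeReplyObject(r);
        redisFree(c);
        return 0;
    }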
-/*------------------------------------------------------------------------------ - * Eval mode - *--------------------------------------------------------------------------- */ - -static int evalMode(int argc, char **argv) { - sds script = NULL; - FILE *fp; - char buf[1024]; - size_t nread; - char **argv2; - int j, got_comma, keys; - int retval = REDIS_OK; - - while(1) { - if (config.eval_ldb) { - printf( - "Lua debugging session started, please use:\n" - "quit -- End the session.\n" - "restart -- Restart the script in debug mode again.\n" - "help -- Show Lua script debugging commands.\n\n" - ); - } - - sdsfree(script); - script = sdsempty(); - got_comma = 0; - keys = 0; - - /* Load the script from the file, as an sds string. */ - fp = fopen(config.eval,"r"); - if (!fp) { - fprintf(stderr, - "Can't open file '%s': %s\n", config.eval, strerror(errno)); - exit(1); - } - while((nread = fread(buf,1,sizeof(buf),fp)) != 0) { - script = sdscatlen(script,buf,nread); - } - fclose(fp); - - /* If we are debugging a script, enable the Lua debugger. */ - if (config.eval_ldb) { - redisReply *reply = redisCommand(context, - config.eval_ldb_sync ? - "SCRIPT DEBUG sync": "SCRIPT DEBUG yes"); - if (reply) freeReplyObject(reply); - } - - /* Create our argument vector */ - argv2 = zmalloc(sizeof(sds)*(argc+3)); - argv2[0] = sdsnew("EVAL"); - argv2[1] = script; - for (j = 0; j < argc; j++) { - if (!got_comma && argv[j][0] == ',' && argv[j][1] == 0) { - got_comma = 1; - continue; - } - argv2[j+3-got_comma] = sdsnew(argv[j]); - if (!got_comma) keys++; - } - argv2[2] = sdscatprintf(sdsempty(),"%d",keys); - - /* Call it */ - int eval_ldb = config.eval_ldb; /* Save it, may be reverted. */ - retval = issueCommand(argc+3-got_comma, argv2); - if (eval_ldb) { - if (!config.eval_ldb) { - /* If the debugging session ended immediately, there was an - * error compiling the script. Show it and don't enter - * the REPL at all. */ - printf("Eval debugging session can't start:\n"); - cliReadReply(0); - break; /* Return to the caller. */ - } else { - strncpy(config.prompt,"lua debugger> ",sizeof(config.prompt)); - repl(); - /* Restart the session if repl() returned. */ - cliConnect(1); - printf("\n"); - } - } else { - break; /* Return to the caller. */ - } - } - return retval; -} - -/*------------------------------------------------------------------------------ - * Latency and latency history modes - *--------------------------------------------------------------------------- */ - -static void latencyModePrint(long long min, long long max, double avg, long long count) { - if (config.output == OUTPUT_STANDARD) { - printf("min: %lld, max: %lld, avg: %.2f (%lld samples)", - min, max, avg, count); - fflush(stdout); - } else if (config.output == OUTPUT_CSV) { - printf("%lld,%lld,%.2f,%lld\n", min, max, avg, count); - } else if (config.output == OUTPUT_RAW) { - printf("%lld %lld %.2f %lld\n", min, max, avg, count); - } -} - -#define LATENCY_SAMPLE_RATE 10 /* milliseconds. */ -#define LATENCY_HISTORY_DEFAULT_INTERVAL 15000 /* milliseconds. */ -static void latencyMode(void) { - redisReply *reply; - long long start, latency, min = 0, max = 0, tot = 0, count = 0; - long long history_interval = - config.interval ? config.interval/1000 : - LATENCY_HISTORY_DEFAULT_INTERVAL; - double avg; - long long history_start = mstime(); - - /* Set a default for the interval in case of --latency option - * with --raw, --csv or when it is redirected to non tty. */ - if (config.interval == 0) { - config.interval = 1000; - } else { - config.interval /= 1000; /* We need to convert to milliseconds. */ - } - - if (!context) exit(1); - while(1) { - start = mstime(); - reply = reconnectingRedisCommand(context,"PING"); - if (reply == NULL) { - fprintf(stderr,"\nI/O error\n"); - exit(1); - } - latency = mstime()-start; - freeReplyObject(reply); - count++; - if (count == 1) { - min = max = tot = latency; - avg = (double) latency; - } else { - if (latency < min) min = latency; - if (latency > max) max = latency; - tot += latency; - avg = (double) tot/count; - } - - if (config.output == OUTPUT_STANDARD) { - printf("\x1b[0G\x1b[2K"); /* Clear the line. */ - latencyModePrint(min,max,avg,count); - } else { - if (config.latency_history) { - latencyModePrint(min,max,avg,count); - } else if (mstime()-history_start > config.interval) { - latencyModePrint(min,max,avg,count); - exit(0); - } - } - - if (config.latency_history && mstime()-history_start > history_interval) - { - printf(" -- %.2f seconds range\n", (float)(mstime()-history_start)/1000); - history_start = mstime(); - min = max = tot = count = 0; - } - usleep(LATENCY_SAMPLE_RATE * 1000); - } -} -
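latencyMode() above is essentially a timed PING loop: sample the round trip, update the running min/max/average, sleep LATENCY_SAMPLE_RATE milliseconds, repeat. The measurement core in isolation (connection details are placeholders):

    #include <stdio.h>
    #include <sys/time.h>
    #include <hiredis.h>

    static long long mstime(void) {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        return ((long long)tv.tv_sec) * 1000 + tv.tv_usec / 1000;
    }

    int main(void) {
        redisContext *c = redisConnect("127.0.0.1", 6379);
        if (c == NULL || c->err) return 1;

        for (int i = 0; i < 10; i++) {
            long long start = mstime();
            redisReply *r = redisCommand(c, "PING");
            if (r == NULL) return 1;            /* I/O error */
            printf("rtt=%lld ms\n", mstime() - start);
            freeReplyObject(r);
        }
        redisFree(c);
        return 0;
    }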
- * - * "tot' is the total number of samples in the different buckets, so it - * is the SUM(samples[i].conut) for i to 0 up to the max sample. - * - * As a side effect the function sets all the buckets count to 0. */ -void showLatencyDistSamples(struct distsamples *samples, long long tot) { - int j; - - /* We convert samples into a index inside the palette - * proportional to the percentage a given bucket represents. - * This way intensity of the different parts of the spectrum - * don't change relative to the number of requests, which avoids to - * pollute the visualization with non-latency related info. */ - printf("\033[38;5;0m"); /* Set foreground color to black. */ - for (j = 0; ; j++) { - int coloridx = - ceil((float) samples[j].count / tot * (spectrum_palette_size-1)); - int color = spectrum_palette[coloridx]; - printf("\033[48;5;%dm%c", (int)color, samples[j].character); - samples[j].count = 0; - if (samples[j].max == 0) break; /* Last sample. */ - } - printf("\033[0m\n"); - fflush(stdout); -} - -/* Show the legend: different buckets values and colors meaning, so - * that the spectrum is more easily readable. */ -void showLatencyDistLegend(void) { - int j; - - printf("---------------------------------------------\n"); - printf(". - * # .01 .125 .25 .5 milliseconds\n"); - printf("1,2,3,...,9 from 1 to 9 milliseconds\n"); - printf("A,B,C,D,E 10,20,30,40,50 milliseconds\n"); - printf("F,G,H,I,J .1,.2,.3,.4,.5 seconds\n"); - printf("K,L,M,N,O,P,Q,? 1,2,4,8,16,30,60,>60 seconds\n"); - printf("From 0 to 100%%: "); - for (j = 0; j < spectrum_palette_size; j++) { - printf("\033[48;5;%dm ", spectrum_palette[j]); - } - printf("\033[0m\n"); - printf("---------------------------------------------\n"); -} - -static void latencyDistMode(void) { - redisReply *reply; - long long start, latency, count = 0; - long long history_interval = - config.interval ? config.interval/1000 : - LATENCY_DIST_DEFAULT_INTERVAL; - long long history_start = ustime(); - int j, outputs = 0; - - struct distsamples samples[] = { - /* We use a mostly logarithmic scale, with certain linear intervals - * which are more interesting than others, like 1-10 milliseconds - * range. */ - {10,0,'.'}, /* 0.01 ms */ - {125,0,'-'}, /* 0.125 ms */ - {250,0,'*'}, /* 0.25 ms */ - {500,0,'#'}, /* 0.5 ms */ - {1000,0,'1'}, /* 1 ms */ - {2000,0,'2'}, /* 2 ms */ - {3000,0,'3'}, /* 3 ms */ - {4000,0,'4'}, /* 4 ms */ - {5000,0,'5'}, /* 5 ms */ - {6000,0,'6'}, /* 6 ms */ - {7000,0,'7'}, /* 7 ms */ - {8000,0,'8'}, /* 8 ms */ - {9000,0,'9'}, /* 9 ms */ - {10000,0,'A'}, /* 10 ms */ - {20000,0,'B'}, /* 20 ms */ - {30000,0,'C'}, /* 30 ms */ - {40000,0,'D'}, /* 40 ms */ - {50000,0,'E'}, /* 50 ms */ - {100000,0,'F'}, /* 0.1 s */ - {200000,0,'G'}, /* 0.2 s */ - {300000,0,'H'}, /* 0.3 s */ - {400000,0,'I'}, /* 0.4 s */ - {500000,0,'J'}, /* 0.5 s */ - {1000000,0,'K'}, /* 1 s */ - {2000000,0,'L'}, /* 2 s */ - {4000000,0,'M'}, /* 4 s */ - {8000000,0,'N'}, /* 8 s */ - {16000000,0,'O'}, /* 16 s */ - {30000000,0,'P'}, /* 30 s */ - {60000000,0,'Q'}, /* 1 minute */ - {0,0,'?'}, /* > 1 minute */ - }; - - if (!context) exit(1); - while(1) { - start = ustime(); - reply = reconnectingRedisCommand(context,"PING"); - if (reply == NULL) { - fprintf(stderr,"\nI/O error\n"); - exit(1); - } - latency = ustime()-start; - freeReplyObject(reply); - count++; - - /* Populate the relevant bucket. */ - for (j = 0; ; j++) { - if (samples[j].max == 0 || latency <= samples[j].max) { - samples[j].count++; - break; - } - } - - /* From time to time show the spectrum. 
*/ - if (count && (ustime()-history_start)/1000 > history_interval) { - if ((outputs++ % 20) == 0) - showLatencyDistLegend(); - showLatencyDistSamples(samples,count); - history_start = ustime(); - count = 0; - } - usleep(LATENCY_SAMPLE_RATE * 1000); - } -} - -/*------------------------------------------------------------------------------ - * Slave mode - *--------------------------------------------------------------------------- */ - -/* Sends SYNC and reads the number of bytes in the payload. Used both by - * slaveMode() and getRDB(). */ -unsigned long long sendSync(int fd) { - /* To start we need to send the SYNC command and return the payload. - * The hiredis client lib does not understand this part of the protocol - * and we don't want to mess with its buffers, so everything is performed - * using direct low-level I/O. */ - char buf[4096], *p; - ssize_t nread; - - /* Send the SYNC command. */ - if (write(fd,"SYNC\r\n",6) != 6) { - fprintf(stderr,"Error writing to master\n"); - exit(1); - } - - /* Read $<payload>\r\n, making sure to read just up to "\n" */ - p = buf; - while(1) { - nread = read(fd,p,1); - if (nread <= 0) { - fprintf(stderr,"Error reading bulk length while SYNCing\n"); - exit(1); - } - if (*p == '\n' && p != buf) break; - if (*p != '\n') p++; - } - *p = '\0'; - if (buf[0] == '-') { - printf("SYNC with master failed: %s\n", buf); - exit(1); - } - return strtoull(buf+1,NULL,10); -} - -static void slaveMode(void) { - int fd = context->fd; - unsigned long long payload = sendSync(fd); - char buf[1024]; - int original_output = config.output; - - fprintf(stderr,"SYNC with master, discarding %llu " - "bytes of bulk transfer...\n", payload); - - /* Discard the payload. */ - while(payload) { - ssize_t nread; - - nread = read(fd,buf,(payload > sizeof(buf)) ? sizeof(buf) : payload); - if (nread <= 0) { - fprintf(stderr,"Error reading RDB payload while SYNCing\n"); - exit(1); - } - payload -= nread; - } - fprintf(stderr,"SYNC done. Logging commands from master.\n"); - - /* Now we can use hiredis to read the incoming protocol. */ - config.output = OUTPUT_CSV; - while (cliReadReply(0) == REDIS_OK); - config.output = original_output; -} - -/*------------------------------------------------------------------------------ - * RDB transfer mode - *--------------------------------------------------------------------------- */ - -/* This function implements --rdb, so it uses the replication protocol in order - * to fetch the RDB file from a remote server. */ -static void getRDB(void) { - int s = context->fd; - int fd; - unsigned long long payload = sendSync(s); - char buf[4096]; - - fprintf(stderr,"SYNC sent to master, writing %llu bytes to '%s'\n", - payload, config.rdb_filename); - - /* Write to file. */ - if (!strcmp(config.rdb_filename,"-")) { - fd = STDOUT_FILENO; - } else { - fd = open(config.rdb_filename, O_CREAT|O_WRONLY, 0644); - if (fd == -1) { - fprintf(stderr, "Error opening '%s': %s\n", config.rdb_filename, - strerror(errno)); - exit(1); - } - } - - while(payload) { - ssize_t nread, nwritten; - - nread = read(s,buf,(payload > sizeof(buf)) ? sizeof(buf) : payload); - if (nread <= 0) { - fprintf(stderr,"I/O Error reading RDB payload from socket\n"); - exit(1); - } - nwritten = write(fd, buf, nread); - if (nwritten != nread) { - fprintf(stderr,"Error writing data to file: %s\n", - strerror(errno)); - exit(1); - } - payload -= nread; - } - close(s); /* Close the file descriptor ASAP as fsync() may take time. 
*/ - fsync(fd); - fprintf(stderr,"Transfer finished with success.\n"); - exit(0); -} - -/*------------------------------------------------------------------------------ - * Bulk import (pipe) mode - *--------------------------------------------------------------------------- */ - -#define PIPEMODE_WRITE_LOOP_MAX_BYTES (128*1024) -static void pipeMode(void) { - int fd = context->fd; - long long errors = 0, replies = 0, obuf_len = 0, obuf_pos = 0; - char ibuf[1024*16], obuf[1024*16]; /* Input and output buffers */ - char aneterr[ANET_ERR_LEN]; - redisReader *reader = redisReaderCreate(); - redisReply *reply; - int eof = 0; /* True once we consumed all the standard input. */ - int done = 0; - char magic[20]; /* Special reply we recognize. */ - time_t last_read_time = time(NULL); - - srand(time(NULL)); - - /* Use non blocking I/O. */ - if (anetNonBlock(aneterr,fd) == ANET_ERR) { - fprintf(stderr, "Can't set the socket in non blocking mode: %s\n", - aneterr); - exit(1); - } - - /* Transfer raw protocol and read replies from the server at the same - * time. */ - while(!done) { - int mask = AE_READABLE; - - if (!eof || obuf_len != 0) mask |= AE_WRITABLE; - mask = aeWait(fd,mask,1000); - - /* Handle the readable state: we can read replies from the server. */ - if (mask & AE_READABLE) { - ssize_t nread; - - /* Read from socket and feed the hiredis reader. */ - do { - nread = read(fd,ibuf,sizeof(ibuf)); - if (nread == -1 && errno != EAGAIN && errno != EINTR) { - fprintf(stderr, "Error reading from the server: %s\n", - strerror(errno)); - exit(1); - } - if (nread > 0) { - redisReaderFeed(reader,ibuf,nread); - last_read_time = time(NULL); - } - } while(nread > 0); - - /* Consume replies. */ - do { - if (redisReaderGetReply(reader,(void**)&reply) == REDIS_ERR) { - fprintf(stderr, "Error reading replies from server\n"); - exit(1); - } - if (reply) { - if (reply->type == REDIS_REPLY_ERROR) { - fprintf(stderr,"%s\n", reply->str); - errors++; - } else if (eof && reply->type == REDIS_REPLY_STRING && - reply->len == 20) { - /* Check if this is the reply to our final ECHO - * command. If so everything was received - * from the server. */ - if (memcmp(reply->str,magic,20) == 0) { - printf("Last reply received from server.\n"); - done = 1; - replies--; - } - } - replies++; - freeReplyObject(reply); - } - } while(reply); - } - - /* Handle the writable state: we can send protocol to the server. */ - if (mask & AE_WRITABLE) { - ssize_t loop_nwritten = 0; - - while(1) { - /* Transfer current buffer to server. */ - if (obuf_len != 0) { - ssize_t nwritten = write(fd,obuf+obuf_pos,obuf_len); - - if (nwritten == -1) { - if (errno != EAGAIN && errno != EINTR) { - fprintf(stderr, "Error writing to the server: %s\n", - strerror(errno)); - exit(1); - } else { - nwritten = 0; - } - } - obuf_len -= nwritten; - obuf_pos += nwritten; - loop_nwritten += nwritten; - if (obuf_len != 0) break; /* Can't accept more data. */ - } - /* If buffer is empty, load from stdin. */ - if (obuf_len == 0 && !eof) { - ssize_t nread = read(STDIN_FILENO,obuf,sizeof(obuf)); - - if (nread == 0) { - /* The ECHO sequence starts with a "\r\n" so that if there - * is garbage in the protocol we read from stdin, the ECHO - * will likely still be properly formatted. - * CRLF is ignored by Redis, so it has no effects. 
*/ - char echo[] = - "\r\n*2\r\n$4\r\nECHO\r\n$20\r\n01234567890123456789\r\n"; - int j; - - eof = 1; - /* Everything transferred, so we queue a special - * ECHO command that we can match in the replies - * to make sure everything was read from the server. */ - for (j = 0; j < 20; j++) - magic[j] = rand() & 0xff; - memcpy(echo+21,magic,20); - memcpy(obuf,echo,sizeof(echo)-1); - obuf_len = sizeof(echo)-1; - obuf_pos = 0; - printf("All data transferred. Waiting for the last reply...\n"); - } else if (nread == -1) { - fprintf(stderr, "Error reading from stdin: %s\n", - strerror(errno)); - exit(1); - } else { - obuf_len = nread; - obuf_pos = 0; - } - } - if ((obuf_len == 0 && eof) || - loop_nwritten > PIPEMODE_WRITE_LOOP_MAX_BYTES) break; - } - } - - /* Handle timeout, that is, we reached EOF, and we are not getting - * replies from the server for a few seconds, nor the final ECHO is - * received. */ - if (eof && config.pipe_timeout > 0 && - time(NULL)-last_read_time > config.pipe_timeout) - { - fprintf(stderr,"No replies for %d seconds: exiting.\n", - config.pipe_timeout); - errors++; - break; - } - } - redisReaderFree(reader); - printf("errors: %lld, replies: %lld\n", errors, replies); - if (errors) - exit(1); - else - exit(0); -} - -/*------------------------------------------------------------------------------ - * Find big keys - *--------------------------------------------------------------------------- */ - -#define TYPE_STRING 0 -#define TYPE_LIST 1 -#define TYPE_SET 2 -#define TYPE_HASH 3 -#define TYPE_ZSET 4 -#define TYPE_STREAM 5 -#define TYPE_NONE 6 -#define TYPE_COUNT 7 - -static redisReply *sendScan(unsigned long long *it) { - redisReply *reply = redisCommand(context, "SCAN %llu", *it); - - /* Handle any error conditions */ - if(reply == NULL) { - fprintf(stderr, "\nI/O error\n"); - exit(1); - } else if(reply->type == REDIS_REPLY_ERROR) { - fprintf(stderr, "SCAN error: %s\n", reply->str); - exit(1); - } else if(reply->type != REDIS_REPLY_ARRAY) { - fprintf(stderr, "Non ARRAY response from SCAN!\n"); - exit(1); - } else if(reply->elements != 2) { - fprintf(stderr, "Invalid element count from SCAN!\n"); - exit(1); - } - - /* Validate our types are correct */ - assert(reply->element[0]->type == REDIS_REPLY_STRING); - assert(reply->element[1]->type == REDIS_REPLY_ARRAY); - - /* Update iterator */ - *it = strtoull(reply->element[0]->str, NULL, 10); - - return reply; -} - -static int getDbSize(void) { - redisReply *reply; - int size; - - reply = redisCommand(context, "DBSIZE"); - - if(reply == NULL || reply->type != REDIS_REPLY_INTEGER) { - fprintf(stderr, "Couldn't determine DBSIZE!\n"); - exit(1); - } - - /* Grab the number of keys and free our reply */ - size = reply->integer; - freeReplyObject(reply); - - return size; -} - -static int toIntType(char *key, char *type) { - if(!strcmp(type, "string")) { - return TYPE_STRING; - } else if(!strcmp(type, "list")) { - return TYPE_LIST; - } else if(!strcmp(type, "set")) { - return TYPE_SET; - } else if(!strcmp(type, "hash")) { - return TYPE_HASH; - } else if(!strcmp(type, "zset")) { - return TYPE_ZSET; - } else if(!strcmp(type, "none")) { - return TYPE_NONE; - } else { - fprintf(stderr, "Unknown type '%s' for key '%s'\n", type, key); - exit(1); - } -} -
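getKeyTypes() and getKeySizes() below both lean on hiredis pipelining: queue N commands with redisAppendCommand(), then drain N replies in order with redisGetReply(), so the network round trips overlap. The pattern in isolation (key names are illustrative):

    #include <stdio.h>
    #include <hiredis.h>

    int main(void) {
        redisContext *c = redisConnect("127.0.0.1", 6379);
        if (c == NULL || c->err) return 1;

        const char *keys[] = {"k1", "k2", "k3"};
        int n = 3;

        /* Queue all the commands first; they are buffered locally. */
        for (int i = 0; i < n; i++)
            redisAppendCommand(c, "TYPE %s", keys[i]);

        /* Then read the replies back, in the same order. */
        for (int i = 0; i < n; i++) {
            redisReply *r;
            if (redisGetReply(c, (void**)&r) != REDIS_OK) return 1;
            printf("%s -> %s\n", keys[i], r->str); /* TYPE replies with a status string */
            freeReplyObject(r);
        }
        redisFree(c);
        return 0;
    }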
-
-/*------------------------------------------------------------------------------
- * Find big keys
- *--------------------------------------------------------------------------- */
-
-#define TYPE_STRING 0
-#define TYPE_LIST   1
-#define TYPE_SET    2
-#define TYPE_HASH   3
-#define TYPE_ZSET   4
-#define TYPE_STREAM 5
-#define TYPE_NONE   6
-#define TYPE_COUNT  7
-
-static redisReply *sendScan(unsigned long long *it) {
-    redisReply *reply = redisCommand(context, "SCAN %llu", *it);
-
-    /* Handle any error conditions */
-    if(reply == NULL) {
-        fprintf(stderr, "\nI/O error\n");
-        exit(1);
-    } else if(reply->type == REDIS_REPLY_ERROR) {
-        fprintf(stderr, "SCAN error: %s\n", reply->str);
-        exit(1);
-    } else if(reply->type != REDIS_REPLY_ARRAY) {
-        fprintf(stderr, "Non ARRAY response from SCAN!\n");
-        exit(1);
-    } else if(reply->elements != 2) {
-        fprintf(stderr, "Invalid element count from SCAN!\n");
-        exit(1);
-    }
-
-    /* Validate our types are correct */
-    assert(reply->element[0]->type == REDIS_REPLY_STRING);
-    assert(reply->element[1]->type == REDIS_REPLY_ARRAY);
-
-    /* Update iterator */
-    *it = strtoull(reply->element[0]->str, NULL, 10);
-
-    return reply;
-}
-
-static int getDbSize(void) {
-    redisReply *reply;
-    int size;
-
-    reply = redisCommand(context, "DBSIZE");
-
-    if(reply == NULL || reply->type != REDIS_REPLY_INTEGER) {
-        fprintf(stderr, "Couldn't determine DBSIZE!\n");
-        exit(1);
-    }
-
-    /* Grab the number of keys and free our reply */
-    size = reply->integer;
-    freeReplyObject(reply);
-
-    return size;
-}
-
-static int toIntType(char *key, char *type) {
-    if(!strcmp(type, "string")) {
-        return TYPE_STRING;
-    } else if(!strcmp(type, "list")) {
-        return TYPE_LIST;
-    } else if(!strcmp(type, "set")) {
-        return TYPE_SET;
-    } else if(!strcmp(type, "hash")) {
-        return TYPE_HASH;
-    } else if(!strcmp(type, "zset")) {
-        return TYPE_ZSET;
-    } else if(!strcmp(type, "none")) {
-        return TYPE_NONE;
-    } else {
-        fprintf(stderr, "Unknown type '%s' for key '%s'\n", type, key);
-        exit(1);
-    }
-}
-
-static void getKeyTypes(redisReply *keys, int *types) {
-    redisReply *reply;
-    unsigned int i;
-
-    /* Pipeline TYPE commands */
-    for(i=0;i<keys->elements;i++) {
-        redisAppendCommand(context, "TYPE %s", keys->element[i]->str);
-    }
-
-    /* Retrieve types */
-    for(i=0;i<keys->elements;i++) {
-        if(redisGetReply(context, (void**)&reply)!=REDIS_OK) {
-            fprintf(stderr, "Error getting type for key '%s' (%d: %s)\n",
-                keys->element[i]->str, context->err, context->errstr);
-            exit(1);
-        } else if(reply->type != REDIS_REPLY_STATUS) {
-            if(reply->type == REDIS_REPLY_ERROR) {
-                fprintf(stderr, "TYPE returned an error: %s\n", reply->str);
-            } else {
-                fprintf(stderr,
-                    "Invalid reply type (%d) for TYPE on key '%s'!\n",
-                    reply->type, keys->element[i]->str);
-            }
-            exit(1);
-        }
-
-        types[i] = toIntType(keys->element[i]->str, reply->str);
-        freeReplyObject(reply);
-    }
-}
-
-static void getKeySizes(redisReply *keys, int *types,
-                        unsigned long long *sizes)
-{
-    redisReply *reply;
-    char *sizecmds[] = {"STRLEN","LLEN","SCARD","HLEN","ZCARD"};
-    unsigned int i;
-
-    /* Pipeline size commands */
-    for(i=0;i<keys->elements;i++) {
-        /* Skip keys that were deleted */
-        if(types[i]==TYPE_NONE)
-            continue;
-
-        redisAppendCommand(context, "%s %s", sizecmds[types[i]],
-            keys->element[i]->str);
-    }
-
-    /* Retrieve sizes */
-    for(i=0;i<keys->elements;i++) {
-        /* Skip keys that disappeared between SCAN and TYPE */
-        if(types[i] == TYPE_NONE) {
-            sizes[i] = 0;
-            continue;
-        }
-
-        /* Retrieve size */
-        if(redisGetReply(context, (void**)&reply)!=REDIS_OK) {
-            fprintf(stderr, "Error getting size for key '%s' (%d: %s)\n",
-                keys->element[i]->str, context->err, context->errstr);
-            exit(1);
-        } else if(reply->type != REDIS_REPLY_INTEGER) {
-            /* Theoretically the key could have been removed and
-             * added as a different type between TYPE and SIZE */
-            fprintf(stderr,
-                "Warning: %s on '%s' failed (may have changed type)\n",
-                sizecmds[types[i]], keys->element[i]->str);
-            sizes[i] = 0;
-        } else {
-            sizes[i] = reply->integer;
-        }
-
-        freeReplyObject(reply);
-    }
-}
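
getKeyTypes() and getKeySizes() above share one hiredis idiom worth isolating: queue N commands with redisAppendCommand(), then drain N replies in order with redisGetReply(), so each SCAN batch costs a single round trip instead of N. A minimal sketch of that pattern, assuming a local server on 127.0.0.1:6379 and made-up key names:

    #include <stdio.h>
    #include <hiredis/hiredis.h>

    int main(void) {
        redisContext *c = redisConnect("127.0.0.1", 6379);
        if (c == NULL || c->err) {
            fprintf(stderr, "connect failed\n");
            return 1;
        }

        const char *keys[] = {"k1", "k2", "k3"};
        size_t i, n = sizeof(keys)/sizeof(keys[0]);

        /* Phase 1: queue everything into the output buffer, no I/O yet. */
        for (i = 0; i < n; i++)
            redisAppendCommand(c, "TYPE %s", keys[i]);

        /* Phase 2: flush once and read the replies back, strictly in order. */
        for (i = 0; i < n; i++) {
            redisReply *reply;
            if (redisGetReply(c, (void**)&reply) != REDIS_OK) {
                fprintf(stderr, "pipeline error: %s\n", c->errstr);
                return 1;
            }
            printf("%s -> %s\n", keys[i],
                   reply->type == REDIS_REPLY_STATUS ? reply->str : "(unexpected)");
            freeReplyObject(reply);
        }

        redisFree(c);
        return 0;
    }

Because replies come back in submission order, the i-th reply always belongs to the i-th key — which is exactly why the functions above can index types[i] and sizes[i] without any bookkeeping.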
-static void findBigKeys(void) {
-    unsigned long long biggest[TYPE_COUNT] = {0}, counts[TYPE_COUNT] = {0}, totalsize[TYPE_COUNT] = {0};
-    unsigned long long sampled = 0, total_keys, totlen=0, *sizes=NULL, it=0;
-    sds maxkeys[TYPE_COUNT] = {0};
-    char *typename[] = {"string","list","set","hash","zset","stream","none"};
-    char *typeunit[] = {"bytes","items","members","fields","members","entries",""};
-    redisReply *reply, *keys;
-    unsigned int arrsize=0, i;
-    int type, *types=NULL;
-    double pct;
-
-    /* Total keys pre scanning */
-    total_keys = getDbSize();
-
-    /* Status message */
-    printf("\n# Scanning the entire keyspace to find biggest keys as well as\n");
-    printf("# average sizes per key type.  You can use -i 0.1 to sleep 0.1 sec\n");
-    printf("# per 100 SCAN commands (not usually needed).\n\n");
-
-    /* New up sds strings to keep track of overall biggest per type */
-    for(i=0;i<TYPE_COUNT;i++) {
-        maxkeys[i] = sdsempty();
-        if(!maxkeys[i]) {
-            fprintf(stderr, "Failed to allocate memory for largest key names!\n");
-            exit(1);
-        }
-    }
-
-    /* SCAN loop */
-    do {
-        /* Calculate approximate percentage completion */
-        pct = 100 * (double)sampled/total_keys;
-
-        /* Grab some keys and point to the keys array */
-        reply = sendScan(&it);
-        keys  = reply->element[1];
-
-        /* Reallocate our type and size array if we need to */
-        if(keys->elements > arrsize) {
-            types = zrealloc(types, sizeof(int)*keys->elements);
-            sizes = zrealloc(sizes, sizeof(unsigned long long)*keys->elements);
-
-            if(!types || !sizes) {
-                fprintf(stderr, "Failed to allocate storage for keys!\n");
-                exit(1);
-            }
-
-            arrsize = keys->elements;
-        }
-
-        /* Retrieve types and then sizes */
-        getKeyTypes(keys, types);
-        getKeySizes(keys, types, sizes);
-
-        /* Now update our stats */
-        for(i=0;i<keys->elements;i++) {
-            if((type = types[i]) == TYPE_NONE)
-                continue;
-
-            totalsize[type] += sizes[i];
-            counts[type]++;
-            totlen += keys->element[i]->len;
-            sampled++;
-
-            if(biggest[type]<sizes[i]) {
-                printf(
-                   "[%05.2f%%] Biggest %-6s found so far '%s' with %llu %s\n",
-                   pct, typename[type], keys->element[i]->str, sizes[i],
-                   typeunit[type]);
-
-                /* Keep track of biggest key name for this type */
-                maxkeys[type] = sdscpy(maxkeys[type], keys->element[i]->str);
-                if(!maxkeys[type]) {
-                    fprintf(stderr, "Failed to allocate memory for key!\n");
-                    exit(1);
-                }
-
-                /* Keep track of the biggest size for this type */
-                biggest[type] = sizes[i];
-            }
-
-            /* Update overall progress */
-            if(sampled % 1000000 == 0) {
-                printf("[%05.2f%%] Sampled %llu keys so far\n", pct, sampled);
-            }
-        }
-
-        /* Sleep if we've been directed to do so */
-        if(sampled && (sampled %100) == 0 && config.interval) {
-            usleep(config.interval);
-        }
-
-        freeReplyObject(reply);
-    } while(it != 0);
-
-    if(types) zfree(types);
-    if(sizes) zfree(sizes);
-
-    /* We're done */
-    printf("\n-------- summary -------\n\n");
-
-    printf("Sampled %llu keys in the keyspace!\n", sampled);
-    printf("Total key length in bytes is %llu (avg len %.2f)\n\n",
-       totlen, totlen ? (double)totlen/sampled : 0);
-
-    /* Output the biggest keys we found, for types we did find */
-    for(i=0;i<TYPE_COUNT;i++) {
-        if(sdslen(maxkeys[i])>0) {
-            printf("Biggest %6s found '%s' has %llu %s\n", typename[i], maxkeys[i],
-               biggest[i], typeunit[i]);
-        }
-    }
-
-    printf("\n");
-
-    for(i=0;i<TYPE_COUNT;i++) {
-        printf("%llu %ss with %llu %s (%05.2f%% of keys, avg size %.2f)\n",
-           counts[i], typename[i], totalsize[i], typeunit[i],
-           sampled ? 100 * (double)counts[i]/sampled : 0,
-           counts[i] ? (double)totalsize[i]/counts[i] : 0);
-    }
-
-    /* Success! */
-    exit(0);
-}
-
-static void getKeyFreqs(redisReply *keys, unsigned long long *freqs) {
-    redisReply *reply;
-    unsigned int i;
-
-    /* Pipeline OBJECT freq commands */
-    for(i=0;i<keys->elements;i++) {
-        redisAppendCommand(context, "OBJECT freq %s", keys->element[i]->str);
-    }
-
-    /* Retrieve freqs */
-    for(i=0;i<keys->elements;i++) {
-        if(redisGetReply(context, (void**)&reply)!=REDIS_OK) {
-            fprintf(stderr, "Error getting freq for key '%s' (%d: %s)\n",
-                keys->element[i]->str, context->err, context->errstr);
-            exit(1);
-        } else if(reply->type != REDIS_REPLY_INTEGER) {
-            if(reply->type == REDIS_REPLY_ERROR) {
-                fprintf(stderr, "Error: %s\n", reply->str);
-                exit(1);
-            } else {
-                fprintf(stderr, "Warning: OBJECT freq on '%s' failed (may have been deleted)\n", keys->element[i]->str);
-                freqs[i] = 0;
-            }
-        } else {
-            freqs[i] = reply->integer;
-        }
-        freeReplyObject(reply);
-    }
-}
-
-#define HOTKEYS_SAMPLE 16
-static void findHotKeys(void) {
-    redisReply *keys, *reply;
-    unsigned long long counters[HOTKEYS_SAMPLE] = {0};
-    sds hotkeys[HOTKEYS_SAMPLE] = {NULL};
-    unsigned long long sampled = 0, total_keys, *freqs = NULL, it = 0;
-    unsigned int arrsize = 0, i, k;
-    double pct;
-
-    /* Total keys pre scanning */
-    total_keys = getDbSize();
-
-    /* Status message */
-    printf("\n# Scanning the entire keyspace to find hot keys as well as\n");
-    printf("# average sizes per key type. 
You can use -i 0.1 to sleep 0.1 sec\n"); - printf("# per 100 SCAN commands (not usually needed).\n\n"); - - /* SCAN loop */ - do { - /* Calculate approximate percentage completion */ - pct = 100 * (double)sampled/total_keys; - - /* Grab some keys and point to the keys array */ - reply = sendScan(&it); - keys = reply->element[1]; - - /* Reallocate our freqs array if we need to */ - if(keys->elements > arrsize) { - freqs = zrealloc(freqs, sizeof(unsigned long long)*keys->elements); - - if(!freqs) { - fprintf(stderr, "Failed to allocate storage for keys!\n"); - exit(1); - } - - arrsize = keys->elements; - } - - getKeyFreqs(keys, freqs); - - /* Now update our stats */ - for(i=0;ielements;i++) { - sampled++; - /* Update overall progress */ - if(sampled % 1000000 == 0) { - printf("[%05.2f%%] Sampled %llu keys so far\n", pct, sampled); - } - - /* Use eviction pool here */ - k = 0; - while (k < HOTKEYS_SAMPLE && freqs[i] > counters[k]) k++; - if (k == 0) continue; - k--; - if (k == 0 || counters[k] == 0) { - sdsfree(hotkeys[k]); - } else { - sdsfree(hotkeys[0]); - memmove(counters,counters+1,sizeof(counters[0])*k); - memmove(hotkeys,hotkeys+1,sizeof(hotkeys[0])*k); - } - counters[k] = freqs[i]; - hotkeys[k] = sdsnew(keys->element[i]->str); - printf( - "[%05.2f%%] Hot key '%s' found so far with counter %llu\n", - pct, keys->element[i]->str, freqs[i]); - } - - /* Sleep if we've been directed to do so */ - if(sampled && (sampled %100) == 0 && config.interval) { - usleep(config.interval); - } - - freeReplyObject(reply); - } while(it != 0); - - if (freqs) zfree(freqs); - - /* We're done */ - printf("\n-------- summary -------\n\n"); - - printf("Sampled %llu keys in the keyspace!\n", sampled); - - for (i=1; i<= HOTKEYS_SAMPLE; i++) { - k = HOTKEYS_SAMPLE - i; - if(counters[k]>0) { - printf("hot key found with counter: %llu\tkeyname: %s\n", counters[k], hotkeys[k]); - sdsfree(hotkeys[k]); - } - } - - exit(0); -} - -/*------------------------------------------------------------------------------ - * Stats mode - *--------------------------------------------------------------------------- */ - -/* Return the specified INFO field from the INFO command output "info". - * A new buffer is allocated for the result, that needs to be free'd. - * If the field is not found NULL is returned. */ -static char *getInfoField(char *info, char *field) { - char *p = strstr(info,field); - char *n1, *n2; - char *result; - - if (!p) return NULL; - p += strlen(field)+1; - n1 = strchr(p,'\r'); - n2 = strchr(p,','); - if (n2 && n2 < n1) n1 = n2; - result = zmalloc(sizeof(char)*(n1-p)+1); - memcpy(result,p,(n1-p)); - result[n1-p] = '\0'; - return result; -} - -/* Like the above function but automatically convert the result into - * a long. On error (missing field) LONG_MIN is returned. */ -static long getLongInfoField(char *info, char *field) { - char *value = getInfoField(info,field); - long l; - - if (!value) return LONG_MIN; - l = strtol(value,NULL,10); - zfree(value); - return l; -} - -/* Convert number of bytes into a human readable string of the form: - * 100B, 2G, 100M, 4K, and so forth. 
*/
-void bytesToHuman(char *s, long long n) {
-    double d;
-
-    if (n < 0) {
-        *s = '-';
-        s++;
-        n = -n;
-    }
-    if (n < 1024) {
-        /* Bytes */
-        sprintf(s,"%lldB",n);
-        return;
-    } else if (n < (1024*1024)) {
-        d = (double)n/(1024);
-        sprintf(s,"%.2fK",d);
-    } else if (n < (1024LL*1024*1024)) {
-        d = (double)n/(1024*1024);
-        sprintf(s,"%.2fM",d);
-    } else if (n < (1024LL*1024*1024*1024)) {
-        d = (double)n/(1024LL*1024*1024);
-        sprintf(s,"%.2fG",d);
-    }
-}
-
-static void statMode(void) {
-    redisReply *reply;
-    long aux, requests = 0;
-    int i = 0;
-
-    while(1) {
-        char buf[64];
-        int j;
-
-        reply = reconnectingRedisCommand(context,"INFO");
-        if (reply->type == REDIS_REPLY_ERROR) {
-            printf("ERROR: %s\n", reply->str);
-            exit(1);
-        }
-
-        if ((i++ % 20) == 0) {
-            printf(
-"------- data ------ --------------------- load -------------------- - child -\n"
-"keys       mem      clients blocked requests            connections          \n");
-        }
-
-        /* Keys */
-        aux = 0;
-        for (j = 0; j < 20; j++) {
-            long k;
-
-            sprintf(buf,"db%d:keys",j);
-            k = getLongInfoField(reply->str,buf);
-            if (k == LONG_MIN) continue;
-            aux += k;
-        }
-        sprintf(buf,"%ld",aux);
-        printf("%-11s",buf);
-
-        /* Used memory */
-        aux = getLongInfoField(reply->str,"used_memory");
-        bytesToHuman(buf,aux);
-        printf("%-8s",buf);
-
-        /* Clients */
-        aux = getLongInfoField(reply->str,"connected_clients");
-        sprintf(buf,"%ld",aux);
-        printf(" %-8s",buf);
-
-        /* Blocked (BLPOPPING) Clients */
-        aux = getLongInfoField(reply->str,"blocked_clients");
-        sprintf(buf,"%ld",aux);
-        printf("%-8s",buf);
-
-        /* Requests */
-        aux = getLongInfoField(reply->str,"total_commands_processed");
-        sprintf(buf,"%ld (+%ld)",aux,requests == 0 ? 0 : aux-requests);
-        printf("%-19s",buf);
-        requests = aux;
-
-        /* Connections */
-        aux = getLongInfoField(reply->str,"total_connections_received");
-        sprintf(buf,"%ld",aux);
-        printf(" %-12s",buf);
-
-        /* Children */
-        aux = getLongInfoField(reply->str,"bgsave_in_progress");
-        aux |= getLongInfoField(reply->str,"aof_rewrite_in_progress") << 1;
-        aux |= getLongInfoField(reply->str,"loading") << 2;
-        switch(aux) {
-        case 0: break;
-        case 1:
-            printf("SAVE");
-            break;
-        case 2:
-            printf("AOF");
-            break;
-        case 3:
-            printf("SAVE+AOF");
-            break;
-        case 4:
-            printf("LOAD");
-            break;
-        }
-
-        printf("\n");
-        freeReplyObject(reply);
-        usleep(config.interval);
-    }
-}
-
-/*------------------------------------------------------------------------------
- * Scan mode
- *--------------------------------------------------------------------------- */
-
-static void scanMode(void) {
-    redisReply *reply;
-    unsigned long long cur = 0;
-
-    do {
-        if (config.pattern)
-            reply = redisCommand(context,"SCAN %llu MATCH %s",
-                cur,config.pattern);
-        else
-            reply = redisCommand(context,"SCAN %llu",cur);
-        if (reply == NULL) {
-            printf("I/O error\n");
-            exit(1);
-        } else if (reply->type == REDIS_REPLY_ERROR) {
-            printf("ERROR: %s\n", reply->str);
-            exit(1);
-        } else {
-            unsigned int j;
-
-            cur = strtoull(reply->element[0]->str,NULL,10);
-            for (j = 0; j < reply->element[1]->elements; j++)
-                printf("%s\n", reply->element[1]->element[j]->str);
-        }
-        freeReplyObject(reply);
-    } while(cur != 0);
-
-    exit(0);
-}
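
scanMode() above is the canonical cursor loop: start from cursor 0, reissue SCAN with whatever cursor the server returns, and stop when it comes back as 0; MATCH only filters what each batch reports, it does not change the iteration. A hedged standalone version of the same loop — host, pattern, and COUNT are placeholders:

    #include <stdio.h>
    #include <stdlib.h>
    #include <hiredis/hiredis.h>

    int main(void) {
        redisContext *c = redisConnect("127.0.0.1", 6379);
        if (c == NULL || c->err) {
            fprintf(stderr, "connect failed\n");
            return 1;
        }

        unsigned long long cursor = 0;
        do {
            redisReply *r = redisCommand(c, "SCAN %llu MATCH %s COUNT %d",
                                         cursor, "user:*", 100);
            if (r == NULL || r->type != REDIS_REPLY_ARRAY || r->elements != 2) {
                fprintf(stderr, "SCAN failed\n");
                return 1;
            }
            /* element[0] is the next cursor, element[1] the batch of keys. */
            cursor = strtoull(r->element[0]->str, NULL, 10);
            for (size_t j = 0; j < r->element[1]->elements; j++)
                printf("%s\n", r->element[1]->element[j]->str);
            freeReplyObject(r);
        } while (cursor != 0);   /* cursor 0 == full iteration finished */

        redisFree(c);
        return 0;
    }

Unlike KEYS, this never blocks the server on a huge keyspace, which is why every analysis mode in this file (bigkeys, hotkeys, scan) is built on the same loop.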
-
-/*------------------------------------------------------------------------------
- * LRU test mode
- *--------------------------------------------------------------------------- */
-
-/* Return an integer from min to max (both inclusive) using a power-law
- * distribution, depending on the value of alpha: the greater the alpha
- * the more bias towards lower values.
- *
- * With alpha = 6.2 the output follows the 80-20 rule where 20% of
- * the returned numbers will account for 80% of the frequency. */
-long long powerLawRand(long long min, long long max, double alpha) {
-    double pl, r;
-
-    max += 1;
-    r = ((double)rand()) / RAND_MAX;
-    pl = pow(
-        ((pow(max,alpha+1) - pow(min,alpha+1))*r + pow(min,alpha+1)),
-        (1.0/(alpha+1)));
-    return (max-1-(long long)pl)+min;
-}
-
-/* Generates a key name among a set of lru_test_sample_size keys, using
- * an 80-20 distribution. */
-void LRUTestGenKey(char *buf, size_t buflen) {
-    snprintf(buf, buflen, "lru:%lld",
-        powerLawRand(1, config.lru_test_sample_size, 6.2));
-}
-
-#define LRU_CYCLE_PERIOD 1000 /* 1000 milliseconds. */
-#define LRU_CYCLE_PIPELINE_SIZE 250
-static void LRUTestMode(void) {
-    redisReply *reply;
-    char key[128];
-    long long start_cycle;
-    int j;
-
-    srand(time(NULL)^getpid());
-    while(1) {
-        /* Perform cycles of 1 second with 50% writes and 50% reads.
-         * We use pipelining, batching writes / reads N times per cycle in
-         * order to fill the target instance easily. */
-        start_cycle = mstime();
-        long long hits = 0, misses = 0;
-        while(mstime() - start_cycle < 1000) {
-            /* Write cycle. */
-            for (j = 0; j < LRU_CYCLE_PIPELINE_SIZE; j++) {
-                char val[6];
-                val[5] = '\0';
-                for (int i = 0; i < 5; i++) val[i] = 'A'+rand()%('z'-'A');
-                LRUTestGenKey(key,sizeof(key));
-                redisAppendCommand(context, "SET %s %s",key,val);
-            }
-            for (j = 0; j < LRU_CYCLE_PIPELINE_SIZE; j++)
-                redisGetReply(context, (void**)&reply);
-
-            /* Read cycle. */
-            for (j = 0; j < LRU_CYCLE_PIPELINE_SIZE; j++) {
-                LRUTestGenKey(key,sizeof(key));
-                redisAppendCommand(context, "GET %s",key);
-            }
-            for (j = 0; j < LRU_CYCLE_PIPELINE_SIZE; j++) {
-                if (redisGetReply(context, (void**)&reply) == REDIS_OK) {
-                    switch(reply->type) {
-                        case REDIS_REPLY_ERROR:
-                            printf("%s\n", reply->str);
-                            break;
-                        case REDIS_REPLY_NIL:
-                            misses++;
-                            break;
-                        default:
-                            hits++;
-                            break;
-                    }
-                }
-            }
-
-            if (context->err) {
-                fprintf(stderr,"I/O error during LRU test\n");
-                exit(1);
-            }
-        }
-        /* Print stats. */
-        printf(
-            "%lld Gets/sec | Hits: %lld (%.2f%%) | Misses: %lld (%.2f%%)\n",
-            hits+misses,
-            hits, (double)hits/(hits+misses)*100,
-            misses, (double)misses/(hits+misses)*100);
-    }
-    exit(0);
-}
-
-/*------------------------------------------------------------------------------
- * Intrinsic latency mode.
- *
- * Measure max latency of a running process that does not result from
- * syscalls. Basically this software should provide a hint about how much
- * time the kernel leaves the process without a chance to run.
- *--------------------------------------------------------------------------- */
-
-/* This is just some computation the compiler can't optimize out.
- * Should run in less than 100-200 microseconds even using very
- * slow hardware. Runs in less than 10 microseconds in modern HW.
*/ -unsigned long compute_something_fast(void) { - unsigned char s[256], i, j, t; - int count = 1000, k; - unsigned long output = 0; - - for (k = 0; k < 256; k++) s[k] = k; - - i = 0; - j = 0; - while(count--) { - i++; - j = j + s[i]; - t = s[i]; - s[i] = s[j]; - s[j] = t; - output += s[(s[i]+s[j])&255]; - } - return output; -} - -static void intrinsicLatencyModeStop(int s) { - UNUSED(s); - force_cancel_loop = 1; -} - -static void intrinsicLatencyMode(void) { - long long test_end, run_time, max_latency = 0, runs = 0; - - run_time = config.intrinsic_latency_duration*1000000; - test_end = ustime() + run_time; - signal(SIGINT, intrinsicLatencyModeStop); - - while(1) { - long long start, end, latency; - - start = ustime(); - compute_something_fast(); - end = ustime(); - latency = end-start; - runs++; - if (latency <= 0) continue; - - /* Reporting */ - if (latency > max_latency) { - max_latency = latency; - printf("Max latency so far: %lld microseconds.\n", max_latency); - } - - double avg_us = (double)run_time/runs; - double avg_ns = avg_us * 1e3; - if (force_cancel_loop || end > test_end) { - printf("\n%lld total runs " - "(avg latency: " - "%.4f microseconds / %.2f nanoseconds per run).\n", - runs, avg_us, avg_ns); - printf("Worst run took %.0fx longer than the average latency.\n", - max_latency / avg_us); - exit(0); - } - } -} - -/*------------------------------------------------------------------------------ - * Program main() - *--------------------------------------------------------------------------- */ - -int main(int argc, char **argv) { - int firstarg; - - config.hostip = sdsnew("127.0.0.1"); - config.hostport = 6379; - config.hostsocket = NULL; - config.repeat = 1; - config.interval = 0; - config.dbnum = 0; - config.interactive = 0; - config.shutdown = 0; - config.monitor_mode = 0; - config.pubsub_mode = 0; - config.latency_mode = 0; - config.latency_dist_mode = 0; - config.latency_history = 0; - config.lru_test_mode = 0; - config.lru_test_sample_size = 0; - config.cluster_mode = 0; - config.slave_mode = 0; - config.getrdb_mode = 0; - config.stat_mode = 0; - config.scan_mode = 0; - config.intrinsic_latency_mode = 0; - config.pattern = NULL; - config.rdb_filename = NULL; - config.pipe_mode = 0; - config.pipe_timeout = REDIS_CLI_DEFAULT_PIPE_TIMEOUT; - config.bigkeys = 0; - config.hotkeys = 0; - config.stdinarg = 0; - config.auth = NULL; - config.eval = NULL; - config.eval_ldb = 0; - config.eval_ldb_end = 0; - config.eval_ldb_sync = 0; - config.enable_ldb_on_eval = 0; - config.last_cmd_type = -1; - - pref.hints = 1; - - spectrum_palette = spectrum_palette_color; - spectrum_palette_size = spectrum_palette_color_size; - - if (!isatty(fileno(stdout)) && (getenv("FAKETTY") == NULL)) - config.output = OUTPUT_RAW; - else - config.output = OUTPUT_STANDARD; - config.mb_delim = sdsnew("\n"); - - firstarg = parseOptions(argc,argv); - argc -= firstarg; - argv += firstarg; - - /* Latency mode */ - if (config.latency_mode) { - if (cliConnect(0) == REDIS_ERR) exit(1); - latencyMode(); - } - - /* Latency distribution mode */ - if (config.latency_dist_mode) { - if (cliConnect(0) == REDIS_ERR) exit(1); - latencyDistMode(); - } - - /* Slave mode */ - if (config.slave_mode) { - if (cliConnect(0) == REDIS_ERR) exit(1); - slaveMode(); - } - - /* Get RDB mode. 
*/
-    if (config.getrdb_mode) {
-        if (cliConnect(0) == REDIS_ERR) exit(1);
-        getRDB();
-    }
-
-    /* Pipe mode */
-    if (config.pipe_mode) {
-        if (cliConnect(0) == REDIS_ERR) exit(1);
-        pipeMode();
-    }
-
-    /* Find big keys */
-    if (config.bigkeys) {
-        if (cliConnect(0) == REDIS_ERR) exit(1);
-        findBigKeys();
-    }
-
-    /* Find hot keys */
-    if (config.hotkeys) {
-        if (cliConnect(0) == REDIS_ERR) exit(1);
-        findHotKeys();
-    }
-
-    /* Stat mode */
-    if (config.stat_mode) {
-        if (cliConnect(0) == REDIS_ERR) exit(1);
-        if (config.interval == 0) config.interval = 1000000;
-        statMode();
-    }
-
-    /* Scan mode */
-    if (config.scan_mode) {
-        if (cliConnect(0) == REDIS_ERR) exit(1);
-        scanMode();
-    }
-
-    /* LRU test mode */
-    if (config.lru_test_mode) {
-        if (cliConnect(0) == REDIS_ERR) exit(1);
-        LRUTestMode();
-    }
-
-    /* Intrinsic latency mode */
-    if (config.intrinsic_latency_mode) intrinsicLatencyMode();
-
-    /* Start interactive mode when no command is provided */
-    if (argc == 0 && !config.eval) {
-        /* Ignore SIGPIPE in interactive mode to force a reconnect */
-        signal(SIGPIPE, SIG_IGN);
-
-        /* Note that in repl mode we don't abort on connection error.
-         * A new attempt will be performed for every command sent. */
-        cliConnect(0);
-        repl();
-    }
-
-    /* Otherwise, we have some arguments to execute */
-    if (cliConnect(0) != REDIS_OK) exit(1);
-    if (config.eval) {
-        return evalMode(argc,argv);
-    } else {
-        return noninteractive(argc,convertToSds(argc,argv));
-    }
-}
diff --git a/redis-android/src/main/jni/redis-4.0.11/src/redis-trib.rb b/redis-android/src/main/jni/redis-4.0.11/src/redis-trib.rb
deleted file mode 100755
index 47b398b..0000000
--- a/redis-android/src/main/jni/redis-4.0.11/src/redis-trib.rb
+++ /dev/null
@@ -1,1830 +0,0 @@
-#!/usr/bin/env ruby
-
-# TODO (temporary here, we'll move this into the Github issues once
-# redis-trib initial implementation is completed).
-#
-# - Make sure that if the rehashing fails in the middle redis-trib will try
-#   to recover.
-# - When redis-trib performs a cluster check, if it detects a slot move in
-#   progress it should prompt the user to continue the move from where it
-#   stopped.
-# - Gracefully handle Ctrl+C in move_slot, asking the user whether to really
-#   stop while rehashing, and performing the best cleanup possible if the
-#   user forces the quit.
-# - When doing "fix" set a global Fix to true, and prompt the user to
-#   fix the problem if automatically fixable every time there is something
-#   to fix. For instance:
-#   1) If there is a node that pretends to receive a slot, or to migrate a
-#      slot, but has no entries in that slot, fix it.
-#   2) If there is a node having keys in slots that are not owned by it,
-#      fix this condition by moving the entries to the node owning those
-#      slots.
-#   3) Perform more possibly slow tests about the state of the cluster.
-#   4) When aborted slot migration is detected, fix it.
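
Before the patch moves on to redis-trib.rb: main() above follows one dispatch convention throughout — every special mode is a boolean in config, each branch connects first, and the mode function never returns because it exits the process itself. A compressed sketch of that shape (all names here are stand-ins, not the real redis-cli symbols):

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for the real config flags and mode functions. */
    static struct { int pipe_mode, stat_mode, scan_mode; } config;

    static int cli_connect(void) { return 0; }   /* 0 == connected (stub) */
    static void pipe_mode(void) { exit(0); }     /* real modes never return */
    static void stat_mode(void) { exit(0); }
    static void scan_mode(void) { exit(0); }

    int main(void) {
        /* Each special mode: connect first, then hand control over for good. */
        if (config.pipe_mode) { if (cli_connect()) exit(1); pipe_mode(); }
        if (config.stat_mode) { if (cli_connect()) exit(1); stat_mode(); }
        if (config.scan_mode) { if (cli_connect()) exit(1); scan_mode(); }

        /* Falling through means interactive or one-shot command handling. */
        printf("no special mode requested\n");
        return 0;
    }

The only deliberate exception above is interactive mode, which tolerates a failed connect (and ignores SIGPIPE) so the REPL can retry on every command.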
- -require 'rubygems' -require 'redis' - -ClusterHashSlots = 16384 -MigrateDefaultTimeout = 60000 -MigrateDefaultPipeline = 10 -RebalanceDefaultThreshold = 2 - -$verbose = false - -def xputs(s) - case s[0..2] - when ">>>" - color="29;1" - when "[ER" - color="31;1" - when "[WA" - color="31;1" - when "[OK" - color="32" - when "[FA","***" - color="33" - else - color=nil - end - - color = nil if ENV['TERM'] != "xterm" - print "\033[#{color}m" if color - print s - print "\033[0m" if color - print "\n" -end - -class ClusterNode - def initialize(addr) - s = addr.split("@")[0].split(":") - if s.length < 2 - puts "Invalid IP or Port (given as #{addr}) - use IP:Port format" - exit 1 - end - port = s.pop # removes port from split array - ip = s.join(":") # if s.length > 1 here, it's IPv6, so restore address - @r = nil - @info = {} - @info[:host] = ip - @info[:port] = port - @info[:slots] = {} - @info[:migrating] = {} - @info[:importing] = {} - @info[:replicate] = false - @dirty = false # True if we need to flush slots info into node. - @friends = [] - end - - def friends - @friends - end - - def slots - @info[:slots] - end - - def has_flag?(flag) - @info[:flags].index(flag) - end - - def to_s - "#{@info[:host]}:#{@info[:port]}" - end - - def connect(o={}) - return if @r - print "Connecting to node #{self}: " if $verbose - STDOUT.flush - begin - @r = Redis.new(:host => @info[:host], :port => @info[:port], :timeout => 60) - @r.ping - rescue - xputs "[ERR] Sorry, can't connect to node #{self}" - exit 1 if o[:abort] - @r = nil - end - xputs "OK" if $verbose - end - - def assert_cluster - info = @r.info - if !info["cluster_enabled"] || info["cluster_enabled"].to_i == 0 - xputs "[ERR] Node #{self} is not configured as a cluster node." - exit 1 - end - end - - def assert_empty - if !(@r.cluster("info").split("\r\n").index("cluster_known_nodes:1")) || - (@r.info['db0']) - xputs "[ERR] Node #{self} is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0." - exit 1 - end - end - - def load_info(o={}) - self.connect - nodes = @r.cluster("nodes").split("\n") - nodes.each{|n| - # name addr flags role ping_sent ping_recv link_status slots - split = n.split - name,addr,flags,master_id,ping_sent,ping_recv,config_epoch,link_status = split[0..6] - slots = split[8..-1] - info = { - :name => name, - :addr => addr, - :flags => flags.split(","), - :replicate => master_id, - :ping_sent => ping_sent.to_i, - :ping_recv => ping_recv.to_i, - :link_status => link_status - } - info[:replicate] = false if master_id == "-" - - if info[:flags].index("myself") - @info = @info.merge(info) - @info[:slots] = {} - slots.each{|s| - if s[0..0] == '[' - if s.index("->-") # Migrating - slot,dst = s[1..-1].split("->-") - @info[:migrating][slot.to_i] = dst - elsif s.index("-<-") # Importing - slot,src = s[1..-1].split("-<-") - @info[:importing][slot.to_i] = src - end - elsif s.index("-") - start,stop = s.split("-") - self.add_slots((start.to_i)..(stop.to_i)) - else - self.add_slots((s.to_i)..(s.to_i)) - end - } if slots - @dirty = false - @r.cluster("info").split("\n").each{|e| - k,v=e.split(":") - k = k.to_sym - v.chop! 
-                if k != :cluster_state
-                    @info[k] = v.to_i
-                else
-                    @info[k] = v
-                end
-            }
-        elsif o[:getfriends]
-            @friends << info
-        end
-        }
-    end
-
-    def add_slots(slots)
-        slots.each{|s|
-            @info[:slots][s] = :new
-        }
-        @dirty = true
-    end
-
-    def set_as_replica(node_id)
-        @info[:replicate] = node_id
-        @dirty = true
-    end
-
-    def flush_node_config
-        return if !@dirty
-        if @info[:replicate]
-            begin
-                @r.cluster("replicate",@info[:replicate])
-            rescue
-                # If the cluster has not already joined it is possible that
-                # the slave does not know the master node yet. So on errors
-                # we return ASAP leaving the dirty flag set, to flush the
-                # config later.
-                return
-            end
-        else
-            new = []
-            @info[:slots].each{|s,val|
-                if val == :new
-                    new << s
-                    @info[:slots][s] = true
-                end
-            }
-            @r.cluster("addslots",*new)
-        end
-        @dirty = false
-    end
-
-    def info_string
-        # We want to display the hash slots assigned to this node
-        # as ranges, like in: "1-5,8-9,20-25,30"
-        #
-        # Note: this could be easily written without side effects,
-        # we use 'slots' just to split the computation into steps.
-
-        # First step: we want an increasing array of integers
-        # for instance: [1,2,3,4,5,8,9,20,21,22,23,24,25,30]
-        slots = @info[:slots].keys.sort
-
-        # As we want to aggregate adjacent slots we convert all the
-        # slot integers into ranges (with just one element)
-        # So we have something like [1..1,2..2, ... and so forth.
-        slots.map!{|x| x..x}
-
-        # Finally we group ranges with adjacent elements.
-        slots = slots.reduce([]) {|a,b|
-            if !a.empty? && b.first == (a[-1].last)+1
-                a[0..-2] + [(a[-1].first)..(b.last)]
-            else
-                a + [b]
-            end
-        }
-
-        # Now our task is easy, we just convert ranges with just one
-        # element into a number, and a real range into a start-end format.
-        # Finally we join the array using the comma as separator.
-        slots = slots.map{|x|
-            x.count == 1 ? x.first.to_s : "#{x.first}-#{x.last}"
-        }.join(",")
-
-        role = self.has_flag?("master") ? "M" : "S"
-
-        if self.info[:replicate] and @dirty
-            is = "S: #{self.info[:name]} #{self.to_s}"
-        else
-            is = "#{role}: #{self.info[:name]} #{self.to_s}\n"+
-                 "   slots:#{slots} (#{self.slots.length} slots) "+
-                 "#{(self.info[:flags]-["myself"]).join(",")}"
-        end
-        if self.info[:replicate]
-            is += "\n   replicates #{info[:replicate]}"
-        elsif self.has_flag?("master") && self.info[:replicas]
-            is += "\n   #{info[:replicas].length} additional replica(s)"
-        end
-        is
-    end
-
-    # Return a single string representing nodes and associated slots.
-    # TODO: remove slaves from config when slaves will be handled
-    # by Redis Cluster.
-    def get_config_signature
-        config = []
-        @r.cluster("nodes").each_line{|l|
-            s = l.split
-            slots = s[8..-1].select {|x| x[0..0] != "["}
-            next if slots.length == 0
-            config << s[0]+":"+(slots.sort.join(","))
-        }
-        config.sort.join("|")
-    end
-
-    def info
-        @info
-    end
-
-    def is_dirty?
-        @dirty
-    end
-
-    def r
-        @r
-    end
-end
-
-class RedisTrib
-    def initialize
-        @nodes = []
-        @fix = false
-        @errors = []
-        @timeout = MigrateDefaultTimeout
-    end
-
-    def check_arity(req_args, num_args)
-        if ((req_args > 0 and num_args != req_args) ||
-           (req_args < 0 and num_args < req_args.abs))
-           xputs "[ERR] Wrong number of arguments for specified sub command"
-           exit 1
-        end
-    end
-
-    def add_node(node)
-        @nodes << node
-    end
-
-    def reset_nodes
-        @nodes = []
-    end
-
-    def cluster_error(msg)
-        @errors << msg
-        xputs msg
-    end
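
info_string above folds the sorted slot numbers into the familiar "1-5,8-9,20-25,30" presentation. The same single-pass fold is short in C; in this sketch the slot array is invented and print_slot_ranges is a hypothetical name:

    #include <stdio.h>

    /* Collapse a sorted slot array into "a-b,c,d-e" style ranges, the same
     * presentation redis-trib uses for a node's slot summary. */
    static void print_slot_ranges(const int *slots, int n) {
        int i = 0;
        while (i < n) {
            int start = slots[i];
            while (i + 1 < n && slots[i+1] == slots[i] + 1) i++;
            if (start == slots[i]) printf("%d", start);
            else                   printf("%d-%d", start, slots[i]);
            i++;
            if (i < n) printf(",");
        }
        printf("\n");
    }

    int main(void) {
        int slots[] = {1, 2, 3, 4, 5, 8, 9, 20, 21, 22, 23, 24, 25, 30};
        print_slot_ranges(slots, (int)(sizeof(slots)/sizeof(slots[0])));
        /* prints: 1-5,8-9,20-25,30 */
        return 0;
    }

The Ruby version reaches the same result by mapping each slot to a one-element range and then merging adjacent ranges in a reduce; the two formulations are equivalent.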
-
-    # Return the node with the specified ID or Nil.
-    def get_node_by_name(name)
-        @nodes.each{|n|
-            return n if n.info[:name] == name.downcase
-        }
-        return nil
-    end
-
-    # Like get_node_by_name but the specified name can be just the first
-    # part of the node ID as long as the prefix is unique across the
-    # cluster.
-    def get_node_by_abbreviated_name(name)
-        l = name.length
-        candidates = []
-        @nodes.each{|n|
-            if n.info[:name][0...l] == name.downcase
-                candidates << n
-            end
-        }
-        return nil if candidates.length != 1
-        candidates[0]
-    end
-
-    # This function returns the master that has the least number of replicas
-    # in the cluster. If there are multiple masters with the same smallest
-    # number of replicas, one at random is returned.
-    def get_master_with_least_replicas
-        masters = @nodes.select{|n| n.has_flag? "master"}
-        sorted = masters.sort{|a,b|
-            a.info[:replicas].length <=> b.info[:replicas].length
-        }
-        sorted[0]
-    end
-
-    def check_cluster(opt={})
-        xputs ">>> Performing Cluster Check (using node #{@nodes[0]})"
-        show_nodes if !opt[:quiet]
-        check_config_consistency
-        check_open_slots
-        check_slots_coverage
-    end
-
-    def show_cluster_info
-        masters = 0
-        keys = 0
-        @nodes.each{|n|
-            if n.has_flag?("master")
-                puts "#{n} (#{n.info[:name][0...8]}...) -> #{n.r.dbsize} keys | #{n.slots.length} slots | "+
-                     "#{n.info[:replicas].length} slaves."
-                masters += 1
-                keys += n.r.dbsize
-            end
-        }
-        xputs "[OK] #{keys} keys in #{masters} masters."
-        keys_per_slot = sprintf("%.2f",keys/16384.0)
-        puts "#{keys_per_slot} keys per slot on average."
-    end
-
-    # Merge slots of every known node. If the resulting slots are equal
-    # to ClusterHashSlots, then all slots are served.
-    def covered_slots
-        slots = {}
-        @nodes.each{|n|
-            slots = slots.merge(n.slots)
-        }
-        slots
-    end
-
-    def check_slots_coverage
-        xputs ">>> Check slots coverage..."
-        slots = covered_slots
-        if slots.length == ClusterHashSlots
-            xputs "[OK] All #{ClusterHashSlots} slots covered."
-        else
-            cluster_error \
-                "[ERR] Not all #{ClusterHashSlots} slots are covered by nodes."
-            fix_slots_coverage if @fix
-        end
-    end
-
-    def check_open_slots
-        xputs ">>> Check for open slots..."
-        open_slots = []
-        @nodes.each{|n|
-            if n.info[:migrating].size > 0
-                cluster_error \
-                    "[WARNING] Node #{n} has slots in migrating state (#{n.info[:migrating].keys.join(",")})."
-                open_slots += n.info[:migrating].keys
-            end
-            if n.info[:importing].size > 0
-                cluster_error \
-                    "[WARNING] Node #{n} has slots in importing state (#{n.info[:importing].keys.join(",")})."
-                open_slots += n.info[:importing].keys
-            end
-        }
-        open_slots.uniq!
-        if open_slots.length > 0
-            xputs "[WARNING] The following slots are open: #{open_slots.join(",")}"
-        end
-        if @fix
-            open_slots.each{|slot| fix_open_slot slot}
-        end
-    end
-
-    def nodes_with_keys_in_slot(slot)
-        nodes = []
-        @nodes.each{|n|
-            next if n.has_flag?("slave")
-            nodes << n if n.r.cluster("getkeysinslot",slot,1).length > 0
-        }
-        nodes
-    end
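
covered_slots and check_slots_coverage above amount to a set union: each master contributes its slots, and the cluster is healthy only if the union reaches all 16384. A small bitmap sketch of the same check — the two hard-coded ranges stand in for what CLUSTER NODES would actually report:

    #include <stdio.h>

    #define CLUSTER_SLOTS 16384

    int main(void) {
        static unsigned char covered[CLUSTER_SLOTS];
        int s, missing = 0;

        /* Pretend node A serves 0-8000 and node B serves 8002-16383. */
        for (s = 0;    s <= 8000;         s++) covered[s] = 1;
        for (s = 8002; s < CLUSTER_SLOTS; s++) covered[s] = 1;

        for (s = 0; s < CLUSTER_SLOTS; s++)
            if (!covered[s]) { printf("uncovered slot: %d\n", s); missing++; }

        if (missing) printf("%d slot(s) not covered by any node\n", missing);
        else         printf("all %d slots covered\n", CLUSTER_SLOTS);
        return 0;
    }

With the invented layout this reports slot 8001 as uncovered, which is exactly the condition fix_slots_coverage (next) is asked to repair.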
-
-    def fix_slots_coverage
-        not_covered = (0...ClusterHashSlots).to_a - covered_slots.keys
-        xputs ">>> Fixing slots coverage..."
-        xputs "List of not covered slots: " + not_covered.join(",")
-
-        # For every slot, take action depending on the actual condition:
-        # 1) No node has keys for this slot.
-        # 2) A single node has keys for this slot.
-        # 3) Multiple nodes have keys for this slot.
-        slots = {}
-        not_covered.each{|slot|
-            nodes = nodes_with_keys_in_slot(slot)
-            slots[slot] = nodes
-            xputs "Slot #{slot} has keys in #{nodes.length} nodes: #{nodes.join(", ")}"
-        }
-
-        none = slots.select {|k,v| v.length == 0}
-        single = slots.select {|k,v| v.length == 1}
-        multi = slots.select {|k,v| v.length > 1}
-
-        # Handle case "1": keys in no node.
-        if none.length > 0
-            xputs "The following uncovered slots have no keys across the cluster:"
-            xputs none.keys.join(",")
-            yes_or_die "Fix these slots by covering with a random node?"
-            none.each{|slot,nodes|
-                node = @nodes.sample
-                xputs ">>> Covering slot #{slot} with #{node}"
-                node.r.cluster("addslots",slot)
-            }
-        end
-
-        # Handle case "2": keys only in one node.
-        if single.length > 0
-            xputs "The following uncovered slots have keys in just one node:"
-            puts single.keys.join(",")
-            yes_or_die "Fix these slots by covering with those nodes?"
-            single.each{|slot,nodes|
-                xputs ">>> Covering slot #{slot} with #{nodes[0]}"
-                nodes[0].r.cluster("addslots",slot)
-            }
-        end
-
-        # Handle case "3": keys in multiple nodes.
-        if multi.length > 0
-            xputs "The following uncovered slots have keys in multiple nodes:"
-            xputs multi.keys.join(",")
-            yes_or_die "Fix these slots by moving keys into a single node?"
-            multi.each{|slot,nodes|
-                target = get_node_with_most_keys_in_slot(nodes,slot)
-                xputs ">>> Covering slot #{slot} moving keys to #{target}"
-
-                target.r.cluster('addslots',slot)
-                target.r.cluster('setslot',slot,'stable')
-                nodes.each{|src|
-                    next if src == target
-                    # Set the source node in 'importing' state (even if we will
-                    # actually migrate keys away) in order to avoid receiving
-                    # redirections for MIGRATE.
-                    src.r.cluster('setslot',slot,'importing',target.info[:name])
-                    move_slot(src,target,slot,:dots=>true,:fix=>true,:cold=>true)
-                    src.r.cluster('setslot',slot,'stable')
-                }
-            }
-        end
-    end
-
-    # Return the owners of the specified slot.
-    def get_slot_owners(slot)
-        owners = []
-        @nodes.each{|n|
-            next if n.has_flag?("slave")
-            n.slots.each{|s,_|
-                owners << n if s == slot
-            }
-        }
-        owners
-    end
-
-    # Return the node, among 'nodes', with the greatest number of keys
-    # in the specified slot.
-    def get_node_with_most_keys_in_slot(nodes,slot)
-        best = nil
-        best_numkeys = 0
-        @nodes.each{|n|
-            next if n.has_flag?("slave")
-            numkeys = n.r.cluster("countkeysinslot",slot)
-            if numkeys > best_numkeys || best == nil
-                best = n
-                best_numkeys = numkeys
-            end
-        }
-        return best
-    end
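
get_node_with_most_keys_in_slot above is a straightforward argmax over CLUSTER COUNTKEYSINSLOT replies. For illustration, the same selection in C over pre-fetched counts — the counts array stands in for the per-node round trips:

    #include <stdio.h>

    /* Pick the index with the most keys in a slot; ties keep the first
     * candidate, mirroring the "numkeys > best_numkeys || best == nil"
     * rule in the Ruby version above. */
    static int pick_owner(const long long *numkeys, int nnodes) {
        int best = -1;
        long long best_numkeys = 0;
        for (int i = 0; i < nnodes; i++) {
            if (best == -1 || numkeys[i] > best_numkeys) {
                best = i;
                best_numkeys = numkeys[i];
            }
        }
        return best;
    }

    int main(void) {
        long long counts[] = {0, 42, 7};  /* CLUSTER COUNTKEYSINSLOT results */
        printf("owner candidate: node %d\n", pick_owner(counts, 3));
        return 0;
    }

Choosing the node that already holds the most keys minimizes the number of MIGRATE operations the subsequent fix has to perform.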
-
-    # Slot 'slot' was found to be in importing or migrating state in one or
-    # more nodes. This function fixes this condition by migrating keys where
-    # it seems more sensible.
-    def fix_open_slot(slot)
-        puts ">>> Fixing open slot #{slot}"
-
-        # Try to obtain the current slot owner, according to the current
-        # nodes configuration.
-        owners = get_slot_owners(slot)
-        owner = owners[0] if owners.length == 1
-
-        migrating = []
-        importing = []
-        @nodes.each{|n|
-            next if n.has_flag? "slave"
-            if n.info[:migrating][slot]
-                migrating << n
-            elsif n.info[:importing][slot]
-                importing << n
-            elsif n.r.cluster("countkeysinslot",slot) > 0 && n != owner
-                xputs "*** Found keys about slot #{slot} in node #{n}!"
-                importing << n
-            end
-        }
-        puts "Set as migrating in: #{migrating.join(",")}"
-        puts "Set as importing in: #{importing.join(",")}"
-
-        # If there is no slot owner, set as owner the node with the biggest
-        # number of keys, among the set of migrating / importing nodes.
-        if !owner
-            xputs ">>> Nobody claims ownership, selecting an owner..."
-            owner = get_node_with_most_keys_in_slot(@nodes,slot)
-
-            # If we still don't have an owner, we can't fix it.
-            if !owner
-                xputs "[ERR] Can't select a slot owner. Impossible to fix."
-                exit 1
-            end
-
-            # Use ADDSLOTS to assign the slot.
-            puts "*** Configuring #{owner} as the slot owner"
-            owner.r.cluster("setslot",slot,"stable")
-            owner.r.cluster("addslots",slot)
-            # Make sure this information will propagate. Not strictly needed
-            # since there is no past owner, so all the other nodes will accept
-            # whatever epoch this node will claim the slot with.
-            owner.r.cluster("bumpepoch")
-
-            # Remove the owner from the list of migrating/importing
-            # nodes.
-            migrating.delete(owner)
-            importing.delete(owner)
-        end
-
-        # If there are multiple owners of the slot, we need to fix it
-        # so that a single node is the owner and all the other nodes
-        # are in importing state. Later the fix can be handled by one
-        # of the base cases above.
-        #
-        # Note that this case also covers multiple nodes having the slot
-        # in migrating state, since migrating is a valid state only for
-        # slot owners.
-        if owners.length > 1
-            owner = get_node_with_most_keys_in_slot(owners,slot)
-            owners.each{|n|
-                next if n == owner
-                n.r.cluster('delslots',slot)
-                n.r.cluster('setslot',slot,'importing',owner.info[:name])
-                importing.delete(n) # Avoid duplicates
-                importing << n
-            }
-            owner.r.cluster('bumpepoch')
-        end
-
-        # Case 1: The slot is in migrating state in one node, and in
-        # importing state in one node. That's trivial to address.
-        if migrating.length == 1 && importing.length == 1
-            move_slot(migrating[0],importing[0],slot,:dots=>true,:fix=>true)
-        # Case 2: There are multiple nodes that claim the slot as importing,
-        # they probably got keys about the slot after a restart so opened
-        # the slot. In this case we just move all the keys to the owner
-        # according to the configuration.
-        elsif migrating.length == 0 && importing.length > 0
-            xputs ">>> Moving all the #{slot} slot keys to its owner #{owner}"
-            importing.each {|node|
-                next if node == owner
-                move_slot(node,owner,slot,:dots=>true,:fix=>true,:cold=>true)
-                xputs ">>> Setting #{slot} as STABLE in #{node}"
-                node.r.cluster("setslot",slot,"stable")
-            }
-        # Case 3: There are no nodes claiming to be in importing state, but
-        # there is a migrating node that actually doesn't have any key. We
-        # can just close the slot, probably a reshard interrupted in the middle.
-        elsif importing.length == 0 && migrating.length == 1 &&
-              migrating[0].r.cluster("getkeysinslot",slot,10).length == 0
-            migrating[0].r.cluster("setslot",slot,"stable")
-        else
-            xputs "[ERR] Sorry, Redis-trib can't fix this slot yet (work in progress). Slot is set as migrating in #{migrating.join(",")}, as importing in #{importing.join(",")}, owner is #{owner}"
-        end
-    end
-
-    # Check if all the nodes agree about the cluster configuration
-    def check_config_consistency
-        if !is_config_consistent?
-            cluster_error "[ERR] Nodes don't agree about configuration!"
-        else
-            xputs "[OK] All nodes agree about slots configuration."
-        end
-    end
-
-    def is_config_consistent?
-        signatures=[]
-        @nodes.each{|n|
-            signatures << n.get_config_signature
-        }
-        return signatures.uniq.length == 1
-    end
-
-    def wait_cluster_join
-        print "Waiting for the cluster to join"
-        while !is_config_consistent?
-            print "."
-            STDOUT.flush
-            sleep 1
-        end
-        print "\n"
-    end
-
-    def alloc_slots
-        nodes_count = @nodes.length
-        masters_count = @nodes.length / (@replicas+1)
-        masters = []
-
-        # The first step is to split instances by IP. 
This is useful as - # we'll try to allocate master nodes in different physical machines - # (as much as possible) and to allocate slaves of a given master in - # different physical machines as well. - # - # This code assumes just that if the IP is different, than it is more - # likely that the instance is running in a different physical host - # or at least a different virtual machine. - ips = {} - @nodes.each{|n| - ips[n.info[:host]] = [] if !ips[n.info[:host]] - ips[n.info[:host]] << n - } - - # Select master instances - puts "Using #{masters_count} masters:" - interleaved = [] - stop = false - while not stop do - # Take one node from each IP until we run out of nodes - # across every IP. - ips.each do |ip,nodes| - if nodes.empty? - # if this IP has no remaining nodes, check for termination - if interleaved.length == nodes_count - # stop when 'interleaved' has accumulated all nodes - stop = true - next - end - else - # else, move one node from this IP to 'interleaved' - interleaved.push nodes.shift - end - end - end - - masters = interleaved.slice!(0, masters_count) - nodes_count -= masters.length - - masters.each{|m| puts m} - - # Rotating the list sometimes helps to get better initial - # anti-affinity before the optimizer runs. - interleaved.push interleaved.shift - - # Alloc slots on masters. After interleaving to get just the first N - # should be optimal. With slaves is more complex, see later... - slots_per_node = ClusterHashSlots.to_f / masters_count - first = 0 - cursor = 0.0 - masters.each_with_index{|n,masternum| - last = (cursor+slots_per_node-1).round - if last > ClusterHashSlots || masternum == masters.length-1 - last = ClusterHashSlots-1 - end - last = first if last < first # Min step is 1. - n.add_slots first..last - first = last+1 - cursor += slots_per_node - } - - # Select N replicas for every master. - # We try to split the replicas among all the IPs with spare nodes - # trying to avoid the host where the master is running, if possible. - # - # Note we loop two times. The first loop assigns the requested - # number of replicas to each master. The second loop assigns any - # remaining instances as extra replicas to masters. Some masters - # may end up with more than their requested number of replicas, but - # all nodes will be used. - assignment_verbose = false - - [:requested,:unused].each do |assign| - masters.each do |m| - assigned_replicas = 0 - while assigned_replicas < @replicas - break if nodes_count == 0 - if assignment_verbose - if assign == :requested - puts "Requesting total of #{@replicas} replicas " \ - "(#{assigned_replicas} replicas assigned " \ - "so far with #{nodes_count} total remaining)." - elsif assign == :unused - puts "Assigning extra instance to replication " \ - "role too (#{nodes_count} remaining)." - end - end - - # Return the first node not matching our current master - node = interleaved.find{|n| n.info[:host] != m.info[:host]} - - # If we found a node, use it as a best-first match. - # Otherwise, we didn't find a node on a different IP, so we - # go ahead and use a same-IP replica. - if node - slave = node - interleaved.delete node - else - slave = interleaved.shift - end - slave.set_as_replica(m.info[:name]) - nodes_count -= 1 - assigned_replicas += 1 - puts "Adding replica #{slave} to #{m}" - - # If we are in the "assign extra nodes" loop, - # we want to assign one extra replica to each - # master before repeating masters. - # This break lets us assign extra replicas to masters - # in a round-robin way. 
-                    break if assign == :unused
-                end
-            end
-        end
-
-        optimize_anti_affinity
-    end
-
-    def optimize_anti_affinity
-        score,aux = get_anti_affinity_score
-        return if score == 0
-
-        xputs ">>> Trying to optimize slaves allocation for anti-affinity"
-
-        maxiter = 500*@nodes.length # Effort is proportional to cluster size...
-        while maxiter > 0
-            score,offenders = get_anti_affinity_score
-            break if score == 0 # Optimal anti-affinity reached
-
-            # We'll try to randomly swap a slave's assigned master causing
-            # an affinity problem with another random slave, to see if we
-            # can improve the affinity.
-            first = offenders.shuffle.first
-            nodes = @nodes.select{|n| n != first && n.info[:replicate]}
-            break if nodes.length == 0
-            second = nodes.shuffle.first
-
-            first_master = first.info[:replicate]
-            second_master = second.info[:replicate]
-            first.set_as_replica(second_master)
-            second.set_as_replica(first_master)
-
-            new_score,aux = get_anti_affinity_score
-            # If the change actually makes things worse, revert. Otherwise
-            # leave it as it is, because the best solution may need a few
-            # combined swaps.
-            if new_score > score
-                first.set_as_replica(first_master)
-                second.set_as_replica(second_master)
-            end
-
-            maxiter -= 1
-        end
-
-        score,aux = get_anti_affinity_score
-        if score == 0
-            xputs "[OK] Perfect anti-affinity obtained!"
-        elsif score >= 10000
-            puts "[WARNING] Some slaves are in the same host as their master"
-        else
-            puts "[WARNING] Some slaves of the same master are in the same host"
-        end
-    end
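
The optimizer above is a plain randomized hill-climb over the score defined next: 10000 points for each slave on its master's host, 1 point for each slave sharing a host with a sibling slave of the same master. As a cross-check, here is the same rule in C, simplified to pairwise comparisons over an invented three-node layout:

    #include <stdio.h>
    #include <string.h>

    /* One replication-related node: the replication group it belongs to
     * (the master's name) and the host it runs on. */
    struct node { const char *group; const char *host; int is_master; };

    static long anti_affinity_score(const struct node *n, int count) {
        long score = 0;
        for (int i = 0; i < count; i++) {
            if (n[i].is_master) continue;            /* only slaves offend */
            for (int j = 0; j < count; j++) {
                if (i == j) continue;
                if (strcmp(n[i].group, n[j].group) != 0) continue;
                if (strcmp(n[i].host,  n[j].host)  != 0) continue;
                /* Same replication group, same host. */
                if (n[j].is_master) score += 10000;  /* slave with its master */
                else if (j > i)     score += 1;      /* pair of sibling slaves */
            }
        }
        return score;
    }

    int main(void) {
        struct node layout[] = {
            {"m1", "10.0.0.1", 1},
            {"m1", "10.0.0.1", 0},   /* slave on its master's host: +10000 */
            {"m1", "10.0.0.2", 0},   /* safely on another host: +0 */
        };
        printf("score = %ld\n", anti_affinity_score(layout, 3));  /* 10000 */
        return 0;
    }

The 10000x weighting is the whole design: the optimizer will never trade a "slave on master's host" violation away to improve mere slave co-location, because the two factors can never be confused in a single scalar.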
-
-    # Return the anti-affinity score, which is a measure of the amount of
-    # violations of anti-affinity in the current cluster layout, that is, how
-    # badly the masters and slaves are distributed in the different IP
-    # addresses so that slaves of the same master are not in the master
-    # host and are also in different hosts.
-    #
-    # The score is calculated as follows:
-    #
-    # SAME_AS_MASTER = 10000 * each slave in the same IP of its master.
-    # SAME_AS_SLAVE  = 1 * each slave having the same IP as another slave
-    #                  of the same master.
-    # FINAL_SCORE = SAME_AS_MASTER + SAME_AS_SLAVE
-    #
-    # So a greater score means a worse anti-affinity level, while zero
-    # means perfect anti-affinity.
-    #
-    # The anti-affinity optimizer will try to get a score as low as
-    # possible. Since we do not want to sacrifice the fact that slaves should
-    # not be in the same host as the master, we assign 10000 times the score
-    # to this violation, so that we'll optimize for the second factor only
-    # if it does not impact the first one.
-    #
-    # The function returns two things: the above score, and the list of
-    # offending slaves, so that the optimizer can try changing the
-    # configuration of the slaves violating the anti-affinity goals.
-    def get_anti_affinity_score
-        score = 0
-        offending = [] # List of offending slaves to return to the caller
-
-        # First, split nodes by host
-        host_to_node = {}
-        @nodes.each{|n|
-            host = n.info[:host]
-            host_to_node[host] = [] if host_to_node[host] == nil
-            host_to_node[host] << n
-        }
-
-        # Then, for each set of nodes in the same host, split by
-        # related nodes (masters and slaves which are involved in
-        # replication of each other)
-        host_to_node.each{|host,nodes|
-            related = {}
-            nodes.each{|n|
-                if !n.info[:replicate]
-                    name = n.info[:name]
-                    related[name] = [] if related[name] == nil
-                    related[name] << :m
-                else
-                    name = n.info[:replicate]
-                    related[name] = [] if related[name] == nil
-                    related[name] << :s
-                end
-            }
-
-            # Now it's trivial to check, for each related group having the
-            # same host, what is their local score.
-            related.each{|id,types|
-                next if types.length < 2
-                types.sort! # Make sure :m is the first if any
-                if types[0] == :m
-                    score += 10000 * (types.length-1)
-                else
-                    score += 1 * types.length
-                end
-
-                # Populate the list of offending nodes
-                @nodes.each{|n|
-                    if n.info[:replicate] == id &&
-                       n.info[:host] == host
-                        offending << n
-                    end
-                }
-            }
-        }
-        return score,offending
-    end
-
-    def flush_nodes_config
-        @nodes.each{|n|
-            n.flush_node_config
-        }
-    end
-
-    def show_nodes
-        @nodes.each{|n|
-            xputs n.info_string
-        }
-    end
-
-    # Redis Cluster config epoch collision resolution code is able to
-    # eventually set a different epoch to each node after a new cluster is
-    # created, but it is slow compared to assigning a progressive config
-    # epoch to each node before joining the cluster. However we do just a
-    # best-effort try here since failing is not a problem.
-    def assign_config_epoch
-        config_epoch = 1
-        @nodes.each{|n|
-            begin
-                n.r.cluster("set-config-epoch",config_epoch)
-            rescue
-            end
-            config_epoch += 1
-        }
-    end
-
-    def join_cluster
-        # We use a brute force approach to make sure the nodes will meet
-        # each other, that is, sending CLUSTER MEET messages to all the nodes
-        # about the very same node.
-        # Thanks to gossip this information should propagate across all the
-        # cluster in a matter of seconds.
-        first = false
-        @nodes.each{|n|
-            if !first then first = n.info; next; end # Skip the first node
-            n.r.cluster("meet",first[:host],first[:port])
-        }
-    end
-
-    def yes_or_die(msg)
-        print "#{msg} (type 'yes' to accept): "
-        STDOUT.flush
-        if !(STDIN.gets.chomp.downcase == "yes")
-            xputs "*** Aborting..."
-            exit 1
-        end
-    end
-
-    def load_cluster_info_from_node(nodeaddr)
-        node = ClusterNode.new(nodeaddr)
-        node.connect(:abort => true)
-        node.assert_cluster
-        node.load_info(:getfriends => true)
-        add_node(node)
-        node.friends.each{|f|
-            next if f[:flags].index("noaddr") ||
-                    f[:flags].index("disconnected") ||
-                    f[:flags].index("fail")
-            fnode = ClusterNode.new(f[:addr])
-            fnode.connect()
-            next if !fnode.r
-            begin
-                fnode.load_info()
-                add_node(fnode)
-            rescue => e
-                xputs "[ERR] Unable to load info for node #{fnode}"
-            end
-        }
-        populate_nodes_replicas_info
-    end
-
-    # This function is called by load_cluster_info_from_node in order to
-    # add additional information to every node as a list of replicas.
-    def populate_nodes_replicas_info
-        # Start adding the new field to every node.
-        @nodes.each{|n|
-            n.info[:replicas] = []
-        }
-
-        # Populate the replicas field using the replicate field of slave
-        # nodes.
- @nodes.each{|n| - if n.info[:replicate] - master = get_node_by_name(n.info[:replicate]) - if !master - xputs "*** WARNING: #{n} claims to be slave of unknown node ID #{n.info[:replicate]}." - else - master.info[:replicas] << n - end - end - } - end - - # Given a list of source nodes return a "resharding plan" - # with what slots to move in order to move "numslots" slots to another - # instance. - def compute_reshard_table(sources,numslots) - moved = [] - # Sort from bigger to smaller instance, for two reasons: - # 1) If we take less slots than instances it is better to start - # getting from the biggest instances. - # 2) We take one slot more from the first instance in the case of not - # perfect divisibility. Like we have 3 nodes and need to get 10 - # slots, we take 4 from the first, and 3 from the rest. So the - # biggest is always the first. - sources = sources.sort{|a,b| b.slots.length <=> a.slots.length} - source_tot_slots = sources.inject(0) {|sum,source| - sum+source.slots.length - } - sources.each_with_index{|s,i| - # Every node will provide a number of slots proportional to the - # slots it has assigned. - n = (numslots.to_f/source_tot_slots*s.slots.length) - if i == 0 - n = n.ceil - else - n = n.floor - end - s.slots.keys.sort[(0...n)].each{|slot| - if moved.length < numslots - moved << {:source => s, :slot => slot} - end - } - } - return moved - end - - def show_reshard_table(table) - table.each{|e| - puts " Moving slot #{e[:slot]} from #{e[:source].info[:name]}" - } - end - - # Move slots between source and target nodes using MIGRATE. - # - # Options: - # :verbose -- Print a dot for every moved key. - # :fix -- We are moving in the context of a fix. Use REPLACE. - # :cold -- Move keys without opening slots / reconfiguring the nodes. - # :update -- Update nodes.info[:slots] for source/target nodes. - # :quiet -- Don't print info messages. - def move_slot(source,target,slot,o={}) - o = {:pipeline => MigrateDefaultPipeline}.merge(o) - - # We start marking the slot as importing in the destination node, - # and the slot as migrating in the target host. Note that the order of - # the operations is important, as otherwise a client may be redirected - # to the target node that does not yet know it is importing this slot. - if !o[:quiet] - print "Moving slot #{slot} from #{source} to #{target}: " - STDOUT.flush - end - - if !o[:cold] - target.r.cluster("setslot",slot,"importing",source.info[:name]) - source.r.cluster("setslot",slot,"migrating",target.info[:name]) - end - # Migrate all the keys from source to target using the MIGRATE command - while true - keys = source.r.cluster("getkeysinslot",slot,o[:pipeline]) - break if keys.length == 0 - begin - source.r.client.call(["migrate",target.info[:host],target.info[:port],"",0,@timeout,:keys,*keys]) - rescue => e - if o[:fix] && e.to_s =~ /BUSYKEY/ - xputs "*** Target key exists. Replacing it for FIX." - source.r.client.call(["migrate",target.info[:host],target.info[:port],"",0,@timeout,:replace,:keys,*keys]) - else - puts "" - xputs "[ERR] Calling MIGRATE: #{e}" - exit 1 - end - end - print "."*keys.length if o[:dots] - STDOUT.flush - end - - puts if !o[:quiet] - # Set the new node as the owner of the slot in all the known nodes. 
- if !o[:cold] - @nodes.each{|n| - next if n.has_flag?("slave") - n.r.cluster("setslot",slot,"node",target.info[:name]) - } - end - - # Update the node logical config - if o[:update] then - source.info[:slots].delete(slot) - target.info[:slots][slot] = true - end - end - - # redis-trib subcommands implementations. - - def check_cluster_cmd(argv,opt) - load_cluster_info_from_node(argv[0]) - check_cluster - end - - def info_cluster_cmd(argv,opt) - load_cluster_info_from_node(argv[0]) - show_cluster_info - end - - def rebalance_cluster_cmd(argv,opt) - opt = { - 'pipeline' => MigrateDefaultPipeline, - 'threshold' => RebalanceDefaultThreshold - }.merge(opt) - - # Load nodes info before parsing options, otherwise we can't - # handle --weight. - load_cluster_info_from_node(argv[0]) - - # Options parsing - threshold = opt['threshold'].to_i - autoweights = opt['auto-weights'] - weights = {} - opt['weight'].each{|w| - fields = w.split("=") - node = get_node_by_abbreviated_name(fields[0]) - if !node || !node.has_flag?("master") - puts "*** No such master node #{fields[0]}" - exit 1 - end - weights[node.info[:name]] = fields[1].to_f - } if opt['weight'] - useempty = opt['use-empty-masters'] - - # Assign a weight to each node, and compute the total cluster weight. - total_weight = 0 - nodes_involved = 0 - @nodes.each{|n| - if n.has_flag?("master") - next if !useempty && n.slots.length == 0 - n.info[:w] = weights[n.info[:name]] ? weights[n.info[:name]] : 1 - total_weight += n.info[:w] - nodes_involved += 1 - end - } - - # Check cluster, only proceed if it looks sane. - check_cluster(:quiet => true) - if @errors.length != 0 - puts "*** Please fix your cluster problems before rebalancing" - exit 1 - end - - # Calculate the slots balance for each node. It's the number of - # slots the node should lose (if positive) or gain (if negative) - # in order to be balanced. - threshold = opt['threshold'].to_f - threshold_reached = false - @nodes.each{|n| - if n.has_flag?("master") - next if !n.info[:w] - expected = ((ClusterHashSlots.to_f / total_weight) * - n.info[:w]).to_i - n.info[:balance] = n.slots.length - expected - # Compute the percentage of difference between the - # expected number of slots and the real one, to see - # if it's over the threshold specified by the user. - over_threshold = false - if threshold > 0 - if n.slots.length > 0 - err_perc = (100-(100.0*expected/n.slots.length)).abs - over_threshold = true if err_perc > threshold - elsif expected > 0 - over_threshold = true - end - end - threshold_reached = true if over_threshold - end - } - if !threshold_reached - xputs "*** No rebalancing needed! All nodes are within the #{threshold}% threshold." - return - end - - # Only consider nodes we want to change - sn = @nodes.select{|n| - n.has_flag?("master") && n.info[:w] - } - - # Because of rounding, it is possible that the balance of all nodes - # summed does not give 0. Make sure that nodes that have to provide - # slots are always matched by nodes receiving slots. - total_balance = sn.map{|x| x.info[:balance]}.reduce{|a,b| a+b} - while total_balance > 0 - sn.each{|n| - if n.info[:balance] < 0 && total_balance > 0 - n.info[:balance] -= 1 - total_balance -= 1 - end - } - end - - # Sort nodes by their slots balance. - sn = sn.sort{|a,b| - a.info[:balance] <=> b.info[:balance] - } - - xputs ">>> Rebalancing across #{nodes_involved} nodes. 
Total weight = #{total_weight}"
-
-        if $verbose
-            sn.each{|n|
-                puts "#{n} balance is #{n.info[:balance]} slots"
-            }
-        end
-
-        # Now we have at the start of the 'sn' array nodes that should get
-        # slots, at the end nodes that must give slots.
-        # We take two indexes, one at the start, and one at the end,
-        # incrementing or decrementing the indexes accordingly until we
-        # find nodes that need to get/provide slots.
-        dst_idx = 0
-        src_idx = sn.length - 1
-
-        while dst_idx < src_idx
-            dst = sn[dst_idx]
-            src = sn[src_idx]
-            numslots = [dst.info[:balance],src.info[:balance]].map{|n|
-                n.abs
-            }.min
-
-            if numslots > 0
-                puts "Moving #{numslots} slots from #{src} to #{dst}"
-
-                # Actually move the slots.
-                reshard_table = compute_reshard_table([src],numslots)
-                if reshard_table.length != numslots
-                    xputs "*** Assertion failed: Reshard table != number of slots"
-                    exit 1
-                end
-                if opt['simulate']
-                    print "#"*reshard_table.length
-                else
-                    reshard_table.each{|e|
-                        move_slot(e[:source],dst,e[:slot],
-                            :quiet=>true,
-                            :dots=>false,
-                            :update=>true,
-                            :pipeline=>opt['pipeline'])
-                        print "#"
-                        STDOUT.flush
-                    }
-                end
-                puts
-            end
-
-            # Update nodes balance.
-            dst.info[:balance] += numslots
-            src.info[:balance] -= numslots
-            dst_idx += 1 if dst.info[:balance] == 0
-            src_idx -= 1 if src.info[:balance] == 0
-        end
-    end
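
The while loop above is a classic two-pointer pairing: with nodes sorted by balance, repeatedly move min(|dst balance|, |src balance|) slots from the most overloaded node to the most underloaded one, advancing whichever index reaches zero. A self-contained numeric sketch — the balances are invented and already sum to zero, which the code above enforces beforehand:

    #include <stdio.h>
    #include <stdlib.h>

    static int cmp_balance(const void *a, const void *b) {
        return *(const int *)a - *(const int *)b;
    }

    int main(void) {
        /* balance < 0: node must receive slots; > 0: node must give slots. */
        int balance[] = {-600, -100, 200, 500};
        int n = (int)(sizeof(balance)/sizeof(balance[0]));

        qsort(balance, n, sizeof(int), cmp_balance);

        int dst = 0, src = n - 1;
        while (dst < src) {
            int numslots = abs(balance[dst]) < abs(balance[src])
                         ? abs(balance[dst]) : abs(balance[src]);
            if (numslots > 0)
                printf("move %d slots: node[%d] -> node[%d]\n",
                       numslots, src, dst);
            balance[dst] += numslots;
            balance[src] -= numslots;
            if (balance[dst] == 0) dst++;
            if (balance[src] == 0) src--;
        }
        return 0;
    }

Run on the sample data this prints three moves (500, then 100, then 100 slots) and terminates with every balance at zero — the same convergence argument that guarantees the Ruby loop above finishes.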
- - def fix_cluster_cmd(argv,opt) - @fix = true - @timeout = opt['timeout'].to_i if opt['timeout'] - - load_cluster_info_from_node(argv[0]) - check_cluster - end - - def reshard_cluster_cmd(argv,opt) - opt = {'pipeline' => MigrateDefaultPipeline}.merge(opt) - - load_cluster_info_from_node(argv[0]) - check_cluster - if @errors.length != 0 - puts "*** Please fix your cluster problems before resharding" - exit 1 - end - - @timeout = opt['timeout'].to_i if opt['timeout'].to_i - - # Get number of slots - if opt['slots'] - numslots = opt['slots'].to_i - else - numslots = 0 - while numslots <= 0 or numslots > ClusterHashSlots - print "How many slots do you want to move (from 1 to #{ClusterHashSlots})? " - numslots = STDIN.gets.to_i - end - end - - # Get the target instance - if opt['to'] - target = get_node_by_name(opt['to']) - if !target || target.has_flag?("slave") - xputs "*** The specified node is not known or not a master, please retry." - exit 1 - end - else - target = nil - while not target - print "What is the receiving node ID? " - target = get_node_by_name(STDIN.gets.chop) - if !target || target.has_flag?("slave") - xputs "*** The specified node is not known or not a master, please retry." - target = nil - end - end - end - - # Get the source instances - sources = [] - if opt['from'] - opt['from'].split(',').each{|node_id| - if node_id == "all" - sources = "all" - break - end - src = get_node_by_name(node_id) - if !src || src.has_flag?("slave") - xputs "*** The specified node is not known or is not a master, please retry." - exit 1 - end - sources << src - } - else - xputs "Please enter all the source node IDs." - xputs " Type 'all' to use all the nodes as source nodes for the hash slots." - xputs " Type 'done' once you have entered all the source node IDs." - while true - print "Source node ##{sources.length+1}:" - line = STDIN.gets.chop - src = get_node_by_name(line) - if line == "done" - break - elsif line == "all" - sources = "all" - break - elsif !src || src.has_flag?("slave") - xputs "*** The specified node is not known or is not a master, please retry." - elsif src.info[:name] == target.info[:name] - xputs "*** It is not possible to use the target node as source node." - else - sources << src - end - end - end - - if sources.length == 0 - puts "*** No source nodes given, operation aborted" - exit 1 - end - - # Handle sources == all. - if sources == "all" - sources = [] - @nodes.each{|n| - next if n.info[:name] == target.info[:name] - next if n.has_flag?("slave") - sources << n - } - end - - # Check if the destination node is the same as any of the source nodes. - if sources.index(target) - xputs "*** Target node is also listed among the source nodes!" - exit 1 - end - - puts "\nReady to move #{numslots} slots." - puts " Source nodes:" - sources.each{|s| puts " "+s.info_string} - puts " Destination node:" - puts " #{target.info_string}" - reshard_table = compute_reshard_table(sources,numslots) - puts " Resharding plan:" - show_reshard_table(reshard_table) - if !opt['yes'] - print "Do you want to proceed with the proposed reshard plan (yes/no)? " - yesno = STDIN.gets.chop - exit(1) if (yesno != "yes") - end - reshard_table.each{|e| - move_slot(e[:source],target,e[:slot], - :dots=>true, - :pipeline=>opt['pipeline']) - } - end - - # This is a helper function for create_cluster_cmd that verifies that - # the number of nodes and the specified replicas form a valid configuration - # where there are at least three master nodes and enough replicas per node. - def check_create_parameters - masters = @nodes.length/(@replicas+1) - if masters < 3 - puts "*** ERROR: Invalid configuration for cluster creation." - puts "*** Redis Cluster requires at least 3 master nodes." - puts "*** This is not possible with #{@nodes.length} nodes and #{@replicas} replicas per node." - puts "*** At least #{3*(@replicas+1)} nodes are required." - exit 1 - end - end - - def create_cluster_cmd(argv,opt) - opt = {'replicas' => 0}.merge(opt) - @replicas = opt['replicas'].to_i - - xputs ">>> Creating cluster" - argv[0..-1].each{|n| - node = ClusterNode.new(n) - node.connect(:abort => true) - node.assert_cluster - node.load_info - node.assert_empty - add_node(node) - } - check_create_parameters - xputs ">>> Performing hash slots allocation on #{@nodes.length} nodes..." - alloc_slots - show_nodes - yes_or_die "Can I set the above configuration?" - flush_nodes_config - xputs ">>> Nodes configuration updated" - xputs ">>> Assign a different config epoch to each node" - assign_config_epoch - xputs ">>> Sending CLUSTER MEET messages to join the cluster" - join_cluster - # Give one second for the join to start, in order to avoid that - # wait_cluster_join finds all the nodes already agreeing about the config - # simply because they are still empty with unassigned slots. - sleep 1 - wait_cluster_join - flush_nodes_config # Useful for the replicas - # Reset the node information, so that when the final summary of the - # newly created cluster is listed in check_cluster, all the nodes - # get properly listed as slaves or masters. - reset_nodes - load_cluster_info_from_node(argv[0]) - check_cluster - end
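The arithmetic enforced by check_create_parameters above, as a standalone sketch (the node and replica counts are made-up examples; integer division gives the master count):

    [[6,1],[5,1],[3,0]].each{|nodes,replicas|
        masters = nodes/(replicas+1)
        status = masters >= 3 ? "OK" : "rejected, at least #{3*(replicas+1)} nodes required"
        puts "#{nodes} nodes, #{replicas} replicas -> #{masters} masters: #{status}"
    }

So 6 nodes with 1 replica per master yield 3 masters and pass, while 5 nodes with 1 replica yield only 2 masters and are rejected.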
- - def addnode_cluster_cmd(argv,opt) - xputs ">>> Adding node #{argv[0]} to cluster #{argv[1]}" - - # Check the existing cluster - load_cluster_info_from_node(argv[1]) - check_cluster - - # If --master-id was specified, try to resolve it now so that we - # abort before starting with the node configuration. - if opt['slave'] - if opt['master-id'] - master = get_node_by_name(opt['master-id']) - if !master - xputs "[ERR] No such master ID #{opt['master-id']}" - end - else - master = get_master_with_least_replicas - xputs "Automatically selected master #{master}" - end - end - - # Add the new node - new = ClusterNode.new(argv[0]) - new.connect(:abort => true) - new.assert_cluster - new.load_info - new.assert_empty - first = @nodes.first.info - add_node(new) - - # Send CLUSTER MEET command to the new node - xputs ">>> Send CLUSTER MEET to node #{new} to make it join the cluster." - new.r.cluster("meet",first[:host],first[:port]) - - # Additional configuration is needed if the node is added as - # a slave. - if opt['slave'] - wait_cluster_join - xputs ">>> Configure node as replica of #{master}." - new.r.cluster("replicate",master.info[:name]) - end - xputs "[OK] New node added correctly." - end - - def delnode_cluster_cmd(argv,opt) - id = argv[1].downcase - xputs ">>> Removing node #{id} from cluster #{argv[0]}" - - # Load cluster information - load_cluster_info_from_node(argv[0]) - - # Check if the node exists and is not empty - node = get_node_by_name(id) - - if !node - xputs "[ERR] No such node ID #{id}" - exit 1 - end - - if node.slots.length != 0 - xputs "[ERR] Node #{node} is not empty! Reshard data away and try again." - exit 1 - end - - # Send CLUSTER FORGET to all the nodes but the node to remove - xputs ">>> Sending CLUSTER FORGET messages to the cluster..." - @nodes.each{|n| - next if n == node - if n.info[:replicate] && n.info[:replicate].downcase == id - # Reconfigure the slave to replicate with some other node - master = get_master_with_least_replicas - xputs ">>> #{n} as replica of #{master}" - n.r.cluster("replicate",master.info[:name]) - end - n.r.cluster("forget",argv[1]) - } - - # Finally shut down the node - xputs ">>> SHUTDOWN the node." - node.r.shutdown - end - - def set_timeout_cluster_cmd(argv,opt) - timeout = argv[1].to_i - if timeout < 100 - puts "Setting a node timeout of less than 100 milliseconds is a bad idea." - exit 1 - end - - # Load cluster information - load_cluster_info_from_node(argv[0]) - ok_count = 0 - err_count = 0 - - # Send CONFIG SET cluster-node-timeout to every cluster node - xputs ">>> Reconfiguring node timeout in every cluster node..." - @nodes.each{|n| - begin - n.r.config("set","cluster-node-timeout",timeout) - n.r.config("rewrite") - ok_count += 1 - xputs "*** New timeout set for #{n}" - rescue => e - puts "ERR setting node-timeout for #{n}: #{e}" - err_count += 1 - end - } - xputs ">>> New node timeout set. #{ok_count} OK, #{err_count} ERR." - end - - def call_cluster_cmd(argv,opt) - cmd = argv[1..-1] - cmd[0] = cmd[0].upcase - - # Load cluster information - load_cluster_info_from_node(argv[0]) - xputs ">>> Calling #{cmd.join(" ")}" - @nodes.each{|n| - begin - res = n.r.send(*cmd) - puts "#{n}: #{res}" - rescue => e - puts "#{n}: #{e}" - end - } - end - - def import_cluster_cmd(argv,opt) - source_addr = opt['from'] - xputs ">>> Importing data from #{source_addr} to cluster #{argv[1]}" - use_copy = opt['copy'] - use_replace = opt['replace'] - - # Check the existing cluster. - load_cluster_info_from_node(argv[0]) - check_cluster - - # Connect to the source node. - xputs ">>> Connecting to the source Redis instance" - src_host,src_port = source_addr.split(":") - source = Redis.new(:host =>src_host, :port =>src_port) - if source.info['cluster_enabled'].to_i == 1 - xputs "[ERR] The source node should not be a cluster node." 
- end - xputs "*** Importing #{source.dbsize} keys from DB 0" - - # Build a slot -> node map - slots = {} - @nodes.each{|n| - n.slots.each{|s,_| - slots[s] = n - } - } - - # Use SCAN to iterate over the keys, migrating to the - # right node as needed. - cursor = nil - while cursor != 0 - cursor,keys = source.scan(cursor, :count => 1000) - cursor = cursor.to_i - keys.each{|k| - # Migrate keys using the MIGRATE command. - slot = key_to_slot(k) - target = slots[slot] - print "Migrating #{k} to #{target}: " - STDOUT.flush - begin - cmd = ["migrate",target.info[:host],target.info[:port],k,0,@timeout] - cmd << :copy if use_copy - cmd << :replace if use_replace - source.client.call(cmd) - rescue => e - puts e - else - puts "OK" - end - } - end - end - - def help_cluster_cmd(argv,opt) - show_help - exit 0 - end - - # Parse the options for the specific command "cmd". - # Returns a hash populated with option => value pairs, and the index of - # the first non-option argument in ARGV. - def parse_options(cmd) - idx = 1 ; # Current index into ARGV - options={} - while idx < ARGV.length && ARGV[idx][0..1] == '--' - if ARGV[idx][0..1] == "--" - option = ARGV[idx][2..-1] - idx += 1 - - # --verbose is a global option - if option == "verbose" - $verbose = true - next - end - - if ALLOWED_OPTIONS[cmd] == nil || ALLOWED_OPTIONS[cmd][option] == nil - puts "Unknown option '#{option}' for command '#{cmd}'" - exit 1 - end - if ALLOWED_OPTIONS[cmd][option] != false - value = ARGV[idx] - idx += 1 - else - value = true - end - - # If the option is set to [], it's a multiple arguments - # option. We just queue every new value into an array. - if ALLOWED_OPTIONS[cmd][option] == [] - options[option] = [] if !options[option] - options[option] << value - else - options[option] = value - end - else - # Remaining arguments are not options. - break - end - end - - # Enforce mandatory options - if ALLOWED_OPTIONS[cmd] - ALLOWED_OPTIONS[cmd].each {|option,val| - if !options[option] && val == :required - puts "Option '--#{option}' is required "+ \ - "for subcommand '#{cmd}'" - exit 1 - end - } - end - return options,idx - end -end - -################################################################################# -# Libraries -# -# We try not to depend on external libs since this is a critical part -# of Redis Cluster. -################################################################################# - -# This is the CRC16 algorithm used by Redis Cluster to hash keys. -# Implementation according to CCITT standards. 
-# -# This is actually the XMODEM CRC 16 algorithm, using the -# following parameters: -# -# Name : "XMODEM", also known as "ZMODEM", "CRC-16/ACORN" -# Width : 16 bit -# Poly : 1021 (That is actually x^16 + x^12 + x^5 + 1) -# Initialization : 0000 -# Reflect Input byte : False -# Reflect Output CRC : False -# Xor constant to output CRC : 0000 -# Output for "123456789" : 31C3 - -module RedisClusterCRC16 - def RedisClusterCRC16.crc16(bytes) - crc = 0 - bytes.each_byte{|b| - crc = ((crc<<8) & 0xffff) ^ XMODEMCRC16Lookup[((crc>>8)^b) & 0xff] - } - crc - end - -private - XMODEMCRC16Lookup = [ - 0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7, - 0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef, - 0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6, - 0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de, - 0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485, - 0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d, - 0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4, - 0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc, - 0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823, - 0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b, - 0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12, - 0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a, - 0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41, - 0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49, - 0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70, - 0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78, - 0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f, - 0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067, - 0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e, - 0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256, - 0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d, - 0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405, - 0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c, - 0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634, - 0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab, - 0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3, - 0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a, - 0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92, - 0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9, - 0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1, - 0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8, - 0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0 - ] -end - -# Turn a key name into the corresponding Redis Cluster slot. -def key_to_slot(key) - # Only hash what is inside {...} if there is such a pattern in the key. - # Note that the specification uses the content between the first { and - # the first } that follows it. If we find {} with nothing in the middle, - # the whole key is hashed as usual. - s = key.index "{" - if s - e = key.index "}",s+1 - if e && e != s+1 - key = key[s+1..e-1] - end - end - RedisClusterCRC16.crc16(key) % 16384 -end
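The two functions above can be checked directly against the CRC16 check value documented in the comment ("123456789" -> 31C3) and against the hash tag rule (a quick sketch; the key names are made up):

    printf("%04x\n", RedisClusterCRC16.crc16("123456789"))   # => 31c3
    puts key_to_slot("123456789")               # 0x31c3 % 16384 = 12739
    puts key_to_slot("{user1000}.following")    # only "user1000" is hashed...
    puts key_to_slot("{user1000}.followers")    # ...so both keys land in the same slot
    puts key_to_slot("foo{}{bar}")              # "{}" is empty, so the whole key is hashed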
- -################################################################################# -# Definition of commands -################################################################################# - -COMMANDS={ - "create" => ["create_cluster_cmd", -2, "host1:port1 ... hostN:portN"], - "check" => ["check_cluster_cmd", 2, "host:port"], - "info" => ["info_cluster_cmd", 2, "host:port"], - "fix" => ["fix_cluster_cmd", 2, "host:port"], - "reshard" => ["reshard_cluster_cmd", 2, "host:port"], - "rebalance" => ["rebalance_cluster_cmd", -2, "host:port"], - "add-node" => ["addnode_cluster_cmd", 3, "new_host:new_port existing_host:existing_port"], - "del-node" => ["delnode_cluster_cmd", 3, "host:port node_id"], - "set-timeout" => ["set_timeout_cluster_cmd", 3, "host:port milliseconds"], - "call" => ["call_cluster_cmd", -3, "host:port command arg arg .. arg"], - "import" => ["import_cluster_cmd", 2, "host:port"], - "help" => ["help_cluster_cmd", 1, "(show this help)"] -} - -ALLOWED_OPTIONS={ - "create" => {"replicas" => true}, - "add-node" => {"slave" => false, "master-id" => true}, - "import" => {"from" => :required, "copy" => false, "replace" => false}, - "reshard" => {"from" => true, "to" => true, "slots" => true, "yes" => false, "timeout" => true, "pipeline" => true}, - "rebalance" => {"weight" => [], "auto-weights" => false, "use-empty-masters" => false, "timeout" => true, "simulate" => false, "pipeline" => true, "threshold" => true}, - "fix" => {"timeout" => MigrateDefaultTimeout}, -} - -def show_help - puts "Usage: redis-trib <command> <options> <arguments ...>\n\n" - COMMANDS.each{|k,v| - puts " #{k.ljust(15)} #{v[2]}" - if ALLOWED_OPTIONS[k] - ALLOWED_OPTIONS[k].each{|optname,has_arg| - puts " --#{optname}" + (has_arg ? " <arg>" : "") - } - end - } - puts "\nFor check, fix, reshard, del-node, set-timeout you can specify the host and port of any working node in the cluster.\n" -end - -# Sanity check -if ARGV.length == 0 - show_help - exit 1 -end - -rt = RedisTrib.new -cmd_spec = COMMANDS[ARGV[0].downcase] -if !cmd_spec - puts "Unknown redis-trib subcommand '#{ARGV[0]}'" - exit 1 -end - -# Parse options -cmd_options,first_non_option = rt.parse_options(ARGV[0].downcase) -rt.check_arity(cmd_spec[1],ARGV.length-(first_non_option-1)) - -# Dispatch -rt.send(cmd_spec[0],ARGV[first_non_option..-1],cmd_options) diff --git a/redis-android/src/main/jni/redis-4.0.11/src/version.h b/redis-android/src/main/jni/redis-4.0.11/src/version.h deleted file mode 100644 index 622382a..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/src/version.h +++ /dev/null @@ -1 +0,0 @@ -#define REDIS_VERSION "4.0.11" diff --git a/redis-android/src/main/jni/redis-4.0.11/tests/unit/introspection-2.tcl b/redis-android/src/main/jni/redis-4.0.11/tests/unit/introspection-2.tcl deleted file mode 100644 index 350a8a0..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/tests/unit/introspection-2.tcl +++ /dev/null @@ -1,23 +0,0 @@ -start_server {tags {"introspection"}} { - test {TTL and TYPE do not alter the last access time of a key} { - r set foo bar - after 3000 - r ttl foo - r type foo - assert {[r object idletime foo] >= 2} - } - - test {TOUCH alters the last access time of a key} { - r set foo bar - after 3000 - r touch foo - assert {[r object idletime foo] < 2} - } - - test {TOUCH returns the number of existing keys specified} { - r flushdb - r set key1 1 - r set key2 2 - r touch key0 key1 key2 key3 - } 2 -} diff --git a/redis-android/src/main/jni/redis-4.0.11/tests/unit/memefficiency.tcl b/redis-android/src/main/jni/redis-4.0.11/tests/unit/memefficiency.tcl deleted file mode 100644 index f452f02..0000000 --- a/redis-android/src/main/jni/redis-4.0.11/tests/unit/memefficiency.tcl +++ /dev/null @@ -1,85 +0,0 @@ -proc test_memory_efficiency {range} { - r flushall - set rd [redis_deferring_client] - set base_mem [s used_memory] - 
set written 0 - for {set j 0} {$j < 10000} {incr j} { - set key key:$j - set val [string repeat A [expr {int(rand()*$range)}]] - $rd set $key $val - incr written [string length $key] - incr written [string length $val] - incr written 2 ;# A separator is the minimum to store key-value data. - } - for {set j 0} {$j < 10000} {incr j} { - $rd read ; # Discard replies - } - - set current_mem [s used_memory] - set used [expr {$current_mem-$base_mem}] - set efficiency [expr {double($written)/$used}] - return $efficiency -} - -start_server {tags {"memefficiency"}} { - foreach {size_range expected_min_efficiency} { - 32 0.15 - 64 0.25 - 128 0.35 - 1024 0.75 - 16384 0.82 - } { - test "Memory efficiency with values in range $size_range" { - set efficiency [test_memory_efficiency $size_range] - assert {$efficiency >= $expected_min_efficiency} - } - } -} - -if 0 { - start_server {tags {"defrag"}} { - if {[string match {*jemalloc*} [s mem_allocator]]} { - test "Active defrag" { - r config set activedefrag no - r config set active-defrag-threshold-lower 5 - r config set active-defrag-ignore-bytes 2mb - r config set maxmemory 100mb - r config set maxmemory-policy allkeys-lru - r debug populate 700000 asdf 150 - r debug populate 170000 asdf 300 - set frag [s mem_fragmentation_ratio] - assert {$frag >= 1.7} - r config set activedefrag yes - after 1500 ;# active defrag tests the status once a second. - set hits [s active_defrag_hits] - - # wait for the active defrag to stop working - set tries 0 - while { True } { - incr tries - after 500 - set prev_hits $hits - set hits [s active_defrag_hits] - if {$hits == $prev_hits} { - break - } - assert {$tries < 100} - } - - # TODO: we need to expose more accurate fragmentation info - # i.e. the allocator used and active pages - # instead we currently look at RSS so we need to ask for purge - r memory purge - - # Test that the fragmentation is lower and that the defragger - # stopped working - set frag [s mem_fragmentation_ratio] - assert {$frag < 1.55} - set misses [s active_defrag_misses] - after 500 - set misses2 [s active_defrag_misses] - assert {$misses2 == $misses} - } - } - } -} diff --git a/redis-android/src/main/jni/redis-4.0.11/.gitignore b/redis-android/src/main/jni/redis-5.0.0/.gitignore similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/.gitignore rename to redis-android/src/main/jni/redis-5.0.0/.gitignore diff --git a/redis-android/src/main/jni/redis-5.0.0/00-RELEASENOTES b/redis-android/src/main/jni/redis-5.0.0/00-RELEASENOTES new file mode 100644 index 0000000..9c8efe2 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/00-RELEASENOTES @@ -0,0 +1,2150 @@ +Redis 5.0 release notes +======================= + +-------------------------------------------------------------------------------- +Upgrade urgency levels: + +LOW: No need to upgrade unless there are new features you want to use. +MODERATE: Program an upgrade of the server, but it's not urgent. +HIGH: There is a critical bug that may affect a subset of users. Upgrade! +CRITICAL: There is a critical bug affecting MOST USERS. Upgrade ASAP. +SECURITY: There are security fixes in the release. +-------------------------------------------------------------------------------- + +================================================================================ +Redis 5.0.0 Released Wed Oct 17 13:28:26 CEST 2018 +================================================================================ + +Upgrade urgency CRITICAL: Several fixes to streams AOF and replication. 
+ +Hi all and welcome to the first stable release of Redis 5! \o/ + +To start, a quick recap of what's new in Redis 5: + +1. The new Stream data type. https://redis.io/topics/streams-intro +2. New Redis modules APIs: Timers, Cluster and Dictionary APIs. +3. RDB now stores LFU and LRU information. +4. The cluster manager was ported from Ruby (redis-trib.rb) to C code + inside redis-cli. Check `redis-cli --cluster help` for more info. +5. New sorted set commands: ZPOPMIN/MAX and blocking variants. +6. Active defragmentation version 2. +7. Improvements in the HyperLogLog implementation. +8. Better memory reporting capabilities. +9. Many commands with sub-commands now have a HELP subcommand. +10. Better performance when clients connect and disconnect often. +11. Many bug fixes and other random improvements. +12. Jemalloc was upgraded to version 5.1. +13. CLIENT UNBLOCK and CLIENT ID. +14. The LOLWUT command was added. http://antirez.com/news/123 +15. We no longer use the word "slave" except for API backward compatibility. +16. Different optimizations in the networking layer. +17. Lua improvements: + - Better propagation of Lua scripts to slaves / AOF. + - Lua scripts can now time out and get into a -BUSY state on the slave as well. +18. Dynamic HZ to balance idle CPU usage with responsiveness. +19. The Redis core was refactored and improved in many ways. + +However, the list above really does not do justice to the changes of Redis 5, +since the core was improved in many ways during the development of the new +version. Moreover, certain changes were back ported into Redis 4 once they were +deemed safe, because many improvements were hard to distinguish from fixes. + +The most important user facing improvement is without doubt the introduction +of the first new general purpose data type in years: streams. + +Note that we worked to improve and fix streams until a few hours ago, so while +we are not aware of critical bugs in this release, it should surely be handled +with some care for the first weeks. Bug reporting will be highly appreciated, and +we are ready to work immediately to release 5.0.1 once there is enough important +stuff to justify a new release (probably soon). + +People not using streams will probably have an even more production-ready +experience with Redis 5, also because many internals are shared with Redis 4, +so the jump is not as big as it was between 3.2 and 4 in terms of how things +work internally. + +Well, many thanks to the Redis community and the developers that made +this release possible, contributing bug reports, patches, new features, working +on the clients, sometimes debugging problems for days. Also thanks to everybody +that adopted Redis for their use cases, making things work for users worldwide. + +The list of commits in this release follows. + +Cheers, +Salvatore + +antirez in commit bcc0916d: + Fix conditional in XGROUP. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 1b2f23f3: + Update help.h for redis-cli. + 1 file changed, 57 insertions(+), 7 deletions(-) + +antirez in commit de0ae56c: + Tests for XGROUP CREATE MKSTREAM. + 1 file changed, 11 insertions(+) + +antirez in commit 56c3dfa1: + Fix XGROUP CREATE MKSTREAM handling of . + 1 file changed, 7 insertions(+), 2 deletions(-) + +antirez in commit 2687f228: + Process MKSTREAM option of XGROUP CREATE at a later time. 
+ 1 file changed, 28 insertions(+), 17 deletions(-) + +zhaozhao.zz in commit cfbaf8f1: + Scripting & Streams: some commands need right flags + 1 file changed, 5 insertions(+), 5 deletions(-) + +antirez in commit 4e4099b9: + XGROUP CREATE: MKSTREAM option for automatic stream creation. + 1 file changed, 29 insertions(+), 5 deletions(-) + +zhaozhao.zz in commit 6dd4d864: + Streams: Tests modified XSTREAM -> XSETID + 1 file changed, 2 insertions(+), 2 deletions(-) + +zhaozhao.zz in commit 3aff0e8c: + Streams: rewrite empty streams with certain lastid + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 880b563e: + Tests modified to use XADD MAXLEN 0 + XSETID. + 1 file changed, 12 insertions(+), 26 deletions(-) + +antirez in commit 83c87835: + Streams: rewrite empty streams with XADD MAXLEN 0. Use XSETID. + 1 file changed, 18 insertions(+), 12 deletions(-) + +antirez in commit fd22e3ac: + XSETID: accept IDs based on last entry. + 1 file changed, 18 insertions(+), 5 deletions(-) + +antirez in commit dfab3cba: + Streams: XSTREAM SETID -> XSETID. + 3 files changed, 17 insertions(+), 67 deletions(-) + +zhaozhao.zz in commit a3fb28ed: + Streams: rewrite id in XSTREAM CREATE * + 1 file changed, 4 insertions(+) + +zhaozhao.zz in commit f4b4db13: + Streams: add tests for aof rewrite + 1 file changed, 23 insertions(+) + +zhaozhao.zz in commit d22f1ef0: + Stream & AOF: rewrite stream in correct way + 1 file changed, 32 insertions(+), 16 deletions(-) + +zhaozhao.zz in commit 6455274d: + Streams: add tests for XSTREAM command + 1 file changed, 39 insertions(+) + +zhaozhao.zz in commit 0edbe953: + Streams: add a new command XTREAM + 3 files changed, 67 insertions(+) + +Hamid Alaei in commit 9714bba2: + fix timer context selected database + 1 file changed, 3 insertions(+), 1 deletion(-) + +antirez in commit eb53f15a: + Make comment about nack->consumer test for minidle more obvious. + 1 file changed, 4 insertions(+), 2 deletions(-) + +antirez in commit a77f836e: + Streams: use propagate_last_id itself as streamPropagateGroupID trigger. + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 0f0610eb: + Streams: better naming: lastid_updated -> propagate_last_id. + 1 file changed, 6 insertions(+), 6 deletions(-) + +zhaozhao.zz in commit a745e423: + Streams: panic if streamID invalid after check, should not be possible. + 1 file changed, 2 insertions(+), 1 deletion(-) + +zhaozhao.zz in commit 9974be13: + Streams: propagate lastid in XCLAIM when it has effect + 1 file changed, 13 insertions(+), 6 deletions(-) + +zhaozhao.zz in commit 69a628d0: + Streams: XCLAIM ignore minidle if NACK is created by FORCE + 1 file changed, 4 insertions(+), 2 deletions(-) + +zhaozhao.zz in commit a04b43c7: + Streams: bugfix XCLAIM should propagate group name not consumer name + 1 file changed, 1 insertion(+), 1 deletion(-) + +Sergey Chupov in commit 8977a90c: + fixed typos in readme + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 3a745674: + redis.conf typo fixed: ingore -> ignore. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 22770d76: + Rax: radix tree updated to latest version from antirez/rax. + 2 files changed, 233 insertions(+), 68 deletions(-) + +antirez in commit fbac534f: + Test: avoid time related false positive in RESTORE test. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 49872337: + LOLWUT: capitalize Nees. 
+ 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 80c471f5: + Test: cgroup propagation test also for NOACK variant. + 1 file changed, 39 insertions(+), 29 deletions(-) + +antirez in commit 8defa5da: + Test: consumer group last ID slave propagation test. + 1 file changed, 39 insertions(+) + +zhaozhao.zz in commit e1e3eaca: + Avoid recreate write handler for protected client. + 1 file changed, 4 insertions(+) + +antirez in commit b501fd5d: + Fix propagation of consumer groups last ID. + 3 files changed, 56 insertions(+), 9 deletions(-) + + +================================================================================ +Redis 5.0-rc6 Released Wed Oct 10 11:03:54 CEST 2018 +================================================================================ + +Upgrade urgency HIGH: Many bugs fixed especially in the context of streams. + +This is probably the last release candidate of Redis 5. The Redis 5 GA version +will be released on the 17th of October. The main highlights of this release are: + +* Critical AOF bug, as old as AOF itself: if an open MULTI/EXEC block is at + the end of the AOF file, Redis would still read the half-transaction when + reloading the AOF. +* The slave name was removed from logs and documentation, replica is now used + instead. +* LOLWUT command added. +* New modules APIs: Disable Redis Cluster redirection. +* New modules APIs: Sorted dictionaries data type. +* Modules APIs fixes: timer / cluster messages callbacks can now call RM_Call(). +* Fix for #5024 - commandstats for multi-exec were logged as EXEC. +* A number of optimizations and fixes for the stream data type. +* Many other stability improvements. + +This is the list of commits and contributors: + +antirez in commit 9a6fa7d0: + changelog.tcl: get optional argument for number of commits. + 1 file changed, 8 insertions(+), 3 deletions(-) + +antirez in commit 101e419f: + Free protected clients asynchronously. + 1 file changed, 7 insertions(+) + +antirez in commit 726debb8: + Actually use the protectClient() API where needed. + 2 files changed, 8 insertions(+), 9 deletions(-) + +antirez in commit 0b87f78a: + Introduce protectClient() + some refactoring. + 2 files changed, 60 insertions(+), 18 deletions(-) + +zhaozhao.zz in commit 6aa8ac70: + debug: avoid free client unexpectedly when reload & loadaof + 1 file changed, 8 insertions(+), 2 deletions(-) + +antirez in commit 48040b02: + aof.c: improve indentation and change warning message. + 1 file changed, 11 insertions(+), 4 deletions(-) + +zhaozhao.zz in commit 7cc20569: + AOF: discard if we lost EXEC when loading aof + 2 files changed, 14 insertions(+), 3 deletions(-) + +antirez in commit 2007d30c: + Refactoring of XADD / XTRIM MAXLEN rewriting. + 1 file changed, 15 insertions(+), 22 deletions(-) + +zhaozhao.zz in commit 6a298110: + Streams: add test cases for XADD/XTRIM maxlen + 1 file changed, 46 insertions(+) + +zhaozhao.zz in commit 041161b7: + Streams: propagate specified MAXLEN instead of approximated + 1 file changed, 35 insertions(+), 6 deletions(-) + +zhaozhao.zz in commit f04d799b: + Streams: reset approx_maxlen in every maxlen loop + 1 file changed, 2 insertions(+) + +zhaozhao.zz in commit affd9365: + Streams: XTRIM will return an error if MAXLEN with a count < 0 + 1 file changed, 6 insertions(+), 1 deletion(-) + +zhaozhao.zz in commit 4c405ad0: + Streams: propagate original MAXLEN argument in XADD context + 1 file changed, 3 insertions(+), 12 deletions(-) + +antirez in commit 5c6d4b4a: + Fix typo in replicationCron() comment. 
+ 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit a67a8dbf: + Fix typo in design comment of bio.c. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit c4ab5a05: + xclaimCommand(): fix comment typos. + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit dc0b628a: + streamAppendItem(): Update the radix tree pointer only if changed. + 1 file changed, 2 insertions(+), 1 deletion(-) + +antirez in commit 4566fbc7: + Listpack: optionally force reallocation on inserts. + 1 file changed, 20 insertions(+) + +antirez in commit 5eca170c: + Fix printf type mismatch in genRedisInfoString(). + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 260b53a2: + streamIteratorRemoveEntry(): set back lp only if pointer changed. + 1 file changed, 2 insertions(+), 1 deletion(-) + +zhaozhao.zz in commit 5d12f9d9: + Streams: update listpack with new pointer in XDEL + 1 file changed, 3 insertions(+) + +zhaozhao.zz in commit 6b7ad838: + bugfix: replace lastcmd with cmd when rewrite BRPOPLPUSH as RPOPLPUSH + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit 3454a043: + script cache memory in INFO and MEMORY includes both script code and overheads + 2 files changed, 3 insertions(+), 3 deletions(-) + +Oran Agra in commit d6aeca86: + fix #5024 - commandstats for multi-exec were logged as EXEC. + 2 files changed, 63 insertions(+), 2 deletions(-) + +antirez in commit a996b2a2: + Fix XINFO comment for consistency. + 1 file changed, 1 insertion(+), 1 deletion(-) + +Bruce Merry in commit 1a8447b6: + Fix invalid use of sdsZmallocSize on an embedded string + 1 file changed, 1 insertion(+), 1 deletion(-) + +Bruce Merry in commit 8dde46ad: + Fix incorrect memory usage accounting in zrealloc + 3 files changed, 24 insertions(+), 2 deletions(-) + +Hamid Alaei in commit b362a1b7: + fix dict get on not found + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 55e9df8a: + Try to avoid issues with GCC pragmas and older compilers. + 1 file changed, 7 insertions(+), 4 deletions(-) + +antirez in commit b0d22702: + Modules: hellodict example WIP #3: KEYRANGE. + 1 file changed, 40 insertions(+) + +antirez in commit af2f6682: + Modules: Modules: dictionary API WIP #13: Compare API exported. + 2 files changed, 6 insertions(+) + +antirez in commit f9a3e6ef: + Modules: Modules: dictionary API WIP #12: DictCompare API. + 1 file changed, 8 insertions(+) + +antirez in commit 01e0341a: + Modules: Modules: dictionary API WIP #11: DictCompareC API. + 1 file changed, 18 insertions(+) + +antirez in commit f9b3ce9a: + Modules: hellodict example WIP #1: GET command. + 1 file changed, 18 insertions(+) + +antirez in commit 36e66d86: + Modules: hellodict example WIP #1: SET command. + 1 file changed, 74 insertions(+) + +antirez in commit e33fdbe8: + Modules: remove useless defines in hellotimer.c + 2 files changed, 6 insertions(+), 4 deletions(-) + +antirez in commit 1c8b2248: + Modules: fix top comment of hellotimer.c + 1 file changed, 1 insertion(+), 1 deletion(-) + +Guy Korland in commit 7ded552d: + add missing argument to function doc + 1 file changed, 1 insertion(+), 1 deletion(-) + +Pavel Skuratovich in commit f92b3273: + Fix typo in comment + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 57b6c343: + Modules: dictionary API WIP #10: export API to modules. + 2 files changed, 60 insertions(+) + +antirez in commit 3f82e59c: + Modules: dictionary API WIP #9: iterator returning string object. 
+ 1 file changed, 23 insertions(+), 6 deletions(-) + +antirez in commit 6a73aca3: + Modules: dictionary API WIP #8: Iterator next/prev. + 1 file changed, 42 insertions(+) + +antirez in commit ef8413db: + Modules: dictionary API WIP #7: don't store the context. + 1 file changed, 7 insertions(+), 8 deletions(-) + +antirez in commit 05579e38: + Modules: dictionary API WIP #6: implement automatic memory management. + 1 file changed, 21 insertions(+), 7 deletions(-) + +antirez in commit 11c53f8c: + Modules: dictionary API work in progress #5: rename API for consistency. + 1 file changed, 25 insertions(+), 25 deletions(-) + +antirez in commit 0bd7091b: + Modules: change RedisModuleString API to allow NULL context. + 1 file changed, 33 insertions(+), 12 deletions(-) + +antirez in commit 5fc16f17: + Modules: dictionary API work in progress #4: reseek API. + 1 file changed, 25 insertions(+), 6 deletions(-) + +antirez in commit 45b7f779: + Modules: dictionary API work in progress #3: Iterator creation. + 1 file changed, 41 insertions(+), 1 deletion(-) + +antirez in commit 8576b0ae: + Modules: dictionary API work in progress #2: Del API. + 1 file changed, 17 insertions(+), 2 deletions(-) + +antirez in commit 4b0fa7a7: + Modules: dictionary API work in progress #1. + 2 files changed, 95 insertions(+), 1 deletion(-) + +antirez in commit 28210760: + Module cluster flags: use RM_SetClusterFlags() in the example. + 2 files changed, 11 insertions(+) + +antirez in commit 18c5ab93: + Module cluster flags: add RM_SetClusterFlags() API. + 3 files changed, 33 insertions(+) + +antirez in commit 4ce6bff2: + Module cluster flags: add hooks for NO_FAILOVER flag. + 1 file changed, 4 insertions(+), 2 deletions(-) + +antirez in commit 2ba52889: + Module cluster flags: add hooks for NO_REDIRECTION flag. + 3 files changed, 14 insertions(+), 4 deletions(-) + +antirez in commit 6a39ece6: + Module cluster flags: initial vars / defines added. + 5 files changed, 20 insertions(+) + +antirez in commit 0ff35370: + Modules: rename the reused static client to something more general. + 1 file changed, 10 insertions(+), 8 deletions(-) + +antirez in commit 2d11ee95: + Modules: associate a fake client to timer context callback. + 1 file changed, 2 insertions(+) + +antirez in commit 851b2ed3: + Modules: associate a fake client to cluster message context callback. + 1 file changed, 2 insertions(+) + +artix in commit 148e4911: + Cluster Manager: clusterManagerFixOpenSlot now counts node's keys in slot if node is neither migrating nor importing. + 1 file changed, 20 insertions(+), 1 deletion(-) + +Guy Korland in commit 8afca145: + No need to return "OK" + 1 file changed, 1 insertion(+), 1 deletion(-) + +Guy Korland in commit 9a278db2: + typo fix + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 26479036: + Revert "fix repeat argument issue and reduce unnessary loop times for redis-cli." + 1 file changed, 7 insertions(+), 12 deletions(-) + +Guy Korland in commit 27b7fb5a: + Fix few typos + 1 file changed, 10 insertions(+), 10 deletions(-) + +Guy Korland in commit 233aa2d3: + RedisModule_HashSet call must end with NULL + 1 file changed, 3 insertions(+), 1 deletion(-) + +antirez in commit a8494072: + Sentinel: document how to undo a renamed command. + 1 file changed, 6 insertions(+), 1 deletion(-) + +antirez in commit 6c8a8f2e: + LOLWUT: split the command from version-specific implementations. 
+ 3 files changed, 297 insertions(+), 241 deletions(-) + +antirez in commit 5c758406: + Slave removal: add a few forgotten aliases for CONFIG SET. + 1 file changed, 10 insertions(+) + +antirez in commit 2da823c4: + LOLWUT: add Redis version in the output. + 1 file changed, 3 insertions(+), 1 deletion(-) + +antirez in commit bfcba420: + LOLWUT: Ness -> Nees. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit efed898a: + LOLWUT: Limit maximum CPU effort. + 1 file changed, 5 insertions(+) + +antirez in commit eb0fbd71: + LOLWUT: change padding conditional to a more direct one. + 1 file changed, 1 insertion(+), 1 deletion(-) + +Slobodan Mišković in commit ed08feb7: + Fix spelling descrive -> describe + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 2ffb4413: + LOLWUT: fix crash when col < 2. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 55dae693: + LOLWUT: fix structure typo in comment. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 9b3098b9: + LOLWUT: Fix license copyright year. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 263dbadc: + LOLWUT: increase the translation factor. + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit a622f6c0: + LOLWUT: change default size to fit a normal terminal better. + 1 file changed, 6 insertions(+), 6 deletions(-) + +antirez in commit 38b0d25a: + LOLWUT: wrap it into a proper command. + 4 files changed, 40 insertions(+), 15 deletions(-) + +antirez in commit 34ebd898: + LOLWUT: draw Schotter by Georg Nees. + 1 file changed, 47 insertions(+), 3 deletions(-) + +antirez in commit 46286e64: + LOLWUT: draw rotated squares using trivial trigonometry. + 1 file changed, 44 insertions(+) + +antirez in commit 2d4143fd: + LOLWUT: draw lines using Bresenham algorithm. + 1 file changed, 26 insertions(+), 2 deletions(-) + +antirez in commit 3546d9ce: + LOLWUT: Rendering of the virtual canvas to a string. + 1 file changed, 78 insertions(+), 7 deletions(-) + +antirez in commit b404a6ce: + LOLWUT: show the output verbatim in redis-cli. + 1 file changed, 1 insertion(+) + +antirez in commit e30ba94f: + LOLWUT: canvas structure and BSD license on top. + 1 file changed, 46 insertions(+) + +antirez in commit 9c771145: + LOLWUT: Emit Braille unicode according to pixel pattern. + 1 file changed, 23 insertions(+) + +Jakub Vrana in commit 4a1d6c7d: + Slave removal: capitalize Replica + 2 files changed, 5 insertions(+), 5 deletions(-) + +antirez in commit 72e0368a: + Slave removal: remove slave from integration tests descriptions. + 8 files changed, 36 insertions(+), 36 deletions(-) + +antirez in commit c7841c2b: + Slave removal: remove slave from top-level tests descriptions. + 3 files changed, 12 insertions(+), 12 deletions(-) + +antirez in commit 1b9b19ba: + Slave removal: remove slave from object.c. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 7da266e6: + Slave removal: remove slave from the README. + 1 file changed, 7 insertions(+), 7 deletions(-) + +antirez in commit 93d803c9: + Slave removal: server.c logs fixed. + 1 file changed, 5 insertions(+), 5 deletions(-) + +antirez in commit 89434032: + Slave removal: remove slave from sentinel.conf when possible. + 1 file changed, 18 insertions(+), 18 deletions(-) + +antirez in commit 7673d88d: + Slave removal: replace very few things in Sentinel. + 1 file changed, 12 insertions(+), 8 deletions(-) + +antirez in commit f1de29b3: + Slave removal: scripting.c logs and other stuff fixed. 
+ 1 file changed, 6 insertions(+), 2 deletions(-) + +antirez in commit 53fe558e: + Slave removal: replication.c logs fixed. + 1 file changed, 35 insertions(+), 35 deletions(-) + +antirez in commit c92b02dd: + Slave removal: networking.c logs fixed. + 1 file changed, 5 insertions(+), 5 deletions(-) + +antirez in commit be76ed0c: + Slave removal: blocked.c logs fixed. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 3fd73151: + Slave removal: Make obvious in redis.conf what a replica is. + 1 file changed, 5 insertions(+) + +antirez in commit a22168e4: + Slave removal: slave mode -> replica mode text in redis-cli. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 0e222fbe: + Slave removal: fix typo of replicaof. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 34a5615e: + Slave removal: slave -> replica in redis.conf and output buffer option. + 3 files changed, 132 insertions(+), 129 deletions(-) + +antirez in commit 1d2fcf6f: + Slave removal: Convert cluster.c log messages and command names. + 1 file changed, 12 insertions(+), 11 deletions(-) + +antirez in commit 2546158d: + Slave removal: config.c converted + config rewriting hacks. + 1 file changed, 117 insertions(+), 38 deletions(-) + +antirez in commit c0952c0d: + Slave removal: redis-cli --slave -> --replica. + 1 file changed, 3 insertions(+), 1 deletion(-) + +antirez in commit 1f37f1dd: + Slave removal: SLAVEOF -> REPLICAOF. SLAVEOF is now an alias. + 3 files changed, 4 insertions(+), 3 deletions(-) + +Amin Mesbah in commit 7928f578: + Use geohash limit defines in constraint check + 1 file changed, 2 insertions(+), 2 deletions(-) + +Jeffrey Lovitz in commit bb2bed78: + CLI Help text loop verifies arg count + 1 file changed, 1 insertion(+), 1 deletion(-) + +youjiali1995 in commit 246980d0: + sentinel: fix randomized sentinelTimer. + 1 file changed, 1 insertion(+), 3 deletions(-) + +youjiali1995 in commit fa7de8c4: + bio: fix bioWaitStepOfType. + 1 file changed, 3 insertions(+), 3 deletions(-) + +Weiliang Li in commit 7642f9d5: + fix usage typo in redis-cli + 1 file changed, 1 insertion(+), 1 deletion(-) + +================================================================================ +Redis 5.0 RC5 Released Thu Sep 06 12:54:29 CEST 2018 +================================================================================ + +Upgrade urgency HIGH: Several important bugs fixed. + +Hi all, + +This is release candidate number five, and it has a lot of bug fixes inside, +together with a few big changes to the Redis behavior from the point of view +of replication of scripts and handling of the maxmemory directive in slaves. +Make sure to read the whole list! + +* BREAKING BEHAVIOR: Slaves now ignore maxmemory by default. +* BREAKING BEHAVIOR: Now scripts are always replicated for their effects, and + the script itself is never sent to slaves/AOF (see the sketch right before + the commit list below). +* Improvement: Big pipelining performance improved significantly. +* Fix: Rewrite BRPOPLPUSH as RPOPLPUSH to propagate. +* Fix: False positives in tests. +* Fix: Certain command flags were modified because they were not correct. +* Fix: Blocking XREAD for streams that are empty. +* Improvement: Allow scripts to time out on slaves as well. +* Fix: Different corner cases due to CLIENT PAUSE are now fixed. +* Improvement: Optimize parsing large bulk greater than 32k. +* Fix: Propagate read-only scripts as SCRIPT LOAD, not as EVAL. + +The following is the list of commits, so that you can read the details and +check the credits of the commits.
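To make the effects-replication change above concrete, a minimal sketch (assuming the redis-rb gem and a Redis 5 server on localhost; the key name is made up). The script writes a non-deterministic value, and with effects replication the replicas and the AOF receive the resulting SET with the concrete value instead of an EVAL of the script itself:

    require 'redis'

    r = Redis.new(:host => "127.0.0.1", :port => 6379)
    # The random value is computed once on the master; only its effect
    # (a plain SET) is propagated.
    r.eval("return redis.call('set', KEYS[1], math.random())", :keys => ["rngkey"])
    puts r.get("rngkey")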
+ +antirez in commit 1d1bf7f0: + Document that effects replication is Redis 5 default. + 1 file changed, 8 insertions(+) + +antirez in commit cfd969c7: + Fix scripting tests now that we default to commands repl. + 1 file changed, 8 insertions(+), 1 deletion(-) + +antirez in commit 3e1fb5ff: + Use commands (effects) replication by default in scripts. + 3 files changed, 8 insertions(+), 1 deletion(-) + +antirez in commit c6c71abe: + Safer script stop condition on OOM. + 1 file changed, 5 insertions(+), 2 deletions(-) + +antirez in commit dfbce91a: + Propagate read-only scripts as SCRIPT LOAD. + 1 file changed, 16 insertions(+), 3 deletions(-) + +antirez in commit 1705e42e: + Don't perform eviction when re-entering the event loop. + 1 file changed, 7 insertions(+), 2 deletions(-) + +antirez in commit a0dd6f82: + Clarify why remaining may be zero in readQueryFromClient(). + 1 file changed, 2 insertions(+) + +zhaozhao.zz in commit 2eed31a5: + networking: fix unexpected negative or zero readlen + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 37fb606c: + Merge branch '5.0' of github.com:/antirez/redis into 5.0 +zhaozhao.zz in commit 1898e6ce: + networking: optimize parsing large bulk greater than 32k + 1 file changed, 13 insertions(+), 10 deletions(-) + +antirez in commit 82fc63d1: + Unblocked clients API refactoring. See #4418. + 4 files changed, 33 insertions(+), 15 deletions(-) + +zhaozhao.zz in commit 839bb52c: + if master is already unblocked, do not unblock it twice + 1 file changed, 1 insertion(+), 1 deletion(-) + +zhaozhao.zz in commit 2e1cd82d: + fix multiple unblock for clientsArePaused() + 1 file changed, 3 insertions(+), 3 deletions(-) + +antirez in commit 17233080: + Make pending buffer processing safe for CLIENT_MASTER client. + 3 files changed, 22 insertions(+), 13 deletions(-) + +antirez in commit 42bce87a: + Test: processing of master stream in slave -BUSY state. + 1 file changed, 44 insertions(+) + +antirez in commit 8bf42f60: + After slave Lua script leaves busy state, re-process the master buffer. + 2 files changed, 5 insertions(+), 2 deletions(-) + +antirez in commit c2b104c7: + While the slave is busy, just accumulate master input. + 2 files changed, 6 insertions(+), 1 deletion(-) + +antirez in commit 7b75f4ae: + Allow scripts to timeout even if from the master instance. + 1 file changed, 6 insertions(+), 11 deletions(-) + +antirez in commit adc4e031: + Allow scripts to timeout on slaves as well. + 2 files changed, 10 insertions(+), 3 deletions(-) + +dejun.xdj in commit 20ec1f0c: + Revise the comments of latency command. + 1 file changed, 2 insertions(+), 1 deletion(-) + +Chris Lamb in commit 8e5423eb: + Correct "did not received" -> "did not receive" typos/grammar. + 6 files changed, 10 insertions(+), 10 deletions(-) + +zhaozhao.zz in commit 395063d7: + remove duplicate bind in sentinel.conf + 1 file changed, 10 deletions(-) + +Salvatore Sanfilippo in commit b221ca41: + Merge pull request #5300 from SaschaRoland/xread-block-5299 +Sascha Roland in commit eea0d3c5: + #5299 Fix blocking XREAD for streams that ran dry + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 4cb9ee11: + Add maxmemory slave behavior change in the change log. 
+ 1 file changed, 8 insertions(+) + +zhaozhao.zz in commit 5ad888ba: + Supplement to PR #4835, just take info/memory/command as random commands + 1 file changed, 3 insertions(+), 3 deletions(-) + +zhaozhao.zz in commit d928487f: + some commands' flags should be set correctly, issue #4834 + 1 file changed, 14 insertions(+), 14 deletions(-) + +Oran Agra in commit af675f0a: + Fix unstable tests on slow machines. + 3 files changed, 23 insertions(+), 17 deletions(-) + +antirez in commit f2cd16be: + Document slave-ignore-maxmemory in redis.conf. + 1 file changed, 20 insertions(+) + +antirez in commit 02d729b4: + Make slave-ignore-maxmemory configurable. + 1 file changed, 9 insertions(+) + +antirez in commit 447da44d: + Introduce repl_slave_ignore_maxmemory flag internally. + 3 files changed, 7 insertions(+) + +antirez in commit 868b2925: + Better variable meaning in processCommand(). + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 319f2ee6: + Re-apply rebased #2358. + 1 file changed, 1 insertion(+), 1 deletion(-) + +zhaozhao.zz in commit 22c166da: + block: format code + 1 file changed, 2 insertions(+), 2 deletions(-) + +zhaozhao.zz in commit c03c5913: + block: rewrite BRPOPLPUSH as RPOPLPUSH to propagate + 3 files changed, 5 insertions(+), 1 deletion(-) + +zhaozhao.zz in commit fcd5ef16: + networking: make setProtocolError simple and clear + 1 file changed, 11 insertions(+), 13 deletions(-) + +zhaozhao.zz in commit 656e4b2f: + networking: just move qb_pos instead of sdsrange in processInlineBuffer + 1 file changed, 2 insertions(+), 3 deletions(-) + +zhaozhao.zz in commit 2c7972ce: + networking: just return C_OK if multibulk processing saw a <= 0 length. + 1 file changed, 2 insertions(+), 5 deletions(-) + +zhaozhao.zz in commit 1203a04f: + adjust qbuf to 26 in test case for client list + 1 file changed, 1 insertion(+), 1 deletion(-) + +zhaozhao.zz in commit aff86fa1: + pipeline: do not sdsrange querybuf unless all commands processed + 2 files changed, 48 insertions(+), 40 deletions(-) + +Chris Lamb in commit 45a6c5be: + Use SOURCE_DATE_EPOCH over unreproducible uname + date calls. + 1 file changed, 3 insertions(+) + +Chris Lamb in commit 186df148: + Make some defaults explicit in the sentinel.conf for package maintainers + 1 file changed, 25 insertions(+) + +dejun.xdj in commit b59f04a0: + Streams: ID of xclaim command starts from the sixth argument. + 1 file changed, 1 insertion(+), 1 deletion(-) + +shenlongxing in commit a3f2437b: + Fix stream command paras + 2 files changed, 7 insertions(+), 7 deletions(-) + +antirez in commit df911235: + Fix AOF comment to report the current behavior. + 1 file changed, 3 insertions(+), 1 deletion(-) + + + +================================================================================ +Redis 5.0 RC4 Released Fri Aug 03 13:51:02 CEST 2018 +================================================================================ + +Upgrade urgency + + HIGH: Many non-critical but important issues fixed. + CRITICAL for Stream users: Many important bugs fixed. + +Hi all, welcome to Redis 5.0 RC4. + +This release is a huge step forward in Redis 5 maturity and fixes a number +of issues. It also provides interesting improvements. Here I'll summarize +the biggest ones, but later you can find the full list of commits: + +Fixes: + +* A number of fixes related to Streams: stability and correctness. +* Fix dbRandomKey() potential infinite loop. +* Improve eviction LFU/LRU when keys are created by the INCR command family. 
+* Active defragmentation is now working on Redis 5. +* Fix corner case in Redis Cluster / Sentinel failover, by resetting the + disconnection time with the master in a more appropriate place. +* Use a private version of localtime() to avoid potential deadlocks. +* Different redis-cli non-critical fixes. +* Fix rare replication stream corruption with disk-based replication. + +Improvements: + +* Sentinel: add an option to deny online script reconfiguration. +* Improved RESTORE command. +* Sentinel command renaming: allows using Sentinel with Redis instances + that have non-standard command names. +* CLIENT ID and CLIENT UNBLOCK. +* CLIENT LIST now supports a TYPE option. +* redis-cli --cluster now supports authentication. +* redis-trib is now deprecated (use redis-cli --cluster). +* Better slave output buffer efficiency. +* Faster INFO when there are many clients connected. +* Dynamic HZ feature. +* Improvements in what the MEMORY command is able to report. +* Add year in log. (WARNING: may be incompatible with log scraping tools) +* Lazy freeing now works even when values are overwritten (for instance SET). +* Faster ZADD when elements scores are updated. +* Improvements to the test suite, including many new options. + +antirez in commit a4d1201e: + Test suite: add --loop option. + 1 file changed, 12 insertions(+), 5 deletions(-) + +antirez in commit 273d8191: + Test suite: new --stop option. + 1 file changed, 13 insertions(+), 4 deletions(-) + +antirez in commit fbbcc6a6: + Streams IDs parsing refactoring. + 1 file changed, 32 insertions(+), 17 deletions(-) + +antirez in commit 70c4bcb7: + Test: new sorted set skiplist order consistency. + 1 file changed, 26 insertions(+) + +antirez in commit 63addc5c: + Fix zslUpdateScore() edge case. + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 724740cc: + More commenting of zslUpdateScore(). + 1 file changed, 2 insertions(+) + +antirez in commit ddc87eef: + Explain what's the point of zslUpdateScore() in top comment. + 1 file changed, 5 insertions(+) + +antirez in commit 741f29ea: + Remove old commented zslUpdateScore() from source. + 1 file changed, 13 deletions(-) + +antirez in commit 20116836: + Optimize zslUpdateScore() as asked in #5179. + 1 file changed, 44 insertions(+) + +antirez in commit 8c297e8b: + zsetAdd() refactored adding zslUpdateScore(). + 1 file changed, 18 insertions(+), 7 deletions(-) + +dejun.xdj in commit bd2f3f6b: + Streams: rearrange the usage of '-' and '+' IDs in stream commands. + 1 file changed, 13 insertions(+), 13 deletions(-) + +dejun.xdj in commit c0c06b84: + Streams: add mmid_supp argument in streamParseIDOrReply(). + 1 file changed, 6 insertions(+), 2 deletions(-) + +antirez in commit ab237a8e: + Minor improvements to PR #5187. + 2 files changed, 13 insertions(+), 6 deletions(-) + +Oran Agra in commit 1ce3cf7a: + test suite conveniency improvements + 3 files changed, 79 insertions(+), 3 deletions(-) + +Oran Agra in commit 36622899: + add DEBUG LOG, to to assist test suite debugging + 1 file changed, 4 insertions(+) + +antirez in commit 83d4311a: + Cluster cron announce IP minor refactoring. + 1 file changed, 6 insertions(+), 3 deletions(-) + +shenlongxing in commit a633f8e1: + Fix cluster-announce-ip memory leak + 1 file changed, 3 insertions(+), 2 deletions(-) + +antirez in commit 24c45538: + Tranfer -> transfer typo fixed. 
+ 1 file changed, 1 insertion(+), 1 deletion(-) + +zhaozhao.zz in commit c609f240: + refactor dbOverwrite to make lazyfree work + 4 files changed, 27 insertions(+), 12 deletions(-) + +antirez in commit 9e971739: + Refactoring: replace low-level checks with writeCommandsDeniedByDiskError(). + 2 files changed, 6 insertions(+), 13 deletions(-) + +antirez in commit 0e77cef0: + Fix writeCommandsDeniedByDiskError() inverted return value. + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit acfe9d13: + Better top comment for writeCommandsDeniedByDiskError(). + 1 file changed, 8 insertions(+), 1 deletion(-) + +antirez in commit 4e933e00: + Introduce writeCommandsDeniedByDiskError(). + 2 files changed, 24 insertions(+) + +WuYunlong in commit 41607dfd: + Consider aof write error as well as rdb in lua script. + 1 file changed, 14 insertions(+), 4 deletions(-) + +Salvatore Sanfilippo in commit 1d073a64: + Merge pull request #5168 from rpv-tomsk/issue-5033 +Guy Korland in commit 2db31fd4: + Few typo fixes + 1 file changed, 13 insertions(+), 13 deletions(-) + +antirez in commit 64242757: + Add year in log. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 50be4a1f: + Document dynamic-hz in the example redis.conf. + 1 file changed, 16 insertions(+) + +antirez in commit 9a76472d: + Make dynamic hz actually configurable. + 1 file changed, 9 insertions(+) + +antirez in commit a330d06c: + Control dynamic HZ via server configuration. + 2 files changed, 13 insertions(+), 6 deletions(-) + +antirez in commit d42602ff: + Dynamic HZ: adapt cron frequency to number of clients. + 2 files changed, 17 insertions(+), 5 deletions(-) + +antirez in commit 7b5f0223: + Dynamic HZ: separate hz from the configured hz. + 3 files changed, 15 insertions(+), 9 deletions(-) + +antirez in commit 037b00de: + Remove useless conditional from emptyDb(). + 1 file changed, 1 deletion(-) + +antirez in commit 0e97ae79: + Make emptyDb() change introduced in #4852 simpler to read. + 1 file changed, 8 insertions(+), 3 deletions(-) + +zhaozhao.zz in commit f7740faf: + optimize flushdb, avoid useless loops + 1 file changed, 5 insertions(+), 2 deletions(-) + +zhaozhao.zz in commit 0c008376: + Streams: fix xdel memory leak + 1 file changed, 1 insertion(+) + +antirez in commit dc600a25: + Example the magic +1 in migrateCommand(). + 1 file changed, 4 insertions(+) + +antirez in commit d6827ab6: + Make changes of PR #5154 hopefully simpler. + 1 file changed, 10 insertions(+), 5 deletions(-) + +WuYunlong in commit 89ec1453: + Do not migrate already expired keys. + 1 file changed, 6 insertions(+), 2 deletions(-) + +Pavel Rochnyack in commit cd25ed17: + INFO CPU: higher precision of reported values + 1 file changed, 8 insertions(+), 8 deletions(-) + +antirez in commit 6bfb4745: + Streams: refactoring of next entry seek in the iterator. + 1 file changed, 11 insertions(+), 7 deletions(-) + +zhaozhao.zz in commit 4724548e: + Streams: skip master fileds only when we are going forward in streamIteratorGetID + 1 file changed, 8 insertions(+), 5 deletions(-) + +Oran Agra in commit 4b79fdf1: + fix slave buffer test suite false positives + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit a1e081f7: + string2ll(): better commenting. + 1 file changed, 6 insertions(+) + +dsomeshwar in commit 8b4fe752: + removing redundant check + 1 file changed, 3 deletions(-) + +antirez in commit 9e5bf047: + Restore string2ll() to original version. 
+ 1 file changed, 7 insertions(+), 2 deletions(-) + +Oran Agra in commit c2ecdcde: + fix recursion typo in zmalloc_usable + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 4f742bd6: + string2ll(): remove duplicated check for special case. + 1 file changed, 1 insertion(+), 6 deletions(-) + +antirez in commit a4efac00: + string2ll(): test for NULL pointer in all the cases. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 2c07c107: + Change 42 to 1000 as warning level for cached scripts. + 1 file changed, 3 insertions(+), 3 deletions(-) + +Itamar Haber in commit 270903d6: + Adds Lua overheads to MEMORY STATS, smartens the MEMORY DOCTOR + 3 files changed, 30 insertions(+), 4 deletions(-) + +Itamar Haber in commit faf3dbfc: + Adds memory information about the script's cache to INFO + 3 files changed, 12 insertions(+) + +antirez in commit 49841a54: + Fix merge errors. + 2 files changed, 7 deletions(-) + +antirez in commit 77a7ec72: + Merge branch 'unstable' into 5.0 branch +antirez in commit 4ff47a0b: + Top comment clientsCron(). + 1 file changed, 19 insertions(+), 4 deletions(-) + +antirez in commit aba68552: + Clarify that clientsCronTrackExpansiveClients() indexes may jump ahead. + 1 file changed, 9 insertions(+), 1 deletion(-) + +antirez in commit be88c0b1: + Rename INFO CLIENT max buffers field names for correctness. + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 0cf3794e: + Fix wrong array index variable in getExpansiveClientsInfo(). + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit ea3a20c5: + Change INFO CLIENTS sections to report pre-computed max/min client buffers. + 1 file changed, 5 insertions(+), 5 deletions(-) + +antirez in commit 8f7e496b: + Rename var in clientsCronTrackExpansiveClients() for clarity. + 1 file changed, 3 insertions(+), 3 deletions(-) + +antirez in commit 8d617596: + Implement a function to retrieve the expansive clients mem usage. + 1 file changed, 12 insertions(+) + +antirez in commit 85a1b4f8: + clientsCronTrackExpansiveClients() actual implementation. + 1 file changed, 14 insertions(+), 1 deletion(-) + +antirez in commit d4c5fc57: + clientsCronTrackExpansiveClients() skeleton and ideas. + 1 file changed, 23 insertions(+) + +antirez in commit 1c95c075: + Make vars used only by INFO CLIENTS local to the block. + 1 file changed, 1 insertion(+), 1 deletion(-) + +Salvatore Sanfilippo in commit 16b8d364: + Merge pull request #4727 from kingpeterpaule/redis-fix-info-cli +antirez in commit 0aca977c: + Merge branch 'unstable' of github.com:/antirez/redis into unstable +antirez in commit 313b2240: + In addReplyErrorLength() only panic when replying to slave. + 1 file changed, 4 insertions(+), 3 deletions(-) + +antirez in commit 6183f059: + Refine comment in addReplyErrorLength() about replying to masters/slaves. + 1 file changed, 11 insertions(+) + +Salvatore Sanfilippo in commit 22e9321c: + Merge pull request #5138 from oranagra/improve_defrag_test +Oran Agra in commit f89c93c8: + make active defrag test more stable + 2 files changed, 6 insertions(+), 5 deletions(-) + +Salvatore Sanfilippo in commit 8213f64d: + Merge pull request #5122 from trevor211/allowWritesWhenAofDisabled +Salvatore Sanfilippo in commit 46fd9278: + Merge pull request #4237 from aspirewit/update-comment +antirez in commit 6201f7b4: + Streams: better error when $ is given with XREADGROUP. 
+ 1 file changed, 5 insertions(+), 2 deletions(-) + +Salvatore Sanfilippo in commit 4bff45c7: + Merge pull request #5136 from 0xtonyxia/fix-xread-id-parse +antirez in commit afc7e08a: + Panic when we are sending an error to our master/slave. + 1 file changed, 5 insertions(+) + +Salvatore Sanfilippo in commit e03358c0: + Merge pull request #5135 from oranagra/rare_repl_corruption +dejun.xdj in commit 846cf12a: + Streams: remove meaningless if condition. + 1 file changed, 1 insertion(+), 1 deletion(-) + +dejun.xdj in commit 6501b6bb: + Streams: return an error message if using xreadgroup with '$' ID. + 1 file changed, 5 insertions(+) + +Oran Agra in commit d5559898: + fix rare replication stream corruption with disk-based replication + 3 files changed, 18 insertions(+), 9 deletions(-) + +antirez in commit cefe21d2: + dict.c: remove a few trailing spaces. + 1 file changed, 2 insertions(+), 2 deletions(-) + +Salvatore Sanfilippo in commit 4fc20992: + Merge pull request #5128 from kingpeterpaule/remove-one-loop-in-freeMemoryIfNeeded +Salvatore Sanfilippo in commit 9fbd49bb: + Merge pull request #5113 from 0xtonyxia/using-compare-func-instead +Salvatore Sanfilippo in commit cab39676: + Merge pull request #5127 from oranagra/sds_req_type +antirez in commit f9c84d6d: + Hopefully improve commenting of #5126. + 2 files changed, 22 insertions(+), 10 deletions(-) + +Salvatore Sanfilippo in commit e22a1218: + Merge pull request #5126 from oranagra/slave_buf_memory_2 +Salvatore Sanfilippo in commit 28dd8dd1: + Merge pull request #5132 from soloestoy/propagate-xdel-correctly +Oran Agra in commit bf680b6f: + slave buffers were wasteful and incorrectly counted causing eviction + 10 files changed, 182 insertions(+), 50 deletions(-) + +zhaozhao.zz in commit 73306c6f: + Streams: correctly propagate xdel if needed + 1 file changed, 7 insertions(+), 3 deletions(-) + +antirez in commit 103c5a1a: + Add a few comments to streamIteratorRemoveEntry(). + 1 file changed, 4 insertions(+) + +Salvatore Sanfilippo in commit a317f55d: + Merge pull request #5131 from soloestoy/optimize-xdel +antirez in commit 185e0d9c: + Modify XINFO field from last-id to last-generated-id. + 1 file changed, 1 insertion(+), 1 deletion(-) + +Salvatore Sanfilippo in commit 4215e74b: + Merge pull request #5129 from soloestoy/xinfo-show-last-id +zhaozhao.zz in commit c9324f81: + Streams: free lp if all elements are deleted + 1 file changed, 9 insertions(+), 4 deletions(-) + +paule in commit b6ce7d5d: + Update dict.c + 1 file changed, 4 insertions(+), 2 deletions(-) + +zhaozhao.zz in commit b4ba5ac8: + Streams: show last id for streams and groups + 1 file changed, 6 insertions(+), 2 deletions(-) + +peterpaule in commit 816fc6cb: + remove one ineffective loop in dictGetSomeKeys. + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit b05a22e2: + bugfix in sdsReqType creating 64bit sds headers on 32bit systems + 1 file changed, 3 insertions(+), 1 deletion(-) + +dejun.xdj in commit 491682a6: + Streams: using streamCompareID() instead of direct compare in block.c. + 1 file changed, 1 insertion(+), 4 deletions(-) + +dejun.xdj in commit a2177cd2: + Streams: add streamCompareID() declaration in stream.h. + 1 file changed, 1 insertion(+) + +dejun.xdj in commit 0484dbcf: + Streams: using streamCompareID() instead of direct compare. + 1 file changed, 2 insertions(+), 6 deletions(-) + +WuYunlong in commit 2d4366c5: + Accept write commands if persisting is disabled, event if we do have problems persisting on disk previously. 
+ 1 file changed, 2 insertions(+), 1 deletion(-) + +Salvatore Sanfilippo in commit ab33bcd3: + Merge pull request #5120 from andrewsensus/leap-year-comment-patch-1 +antirez in commit 2352a519: + Test: XDEL fuzz testing. Remove and check stage. + 1 file changed, 15 insertions(+) + +antirez in commit 3d7d20b7: + Test: fix lshuffle by providing the "K" combinator. + 1 file changed, 2 insertions(+) + +antirez in commit 967ad364: + Test: add lshuffle in the Tcl utility functions set. + 1 file changed, 14 insertions(+) + +antirez in commit d6efd5fc: + Test: XDEL fuzz testing, stream creation. + 1 file changed, 20 insertions(+) + +andrewsensus in commit 8dc08ae2: + update leap year comment + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 69997153: + Merge branch 'unstable' of github.com:/antirez/redis into unstable +antirez in commit a93f8f09: + Test: XDEL basic test. + 1 file changed, 12 insertions(+) + +Salvatore Sanfilippo in commit a44a5934: + Merge pull request #5119 from trevor211/fixSlowlogConfig +WuYunlong in commit d6ba4fd5: + Fix config set slowlog-log-slower-than and condition in createLatencyReport + 2 files changed, 2 insertions(+), 2 deletions(-) + +WuYunlong in commit b3660be8: + Add test in slowlog.tcl + 1 file changed, 10 insertions(+) + +artix in commit d4182a0a: + Cluster Manager: more checks on --cluster-weight option. + 1 file changed, 12 insertions(+), 2 deletions(-) + +artix in commit d222eda9: + Redis-trib deprecated: it no longer works and it outputs a warning to the user. + 1 file changed, 103 insertions(+), 1804 deletions(-) + +artix in commit 513eb572: + Cluster Manager: auth support (-a argument). + 1 file changed, 41 insertions(+), 19 deletions(-) + +Salvatore Sanfilippo in commit f3980bb9: + Merge pull request #5115 from shenlongxing/patch-1 +Shen Longxing in commit c2a85fb3: + Delete unused role checking. + 1 file changed, 2 insertions(+), 6 deletions(-) + +Salvatore Sanfilippo in commit 4cb5bd4e: + Merge pull request #4820 from charpty/wip-serverc-simplify +antirez in commit 8d6b7861: + Add regression test for #5111. + 1 file changed, 15 insertions(+) + +antirez in commit b6260a02: + Streams: when re-delivering because of SETID, reset deliveries counter. + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit a7c180e5: + Simplify duplicated NACK #5112 fix. + 1 file changed, 18 insertions(+), 21 deletions(-) + +Salvatore Sanfilippo in commit bf4def0f: + Merge pull request #5112 from 0xtonyxia/fix-xreadgroup-crash-after-setid +Salvatore Sanfilippo in commit 16770551: + Merge pull request #5114 from oranagra/defrag_32 +Oran Agra in commit 920158ec: + Active defrag fixes for 32bit builds (again) + 1 file changed, 2 insertions(+), 2 deletions(-) + +Salvatore Sanfilippo in commit f45e7901: + Merge pull request #4967 from JingchengLi/unstable +tengfeng in commit 9505dd20: + fix repeat argument issue and reduce unnessary loop times for redis-cli. + 1 file changed, 12 insertions(+), 7 deletions(-) + +antirez in commit 0420c327: + Merge branch 'unstable' of github.com:/antirez/redis into unstable +antirez in commit 28e95c7c: + Streams: fix typo "consumer". + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit a8c1bb31: + Streams: fix new XREADGROUP sync logic. + 1 file changed, 13 insertions(+), 3 deletions(-) + +antirez in commit 1a02b5f6: + Streams: make blocking for > a truly special case. 
+ 1 file changed, 29 insertions(+), 4 deletions(-) + +antirez in commit a71e8148: + Streams: send an error to consumers blocked on non-existing group. + 1 file changed, 5 insertions(+), 1 deletion(-) + +antirez in commit 09327f11: + Streams: fix unblocking logic into a consumer group. + 1 file changed, 24 insertions(+), 14 deletions(-) + +dejun.xdj in commit 3f8a3efe: + Streams: fix xreadgroup crash after xgroup SETID is sent. + 1 file changed, 20 insertions(+), 15 deletions(-) + +Salvatore Sanfilippo in commit 7239e9ca: + Merge pull request #5095 from 0xtonyxia/fix-indentation +dejun.xdj in commit 61f12973: + Bugfix: PEL is incorrect when consumer is blocked using xreadgroup with NOACK option. + 4 files changed, 6 insertions(+), 1 deletion(-) + +antirez in commit b67f0276: + redis-cli: fix #4990 additional argument in help. + 1 file changed, 1 insertion(+) + +antirez in commit 18d65849: + redis-cli: fix #5096 double error message. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 84620327: + redis-cli: cliConnect() flags CC_FORCE and CC_QUIET. + 1 file changed, 23 insertions(+), 13 deletions(-) + +Amit Dey in commit a3a5a25f: + fixing broken link in CONTRIBUTING + 1 file changed, 1 insertion(+), 1 deletion(-) + +dejun.xdj in commit 289d8d9c: + CLIENT UNBLOCK: fix client unblock help message. + 1 file changed, 1 insertion(+), 1 deletion(-) + +minkikim89 in commit 62a4a8c1: + fix whitespace in redis-cli.c + 1 file changed, 362 insertions(+), 362 deletions(-) + +WuYunlong in commit 0a5805d7: + fix compile warning in addReplySubcommandSyntaxError + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit cb78c842: + Use nolocks_localtime() for safer logging. + 2 files changed, 8 insertions(+), 2 deletions(-) + +antirez in commit 81778d91: + Cache timezone and daylight active flag for safer logging. + 2 files changed, 14 insertions(+), 1 deletion(-) + +antirez in commit 18d8205b: + Localtime: clarify is_leap_year() working with comments. + 1 file changed, 4 insertions(+), 4 deletions(-) + +antirez in commit 29644144: + Localtime: fix comment about leap year. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 0ea39aa4: + Localtime: fix daylight saving adjustment. Use * not +. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 6614d305: + Localtime: fix daylight time documentation and computation. + 1 file changed, 14 insertions(+), 4 deletions(-) + +dejun.xdj in commit 46f5a2ca: + Fix indentation. + 2 files changed, 14 insertions(+), 14 deletions(-) + +antirez in commit 243c5a7a: + Localtime: add a test main() function to check the output. + 1 file changed, 15 insertions(+), 1 deletion(-) + +antirez in commit c25ee35a: + Localtime: day of month is 1 based. Convert from 0 based "days". + 1 file changed, 2 insertions(+), 2 deletions(-) + +antirez in commit b9f33830: + Localtime: fix timezone adjustment. + 1 file changed, 2 insertions(+), 1 deletion(-) + +antirez in commit 0c12cbed: + Localtime: compute year, month and day of the month. + 1 file changed, 26 insertions(+) + +antirez in commit 06ca400f: + Localtime: basics initial calculations. Year missing. + 1 file changed, 30 insertions(+), 2 deletions(-) + +antirez in commit 6a529067: + Localtime function skeleton and file added. + 1 file changed, 44 insertions(+) + +Jack Drogon in commit 93238575: + Fix typo + 40 files changed, 99 insertions(+), 99 deletions(-) + +antirez in commit 94b3ee61: + Clarify the pending_querybuf field of clients. 
+ 1 file changed, 4 insertions(+), 3 deletions(-) + +antirez in commit 549b8b99: + Improve style of PR #5084. + 1 file changed, 8 insertions(+), 2 deletions(-) + +Salvatore Sanfilippo in commit 526b30a7: + Merge pull request #5084 from chendq8/pending-querybuf +antirez in commit 677d10b2: + Set repl_down_since to zero on state change. + 1 file changed, 2 insertions(+), 1 deletion(-) + +Salvatore Sanfilippo in commit 02e38516: + Merge pull request #5081 from trevor211/fixClusterFailover +chendianqiang in commit cbb2ac07: + Merge branch 'unstable' into pending-querybuf +antirez in commit 2edcafb3: + addReplySubSyntaxError() renamed to addReplySubcommandSyntaxError(). + 12 files changed, 14 insertions(+), 14 deletions(-) + +Salvatore Sanfilippo in commit bc6a0045: + Merge pull request #4998 from itamarhaber/module_command_help +Salvatore Sanfilippo in commit ee09b5ed: + Merge pull request #5071 from akshaynagpal/patch-2 +Salvatore Sanfilippo in commit f03ad962: + Merge pull request #5068 from shenlongxing/fix-rename-command +Salvatore Sanfilippo in commit e4881cd0: + Merge pull request #5090 from trevor211/test_helper_tcl +WuYunlong in commit 2833cfbe: + fix tests/test_helper.tcl with --wait-server option. Issue #5063 added --wait-server option, but can not work. + 1 file changed, 1 deletion(-) + +chendianqiang in commit 7de1ada0: + limit the size of pending-querybuf in masterclient + 4 files changed, 48 insertions(+) + +WuYunlong in commit 2e167f7d: + fix server.repl_down_since resetting, so that slaves could failover automatically as expected. + 1 file changed, 1 insertion(+), 1 deletion(-) + +WuYunlong in commit aeb7bc3e: + cluster.tcl: Add master consecutively down test. + 1 file changed, 77 insertions(+) + +antirez in commit d751d98b: + Change CLIENT LIST TYPE help string. + 1 file changed, 2 insertions(+), 2 deletions(-) + +Salvatore Sanfilippo in commit a0b05a04: + Merge pull request #5075 from soloestoy/client-list-types +Salvatore Sanfilippo in commit aa2c390e: + Merge pull request #5074 from soloestoy/fix-compile-warning +Salvatore Sanfilippo in commit a4ef94d2: + Merge pull request #5076 from 0xtonyxia/add-no-auth-warning-option +dejun.xdj in commit 9f185626: + Check if password is used on command line interface. + 1 file changed, 1 insertion(+), 1 deletion(-) + +dejun.xdj in commit 1139070a: + Fix trailing white space. + 1 file changed, 1 insertion(+), 1 deletion(-) + +dejun.xdj in commit bbd0ca95: + Fix code format issue. + 1 file changed, 4 insertions(+), 4 deletions(-) + +dejun.xdj in commit 7becf54e: + Don't output password warning message when --no-auth-warning is used. + 1 file changed, 10 insertions(+), 1 deletion(-) + +dejun.xdj in commit bde05e9c: + Avoid -Woverlength-strings compile warning. + 1 file changed, 5 insertions(+), 3 deletions(-) + +antirez in commit 5baf50d8: + Rax library updated (node callback). + 2 files changed, 19 insertions(+), 5 deletions(-) + +dejun.xdj in commit 0b74fd67: + Add --no-auth-warning help message. 
+ 1 file changed, 2 insertions(+) + +zhaozhao.zz in commit b9cbd04b: + clients: add type option for client list + 4 files changed, 20 insertions(+), 6 deletions(-) + +zhaozhao.zz in commit f5538642: + clients: show pubsub flag in client list + 1 file changed, 1 insertion(+) + +zhaozhao.zz in commit 1fcf2737: + fix some compile warnings + 2 files changed, 2 insertions(+), 2 deletions(-) + +Akshay Nagpal in commit 007e3cbd: + Added link to Google Group + 1 file changed, 3 insertions(+), 1 deletion(-) + +antirez in commit ab55f9da: + Make CLIENT HELP output nicer to the eyes. + 1 file changed, 11 insertions(+), 11 deletions(-) + +antirez in commit 75f1a7bd: + Merge branch 'unstable' of github.com:/antirez/redis into unstable +antirez in commit 4a70ff74: + Add unblock in CLIENT HELP. + 1 file changed, 1 insertion(+) + +shenlongxing in commit 3c27db1c: + fix empty string for sentinel rename-command + 1 file changed, 5 insertions(+) + +Salvatore Sanfilippo in commit f7b21bc7: + Merge pull request #5066 from oranagra/defrag_jemalloc5_fix +Salvatore Sanfilippo in commit 730a4cfa: + Merge pull request #5067 from mpaltun/mpaltun-doc-fix +antirez in commit 2214043b: + CLIENT UNBLOCK: support unblocking by error. + 1 file changed, 22 insertions(+), 3 deletions(-) + +Mustafa Paltun in commit 010dc172: + Update t_stream.c + 1 file changed, 2 insertions(+), 2 deletions(-) + +Mustafa Paltun in commit 6d0acb33: + Update help.h + 1 file changed, 1 insertion(+), 1 deletion(-) + +Oran Agra in commit de495ee7: + minor fix in creating a stream NACK for rdb and defrag tests + 2 files changed, 2 insertions(+), 2 deletions(-) + +antirez in commit 71295ee3: + CLIENT UNBLOCK implemented. + 1 file changed, 22 insertions(+) + +antirez in commit fb39bfd7: + Take clients in a ID -> Client handle dictionary. + 3 files changed, 6 insertions(+) + +antirez in commit ed65d734: + CLIENT ID implemented. + 1 file changed, 4 insertions(+) + +Salvatore Sanfilippo in commit 345b4809: + Merge pull request #5063 from oranagra/test_suite_improvements +Salvatore Sanfilippo in commit 35c5f3fa: + Merge pull request #5065 from oranagra/defrag_jemalloc5 +Oran Agra in commit 5616d4c6: + add active defrag support for streams + 6 files changed, 230 insertions(+), 25 deletions(-) + +Oran Agra in commit e8099cab: + add defrag hint support into jemalloc 5 + 3 files changed, 43 insertions(+) + +Oran Agra in commit 751eea24: + test suite infra improvements and fix + 2 files changed, 19 insertions(+) + +Salvatore Sanfilippo in commit bb666d44: + Merge pull request #5027 from tigertv/unstable +antirez in commit b9058c73: + Merge branch 'unstable' of github.com:/antirez/redis into unstable +antirez in commit 43831779: + Sentinel: test command renaming feature. + 1 file changed, 10 insertions(+) + +Salvatore Sanfilippo in commit eb052ba9: + Merge pull request #5059 from guybe7/fix_restore_warning +antirez in commit 27178a3f: + Fix type of argslen in sendSynchronousCommand(). + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 1f1e724f: + Remove black space. + 1 file changed, 1 insertion(+), 1 deletion(-) + +Salvatore Sanfilippo in commit aa5eaad4: + Merge pull request #5037 from madolson/repl-auth-fix +antirez in commit 3cf8dd2c: + Sentinel: fix SENTINEL SET error reporting. 
+ 1 file changed, 18 insertions(+), 9 deletions(-) + +Madelyn Olson in commit 45731edc: + Addressed comments + 1 file changed, 1 insertion(+), 1 deletion(-) + +Madelyn Olson in commit e8d68b6b: + Fixed replication authentication with whitespace in password + 1 file changed, 12 insertions(+), 5 deletions(-) + +antirez in commit fc0c9c80: + Sentinel: drop the renamed-command entry in a more natural way. + 1 file changed, 4 insertions(+), 7 deletions(-) + +antirez in commit 8ba670f5: + Sentinel command renaming: document it into sentinel.conf. + 1 file changed, 19 insertions(+) + +antirez in commit a8a76bda: + Merge branch 'unstable' of github.com:/antirez/redis into unstable +antirez in commit 2358de68: + Sentinel command renaming: use case sensitive hashing for the dict. + 1 file changed, 2 insertions(+), 1 deletion(-) + +antirez in commit a9c50088: + Sentinel command renaming: fix CONFIG SET event logging. + 1 file changed, 17 insertions(+), 1 deletion(-) + +antirez in commit b72cecd7: + Sentinel command renaming: fix CONFIG SET after refactoring. + 1 file changed, 5 insertions(+), 5 deletions(-) + +antirez in commit 91a384a5: + Sentinel command renaming: implement SENTINEL SET. + 1 file changed, 19 insertions(+) + +antirez in commit 903582dd: + Sentinel: make SENTINEL SET able to handle different arities. + 1 file changed, 19 insertions(+), 12 deletions(-) + +antirez in commit c303e768: + Sentinel command renaming: config rewriting. + 1 file changed, 12 insertions(+) + +antirez in commit 60df7dbe: + Sentinel command renaming: rename-command option parsing. + 1 file changed, 11 insertions(+) + +antirez in commit 72e8a33b: + Sentinel command renaming: base machanism implemented. + 1 file changed, 64 insertions(+), 15 deletions(-) + +Guy Benoish in commit dfcc20f4: + Fix compiler warning in restoreCommand + 1 file changed, 1 insertion(+), 1 deletion(-) + +Salvatore Sanfilippo in commit cf7fcdbe: + Merge pull request #4634 from soloestoy/special-auth +Salvatore Sanfilippo in commit 70b7fa2c: + Merge pull request #5049 from youjiali1995/fix-load-rdb +Salvatore Sanfilippo in commit 54d66d39: + Merge pull request #5053 from michael-grunder/zpopminmax-keypos +Salvatore Sanfilippo in commit 199e704a: + Merge pull request #5050 from shenlongxing/fix-typo +michael-grunder in commit db6b99f9: + Update ZPOPMIN/ZPOPMAX command declaration + 1 file changed, 2 insertions(+), 2 deletions(-) + +Salvatore Sanfilippo in commit a16aa03a: + Merge pull request #5051 from oranagra/streams_mem_estimate +Oran Agra in commit 20e10dc7: + fix streams memory estimation, missing raxSeek + 1 file changed, 2 insertions(+), 1 deletion(-) + +shenlongxing in commit ec55df11: + fix typo + 4 files changed, 4 insertions(+), 4 deletions(-) + +youjiali1995 in commit df6644fe: + Fix rdbLoadIntegerObject() to create shared objects when needed. 
+ 1 file changed, 1 insertion(+), 1 deletion(-) + +Salvatore Sanfilippo in commit 1527bcad: + Merge pull request #5036 from bepahol/unstable +Salvatore Sanfilippo in commit c1e82405: + Merge pull request #5039 from oranagra/rdb_dbsize_hint +Salvatore Sanfilippo in commit 79f55eed: + Merge pull request #5040 from oranagra/memrev64ifbe_fix +Salvatore Sanfilippo in commit c6f4118c: + Merge pull request #5045 from guybe7/restore_fix +Guy Benoish in commit b5197f1f: + Enhance RESTORE with RDBv9 new features + 5 files changed, 100 insertions(+), 22 deletions(-) + +Salvatore Sanfilippo in commit c6fdebf5: + Merge pull request #5042 from oranagra/malloc_usable_size_libc +Oran Agra in commit 482785ac: + add malloc_usable_size for libc malloc + 2 files changed, 8 insertions(+), 3 deletions(-) + +Salvatore Sanfilippo in commit 4da29630: + Merge pull request #5023 from FX-HAO/unstable +antirez in commit e7219025: + Test RDB stream encoding saving/loading. + 1 file changed, 17 insertions(+) + +Salvatore Sanfilippo in commit 5f5e1199: + Merge pull request #5041 from oranagra/redis-rdb-check_rdbLoadMillisecondTime +antirez in commit 4848fbec: + Modules: convert hash to hash table for big objects. + 1 file changed, 3 insertions(+) + +Oran Agra in commit f31b0405: + fix redis-rdb-check to provide proper arguments to rdbLoadMillisecondTime + 2 files changed, 3 insertions(+), 2 deletions(-) + +antirez in commit 333c98c4: + AOF: remove no longer used variable "now". + 1 file changed, 1 deletion(-) + +antirez in commit e94b2053: + Modify clusterRedirectClient() to handle ZPOP and XREAD. + 1 file changed, 5 insertions(+), 1 deletion(-) + +Oran Agra in commit 26229aa6: + use safe macro (non empty) in memrev64ifbe to eliminate empty if warning + 1 file changed, 3 insertions(+), 3 deletions(-) + +Oran Agra in commit 5cd3c952: + 64 bit RDB_OPCODE_RESIZEDB in rdb saving + 1 file changed, 3 insertions(+), 7 deletions(-) + +antirez in commit ba92b517: + Remove AOF optimization to skip expired keys. + 1 file changed, 3 deletions(-) + +Benjamin Holst in commit 36524060: + Update README.md + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 44571088: + Merge branch 'unstable' of github.com:/antirez/redis into unstable +antirez in commit 6967d0bd: + Revert fix #4976 just leaving the flush() part. + 1 file changed, 1 insertion(+), 5 deletions(-) + +antirez in commit 0ed0dc3c: + Fix incrDecrCommand() to create shared objects when needed. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit bd92389c: + Refactor createObjectFromLongLong() to be suitable for value objects. + 2 files changed, 33 insertions(+), 2 deletions(-) + +Salvatore Sanfilippo in commit 3518bb66: + Merge pull request #5020 from shenlongxing/fix-config +antirez in commit 20766608: + Streams: fix xreadGetKeys() for correctness. + 1 file changed, 19 insertions(+), 5 deletions(-) + +Salvatore Sanfilippo in commit e670ccff: + Merge pull request #4857 from youjiali1995/fix-command-getkeys +antirez in commit a0b27dae: + Streams: fix xreadGetKeys() buffer overflow. + 1 file changed, 1 insertion(+), 1 deletion(-) + +antirez in commit 62f9ac6f: + Streams: Change XADD MAXLEN handling of values <= 0. + 1 file changed, 3 insertions(+), 3 deletions(-) + +Max Vetrov in commit d4c4f20a: + Update sort.c + 1 file changed, 1 insertion(+), 3 deletions(-) + +antirez in commit 79a1c19a: + XADD MAXLEN should return an error for values < 0. 
+ 1 file changed, 5 insertions(+)
+
+Salvatore Sanfilippo in commit 2e0ab4a8:
+ Merge pull request #4976 from trevor211/fixDebugLoadaof
+Salvatore Sanfilippo in commit 94658303:
+ Merge pull request #4758 from soloestoy/rdb-save-incremental-fsync
+antirez in commit 6a66b93b:
+ Sentinel: add an option to deny online script reconfiguration.
+ 2 files changed, 41 insertions(+)
+
+antirez in commit d353023a:
+ Merge branch 'unstable' of github.com:/antirez/redis into unstable
+antirez in commit d6e8fe77:
+ Fix infinite loop in dbRandomKey().
+ 1 file changed, 13 insertions(+)
+
+Salvatore Sanfilippo in commit 40d5df65:
+ Merge pull request #5008 from zwkno1/unstable
+Salvatore Sanfilippo in commit 8bc3ffcb:
+ Merge pull request #5021 from soloestoy/fix-exists
+Salvatore Sanfilippo in commit 6c7847a1:
+ Merge pull request #5018 from soloestoy/optimize-reply
+antirez in commit 1e92fde3:
+ Fix SCAN bug regression test, avoiding empty SREM call.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Fuxin Hao in commit a4f658b2:
+ Fix update_zmalloc_stat_alloc in zrealloc
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+================================================================================
+Redis 5.0 RC3 Released Wed Jun 14 9:51:44 CEST 2018
+================================================================================
+
+Upgrade urgency LOW:
+
+This release fixes compilation of Redis 5.0 RC2. Due to an error, the commit
+from unstable updating the Rax library was not merged into the 5.0 branch.
+
+================================================================================
+Redis 5.0 RC2 Released Wed Jun 13 12:49:13 CEST 2018
+================================================================================
+
+Upgrade urgency CRITICAL: This release fixes important security issues.
+                    HIGH: This release fixes a SCAN commands family bug.
+                MODERATE: This release fixes a PSYNC2 edge case with expires.
+                MODERATE: Sentinel related fixes.
+                     LOW: All the other issues
+
+NOTE: This release breaks API compatibility with certain APIs that were
+introduced in Redis 5.0 RC1. Notably ZPOP* now returns score/element in
+reverse order (an example follows the list of fixes below). The XINFO
+special form was removed; now XINFO STREAM must be used to obtain general
+information about the stream.
+
+Redis 5.0 RC2 fixes a number of important issues:
+
+* Important security issues related to the Lua scripting engine.
+  Please check https://github.com/antirez/redis/issues/5017
+  for more information.
+
+* A bug with SCAN, SSCAN, HSCAN and ZSCAN that could cause them not to
+  return all the elements. We also added a regression test that can trigger
+  the issue often when present, and may in theory be able to find unrelated
+  regressions.
+
+* A PSYNC2 bug is fixed: Redis should not expire keys when saving RDB files,
+  because otherwise it is no longer possible to use such an RDB file as a
+  base for partial resynchronization: it no longer represents the right
+  state.
+
+* Compatibility of AOF with RDB preamble when the RDB checksum is disabled.
+
+* A Sentinel bug that in some cases prevented Sentinel from detecting that
+  the master was down immediately, adding a delay to the detection.
+
+* XREADGROUP would crash when the master had attached slaves.
+
+* Replication and event generation of several stream commands were fixed.
+
+* XREVRANGE was fixed: in some cases it could fail to return elements, or
+  crash the server, or in general not behave correctly.
+
+* ZPOP can now unblock multiple clients in a sane way.
+
+* Other minor issues.
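+
+The ZPOP* ordering change mentioned in the note above can be illustrated
+with a short redis-cli session (a sketch with hypothetical key and member
+names, assuming the element-first, score-second reply layout of the final
+5.0 behavior):
+
+    127.0.0.1:6379> ZADD myzset 1 "a" 2 "b"
+    (integer) 2
+    127.0.0.1:6379> ZPOPMIN myzset
+    1) "a"
+    2) "1"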
+
+Moreover this release adds new features:
+
+* XGROUP DESTROY and XGROUP SETID.
+
+* RDB loading speedup.
+
+* Configurable stream macro node limits (number of elements / bytes).
+
+* More small improvements.
+
+The following is the list of commits composing the release; please check
+the commit messages and authors for credits.
+
+antirez in commit 9fdcc159:
+ Security: fix redis-cli buffer overflow.
+ 1 file changed, 16 insertions(+), 11 deletions(-)
+
+antirez in commit cf760071:
+ Security: fix Lua struct package offset handling.
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+antirez in commit a57595ca:
+ Security: more cmsgpack fixes by @soloestoy.
+ 1 file changed, 7 insertions(+)
+
+antirez in commit 8783fb94:
+ Security: update Lua struct package for security.
+ 1 file changed, 23 insertions(+), 23 deletions(-)
+
+antirez in commit 8cb9344b:
+ Security: fix Lua cmsgpack library stack overflow.
+ 1 file changed, 3 insertions(+)
+
+赵磊 in commit 59080f60:
+ Fix dictScan(): It can't scan all buckets when dict is shrinking.
+ 1 file changed, 14 insertions(+), 11 deletions(-)
+
+dejun.xdj in commit ac2a824a:
+ Fix redis-cli memory leak when sending set preference command.
+ 1 file changed, 2 insertions(+)
+
+dejun.xdj in commit c7197ff5:
+ Check if the repeat value is positive in while loop of cliSendCommand().
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+dejun.xdj in commit 3f77777f:
+ Change the type of repeat argument to long for function cliSendCommand.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+dejun.xdj in commit 7a565d72:
+ Fix negtive repeat command value issue.
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+dejun.xdj in commit 64bf60fb:
+ Detect and stop saving history for auth command with repeat option.
+ 1 file changed, 17 insertions(+), 10 deletions(-)
+
+dejun.xdj in commit 5bed12aa:
+ Change the warning message a little bit to avoid trademark issuses.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+dejun.xdj in commit d71c4961:
+ Stop saving auth command in redis-cli history.
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+dejun.xdj in commit fca99e41:
+ Add warning message when using password on command line
+ 1 file changed, 1 insertion(+)
+
+antirez in commit 01407a3a:
+ Don't expire keys while loading RDB from AOF preamble.
+ 3 files changed, 5 insertions(+), 5 deletions(-)
+
+WuYunlong in commit fb5408cf:
+ Fix rdb save by allowing dumping of expire keys, so that when we add a new slave, and do a failover, eighter by manual or not, other local slaves will delete the expired keys properly.
+ 2 files changed, 3 insertions(+), 7 deletions(-)
+
+antirez in commit 0b8b6df4:
+ Backport hiredis issue 525 fix to compile on FreeBSD.
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+antirez in commit e98627c5:
+ Add INIT INFO to the provided init script.
+ 1 file changed, 8 insertions(+)
+
+antirez in commit 17f5de89:
+ Fix ae.c when a timer finalizerProc adds an event.
+ 2 files changed, 10 insertions(+), 6 deletions(-)
+
+antirez in commit 266e6423:
+ Sentinel: fix delay in detecting ODOWN.
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+zhaozhao.zz in commit eafaf172:
+ AOF & RDB: be compatible with rdbchecksum no
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+huijing.whj in commit 4630da37:
+ fix int overflow problem in freeMemoryIfNeeded
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+================================================================================
+Redis 5.0 RC1 Released Tue May 29 14:14:11 CEST 2018
+================================================================================
+
+Upgrade urgency LOW: This is the first RC of Redis 5.
+
+Introduction to the Redis 5 release
+===================================
+
+Redis 5 is a release focused on a few important features. While Redis 4
+was very focused on operations, Redis 5 changes are mostly user-facing,
+with the implementation of new data types and operations on top of existing
+types. The following are the major features of this release:
+
+1. The new Stream data type. https://redis.io/topics/streams-intro
+2. New Redis modules APIs: Timers, Cluster and Dictionary APIs.
+3. RDB now stores LFU and LRU information.
+4. The cluster manager was ported from Ruby (redis-trib.rb) to C code
+   inside redis-cli. Check `redis-cli --cluster help` for more info.
+5. New sorted set commands: ZPOPMIN/MAX and blocking variants.
+6. Active defragmentation version 2.
+7. Improvements in HyperLogLog implementations.
+8. Better memory reporting capabilities.
+9. Many commands with sub-commands now have a HELP subcommand.
+10. Better performance when clients connect and disconnect often.
+11. Many bug fixes and other random improvements.
+12. Jemalloc was upgraded to version 5.1.
+13. CLIENT UNBLOCK and CLIENT ID.
+14. The LOLWUT command was added. http://antirez.com/news/123
+15. We no longer use the "slave" word, except for API backward compatibility.
+16. Different optimizations in the networking layer.
+17. Lua improvements:
+    - Better propagation of Lua scripts to slaves / AOF.
+    - Lua scripts can now time out and get in -BUSY state in the slave as well.
+18. Dynamic HZ to balance idle CPU usage with responsiveness.
+19. The Redis core was refactored and improved in many ways.
+
+Thanks to all the users and developers who made this release possible.
+We'll follow up with more RC releases, until the code looks production-ready
+and we don't get reports of serious issues for a while.
+
+A special thank you for the amount of work put into this release
+(in decreasing number of commits) by:
+
+Fabio Nicotra
+Soloestoy
+Itamar Haber
+Oran Agra
+Dvir Volk
+dejun.xdj
+Guy Benoish
+Charsyam
+Otmar Ertl
+Jan-Erik Rediger
+Spinlock
+
+Migrating from 4.0 to 5.0
+=========================
+
+Redis 4.0 is mostly a strict subset of 5.0; you should not have any problem
+upgrading your application from 4.0 to 5.0. However, here is a list of the
+small non-backward-compatible changes introduced in the 5.0 release:
+
+* redis-cli now implements the cluster management tool. We still ship the
+  old redis-trib, but new fixes will be implemented only in redis-cli.
+  See `redis-cli --cluster help` for more info.
+
+* The RDB format changed. Redis 5.0 is still able to read 4.0 (and all the
+  past versions) files, but not the other way around.
+
+* Certain log formats and sentences are different in Redis 5.0.
+
+* Now by default maxmemory is ignored by slaves, and used only once a slave
+  is promoted to master. This means that in setups where you want slaves to
+  enforce maxmemory independently from the master (which will anyway stream
+  the key eviction DEL commands), you should activate this feature manually
+  and make sure you understand that it breaks consistency if writes are not
+  always idempotent. TLDR: the new behavior is much better for 99.999% of
+  use cases; revert it only if you really know what you are doing.
+
+* Scripts are only replicated by their *effects* and not by sending
+  EVAL/EVALSHA to slaves or the AOF log itself. This is much better in the
+  general case, and in the future we want to totally remove the other
+  possibility of propagating scripts the old way (as EVAL). However you can
+  still turn this back to the default via the non-documented (except here)
+  Redis configuration directive "lua-replicate-commands yes" or
+  "DEBUG lua-always-replicate-commands 0". However note that Redis 6 may
+  completely remove this feature.
+
+--------------------------------------------------------------------------------
+
+Credits: For each release, a list of changes with the respective author is
+provided. Where not specified, the implementation and design is done by
+Salvatore Sanfilippo. Thanks to Redis Labs for making all this possible.
+Also many thanks to all the other contributors and the amazing community
+we have.
+
+Commit messages may contain additional credits.
+
+Enjoy,
+Salvatore
diff --git a/redis-android/src/main/jni/redis-4.0.11/Android.mk b/redis-android/src/main/jni/redis-5.0.0/Android.mk
similarity index 86%
rename from redis-android/src/main/jni/redis-4.0.11/Android.mk
rename to redis-android/src/main/jni/redis-5.0.0/Android.mk
index 017c9e4..a708e09 100644
--- a/redis-android/src/main/jni/redis-4.0.11/Android.mk
+++ b/redis-android/src/main/jni/redis-5.0.0/Android.mk
@@ -19,7 +19,7 @@ RELEASE_HDR := $(shell sh -c '$(SRC)/mkreleasehdr.sh')
 LOCAL_MODULE := redis
 REDIS_ANDROID := redis-android
-REDIS_DIR := redis-4.0.11
+REDIS_DIR := redis-5.0.0
 
 LOCAL_LDLIBS := -llog
 LOCAL_CFLAGS := -O3 -D__ANDROID__ -D__REDIS_ANDROID__
@@ -37,6 +37,9 @@ LOCAL_CFLAGS += -funwind-tables
 LOCAL_C_INCLUDES += $(LOCAL_PATH)/../
 LOCAL_SRC_FILES += $(LOCAL_PATH)/../redis-android.c
+HAVE_EVPORT := 0
+HAVE_EPOLL := 0
+HAVE_KQUEUE := 1
 
 # Includes bthread library
 LOCAL_STATIC_LIBRARIES += bthread
@@ -50,11 +53,14 @@ LOCAL_STATIC_LIBRARIES += lua
 # Includes linenoise library
 LOCAL_STATIC_LIBRARIES += linenoise
 
+# Includes jemalloc library
+# LOCAL_STATIC_LIBRARIES += jemalloc
-ifeq ($(REDIS_DIR),redis-4.0.11)
+
+ifeq ($(REDIS_DIR),redis-5.0.0)
 LOCAL_SRC_FILES += \
- $(SRC)/adlist.c $(SRC)/ae.c \
+ $(SRC)/adlist.c $(SRC)/ae.c \
 $(SRC)/anet.c $(SRC)/aof.c $(SRC)/bio.c \
 $(SRC)/bitops.c $(SRC)/blocked.c $(SRC)/childinfo.c \
 $(SRC)/cluster.c \
@@ -63,16 +69,18 @@ ifeq ($(REDIS_DIR),redis-4.0.11)
 $(SRC)/dict.c $(SRC)/endianconv.c $(SRC)/evict.c \
 $(SRC)/expire.c $(SRC)/geo.c $(SRC)/geohash.c $(SRC)/geohash_helper.c \
 $(SRC)/hyperloglog.c $(SRC)/intset.c $(SRC)/latency.c \
- $(SRC)/lazyfree.c $(SRC)/lzf_c.c $(SRC)/lzf_d.c $(SRC)/memtest.c \
+ $(SRC)/lazyfree.c $(SRC)/listpack.c $(SRC)/localtime.c $(SRC)/lolwut.c \
+ $(SRC)/lolwut5.c \
+ $(SRC)/lzf_c.c $(SRC)/lzf_d.c $(SRC)/memtest.c \
 $(SRC)/module.c $(SRC)/multi.c $(SRC)/networking.c $(SRC)/notify.c \
 $(SRC)/object.c $(SRC)/pqsort.c $(SRC)/pubsub.c \
 $(SRC)/quicklist.c $(SRC)/rand.c $(SRC)/rax.c $(SRC)/rdb.c \
 $(SRC)/release.c $(SRC)/replication.c $(SRC)/rio.c \
- $(SRC)/scripting.c $(SRC)/sentinel.c \
+ $(SRC)/scripting.c $(SRC)/sds.c $(SRC)/sentinel.c \
$(SRC)/server.c $(SRC)/setproctitle.c $(SRC)/sha1.c \ $(SRC)/siphash.c $(SRC)/slowlog.c $(SRC)/sort.c $(SRC)/sparkline.c \ $(SRC)/syncio.c $(SRC)/t_hash.c $(SRC)/t_list.c \ - $(SRC)/t_set.c $(SRC)/t_string.c $(SRC)/t_zset.c \ + $(SRC)/t_set.c $(SRC)/t_stream.c $(SRC)/t_string.c $(SRC)/t_zset.c \ $(SRC)/util.c $(SRC)/wait3.c $(SRC)/ziplist.c \ $(SRC)/zipmap.c $(SRC)/zmalloc.c $(SRC)/redis-check-aof.c \ $(SRC)/redis-check-rdb.c @@ -88,8 +96,12 @@ include $(CLEAR_VARS) LOCAL_PATH := $(ORG_LOCAL_PATH) LOCAL_MODULE := redis-check-rdb -REDIS_ANDROID_DIR := redis-4.0.11 -REDIS_DIR := redis-4.0.11 +HAVE_EVPORT := 0 +HAVE_EPOLL := 0 +HAVE_KQUEUE := 0 + +REDIS_ANDROID_DIR := redis-5.0.0 +REDIS_DIR := redis-5.0.0 # Includes lua library LOCAL_STATIC_LIBRARIES += lua @@ -104,7 +116,7 @@ LOCAL_STATIC_LIBRARIES += hiredis LOCAL_STATIC_LIBRARIES += linenoise LOCAL_LDLIBS += -llog -LOCAL_CFLAGS += -O3 -D__ANDROID__ +LOCAL_CFLAGS := -O3 -D__ANDROID__ -D__REDIS_ANDROID__ # Redis default settings STD :=-std=c99 -pedantic -DREDIS_STATIC='' @@ -116,7 +128,7 @@ LOCAL_CFLAGS += -funwind-tables LOCAL_C_INCLUDES += $(LOCAL_PATH)/../ -ifeq ($(REDIS_DIR),redis-4.0.11) +ifeq ($(REDIS_DIR),redis-5.0.0) LOCAL_SRC_FILES += \ $(SRC)/adlist.c $(SRC)/ae.c \ @@ -152,8 +164,8 @@ include $(BUILD_EXECUTABLE) include $(CLEAR_VARS) LOCAL_MODULE := redis-check-aof -REDIS_ANDROID_DIR := redis-4.0.11 -REDIS_DIR := redis-4.0.11 +REDIS_ANDROID_DIR := redis-5.0.0 +REDIS_DIR := redis-5.0.0 # Includes lua library LOCAL_STATIC_LIBRARIES += lua @@ -180,7 +192,7 @@ LOCAL_CFLAGS += -funwind-tables LOCAL_C_INCLUDES += $(LOCAL_PATH)/../ -ifeq ($(REDIS_DIR),redis-4.0.11) +ifeq ($(REDIS_DIR),redis-5.0.0) LOCAL_SRC_FILES += \ @@ -203,6 +215,8 @@ ifeq ($(REDIS_DIR),redis-4.0.11) $(SRC)/siphash.c $(SRC)/slowlog.c $(SRC)/sort.c $(SRC)/sparkline.c \ $(SRC)/syncio.c $(SRC)/t_hash.c $(SRC)/t_list.c \ $(SRC)/t_set.c $(SRC)/t_string.c $(SRC)/t_zset.c \ + $(SRC)/lolwut.c $(SRC)/lolwut5.c $(SRC)/localtime.c \ + $(SRC)/listpack.c $(SRC)/t_stream.c \ $(SRC)/util.c $(SRC)/wait3.c $(SRC)/ziplist.c \ $(SRC)/zipmap.c $(SRC)/zmalloc.c $(SRC)/redis-check-aof.c \ $(SRC)/redis-check-rdb.c @@ -217,8 +231,8 @@ include $(BUILD_EXECUTABLE) include $(CLEAR_VARS) LOCAL_MODULE := redis-cli -REDIS_ANDROID_DIR := redis-4.0.11 -REDIS_DIR := redis-4.0.11 +REDIS_ANDROID_DIR := redis-5.0.0 +REDIS_DIR := redis-5.0.0 # Includes linenoise library @@ -227,6 +241,9 @@ LOCAL_STATIC_LIBRARIES += linenoise # Includes hiredis library LOCAL_STATIC_LIBRARIES += hiredis +# Includes lua library +LOCAL_STATIC_LIBRARIES += lua + LOCAL_LDLIBS := -llog LOCAL_CFLAGS := -O3 -D__ANDROID__ @@ -234,7 +251,7 @@ LOCAL_SRC_FILES += \ $(SRC)/anet.c $(SRC)/adlist.c \ $(SRC)/redis-cli.c $(SRC)/zmalloc.c \ $(SRC)/release.c $(SRC)/crc64.c \ - $(SRC)/ae.c + $(SRC)/ae.c $(SRC)/crc16.c $(SRC)/dict.c $(SRC)/siphash.c #### PROJECT_PATH := $(abspath $(SOURCE_PATH)/../../) #### TARGET_OUT := $(PROJECT_PATH)/res/raw/ diff --git a/redis-android/src/main/jni/redis-4.0.11/BUGS b/redis-android/src/main/jni/redis-5.0.0/BUGS similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/BUGS rename to redis-android/src/main/jni/redis-5.0.0/BUGS diff --git a/redis-android/src/main/jni/redis-4.0.11/CONTRIBUTING b/redis-android/src/main/jni/redis-5.0.0/CONTRIBUTING similarity index 87% rename from redis-android/src/main/jni/redis-4.0.11/CONTRIBUTING rename to redis-android/src/main/jni/redis-5.0.0/CONTRIBUTING index f57de3f..7dee24c 100644 --- a/redis-android/src/main/jni/redis-4.0.11/CONTRIBUTING +++ 
b/redis-android/src/main/jni/redis-5.0.0/CONTRIBUTING @@ -8,7 +8,9 @@ each source file that you contribute. # IMPORTANT: HOW TO USE REDIS GITHUB ISSUES * Github issues SHOULD ONLY BE USED to report bugs, and for DETAILED feature - requests. Everything else belongs to the Redis Google Group. + requests. Everything else belongs to the Redis Google Group: + + https://groups.google.com/forum/m/#!forum/Redis-db PLEASE DO NOT POST GENERAL QUESTIONS that are not about bugs or suspected bugs in the Github issues system. We'll be very happy to help you and provide @@ -30,7 +32,7 @@ each source file that you contribute. a. Fork Redis on github ( http://help.github.com/fork-a-repo/ ) b. Create a topic branch (git checkout -b my_branch) c. Push to your branch (git push origin my_branch) - d. Initiate a pull request on github ( http://help.github.com/send-pull-requests/ ) + d. Initiate a pull request on github ( https://help.github.com/articles/creating-a-pull-request/ ) e. Done :) For minor fixes just open a pull request on Github. diff --git a/redis-android/src/main/jni/redis-4.0.11/COPYING b/redis-android/src/main/jni/redis-5.0.0/COPYING similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/COPYING rename to redis-android/src/main/jni/redis-5.0.0/COPYING diff --git a/redis-android/src/main/jni/redis-4.0.11/INSTALL b/redis-android/src/main/jni/redis-5.0.0/INSTALL similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/INSTALL rename to redis-android/src/main/jni/redis-5.0.0/INSTALL diff --git a/redis-android/src/main/jni/redis-4.0.11/MANIFESTO b/redis-android/src/main/jni/redis-5.0.0/MANIFESTO similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/MANIFESTO rename to redis-android/src/main/jni/redis-5.0.0/MANIFESTO diff --git a/redis-android/src/main/jni/redis-4.0.11/Makefile b/redis-android/src/main/jni/redis-5.0.0/Makefile similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/Makefile rename to redis-android/src/main/jni/redis-5.0.0/Makefile diff --git a/redis-android/src/main/jni/redis-4.0.11/README.md b/redis-android/src/main/jni/redis-5.0.0/README.md similarity index 97% rename from redis-android/src/main/jni/redis-4.0.11/README.md rename to redis-android/src/main/jni/redis-5.0.0/README.md index 42ab478..4b1a983 100644 --- a/redis-android/src/main/jni/redis-4.0.11/README.md +++ b/redis-android/src/main/jni/redis-5.0.0/README.md @@ -119,7 +119,7 @@ parameter (the path of the configuration file): It is possible to alter the Redis configuration by passing parameters directly as options using the command line. Examples: - % ./redis-server --port 9999 --slaveof 127.0.0.1 6379 + % ./redis-server --port 9999 --replicaof 127.0.0.1 6379 % ./redis-server /etc/redis/6379.conf --loglevel debug All the options in redis.conf are also supported as options using the command @@ -216,7 +216,7 @@ Inside the root are the following important directories: * `src`: contains the Redis implementation, written in C. * `tests`: contains the unit tests, implemented in Tcl. -* `deps`: contains libraries Redis uses. Everything needed to compile Redis is inside this directory; your system just needs to provide `libc`, a POSIX compatible interface and a C compiler. Notably `deps` contains a copy of `jemalloc`, which is the default allocator of Redis under Linux. Note that under `deps` there are also things which started with the Redis project, but for which the main repository is not `anitrez/redis`. 
An exception to this rule is `deps/geohash-int` which is the low level geocoding library used by Redis: it originated from a different project, but at this point it diverged so much that it is developed as a separated entity directly inside the Redis repository. +* `deps`: contains libraries Redis uses. Everything needed to compile Redis is inside this directory; your system just needs to provide `libc`, a POSIX compatible interface and a C compiler. Notably `deps` contains a copy of `jemalloc`, which is the default allocator of Redis under Linux. Note that under `deps` there are also things which started with the Redis project, but for which the main repository is not `antirez/redis`. An exception to this rule is `deps/geohash-int` which is the low level geocoding library used by Redis: it originated from a different project, but at this point it diverged so much that it is developed as a separated entity directly inside the Redis repository. There are a few more directories but they are not very important for our goals here. We'll focus mostly on `src`, where the Redis implementation is contained, @@ -227,7 +227,7 @@ of complexity incrementally. Note: lately Redis was refactored quite a bit. Function names and file names have been changed, so you may find that this documentation reflects the `unstable` branch more closely. For instance in Redis 3.0 the `server.c` -and `server.h` files were named to `redis.c` and `redis.h`. However the overall +and `server.h` files were named `redis.c` and `redis.h`. However the overall structure is the same. Keep in mind that all the new developments and pull requests should be performed against the `unstable` branch. @@ -245,7 +245,7 @@ A few important fields in this structure are: * `server.db` is an array of Redis databases, where data is stored. * `server.commands` is the command table. * `server.clients` is a linked list of clients connected to the server. -* `server.master` is a special client, the master, if the instance is a slave. +* `server.master` is a special client, the master, if the instance is a replica. There are tons of other fields. Most fields are commented directly inside the structure definition. @@ -323,7 +323,7 @@ Inside server.c you can find code that handles other vital things of the Redis s networking.c --- -This file defines all the I/O functions with clients, masters and slaves +This file defines all the I/O functions with clients, masters and replicas (which in Redis are just special clients): * `createClient()` allocates and initializes a new client. @@ -390,16 +390,16 @@ replication.c This is one of the most complex files inside Redis, it is recommended to approach it only after getting a bit familiar with the rest of the code base. -In this file there is the implementation of both the master and slave role +In this file there is the implementation of both the master and replica role of Redis. -One of the most important functions inside this file is `replicationFeedSlaves()` that writes commands to the clients representing slave instances connected -to our master, so that the slaves can get the writes performed by the clients: +One of the most important functions inside this file is `replicationFeedSlaves()` that writes commands to the clients representing replica instances connected +to our master, so that the replicas can get the writes performed by the clients: this way their data set will remain synchronized with the one in the master. 
 This file also implements both the `SYNC` and `PSYNC` commands that are
 used in order to perform the first synchronization between masters and
-slaves, or to continue the replication after a disconnection.
+replicas, or to continue the replication after a disconnection.
 
 Other C files
 ---
@@ -435,7 +435,7 @@ top comment inside `server.c`.
 After the command operates in some way, it returns a reply to the client,
 usually using `addReply()` or a similar function defined inside
 networking.c.
 
-There are tons of commands implementations inside th Redis source code
+There are tons of commands implementations inside the Redis source code
 that can serve as examples of actual commands implementations. To write
 a few toy commands can be a good exercise to familiarize with the code
 base.
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/Android.mk b/redis-android/src/main/jni/redis-5.0.0/deps/Android.mk
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/deps/Android.mk
rename to redis-android/src/main/jni/redis-5.0.0/deps/Android.mk
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/Makefile b/redis-android/src/main/jni/redis-5.0.0/deps/Makefile
similarity index 92%
rename from redis-android/src/main/jni/redis-4.0.11/deps/Makefile
rename to redis-android/src/main/jni/redis-5.0.0/deps/Makefile
index e148a33..eb35c1e 100644
--- a/redis-android/src/main/jni/redis-4.0.11/deps/Makefile
+++ b/redis-android/src/main/jni/redis-5.0.0/deps/Makefile
@@ -77,7 +77,7 @@ JEMALLOC_LDFLAGS= $(LDFLAGS)
 
 jemalloc: .make-prerequisites
 	@printf '%b %b\n' $(MAKECOLOR)MAKE$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR)
-	cd jemalloc && ./configure --with-lg-quantum=3 --with-jemalloc-prefix=je_ --enable-cc-silence CFLAGS="$(JEMALLOC_CFLAGS)" LDFLAGS="$(JEMALLOC_LDFLAGS)"
+	cd jemalloc && ./configure --with-version=5.1.0-0-g0 --with-lg-quantum=3 --with-jemalloc-prefix=je_ --enable-cc-silence CFLAGS="$(JEMALLOC_CFLAGS)" LDFLAGS="$(JEMALLOC_LDFLAGS)"
 	cd jemalloc && $(MAKE) CFLAGS="$(JEMALLOC_CFLAGS)" LDFLAGS="$(JEMALLOC_LDFLAGS)" lib/libjemalloc.a
 
 .PHONY: jemalloc
diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/README.md b/redis-android/src/main/jni/redis-5.0.0/deps/README.md
similarity index 64%
rename from redis-android/src/main/jni/redis-4.0.11/deps/README.md
rename to redis-android/src/main/jni/redis-5.0.0/deps/README.md
index 0ce4800..367ee16 100644
--- a/redis-android/src/main/jni/redis-4.0.11/deps/README.md
+++ b/redis-android/src/main/jni/redis-5.0.0/deps/README.md
@@ -13,12 +13,34 @@ How to upgrade the above dependencies
 Jemalloc
 ---
 
-Jemalloc is unmodified. We only change settings via the `configure` script of Jemalloc using the `--with-lg-quantum` option, setting it to the value of 3 instead of 4. This provides us with more size classes that better suit the Redis data structures, in order to gain memory efficiency.
-
-So in order to upgrade jemalloc:
+Jemalloc is modified with changes that allow us to implement the Redis
+active defragmentation logic. However this feature of Redis is not mandatory
+and Redis is able to understand if the Jemalloc version it is compiled
+against supports such Redis-specific modifications. So in theory, if you
+are not interested in the active defragmentation, you can replace Jemalloc
+by just following these steps:
 
 1. Remove the jemalloc directory.
 2. Substitute it with the new jemalloc source tree.
+3. Edit the Makefile located in the same directory as the README you are
+   reading, and change the --with-version option in the Jemalloc configure
+   script invocation to the version you are using. This is required because
+   otherwise the Jemalloc configure script is broken and will not work when
+   nested in another git repository.
+
+However note that we change Jemalloc settings via the `configure` script of Jemalloc using the `--with-lg-quantum` option, setting it to the value of 3 instead of 4. This provides us with more size classes that better suit the Redis data structures, in order to gain memory efficiency.
+
+If you want to upgrade Jemalloc while also providing support for
+active defragmentation, in addition to the above steps you need to perform
+the following additional steps:
+
+4. In the Jemalloc tree, in the file `include/jemalloc/jemalloc_macros.h.in`,
+   make sure to add `#define JEMALLOC_FRAG_HINT`.
+5. Implement the function `je_get_defrag_hint()` inside `src/jemalloc.c`. You
+   can see how it is implemented in the current Jemalloc source tree shipped
+   with Redis, and rewrite it according to the new Jemalloc internals, if they
+   changed; otherwise you can just copy the old implementation if you are
+   upgrading to a similar version of Jemalloc.
 
 Geohash
 ---
@@ -28,7 +50,7 @@ This is never upgraded since it's part of the Redis project. If there are change
 Hiredis
 ---
 
-Hiredis uses the SDS string library, that must be the same version used inside Redis itself. Hiredis is also very critical for Sentinel. Historically Redis often used forked versions of hiredis in a way or the other. In order to upgrade it is adviced to take a lot of care:
+Hiredis uses the SDS string library, that must be the same version used inside Redis itself. Hiredis is also very critical for Sentinel. Historically Redis often used forked versions of hiredis in a way or the other. In order to upgrade it is advised to take a lot of care:
 
 1. Check with diff if hiredis API changed and what impact it could have in Redis.
 2. Make sure thet the SDS library inside Hiredis and inside Redis are compatible.
@@ -61,6 +83,6 @@ and our version:
 
 1. Makefile is modified to allow a different compiler than GCC.
 2. We have the implementation source code, and directly link to the following external libraries: `lua_cjson.o`, `lua_struct.o`, `lua_cmsgpack.o` and `lua_bit.o`.
-3. There is a security fix in `ldo.c`, line 498: The check for `LUA_SIGNATURE[0]` is removed in order toa void direct bytecode exectuion.
+3. There is a security fix in `ldo.c`, line 498: The check for `LUA_SIGNATURE[0]` is removed in order to avoid direct bytecode execution.
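For reference, the defrag hint interface described in the deps/README.md
steps above has roughly the following shape. This is a minimal sketch, not
the actual Redis or jemalloc code: the prototype is assumed from the
jemalloc 5 copy shipped under deps/jemalloc and should be double-checked
there, and `should_defrag_ptr` is a hypothetical caller invented for
illustration.

    /* Assumed prototype, available when JEMALLOC_FRAG_HINT is defined:
     * returns non-zero if the allocation behind ptr is movable, and fills
     * in utilization metrics for the run holding ptr and for its bin. */
    int je_get_defrag_hint(void* ptr, int *bin_util, int *run_util);

    /* Hypothetical caller: relocate a value only when the run it lives in
     * is less utilized than its bin on average, so that moving the value
     * is likely to help empty the run. */
    static int should_defrag_ptr(void *ptr) {
        int bin_util = 0, run_util = 0;
        if (!je_get_defrag_hint(ptr, &bin_util, &run_util))
            return 0; /* not movable, or no hint available */
        return run_util < bin_util;
    }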
diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/.appveyor.yml b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/.appveyor.yml new file mode 100644 index 0000000..9a7d00a --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/.appveyor.yml @@ -0,0 +1,42 @@ +version: '{build}' + +environment: + matrix: + - MSYSTEM: MINGW64 + CPU: x86_64 + MSVC: amd64 + - MSYSTEM: MINGW32 + CPU: i686 + MSVC: x86 + - MSYSTEM: MINGW64 + CPU: x86_64 + - MSYSTEM: MINGW32 + CPU: i686 + - MSYSTEM: MINGW64 + CPU: x86_64 + MSVC: amd64 + CONFIG_FLAGS: --enable-debug + - MSYSTEM: MINGW32 + CPU: i686 + MSVC: x86 + CONFIG_FLAGS: --enable-debug + - MSYSTEM: MINGW64 + CPU: x86_64 + CONFIG_FLAGS: --enable-debug + - MSYSTEM: MINGW32 + CPU: i686 + CONFIG_FLAGS: --enable-debug + +install: + - set PATH=c:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH% + - if defined MSVC call "c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %MSVC% + - if defined MSVC pacman --noconfirm -Rsc mingw-w64-%CPU%-gcc gcc + - pacman --noconfirm -Suy mingw-w64-%CPU%-make + +build_script: + - bash -c "autoconf" + - bash -c "./configure $CONFIG_FLAGS" + - mingw32-make + - file lib/jemalloc.dll + - mingw32-make tests + - mingw32-make -k check diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/.autom4te.cfg b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/.autom4te.cfg similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/.autom4te.cfg rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/.autom4te.cfg diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/.gitattributes b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/.gitattributes similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/.gitattributes rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/.gitattributes diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/.gitignore b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/.gitignore similarity index 64% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/.gitignore rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/.gitignore index d0e3936..19199cc 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/.gitignore +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/.gitignore @@ -1,5 +1,3 @@ -/*.gcov.* - /bin/jemalloc-config /bin/jemalloc.sh /bin/jeprof @@ -21,10 +19,14 @@ /Makefile -/include/jemalloc/internal/jemalloc_internal.h +/include/jemalloc/internal/jemalloc_preamble.h /include/jemalloc/internal/jemalloc_internal_defs.h +/include/jemalloc/internal/private_namespace.gen.h /include/jemalloc/internal/private_namespace.h -/include/jemalloc/internal/private_unnamespace.h +/include/jemalloc/internal/private_namespace_jet.gen.h +/include/jemalloc/internal/private_namespace_jet.h +/include/jemalloc/internal/private_symbols.awk +/include/jemalloc/internal/private_symbols_jet.awk /include/jemalloc/internal/public_namespace.h /include/jemalloc/internal/public_symbols.txt /include/jemalloc/internal/public_unnamespace.h @@ -40,8 +42,9 @@ /include/jemalloc/jemalloc_typedefs.h /src/*.[od] -/src/*.gcda -/src/*.gcno +/src/*.sym + +/run_tests.out/ /test/test.sh test/include/test/jemalloc_test.h @@ -50,26 +53,41 @@ test/include/test/jemalloc_test_defs.h /test/integration/[A-Za-z]* !/test/integration/[A-Za-z]*.* /test/integration/*.[od] -/test/integration/*.gcda -/test/integration/*.gcno /test/integration/*.out 
+/test/integration/cpp/[A-Za-z]* +!/test/integration/cpp/[A-Za-z]*.* +/test/integration/cpp/*.[od] +/test/integration/cpp/*.out + /test/src/*.[od] -/test/src/*.gcda -/test/src/*.gcno /test/stress/[A-Za-z]* !/test/stress/[A-Za-z]*.* /test/stress/*.[od] -/test/stress/*.gcda -/test/stress/*.gcno /test/stress/*.out /test/unit/[A-Za-z]* !/test/unit/[A-Za-z]*.* /test/unit/*.[od] -/test/unit/*.gcda -/test/unit/*.gcno /test/unit/*.out /VERSION + +*.pdb +*.sdf +*.opendb +*.VC.db +*.opensdf +*.cachefile +*.suo +*.user +*.sln.docstates +*.tmp +.vs/ +/msvc/Win32/ +/msvc/x64/ +/msvc/projects/*/*/Debug*/ +/msvc/projects/*/*/Release*/ +/msvc/projects/*/*/Win32/ +/msvc/projects/*/*/x64/ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/.travis.yml b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/.travis.yml new file mode 100644 index 0000000..4cc116e --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/.travis.yml @@ -0,0 +1,156 @@ +language: generic +dist: precise + +matrix: + include: + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: osx + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: + apt: + packages: + - gcc-multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: osx + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: osx + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: osx + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: osx + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: osx + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: + apt: + packages: + - gcc-multilib + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof" 
EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: + apt: + packages: + - gcc-multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: + apt: + packages: + - gcc-multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: + apt: + packages: + - gcc-multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: + apt: + packages: + - gcc-multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: + apt: + packages: + - gcc-multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: + apt: + packages: + - gcc-multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + addons: + apt: + packages: + - gcc-multilib + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: 
CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,dss:primary" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,percpu_arena:percpu" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + - os: linux + env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" + + +before_script: + - autoconf + - ./configure ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS" } $CONFIGURE_FLAGS + - make -j3 + - make -j3 tests + +script: + - make check + diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/Android.mk b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/Android.mk new file mode 100644 index 0000000..8f7fd92 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/Android.mk @@ -0,0 +1,35 @@ + +LOCAL_PATH := $(call my-dir) + +include $(CLEAR_VARS) + +LOCAL_MODULE := jemalloc +LOCAL_CFLAGS := -O2 -D__ANDROID__ + +# To get ARM stack traces if Redis crashes we need a special C flag. 
+LOCAL_CFLAGS += -funwind-tables + + +SRC := $(LOCAL_PATH)/src +INCLUDE := $(LOCAL_PATH)/include +LOCAL_C_INCLUDES += $(SRC) +LOCAL_C_INCLUDES += $(INCLUDE) +LOCAL_CFLAGS += -I$(SRC) -I$(INCLUDE) + +# Source list mirrors C_SRCS in deps/jemalloc/Makefile.in for jemalloc 5.1: +# the 4.x-era files (chunk*.c, huge.c, mb.c, quarantine.c, util.c, valgrind.c) +# no longer exist, and zone.c (Darwin) / jemalloc_cpp.cpp are not used on Android. +LOCAL_SRC_FILES += \ + $(SRC)/arena.c $(SRC)/background_thread.c $(SRC)/base.c $(SRC)/bin.c \ + $(SRC)/bitmap.c $(SRC)/ckh.c $(SRC)/ctl.c $(SRC)/div.c \ + $(SRC)/extent.c $(SRC)/extent_dss.c $(SRC)/extent_mmap.c $(SRC)/hash.c \ + $(SRC)/hooks.c $(SRC)/jemalloc.c $(SRC)/large.c $(SRC)/log.c \ + $(SRC)/malloc_io.c $(SRC)/mutex.c $(SRC)/mutex_pool.c $(SRC)/nstime.c \ + $(SRC)/pages.c $(SRC)/prng.c $(SRC)/prof.c $(SRC)/rtree.c \ + $(SRC)/stats.c $(SRC)/sz.c $(SRC)/tcache.c $(SRC)/ticker.c \ + $(SRC)/tsd.c $(SRC)/witness.c + + +# Export the header directories so that dependent modules can use them. +# http://serenegiant.com/blog/?p=2119 +LOCAL_EXPORT_C_INCLUDES := $(SRC) $(INCLUDE) + +include $(BUILD_STATIC_LIBRARY) diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/COPYING b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/COPYING similarity index 92% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/COPYING rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/COPYING index 611968c..98458d9 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/COPYING +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/COPYING @@ -1,10 +1,10 @@ Unless otherwise specified, files in the jemalloc source distribution are subject to the following license: -------------------------------------------------------------------------------- -Copyright (C) 2002-2015 Jason Evans <jasone@canonware.com>. +Copyright (C) 2002-2018 Jason Evans <jasone@canonware.com>. All rights reserved. Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. -Copyright (C) 2009-2015 Facebook, Inc. All rights reserved. +Copyright (C) 2009-2018 Facebook, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/ChangeLog b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/ChangeLog similarity index 55% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/ChangeLog rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/ChangeLog index e3b0a51..29a00fb 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/ChangeLog +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/ChangeLog @@ -4,6 +4,600 @@ brevity. Much more detail can be found in the git revision history: https://github.com/jemalloc/jemalloc +* 5.1.0 (May 4th, 2018) + + This release is primarily about fine-tuning, ranging from several new features + to numerous notable performance and portability enhancements. The release and + prior dev versions have been running in multiple large scale applications for + months, and the cumulative improvements are substantial in many cases. + + Given the long and successful production runs, this release is likely a good + candidate for applications to upgrade, from both jemalloc 5.0 and before. For + performance-critical applications, the newly added TUNING.md provides + guidelines on jemalloc tuning. + + New features: + - Implement transparent huge page support for internal metadata.
(@interwq) + - Add opt.thp to allow enabling / disabling transparent huge pages for all + mappings. (@interwq) + - Add maximum background thread count option. (@djwatson) + - Allow prof_active to control opt.lg_prof_interval and prof.gdump. + (@interwq) + - Allow arena index lookup based on allocation addresses via mallctl. + (@lionkov) + - Allow disabling initial-exec TLS model. (@davidtgoldblatt, @KenMacD) + - Add opt.lg_extent_max_active_fit to set the max ratio between the size of + the active extent selected (to split off from) and the size of the requested + allocation. (@interwq, @davidtgoldblatt) + - Add retain_grow_limit to set the max size when growing virtual address + space. (@interwq) + - Add mallctl interfaces: + + arena.<i>.retain_grow_limit (@interwq) + + arenas.lookup (@lionkov) + + max_background_threads (@djwatson) + + opt.lg_extent_max_active_fit (@interwq) + + opt.max_background_threads (@djwatson) + + opt.metadata_thp (@interwq) + + opt.thp (@interwq) + + stats.metadata_thp (@interwq) + + Portability improvements: + - Support GNU/kFreeBSD configuration. (@paravoid) + - Support m68k, nios2 and SH3 architectures. (@paravoid) + - Fall back to FD_CLOEXEC when O_CLOEXEC is unavailable. (@zonyitoo) + - Fix symbol listing for cross-compiling. (@tamird) + - Fix high bits computation on ARM. (@davidtgoldblatt, @paravoid) + - Disable the CPU_SPINWAIT macro for Power. (@davidtgoldblatt, @marxin) + - Fix MSVC 2015 & 2017 builds. (@rustyx) + - Improve RISC-V support. (@EdSchouten) + - Set name mangling script in strict mode. (@nicolov) + - Avoid MADV_HUGEPAGE on ARM. (@marxin) + - Modify configure to determine return value of strerror_r. + (@davidtgoldblatt, @cferris1000) + - Make sure CXXFLAGS is tested with CPP compiler. (@nehaljwani) + - Fix 32-bit build on MSVC. (@rustyx) + - Fix external symbol on MSVC. (@maksqwe) + - Avoid a printf format specifier warning. (@jasone) + - Add configure option --disable-initial-exec-tls which can allow jemalloc to + be dynamically loaded after program startup. (@davidtgoldblatt, @KenMacD) + - AArch64: Add ILP32 support. (@cmuellner) + - Add --with-lg-vaddr configure option to support cross compiling. + (@cmuellner, @davidtgoldblatt) + + Optimizations and refactors: + - Improve active extent fit with extent_max_active_fit. This considerably + reduces fragmentation over time and improves virtual memory and metadata + usage. (@davidtgoldblatt, @interwq) + - Eagerly coalesce large extents to reduce fragmentation. (@interwq) + - sdallocx: only read size info when page aligned (i.e. possibly sampled), + which speeds up the sized deallocation path significantly. (@interwq) + - Avoid attempting new mappings for in place expansion with retain, since + it rarely succeeds in practice and causes high overhead. (@interwq) + - Refactor OOM handling in newImpl. (@wqfish) + - Add internal fine-grained logging functionality for debugging use. + (@davidtgoldblatt) + - Refactor arena / tcache interactions. (@davidtgoldblatt) + - Refactor extent management with dumpable flag. (@davidtgoldblatt) + - Add runtime detection of lazy purging. (@interwq) + - Use pairing heap instead of red-black tree for extents_avail. (@djwatson) + - Use sysctl on startup in FreeBSD. (@trasz) + - Use thread local prng state instead of atomic. (@djwatson) + - Make decay to always purge one more extent than before, because in + practice large extents are usually the ones that cross the decay threshold. + Purging the additional extent helps save memory as well as reduce VM + fragmentation.
(@interwq) + - Fast division by dynamic values. (@davidtgoldblatt) + - Improve the fit for aligned allocation. (@interwq, @edwinsmith) + - Refactor extent_t bitpacking. (@rkmisra) + - Optimize the generated assembly for ticker operations. (@davidtgoldblatt) + - Convert stats printing to use a structured text emitter. (@davidtgoldblatt) + - Remove preserve_lru feature for extents management. (@djwatson) + - Consolidate two memory loads into one on the fast deallocation path. + (@davidtgoldblatt, @interwq) + + Bug fixes (most of the issues are only relevant to jemalloc 5.0): + - Fix deadlock with multithreaded fork in OS X. (@davidtgoldblatt) + - Validate returned file descriptor before use. (@zonyitoo) + - Fix a few background thread initialization and shutdown issues. (@interwq) + - Fix an extent coalesce + decay race by taking both coalescing extents off + the LRU list. (@interwq) + - Fix potentially unbound increase during decay, caused by one thread keep + stashing memory to purge while other threads generating new pages. The + number of pages to purge is checked to prevent this. (@interwq) + - Fix a FreeBSD bootstrap assertion. (@strejda, @interwq) + - Handle 32 bit mutex counters. (@rkmisra) + - Fix an indexing bug when creating background threads. (@davidtgoldblatt, + @binliu19) + - Fix arguments passed to extent_init. (@yuleniwo, @interwq) + - Fix addresses used for ordering mutexes. (@rkmisra) + - Fix abort_conf processing during bootstrap. (@interwq) + - Fix include path order for out-of-tree builds. (@cmuellner) + + Incompatible changes: + - Remove --disable-thp. (@interwq) + - Remove mallctl interfaces: + + config.thp (@interwq) + + Documentation: + - Add TUNING.md. (@interwq, @davidtgoldblatt, @djwatson) + +* 5.0.1 (July 1, 2017) + + This bugfix release fixes several issues, most of which are obscure enough + that typical applications are not impacted. + + Bug fixes: + - Update decay->nunpurged before purging, in order to avoid potential update + races and subsequent incorrect purging volume. (@interwq) + - Only abort on dlsym(3) error if the failure impacts an enabled feature (lazy + locking and/or background threads). This mitigates an initialization + failure bug for which we still do not have a clear reproduction test case. + (@interwq) + - Modify tsd management so that it neither crashes nor leaks if a thread's + only allocation activity is to call free() after TLS destructors have been + executed. This behavior was observed when operating with GNU libc, and is + unlikely to be an issue with other libc implementations. (@interwq) + - Mask signals during background thread creation. This prevents signals from + being inadvertently delivered to background threads. (@jasone, + @davidtgoldblatt, @interwq) + - Avoid inactivity checks within background threads, in order to prevent + recursive mutex acquisition. (@interwq) + - Fix extent_grow_retained() to use the specified hooks when the + arena.<i>.extent_hooks mallctl is used to override the default hooks. + (@interwq) + - Add missing reentrancy support for custom extent hooks which allocate. + (@interwq) + - Post-fork(2), re-initialize the list of tcaches associated with each arena + to contain no tcaches except the forking thread's. (@interwq) + - Add missing post-fork(2) mutex reinitialization for extent_grow_mtx. This + fixes potential deadlocks after fork(2). (@interwq) + - Enforce minimum autoconf version (currently 2.68), since 2.63 is known to + generate corrupt configure scripts.
(@jasone) + - Ensure that the configured page size (--with-lg-page) is no larger than the + configured huge page size (--with-lg-hugepage). (@jasone) + +* 5.0.0 (June 13, 2017) + + Unlike all previous jemalloc releases, this release does not use naturally + aligned "chunks" for virtual memory management, and instead uses page-aligned + "extents". This change has few externally visible effects, but the internal + impacts are... extensive. Many other internal changes combine to make this + the most cohesively designed version of jemalloc so far, with ample + opportunity for further enhancements. + + Continuous integration is now an integral aspect of development thanks to the + efforts of @davidtgoldblatt, and the dev branch tends to remain reasonably + stable on the tested platforms (Linux, FreeBSD, macOS, and Windows). As a + side effect the official release frequency may decrease over time. + + New features: + - Implement optional per-CPU arena support; threads choose which arena to use + based on current CPU rather than on fixed thread-->arena associations. + (@interwq) + - Implement two-phase decay of unused dirty pages. Pages transition from + dirty-->muzzy-->clean, where the first phase transition relies on + madvise(... MADV_FREE) semantics, and the second phase transition discards + pages such that they are replaced with demand-zeroed pages on next access. + (@jasone) + - Increase decay time resolution from seconds to milliseconds. (@jasone) + - Implement opt-in per CPU background threads, and use them for asynchronous + decay-driven unused dirty page purging. (@interwq) + - Add mutex profiling, which collects a variety of statistics useful for + diagnosing overhead/contention issues. (@interwq) + - Add C++ new/delete operator bindings. (@djwatson) + - Support manually created arena destruction, such that all data and metadata + are discarded. Add MALLCTL_ARENAS_DESTROYED for accessing merged stats + associated with destroyed arenas. (@jasone) + - Add MALLCTL_ARENAS_ALL as a fixed index for use in accessing + merged/destroyed arena statistics via mallctl. (@jasone) + - Add opt.abort_conf to optionally abort if invalid configuration options are + detected during initialization. (@interwq) + - Add opt.stats_print_opts, so that e.g. JSON output can be selected for the + stats dumped during exit if opt.stats_print is true. (@jasone) + - Add --with-version=VERSION for use when embedding jemalloc into another + project's git repository. (@jasone) + - Add --disable-thp to support cross compiling. (@jasone) + - Add --with-lg-hugepage to support cross compiling. (@jasone) + - Add mallctl interfaces (various authors): + + background_thread + + opt.abort_conf + + opt.retain + + opt.percpu_arena + + opt.background_thread + + opt.{dirty,muzzy}_decay_ms + + opt.stats_print_opts + + arena.<i>.initialized + + arena.<i>.destroy + + arena.<i>.{dirty,muzzy}_decay_ms + + arena.<i>.extent_hooks + + arenas.{dirty,muzzy}_decay_ms + + arenas.bin.<i>.slab_size + + arenas.nlextents + + arenas.lextent.<i>.size + + arenas.create + + stats.background_thread.{num_threads,num_runs,run_interval} + + stats.mutexes.{ctl,background_thread,prof,reset}. + {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, + num_owner_switch} + + stats.arenas.<i>.{dirty,muzzy}_decay_ms + + stats.arenas.<i>.uptime + + stats.arenas.<i>.{pmuzzy,base,internal,resident} + + stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged} + + stats.arenas.<i>.bins.<j>.{nslabs,reslabs,curslabs} + + stats.arenas.<i>.bins.<j>.mutex.
+ {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, + num_owner_switch} + + stats.arenas.<i>.lextents.<j>.{nmalloc,ndalloc,nrequests,curlextents} + + stats.arenas.<i>.mutexes.{large,extent_avail,extents_dirty,extents_muzzy, + extents_retained,decay_dirty,decay_muzzy,base,tcache_list}. + {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, + num_owner_switch} + + Portability improvements: + - Improve reentrant allocation support, such that deadlock is less likely if + e.g. a system library call in turn allocates memory. (@davidtgoldblatt, + @interwq) + - Support static linking of jemalloc with glibc. (@djwatson) + + Optimizations and refactors: + - Organize virtual memory as "extents" of virtual memory pages, rather than as + naturally aligned "chunks", and store all metadata in arbitrarily distant + locations. This reduces virtual memory external fragmentation, and will + interact better with huge pages (not yet explicitly supported). (@jasone) + - Fold large and huge size classes together; only small and large size classes + remain. (@jasone) + - Unify the allocation paths, and merge most fast-path branching decisions. + (@davidtgoldblatt, @interwq) + - Embed per thread automatic tcache into thread-specific data, which reduces + conditional branches and dereferences. Also reorganize tcache to increase + fast-path data locality. (@interwq) + - Rewrite atomics to closely model the C11 API, convert various + synchronization from mutex-based to atomic, and use the explicit memory + ordering control to resolve various hypothetical races without increasing + synchronization overhead. (@davidtgoldblatt) + - Extensively optimize rtree via various methods: + + Add multiple layers of rtree lookup caching, since rtree lookups are now + part of fast-path deallocation. (@interwq) + + Determine rtree layout at compile time. (@jasone) + + Make the tree shallower for common configurations. (@jasone) + + Embed the root node in the top-level rtree data structure, thus avoiding + one level of indirection. (@jasone) + + Further specialize leaf elements as compared to internal node elements, + and directly embed extent metadata needed for fast-path deallocation. + (@jasone) + + Ignore leading always-zero address bits (architecture-specific). + (@jasone) + - Reorganize headers (ongoing work) to make them hermetic, and disentangle + various module dependencies. (@davidtgoldblatt) + - Convert various internal data structures such as size class metadata from + boot-time-initialized to compile-time-initialized. Propagate resulting data + structure simplifications, such as making arena metadata fixed-size. + (@jasone) + - Simplify size class lookups when constrained to size classes that are + multiples of the page size. This speeds lookups, but the primary benefit is + complexity reduction in code that was the source of numerous regressions. + (@jasone) + - Lock individual extents when possible for localized extent operations, + rather than relying on a top-level arena lock. (@davidtgoldblatt, @jasone) + - Use first fit layout policy instead of best fit, in order to improve + packing. (@jasone) + - If munmap(2) is not in use, use an exponential series to grow each arena's + virtual memory, so that the number of disjoint virtual memory mappings + remains low. (@jasone) + - Implement per arena base allocators, so that arenas never share any virtual + memory pages. (@jasone) + - Automatically generate private symbol name mangling macros.
(@jasone) + + Incompatible changes: + - Replace chunk hooks with an expanded/normalized set of extent hooks. + (@jasone) + - Remove ratio-based purging. (@jasone) + - Remove --disable-tcache. (@jasone) + - Remove --disable-tls. (@jasone) + - Remove --enable-ivsalloc. (@jasone) + - Remove --with-lg-size-class-group. (@jasone) + - Remove --with-lg-tiny-min. (@jasone) + - Remove --disable-cc-silence. (@jasone) + - Remove --enable-code-coverage. (@jasone) + - Remove --disable-munmap (replaced by opt.retain). (@jasone) + - Remove Valgrind support. (@jasone) + - Remove quarantine support. (@jasone) + - Remove redzone support. (@jasone) + - Remove mallctl interfaces (various authors): + + config.munmap + + config.tcache + + config.tls + + config.valgrind + + opt.lg_chunk + + opt.purge + + opt.lg_dirty_mult + + opt.decay_time + + opt.quarantine + + opt.redzone + + opt.thp + + arena.<i>.lg_dirty_mult + + arena.<i>.decay_time + + arena.<i>.chunk_hooks + + arenas.initialized + + arenas.lg_dirty_mult + + arenas.decay_time + + arenas.bin.<i>.run_size + + arenas.nlruns + + arenas.lrun.<i>.size + + arenas.nhchunks + + arenas.hchunk.<i>.size + + arenas.extend + + stats.cactive + + stats.arenas.<i>.lg_dirty_mult + + stats.arenas.<i>.decay_time + + stats.arenas.<i>.metadata.{mapped,allocated} + + stats.arenas.<i>.{npurge,nmadvise,purged} + + stats.arenas.<i>.huge.{allocated,nmalloc,ndalloc,nrequests} + + stats.arenas.<i>.bins.<j>.{nruns,reruns,curruns} + + stats.arenas.<i>.lruns.<j>.{nmalloc,ndalloc,nrequests,curruns} + + stats.arenas.<i>.hchunks.<j>.{nmalloc,ndalloc,nrequests,curhchunks} + + Bug fixes: + - Improve interval-based profile dump triggering to dump only one profile when + a single allocation's size exceeds the interval. (@jasone) + - Use prefixed function names (as controlled by --with-jemalloc-prefix) when + pruning backtrace frames in jeprof. (@jasone) + +* 4.5.0 (February 28, 2017) + + This is the first release to benefit from much broader continuous integration + testing, thanks to @davidtgoldblatt. Had we had this testing infrastructure + in place for prior releases, it would have caught all of the most serious + regressions fixed by this release. + + New features: + - Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for + transparent huge page integration. (@jasone) + - Update zone allocator integration to work with macOS 10.12. (@glandium) + - Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and + EXTRA_CFLAGS provides a way to specify e.g. -Werror during building, but not + during configuration. (@jasone, @ronawho) + + Bug fixes: + - Fix DSS (sbrk(2)-based) allocation. This regression was first released in + 4.3.0. (@jasone) + - Handle race in per size class utilization computation. This functionality + was first released in 4.0.0. (@interwq) + - Fix lock order reversal during gdump. (@jasone) + - Fix/refactor tcache synchronization. This regression was first released in + 4.0.0. (@jasone) + - Fix various JSON-formatted malloc_stats_print() bugs. This functionality + was first released in 4.3.0. (@jasone) + - Fix huge-aligned allocation. This regression was first released in 4.4.0. + (@jasone) + - When transparent huge page integration is enabled, detect what state pages + start in according to the kernel's current operating mode, and only convert + arena chunks to non-huge during purging if that is not their initial state. + This functionality was first released in 4.4.0. (@jasone) + - Fix lg_chunk clamping for the --enable-cache-oblivious --disable-fill case.
+ This regression was first released in 4.0.0. (@jasone, @428desmo) + - Properly detect sparc64 when building for Linux. (@glaubitz) + +* 4.4.0 (December 3, 2016) + + New features: + - Add configure support for *-*-linux-android. (@cferris1000, @jasone) + - Add the --disable-syscall configure option, for use on systems that place + security-motivated limitations on syscall(2). (@jasone) + - Add support for Debian GNU/kFreeBSD. (@thesam) + + Optimizations: + - Add extent serial numbers and use them where appropriate as a sort key that + is higher priority than address, so that the allocation policy prefers older + extents. This tends to improve locality (decrease fragmentation) when + memory grows downward. (@jasone) + - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized + on Linux 4.5 and newer. (@jasone) + - Mark partially purged arena chunks as non-huge-page. This improves + interaction with Linux's transparent huge page functionality. (@jasone) + + Bug fixes: + - Fix size class computations for edge conditions involving extremely large + allocations. This regression was first released in 4.0.0. (@jasone, + @ingvarha) + - Remove overly restrictive assertions related to the cactive statistic. This + regression was first released in 4.1.0. (@jasone) + - Implement a more reliable detection scheme for os_unfair_lock on macOS. + (@jszakmeister) + +* 4.3.1 (November 7, 2016) + + Bug fixes: + - Fix a severe virtual memory leak. This regression was first released in + 4.3.0. (@interwq, @jasone) + - Refactor atomic and prng APIs to restore support for 32-bit platforms that + use pre-C11 toolchains, e.g. FreeBSD's mips. (@jasone) + +* 4.3.0 (November 4, 2016) + + This is the first release that passes the test suite for multiple Windows + configurations, thanks in large part to @glandium setting up continuous + integration via AppVeyor (and Travis CI for Linux and OS X). + + New features: + - Add "J" (JSON) support to malloc_stats_print(). (@jasone) + - Add Cray compiler support. (@ronawho) + + Optimizations: + - Add/use adaptive spinning for bootstrapping and radix tree node + initialization. (@jasone) + + Bug fixes: + - Fix large allocation to search starting in the optimal size class heap, + which can substantially reduce virtual memory churn and fragmentation. This + regression was first released in 4.0.0. (@mjp41, @jasone) + - Fix stats.arenas.<i>.nthreads accounting. (@interwq) + - Fix and simplify decay-based purging. (@jasone) + - Make DSS (sbrk(2)-related) operations lockless, which resolves potential + deadlocks during thread exit. (@jasone) + - Fix over-sized allocation of radix tree leaf nodes. (@mjp41, @ogaun, + @jasone) + - Fix over-sized allocation of arena_t (plus associated stats) data + structures. (@jasone, @interwq) + - Fix EXTRA_CFLAGS to not affect configuration. (@jasone) + - Fix a Valgrind integration bug. (@ronawho) + - Disallow 0x5a junk filling when running in Valgrind. (@jasone) + - Fix a file descriptor leak on Linux. This regression was first released in + 4.2.0. (@vsarunas, @jasone) + - Fix static linking of jemalloc with glibc. (@djwatson) + - Use syscall(2) rather than {open,read,close}(2) during boot on Linux. This + works around other libraries' system call wrappers performing reentrant + allocation. (@kspinka, @Whissi, @jasone) + - Fix OS X default zone replacement to work with OS X 10.12.
(@glandium, + @jasone) + - Fix cached memory management to avoid needless commit/decommit operations + during purging, which resolves permanent virtual memory map fragmentation + issues on Windows. (@mjp41, @jasone) + - Fix TSD fetches to avoid (recursive) allocation. This is relevant to + non-TLS and Windows configurations. (@jasone) + - Fix malloc_conf overriding to work on Windows. (@jasone) + - Forcibly disable lazy-lock on Windows (was forcibly *enabled*). (@jasone) + +* 4.2.1 (June 8, 2016) + + Bug fixes: + - Fix bootstrapping issues for configurations that require allocation during + tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone) + - Fix gettimeofday() version of nstime_update(). (@ronawho) + - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho) + - Fix potential VM map fragmentation regression. (@jasone) + - Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone) + - Fix heap profiling context leaks in reallocation edge cases. (@jasone) + +* 4.2.0 (May 12, 2016) + + New features: + - Add the arena.<i>.reset mallctl, which makes it possible to discard all of + an arena's allocations in a single operation. (@jasone) + - Add the stats.retained and stats.arenas.<i>.retained statistics. (@jasone) + - Add the --with-version configure option. (@jasone) + - Support --with-lg-page values larger than actual page size. (@jasone) + + Optimizations: + - Use pairing heaps rather than red-black trees for various hot data + structures. (@djwatson, @jasone) + - Streamline fast paths of rtree operations. (@jasone) + - Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone) + - Decommit unused virtual memory if the OS does not overcommit. (@jasone) + - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order + to avoid unfortunate interactions during fork(2). (@jasone) + + Bug fixes: + - Fix chunk accounting related to triggering gdump profiles. (@jasone) + - Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone) + - Scale leak report summary according to sampling probability. (@jasone) + +* 4.1.1 (May 3, 2016) + + This bugfix release resolves a variety of mostly minor issues, though the + bitmap fix is critical for 64-bit Windows. + + Bug fixes: + - Fix the linear scan version of bitmap_sfu() to shift by the proper amount + even when sizeof(long) is not the same as sizeof(void *), as on 64-bit + Windows. (@jasone) + - Fix hashing functions to avoid unaligned memory accesses (and resulting + crashes). This is relevant at least to some ARM-based platforms. + (@rkmisra) + - Fix fork()-related lock rank ordering reversals. These reversals were + unlikely to cause deadlocks in practice except when heap profiling was + enabled and active. (@jasone) + - Fix various chunk leaks in OOM code paths. (@jasone) + - Fix malloc_stats_print() to print opt.narenas correctly. (@jasone) + - Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin) + - Fix a variety of test failures that were due to test fragility rather than + core bugs. (@jasone) + +* 4.1.0 (February 28, 2016) + + This release is primarily about optimizations, but it also incorporates a lot
Many people worked on + this release, to an extent that even with the omission here of minor changes + (see git revision history), and of the people who reported and diagnosed + issues, so much of the work was contributed that starting with this release, + changes are annotated with author credits to help reflect the collaborative + effort involved. + + New features: + - Implement decay-based unused dirty page purging, a major optimization with + mallctl API impact. This is an alternative to the existing ratio-based + unused dirty page purging, and is intended to eventually become the sole + purging mechanism. New mallctls: + + opt.purge + + opt.decay_time + + arena.<i>.decay + + arena.<i>.decay_time + + arenas.decay_time + + stats.arenas.<i>.decay_time + (@jasone, @cevans87) + - Add --with-malloc-conf, which makes it possible to embed a default + options string during configuration. This was motivated by the desire to + specify --with-malloc-conf=purge:decay , since the default must remain + purge:ratio until the 5.0.0 release. (@jasone) + - Add MS Visual Studio 2015 support. (@rustyx, @yuslepukhin) + - Make *allocx() size class overflow behavior defined. The maximum + size class is now less than PTRDIFF_MAX to protect applications against + numerical overflow, and all allocation functions are guaranteed to indicate + errors rather than potentially crashing if the request size exceeds the + maximum size class. (@jasone) + - jeprof: + + Add raw heap profile support. (@jasone) + + Add --retain and --exclude for backtrace symbol filtering. (@jasone) + + Optimizations: + - Optimize the fast path to combine various bootstrapping and configuration + checks and execute more streamlined code in the common case. (@interwq) + - Use linear scan for small bitmaps (used for small object tracking). In + addition to speeding up bitmap operations on 64-bit systems, this reduces + allocator metadata overhead by approximately 0.2%. (@djwatson) + - Separate arena_avail trees, which substantially speeds up run tree + operations. (@djwatson) + - Use memoization (boot-time-computed table) for run quantization. Separate + arena_avail trees reduced the importance of this optimization. (@jasone) + - Attempt mmap-based in-place huge reallocation. This can dramatically speed + up incremental huge reallocation. (@jasone) + + Incompatible changes: + - Make opt.narenas unsigned rather than size_t. (@jasone) + + Bug fixes: + - Fix stats.cactive accounting regression. (@rustyx, @jasone) + - Handle unaligned keys in hash(). This caused problems for some ARM systems. + (@jasone, @cferris1000) + - Refactor arenas array. In addition to fixing a fork-related deadlock, this + makes arena lookups faster and simpler. (@jasone) + - Move retained memory allocation out of the default chunk allocation + function, to a location that gets executed even if the application installs + a custom chunk allocation function. This resolves a virtual memory leak. + (@buchgr) + - Fix a potential tsd cleanup leak. (@cferris1000, @jasone) + - Fix run quantization. In practice this bug had no impact unless + applications requested memory with alignment exceeding one page. + (@jasone, @djwatson) + - Fix LinuxThreads-specific bootstrapping deadlock. (Cosmin Paraschiv) + - jeprof: + + Don't discard curl options if timeout is not defined. (@djwatson) + + Detect failed profile fetches. (@djwatson) + - Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for + --disable-stats case.
(@jasone) + +* 4.0.4 (October 24, 2015) + + This bugfix release fixes another xallocx() regression. No other regressions + have come to light in over a month, so this is likely a good starting point + for people who prefer to wait for "dot one" releases with all the major issues + shaken out. + + Bug fixes: + - Fix xallocx(..., MALLOCX_ZERO) to zero the last full trailing page of large + allocations that have been randomly assigned an offset of 0 when + --enable-cache-oblivious configure option is enabled. + * 4.0.3 (September 24, 2015) This bugfix release continues the trend of xallocx() and heap profiling fixes. @@ -38,7 +632,7 @@ brevity. Much more detail can be found in the git revision history: these fixes, xallocx() now tries harder to partially fulfill requests for optional extra space. Note that a couple of minor heap profiling optimizations are included, but these are better thought of as performance - fixes that were integral to disovering most of the other bugs. + fixes that were integral to discovering most of the other bugs. Optimizations: - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/INSTALL b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/INSTALL similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/INSTALL rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/INSTALL diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/INSTALL.md b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/INSTALL.md new file mode 100644 index 0000000..ef328c6 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/INSTALL.md @@ -0,0 +1,423 @@ +Building and installing a packaged release of jemalloc can be as simple as +typing the following while in the root directory of the source tree: + + ./configure + make + make install + +If building from unpackaged developer sources, the simplest command sequence +that might work is: + + ./autogen.sh + make dist + make + make install + +Note that documentation is not built by the default target because doing so +would create a dependency on xsltproc in packaged releases, hence the +requirement to either run 'make dist' or avoid installing docs via the various +install_* targets documented below. + + +## Advanced configuration + +The 'configure' script supports numerous options that allow control of which +functionality is enabled, where jemalloc is installed, etc. Optionally, pass +any of the following arguments (not a definitive list) to 'configure': + +* `--help` + + Print a definitive list of options. + +* `--prefix=<install-root-dir>` + + Set the base directory in which to install. For example: + + ./configure --prefix=/usr/local + + will cause files to be installed into /usr/local/include, /usr/local/lib, + and /usr/local/man. + +* `--with-version=(<major>.<minor>.<bugfix>-<nrev>-g<gid>|VERSION)` + + The VERSION file is mandatory for successful configuration, and the + following steps are taken to assure its presence: + 1) If --with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid> is specified, + generate VERSION using the specified value. + 2) If --with-version is not specified in either form and the source + directory is inside a git repository, try to generate VERSION via 'git + describe' invocations that pattern-match release tags.
+ 3) If VERSION is missing, generate it with a bogus version: + 0.0.0-0-g0000000000000000000000000000000000000000 + + Note that --with-version=VERSION bypasses (1) and (2), which simplifies + VERSION configuration when embedding a jemalloc release into another + project's git repository. + +* `--with-rpath=<colon-separated-rpath>` + + Embed one or more library paths, so that libjemalloc can find the libraries + it is linked to. This works only on ELF-based systems. + +* `--with-mangling=<map>` + + Mangle public symbols specified in <map> which is a comma-separated list of + name:mangled pairs. + + For example, to use ld's --wrap option as an alternative method for + overriding libc's malloc implementation, specify something like: + + --with-mangling=malloc:__wrap_malloc,free:__wrap_free[...] + + Note that mangling happens prior to application of the prefix specified by + --with-jemalloc-prefix, and mangled symbols are then ignored when applying + the prefix. + +* `--with-jemalloc-prefix=<prefix>` + + Prefix all public APIs with <prefix>. For example, if <prefix> is + "prefix_", API changes like the following occur: + + malloc() --> prefix_malloc() + malloc_conf --> prefix_malloc_conf + /etc/malloc.conf --> /etc/prefix_malloc.conf + MALLOC_CONF --> PREFIX_MALLOC_CONF + + This makes it possible to use jemalloc at the same time as the system + allocator, or even to use multiple copies of jemalloc simultaneously. + + By default, the prefix is "", except on OS X, where it is "je_". On OS X, + jemalloc overlays the default malloc zone, but makes no attempt to actually + replace the "malloc", "calloc", etc. symbols. + +* `--without-export` + + Don't export public APIs. This can be useful when building jemalloc as a + static library, or to avoid exporting public APIs when using the zone + allocator on OSX. + +* `--with-private-namespace=<prefix>` + + Prefix all library-private APIs with <prefix>je_. For shared libraries, + symbol visibility mechanisms prevent these symbols from being exported, but + for static libraries, naming collisions are a real possibility. By + default, <prefix> is empty, which results in a symbol prefix of je_ . + +* `--with-install-suffix=<suffix>` + + Append <suffix> to the base name of all installed files, such that multiple + versions of jemalloc can coexist in the same installation directory. For + example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0. + +* `--with-malloc-conf=<malloc-conf>` + + Embed `<malloc-conf>` as a run-time options string that is processed prior to + the malloc_conf global variable, the /etc/malloc.conf symlink, and the + MALLOC_CONF environment variable. For example, to change the default decay + time to 30 seconds: + + --with-malloc-conf=decay_ms:30000 + +* `--enable-debug` + + Enable assertions and validation code. This incurs a substantial + performance hit, but is very useful during application development. + +* `--disable-stats` + + Disable statistics gathering functionality. See the "opt.stats_print" + option documentation for usage details. + +* `--enable-prof` + + Enable heap profiling and leak detection functionality. See the "opt.prof" + option documentation for usage details. When enabled, there are several + approaches to backtracing, and the configure script chooses the first one + in the following list that appears to function correctly: + + + libunwind (requires --enable-prof-libunwind) + + libgcc (unless --disable-prof-libgcc) + + gcc intrinsics (unless --disable-prof-gcc) + +* `--enable-prof-libunwind` + + Use the libunwind library (http://www.nongnu.org/libunwind/) for stack + backtracing.
+ +* `--disable-prof-libgcc` + + Disable the use of libgcc's backtracing functionality. + +* `--disable-prof-gcc` + + Disable the use of gcc intrinsics for backtracing. + +* `--with-static-libunwind=<libunwind.a>` + + Statically link against the specified libunwind.a rather than dynamically + linking with -lunwind. + +* `--disable-fill` + + Disable support for junk/zero filling of memory. See the "opt.junk" and + "opt.zero" option documentation for usage details. + +* `--disable-zone-allocator` + + Disable zone allocator for Darwin. This means jemalloc won't be hooked as + the default allocator on OSX/iOS. + +* `--enable-utrace` + + Enable utrace(2)-based allocation tracing. This feature is not broadly + portable (FreeBSD has it, but Linux and OS X do not). + +* `--enable-xmalloc` + + Enable support for optional immediate termination due to out-of-memory + errors, as is commonly implemented by "xmalloc" wrapper function for malloc. + See the "opt.xmalloc" option documentation for usage details. + +* `--enable-lazy-lock` + + Enable code that wraps pthread_create() to detect when an application + switches from single-threaded to multi-threaded mode, so that it can avoid + mutex locking/unlocking operations while in single-threaded mode. In + practice, this feature usually has little impact on performance unless + thread-specific caching is disabled. + +* `--disable-cache-oblivious` + + Disable cache-oblivious large allocation alignment for large allocation + requests with no alignment constraints. If this feature is disabled, all + large allocations are page-aligned as an implementation artifact, which can + severely harm CPU cache utilization. However, the cache-oblivious layout + comes at the cost of one extra page per large allocation, which in the + most extreme case increases physical memory usage for the 16 KiB size class + to 20 KiB. + +* `--disable-syscall` + + Disable use of syscall(2) rather than {open,read,write,close}(2). This is + intended as a workaround for systems that place security limitations on + syscall(2). + +* `--disable-cxx` + + Disable C++ integration. This will cause new and delete operator + implementations to be omitted. + +* `--with-xslroot=<path>` + + Specify where to find DocBook XSL stylesheets when building the + documentation. + +* `--with-lg-page=<lg-page>` + + Specify the base 2 log of the allocator page size, which must in turn be at + least as large as the system page size. By default the configure script + determines the host's page size and sets the allocator page size equal to + the system page size, so this option need not be specified unless the + system page size may change between configuration and execution, e.g. when + cross compiling. + +* `--with-lg-page-sizes=<lg-page-sizes>` + + Specify the comma-separated base 2 logs of the page sizes to support. This + option may be useful when cross compiling in combination with + `--with-lg-page`, but its primary use case is for integration with FreeBSD's + libc, wherein jemalloc is embedded. + +* `--with-lg-hugepage=<lg-hugepage>` + + Specify the base 2 log of the system huge page size. This option is useful + when cross compiling, or when overriding the default for systems that do + not explicitly support huge pages. + +* `--with-lg-quantum=<lg-quantum>` + + Specify the base 2 log of the minimum allocation alignment.
jemalloc needs + to know the minimum alignment that meets the following C standard + requirement (quoted from the April 12, 2011 draft of the C11 standard): + + > The pointer returned if the allocation succeeds is suitably aligned so + that it may be assigned to a pointer to any type of object with a + fundamental alignment requirement and then used to access such an object + or an array of such objects in the space allocated [...] + + This setting is architecture-specific, and although jemalloc includes known + safe values for the most commonly used modern architectures, there is a + wrinkle related to GNU libc (glibc) that may impact your choice of + <lg-quantum>. On most modern architectures, this mandates 16-byte + alignment (<lg-quantum>=4), but the glibc developers chose not to meet this + requirement for performance reasons. An old discussion can be found at + https://sourceware.org/bugzilla/show_bug.cgi?id=206 . Unlike glibc, + jemalloc does follow the C standard by default (caveat: jemalloc + technically cheats for size classes smaller than the quantum), but the fact + that Linux systems already work around this allocator noncompliance means + that it is generally safe in practice to let jemalloc's minimum alignment + follow glibc's lead. If you specify `--with-lg-quantum=3` during + configuration, jemalloc will provide additional size classes that are not + 16-byte-aligned (24, 40, and 56). + +* `--with-lg-vaddr=<lg-vaddr>` + + Specify the number of significant virtual address bits. By default, the + configure script attempts to detect virtual address size on those platforms + where it knows how, and picks a default otherwise. This option may be + useful when cross-compiling. + +* `--disable-initial-exec-tls` + + Disable the initial-exec TLS model for jemalloc's internal thread-local + storage (on those platforms that support explicit settings). This can allow + jemalloc to be dynamically loaded after program startup (e.g. using dlopen). + Note that in this case, there will be two malloc implementations operating + in the same process, which will almost certainly result in confusing runtime + crashes if pointers leak from one implementation to the other. + +The following environment variables (not a definitive list) impact configure's +behavior: + +* `CFLAGS="?"` +* `CXXFLAGS="?"` + + Pass these flags to the C/C++ compiler. Any flags set by the configure + script are prepended, which means explicitly set flags generally take + precedence. Take care when specifying flags such as -Werror, because + configure tests may be affected in undesirable ways. + +* `EXTRA_CFLAGS="?"` +* `EXTRA_CXXFLAGS="?"` + + Append these flags to CFLAGS/CXXFLAGS, without passing them to the + compiler(s) during configuration. This makes it possible to add flags such + as -Werror, while allowing the configure script to determine what other + flags are appropriate for the specified configuration. + +* `CPPFLAGS="?"` + + Pass these flags to the C preprocessor. Note that CFLAGS is not passed to + 'cpp' when 'configure' is looking for include files, so you must use + CPPFLAGS instead if you need to help 'configure' find header files. + +* `LD_LIBRARY_PATH="?"` + + 'ld' uses this colon-separated list to find libraries. + +* `LDFLAGS="?"` + + Pass these flags when linking. + +* `PATH="?"` + + 'configure' uses this to find programs. + +In some cases it may be necessary to work around configuration results that do
For example, Linux 4.5 added support for the MADV_FREE flag +to madvise(2), which can cause problems if building on a host with MADV_FREE +support and deploying to a target without. To work around this, use a cache +file to override the relevant configuration variable defined in configure.ac, +e.g.: + + echo "je_cv_madv_free=no" > config.cache && ./configure -C + + +## Advanced compilation + +To build only parts of jemalloc, use the following targets: + + build_lib_shared + build_lib_static + build_lib + build_doc_html + build_doc_man + build_doc + +To install only parts of jemalloc, use the following targets: + + install_bin + install_include + install_lib_shared + install_lib_static + install_lib_pc + install_lib + install_doc_html + install_doc_man + install_doc + +To clean up build results to varying degrees, use the following make targets: + + clean + distclean + relclean + + +## Advanced installation + +Optionally, define make variables when invoking make, including (not +exclusively): + +* `INCLUDEDIR="?"` + + Use this as the installation prefix for header files. + +* `LIBDIR="?"` + + Use this as the installation prefix for libraries. + +* `MANDIR="?"` + + Use this as the installation prefix for man pages. + +* `DESTDIR="?"` + + Prepend DESTDIR to INCLUDEDIR, LIBDIR, DATADIR, and MANDIR. This is useful + when installing to a different path than was specified via --prefix. + +* `CC="?"` + + Use this to invoke the C compiler. + +* `CFLAGS="?"` + + Pass these flags to the compiler. + +* `CPPFLAGS="?"` + + Pass these flags to the C preprocessor. + +* `LDFLAGS="?"` + + Pass these flags when linking. + +* `PATH="?"` + + Use this to search for programs used during configuration and building. + + +## Development + +If you intend to make non-trivial changes to jemalloc, use the 'autogen.sh' +script rather than 'configure'. This re-generates 'configure', enables +configuration dependency rules, and enables re-generation of automatically +generated source files. + +The build system supports using an object directory separate from the source +tree. For example, you can create an 'obj' directory, and from within that +directory, issue configuration and build commands: + + autoconf + mkdir obj + cd obj + ../configure --enable-autogen + make + + +## Documentation + +The manual page is generated in both html and roff formats. Any web browser +can be used to view the html manual. The roff manual page can be formatted +prior to installation via the following command: + + nroff -man -t doc/jemalloc.3 diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/Makefile.in b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/Makefile.in similarity index 52% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/Makefile.in rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/Makefile.in index 1ac6f29..9b9347f 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/Makefile.in +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/Makefile.in @@ -9,6 +9,7 @@ vpath % . SHELL := /bin/sh CC := @CC@ +CXX := @CXX@ # Configuration parameters. DESTDIR = @@ -23,12 +24,18 @@ abs_srcroot := @abs_srcroot@ abs_objroot := @abs_objroot@ # Build parameters. 
-CPPFLAGS := @CPPFLAGS@ -I$(srcroot)include -I$(objroot)include -CFLAGS := @CFLAGS@ +CPPFLAGS := @CPPFLAGS@ -I$(objroot)include -I$(srcroot)include +CONFIGURE_CFLAGS := @CONFIGURE_CFLAGS@ +SPECIFIED_CFLAGS := @SPECIFIED_CFLAGS@ +EXTRA_CFLAGS := @EXTRA_CFLAGS@ +CFLAGS := $(strip $(CONFIGURE_CFLAGS) $(SPECIFIED_CFLAGS) $(EXTRA_CFLAGS)) +CONFIGURE_CXXFLAGS := @CONFIGURE_CXXFLAGS@ +SPECIFIED_CXXFLAGS := @SPECIFIED_CXXFLAGS@ +EXTRA_CXXFLAGS := @EXTRA_CXXFLAGS@ +CXXFLAGS := $(strip $(CONFIGURE_CXXFLAGS) $(SPECIFIED_CXXFLAGS) $(EXTRA_CXXFLAGS)) LDFLAGS := @LDFLAGS@ EXTRA_LDFLAGS := @EXTRA_LDFLAGS@ LIBS := @LIBS@ -TESTLIBS := @TESTLIBS@ RPATH_EXTRA := @RPATH_EXTRA@ SO := @so@ IMPORTLIB := @importlib@ @@ -48,20 +55,24 @@ cfghdrs_out := @cfghdrs_out@ cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@) cfgoutputs_out := @cfgoutputs_out@ enable_autogen := @enable_autogen@ -enable_code_coverage := @enable_code_coverage@ enable_prof := @enable_prof@ -enable_valgrind := @enable_valgrind@ enable_zone_allocator := @enable_zone_allocator@ MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF +link_whole_archive := @link_whole_archive@ DSO_LDFLAGS = @DSO_LDFLAGS@ SOREV = @SOREV@ PIC_CFLAGS = @PIC_CFLAGS@ CTARGET = @CTARGET@ LDTARGET = @LDTARGET@ +TEST_LD_MODE = @TEST_LD_MODE@ MKLIB = @MKLIB@ AR = @AR@ ARFLAGS = @ARFLAGS@ +DUMP_SYMS = @DUMP_SYMS@ +AWK := @AWK@ CC_MM = @CC_MM@ +LM := @LM@ +INSTALL = @INSTALL@ ifeq (macho, $(ABI)) TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib" @@ -78,18 +89,36 @@ LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix) # Lists of files. BINS := $(objroot)bin/jemalloc-config $(objroot)bin/jemalloc.sh $(objroot)bin/jeprof C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h -C_SRCS := $(srcroot)src/jemalloc.c $(srcroot)src/arena.c \ - $(srcroot)src/atomic.c $(srcroot)src/base.c $(srcroot)src/bitmap.c \ - $(srcroot)src/chunk.c $(srcroot)src/chunk_dss.c \ - $(srcroot)src/chunk_mmap.c $(srcroot)src/ckh.c $(srcroot)src/ctl.c \ - $(srcroot)src/extent.c $(srcroot)src/hash.c $(srcroot)src/huge.c \ - $(srcroot)src/mb.c $(srcroot)src/mutex.c $(srcroot)src/pages.c \ - $(srcroot)src/prof.c $(srcroot)src/quarantine.c $(srcroot)src/rtree.c \ - $(srcroot)src/stats.c $(srcroot)src/tcache.c $(srcroot)src/util.c \ - $(srcroot)src/tsd.c -ifeq ($(enable_valgrind), 1) -C_SRCS += $(srcroot)src/valgrind.c -endif +C_SRCS := $(srcroot)src/jemalloc.c \ + $(srcroot)src/arena.c \ + $(srcroot)src/background_thread.c \ + $(srcroot)src/base.c \ + $(srcroot)src/bin.c \ + $(srcroot)src/bitmap.c \ + $(srcroot)src/ckh.c \ + $(srcroot)src/ctl.c \ + $(srcroot)src/div.c \ + $(srcroot)src/extent.c \ + $(srcroot)src/extent_dss.c \ + $(srcroot)src/extent_mmap.c \ + $(srcroot)src/hash.c \ + $(srcroot)src/hooks.c \ + $(srcroot)src/large.c \ + $(srcroot)src/log.c \ + $(srcroot)src/malloc_io.c \ + $(srcroot)src/mutex.c \ + $(srcroot)src/mutex_pool.c \ + $(srcroot)src/nstime.c \ + $(srcroot)src/pages.c \ + $(srcroot)src/prng.c \ + $(srcroot)src/prof.c \ + $(srcroot)src/rtree.c \ + $(srcroot)src/stats.c \ + $(srcroot)src/sz.c \ + $(srcroot)src/tcache.c \ + $(srcroot)src/ticker.c \ + $(srcroot)src/tsd.c \ + $(srcroot)src/witness.c ifeq ($(enable_zone_allocator), 1) C_SRCS += $(srcroot)src/zone.c endif @@ -105,6 +134,11 @@ DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV) ifneq ($(SOREV),$(SO)) DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO) endif +ifeq (1, $(link_whole_archive)) +LJEMALLOC := -Wl,--whole-archive -L$(objroot)lib -l$(LIBJEMALLOC) -Wl,--no-whole-archive +else +LJEMALLOC := 
$(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) +endif PC := $(objroot)jemalloc.pc MAN3 := $(objroot)doc/jemalloc$(install_suffix).3 DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml @@ -116,53 +150,103 @@ C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \ $(srcroot)test/src/mtx.c $(srcroot)test/src/mq.c \ $(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \ $(srcroot)test/src/thd.c $(srcroot)test/src/timer.c -C_UTIL_INTEGRATION_SRCS := $(srcroot)src/util.c -TESTS_UNIT := $(srcroot)test/unit/atomic.c \ +ifeq (1, $(link_whole_archive)) +C_UTIL_INTEGRATION_SRCS := +C_UTIL_CPP_SRCS := +else +C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c +C_UTIL_CPP_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c +endif +TESTS_UNIT := \ + $(srcroot)test/unit/a0.c \ + $(srcroot)test/unit/arena_reset.c \ + $(srcroot)test/unit/atomic.c \ + $(srcroot)test/unit/background_thread.c \ + $(srcroot)test/unit/background_thread_enable.c \ + $(srcroot)test/unit/base.c \ $(srcroot)test/unit/bitmap.c \ $(srcroot)test/unit/ckh.c \ + $(srcroot)test/unit/decay.c \ + $(srcroot)test/unit/div.c \ + $(srcroot)test/unit/emitter.c \ + $(srcroot)test/unit/extent_quantize.c \ + $(srcroot)test/unit/fork.c \ $(srcroot)test/unit/hash.c \ + $(srcroot)test/unit/hooks.c \ $(srcroot)test/unit/junk.c \ $(srcroot)test/unit/junk_alloc.c \ $(srcroot)test/unit/junk_free.c \ - $(srcroot)test/unit/lg_chunk.c \ + $(srcroot)test/unit/log.c \ $(srcroot)test/unit/mallctl.c \ + $(srcroot)test/unit/malloc_io.c \ $(srcroot)test/unit/math.c \ $(srcroot)test/unit/mq.c \ $(srcroot)test/unit/mtx.c \ + $(srcroot)test/unit/pack.c \ + $(srcroot)test/unit/pages.c \ + $(srcroot)test/unit/ph.c \ + $(srcroot)test/unit/prng.c \ $(srcroot)test/unit/prof_accum.c \ $(srcroot)test/unit/prof_active.c \ $(srcroot)test/unit/prof_gdump.c \ $(srcroot)test/unit/prof_idump.c \ $(srcroot)test/unit/prof_reset.c \ + $(srcroot)test/unit/prof_tctx.c \ $(srcroot)test/unit/prof_thread_name.c \ $(srcroot)test/unit/ql.c \ $(srcroot)test/unit/qr.c \ - $(srcroot)test/unit/quarantine.c \ $(srcroot)test/unit/rb.c \ + $(srcroot)test/unit/retained.c \ $(srcroot)test/unit/rtree.c \ $(srcroot)test/unit/SFMT.c \ $(srcroot)test/unit/size_classes.c \ + $(srcroot)test/unit/slab.c \ + $(srcroot)test/unit/smoothstep.c \ + $(srcroot)test/unit/spin.c \ $(srcroot)test/unit/stats.c \ + $(srcroot)test/unit/stats_print.c \ + $(srcroot)test/unit/ticker.c \ + $(srcroot)test/unit/nstime.c \ $(srcroot)test/unit/tsd.c \ - $(srcroot)test/unit/util.c \ + $(srcroot)test/unit/witness.c \ $(srcroot)test/unit/zero.c +ifeq (@enable_prof@, 1) +TESTS_UNIT += \ + $(srcroot)test/unit/arena_reset_prof.c +endif TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \ $(srcroot)test/integration/allocated.c \ - $(srcroot)test/integration/sdallocx.c \ + $(srcroot)test/integration/extent.c \ $(srcroot)test/integration/mallocx.c \ $(srcroot)test/integration/MALLOCX_ARENA.c \ $(srcroot)test/integration/overflow.c \ $(srcroot)test/integration/posix_memalign.c \ $(srcroot)test/integration/rallocx.c \ + $(srcroot)test/integration/sdallocx.c \ $(srcroot)test/integration/thread_arena.c \ $(srcroot)test/integration/thread_tcache_enabled.c \ - $(srcroot)test/integration/xallocx.c \ - $(srcroot)test/integration/chunk.c + $(srcroot)test/integration/xallocx.c +ifeq (@enable_cxx@, 1) +CPP_SRCS := $(srcroot)src/jemalloc_cpp.cpp +TESTS_INTEGRATION_CPP := $(srcroot)test/integration/cpp/basic.cpp +else +CPP_SRCS := +TESTS_INTEGRATION_CPP := +endif TESTS_STRESS := 
$(srcroot)test/stress/microbench.c -TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_STRESS) +TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_INTEGRATION_CPP) $(TESTS_STRESS) + +PRIVATE_NAMESPACE_HDRS := $(objroot)include/jemalloc/internal/private_namespace.h $(objroot)include/jemalloc/internal/private_namespace_jet.h +PRIVATE_NAMESPACE_GEN_HDRS := $(PRIVATE_NAMESPACE_HDRS:%.h=%.gen.h) +C_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym.$(O)) +C_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym) C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O)) +CPP_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.$(O)) C_PIC_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.pic.$(O)) +CPP_PIC_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.pic.$(O)) +C_JET_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym.$(O)) +C_JET_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym) C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O)) C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O)) C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O)) @@ -172,15 +256,17 @@ C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_ TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O)) TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O)) +TESTS_INTEGRATION_CPP_OBJS := $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%.$(O)) TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O)) TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_STRESS_OBJS) +TESTS_CPP_OBJS := $(TESTS_INTEGRATION_CPP_OBJS) .PHONY: all dist build_doc_html build_doc_man build_doc .PHONY: install_bin install_include install_lib .PHONY: install_doc_html install_doc_man install_doc install .PHONY: tests check clean distclean relclean -.SECONDARY : $(TESTS_OBJS) +.SECONDARY : $(PRIVATE_NAMESPACE_GEN_HDRS) $(TESTS_OBJS) $(TESTS_CPP_OBJS) # Default target. all: build_lib @@ -201,18 +287,32 @@ build_doc: $(DOCS) # Include generated dependency files. 
# ifdef CC_MM +-include $(C_SYM_OBJS:%.$(O)=%.d) -include $(C_OBJS:%.$(O)=%.d) +-include $(CPP_OBJS:%.$(O)=%.d) -include $(C_PIC_OBJS:%.$(O)=%.d) +-include $(CPP_PIC_OBJS:%.$(O)=%.d) +-include $(C_JET_SYM_OBJS:%.$(O)=%.d) -include $(C_JET_OBJS:%.$(O)=%.d) -include $(C_TESTLIB_OBJS:%.$(O)=%.d) -include $(TESTS_OBJS:%.$(O)=%.d) +-include $(TESTS_CPP_OBJS:%.$(O)=%.d) endif +$(C_SYM_OBJS): $(objroot)src/%.sym.$(O): $(srcroot)src/%.c +$(C_SYM_OBJS): CPPFLAGS += -DJEMALLOC_NO_PRIVATE_NAMESPACE +$(C_SYMS): $(objroot)src/%.sym: $(objroot)src/%.sym.$(O) $(C_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c +$(CPP_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.cpp $(C_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c $(C_PIC_OBJS): CFLAGS += $(PIC_CFLAGS) +$(CPP_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.cpp +$(CPP_PIC_OBJS): CXXFLAGS += $(PIC_CFLAGS) +$(C_JET_SYM_OBJS): $(objroot)src/%.jet.sym.$(O): $(srcroot)src/%.c +$(C_JET_SYM_OBJS): CPPFLAGS += -DJEMALLOC_JET -DJEMALLOC_NO_PRIVATE_NAMESPACE +$(C_JET_SYMS): $(objroot)src/%.jet.sym: $(objroot)src/%.jet.sym.$(O) $(C_JET_OBJS): $(objroot)src/%.jet.$(O): $(srcroot)src/%.c -$(C_JET_OBJS): CFLAGS += -DJEMALLOC_JET +$(C_JET_OBJS): CPPFLAGS += -DJEMALLOC_JET $(C_TESTLIB_UNIT_OBJS): $(objroot)test/src/%.unit.$(O): $(srcroot)test/src/%.c $(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST $(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c @@ -223,112 +323,146 @@ $(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_T $(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include $(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST $(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST +$(TESTS_INTEGRATION_CPP_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_CPP_TEST $(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST $(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c +$(TESTS_CPP_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.cpp $(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include +$(TESTS_CPP_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include ifneq ($(IMPORTLIB),$(SO)) -$(C_OBJS) $(C_JET_OBJS): CPPFLAGS += -DDLLEXPORT +$(CPP_OBJS) $(C_SYM_OBJS) $(C_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS): CPPFLAGS += -DDLLEXPORT endif -ifndef CC_MM # Dependencies. 
+ifndef CC_MM HEADER_DIRS = $(srcroot)include/jemalloc/internal \ $(objroot)include/jemalloc $(objroot)include/jemalloc/internal -HEADERS = $(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h)) -$(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): $(HEADERS) -$(TESTS_OBJS): $(objroot)test/include/test/jemalloc_test.h +HEADERS = $(filter-out $(PRIVATE_NAMESPACE_HDRS),$(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h))) +$(C_SYM_OBJS) $(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS) $(TESTS_CPP_OBJS): $(HEADERS) +$(TESTS_OBJS) $(TESTS_CPP_OBJS): $(objroot)test/include/test/jemalloc_test.h endif -$(C_OBJS) $(C_PIC_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O): +$(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_INTEGRATION_CPP_OBJS): $(objroot)include/jemalloc/internal/private_namespace.h +$(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_STRESS_OBJS) $(TESTS_UNIT_OBJS) $(TESTS_STRESS_OBJS): $(objroot)include/jemalloc/internal/private_namespace_jet.h + +$(C_SYM_OBJS) $(C_OBJS) $(C_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O): @mkdir -p $(@D) $(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $< ifdef CC_MM @$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $< endif +$(C_SYMS): %.sym: + @mkdir -p $(@D) + $(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols.awk > $@ + +$(C_JET_SYMS): %.sym: + @mkdir -p $(@D) + $(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols_jet.awk > $@ + +$(objroot)include/jemalloc/internal/private_namespace.gen.h: $(C_SYMS) + $(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@ + +$(objroot)include/jemalloc/internal/private_namespace_jet.gen.h: $(C_JET_SYMS) + $(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@ + +%.h: %.gen.h + @if ! `cmp -s $< $@` ; then echo "cp $< $@"; cp $< $@ ; fi + +$(CPP_OBJS) $(CPP_PIC_OBJS) $(TESTS_CPP_OBJS): %.$(O): + @mkdir -p $(@D) + $(CXX) $(CXXFLAGS) -c $(CPPFLAGS) $(CTARGET) $< +ifdef CC_MM + @$(CXX) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $< +endif + ifneq ($(SOREV),$(SO)) %.$(SO) : %.$(SOREV) @mkdir -p $(@D) ln -sf $( Hide nodes below <f>*total [default=.005] --edgefraction=<f> Hide edges below <f>*total [default=.001] --maxdegree=<n> Max incoming/outgoing edges per node [default=8] - --focus=<regexp> Focus on nodes matching <regexp> + --focus=<regexp> Focus on backtraces with nodes matching <regexp> --thread=<n> Show profile for thread <n> - --ignore=<regexp> Ignore nodes matching <regexp> + --ignore=<regexp> Ignore backtraces with nodes matching <regexp> --scale=<n> Set GV scaling [default=0] --heapcheck Make nodes with non-0 object counts (i.e. direct leak generators) more visible + --retain=<regexp> Retain only nodes that match <regexp> + --exclude=<regexp> Exclude all nodes that match <regexp> Miscellaneous: --tools=<prefix or binary:fullpath>[,...] \$PATH for object tool pathnames @@ -339,6 +342,8 @@ sub Init() { $main::opt_ignore = ''; $main::opt_scale = 0; $main::opt_heapcheck = 0; + $main::opt_retain = ''; + $main::opt_exclude = ''; $main::opt_seconds = 30; $main::opt_lib = ""; @@ -410,6 +415,8 @@ sub Init() { "ignore=s" => \$main::opt_ignore, "scale=i" => \$main::opt_scale, "heapcheck" => \$main::opt_heapcheck, + "retain=s" => \$main::opt_retain, + "exclude=s" => \$main::opt_exclude, "inuse_space!" => \$main::opt_inuse_space, "inuse_objects!" => \$main::opt_inuse_objects, "alloc_space!"
=> \$main::opt_alloc_space, @@ -1160,8 +1167,21 @@ sub PrintSymbolizedProfile { } print '---', "\n"; - $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash - my $profile_marker = $&; + my $profile_marker; + if ($main::profile_type eq 'heap') { + $HEAP_PAGE =~ m,[^/]+$,; # matches everything after the last slash + $profile_marker = $&; + } elsif ($main::profile_type eq 'growth') { + $GROWTH_PAGE =~ m,[^/]+$,; # matches everything after the last slash + $profile_marker = $&; + } elsif ($main::profile_type eq 'contention') { + $CONTENTION_PAGE =~ m,[^/]+$,; # matches everything after the last slash + $profile_marker = $&; + } else { # elsif ($main::profile_type eq 'cpu') + $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash + $profile_marker = $&; + } + print '--- ', $profile_marker, "\n"; if (defined($main::collected_profile)) { # if used with remote fetch, simply dump the collected profile to output. @@ -1171,6 +1191,12 @@ sub PrintSymbolizedProfile { } close(SRC); } else { + # --raw/http: For everything to work correctly for non-remote profiles, we + # would need to extend PrintProfileData() to handle all possible profile + # types, re-enable the code that is currently disabled in ReadCPUProfile() + # and FixCallerAddresses(), and remove the remote profile dumping code in + # the block above. + die "--raw/http: jeprof can only dump remote profiles for --raw\n"; # dump a cpu-format profile to standard out PrintProfileData($profile); } @@ -2821,6 +2847,43 @@ sub ExtractCalls { return $calls; } +sub FilterFrames { + my $symbols = shift; + my $profile = shift; + + if ($main::opt_retain eq '' && $main::opt_exclude eq '') { + return $profile; + } + + my $result = {}; + foreach my $k (keys(%{$profile})) { + my $count = $profile->{$k}; + my @addrs = split(/\n/, $k); + my @path = (); + foreach my $a (@addrs) { + my $sym; + if (exists($symbols->{$a})) { + $sym = $symbols->{$a}->[0]; + } else { + $sym = $a; + } + if ($main::opt_retain ne '' && $sym !~ m/$main::opt_retain/) { + next; + } + if ($main::opt_exclude ne '' && $sym =~ m/$main::opt_exclude/) { + next; + } + push(@path, $a); + } + if (scalar(@path) > 0) { + my $reduced_path = join("\n", @path); + AddEntry($result, $reduced_path, $count); + } + } + + return $result; +} + sub RemoveUninterestingFrames { my $symbols = shift; my $profile = shift; @@ -2829,21 +2892,23 @@ sub RemoveUninterestingFrames { my %skip = (); my $skip_regexp = 'NOMATCH'; if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') { - foreach my $name ('calloc', + foreach my $name ('@JEMALLOC_PREFIX@calloc', 'cfree', - 'malloc', - 'free', - 'memalign', - 'posix_memalign', - 'aligned_alloc', + '@JEMALLOC_PREFIX@malloc', + 'newImpl', + 'void* newImpl', + '@JEMALLOC_PREFIX@free', + '@JEMALLOC_PREFIX@memalign', + '@JEMALLOC_PREFIX@posix_memalign', + '@JEMALLOC_PREFIX@aligned_alloc', 'pvalloc', - 'valloc', - 'realloc', - 'mallocx', # jemalloc - 'rallocx', # jemalloc - 'xallocx', # jemalloc - 'dallocx', # jemalloc - 'sdallocx', # jemalloc + '@JEMALLOC_PREFIX@valloc', + '@JEMALLOC_PREFIX@realloc', + '@JEMALLOC_PREFIX@mallocx', + '@JEMALLOC_PREFIX@rallocx', + '@JEMALLOC_PREFIX@xallocx', + '@JEMALLOC_PREFIX@dallocx', + '@JEMALLOC_PREFIX@sdallocx', 'tc_calloc', 'tc_cfree', 'tc_malloc', @@ -2965,6 +3030,9 @@ sub RemoveUninterestingFrames { my $reduced_path = join("\n", @path); AddEntry($result, $reduced_path, $count); } + + $result = FilterFrames($symbols, $result); + return $result; } @@ -3274,7 +3342,7 @@ sub ResolveRedirectionForCurl { # 
Add a timeout flat to URL_FETCHER. Returns a new list. sub AddFetchTimeout { my $timeout = shift; - my @fetcher = shift; + my @fetcher = @_; if (defined($timeout)) { if (join(" ", @fetcher) =~ m/\bcurl -s/) { push(@fetcher, "--max-time", sprintf("%d", $timeout)); @@ -3320,6 +3388,27 @@ sub ReadSymbols { return $map; } +sub URLEncode { + my $str = shift; + $str =~ s/([^A-Za-z0-9\-_.!~*'()])/ sprintf "%%%02x", ord $1 /eg; + return $str; +} + +sub AppendSymbolFilterParams { + my $url = shift; + my @params = (); + if ($main::opt_retain ne '') { + push(@params, sprintf("retain=%s", URLEncode($main::opt_retain))); + } + if ($main::opt_exclude ne '') { + push(@params, sprintf("exclude=%s", URLEncode($main::opt_exclude))); + } + if (scalar @params > 0) { + $url = sprintf("%s?%s", $url, join("&", @params)); + } + return $url; +} + # Fetches and processes symbols to prepare them for use in the profile output # code. If the optional 'symbol_map' arg is not given, fetches symbols from # $SYMBOL_PAGE for all PC values found in profile. Otherwise, the raw symbols @@ -3344,9 +3433,11 @@ sub FetchSymbols { my $command_line; if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) { $url = ResolveRedirectionForCurl($url); + $url = AppendSymbolFilterParams($url); $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym", $url); } else { + $url = AppendSymbolFilterParams($url); $command_line = (ShellEscape(@URL_FETCHER, "--post", $url) . " < " . ShellEscape($main::tmpfile_sym)); } @@ -3427,12 +3518,22 @@ sub FetchDynamicProfile { } $url .= sprintf("seconds=%d", $main::opt_seconds); $fetch_timeout = $main::opt_seconds * 1.01 + 60; + # Set $profile_type for consumption by PrintSymbolizedProfile. + $main::profile_type = 'cpu'; } else { # For non-CPU profiles, we add a type-extension to # the target profile file name. my $suffix = $path; $suffix =~ s,/,.,g; $profile_file .= $suffix; + # Set $profile_type for consumption by PrintSymbolizedProfile. + if ($path =~ m/$HEAP_PAGE/) { + $main::profile_type = 'heap'; + } elsif ($path =~ m/$GROWTH_PAGE/) { + $main::profile_type = 'growth'; + } elsif ($path =~ m/$CONTENTION_PAGE/) { + $main::profile_type = 'contention'; + } } my $profile_dir = $ENV{"JEPROF_TMPDIR"} || ($ENV{HOME} . "/jeprof"); @@ -3730,6 +3831,8 @@ sub ReadProfile { my $symbol_marker = $&; $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash my $profile_marker = $&; + $HEAP_PAGE =~ m,[^/]+$,; # matches everything after the last slash + my $heap_marker = $&; # Look at first line to see if it is a heap or a CPU profile. # CPU profile may start with no header at all, and just binary data @@ -3756,7 +3859,13 @@ sub ReadProfile { $header = ReadProfileHeader(*PROFILE) || ""; } + if ($header =~ m/^--- *($heap_marker|$growth_marker)/o) { + # Skip "--- ..." line for profile types that have their own headers. + $header = ReadProfileHeader(*PROFILE) || ""; + } + $main::profile_type = ''; + if ($header =~ m/^heap profile:.*$growth_marker/o) { $main::profile_type = 'growth'; $result = ReadHeapProfile($prog, *PROFILE, $header); @@ -3808,9 +3917,9 @@ sub ReadProfile { # independent implementation. sub FixCallerAddresses { my $stack = shift; - if ($main::use_symbolized_profile) { - return $stack; - } else { + # --raw/http: Always subtract one from pc's, because PrintSymbolizedProfile() + # dumps unadjusted profiles. 
+ { $stack =~ /(\s)/; my $delimiter = $1; my @addrs = split(' ', $stack); @@ -3878,12 +3987,7 @@ sub ReadCPUProfile { for (my $j = 0; $j < $d; $j++) { my $pc = $slots->get($i+$j); # Subtract one from caller pc so we map back to call instr. - # However, don't do this if we're reading a symbolized profile - # file, in which case the subtract-one was done when the file - # was written. - if ($j > 0 && !$main::use_symbolized_profile) { - $pc--; - } + $pc--; $pc = sprintf("%0*x", $address_length, $pc); $pcs->{$pc} = 1; push @k, $pc; @@ -4469,7 +4573,7 @@ sub ParseTextSectionHeader { # Split /proc/pid/maps dump into a list of libraries sub ParseLibraries { return if $main::use_symbol_page; # We don't need libraries info. - my $prog = shift; + my $prog = Cwd::abs_path(shift); my $map = shift; my $pcs = shift; @@ -4502,6 +4606,16 @@ sub ParseLibraries { $finish = HexExtend($2); $offset = $zero_offset; $lib = $3; + } elsif (($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+)$/i) && ($4 eq $prog)) { + # PIEs and address space randomization do not play well with our + # default assumption that main executable is at lowest + # addresses. So we're detecting main executable in + # /proc/self/maps as well. + $start = HexExtend($1); + $finish = HexExtend($2); + $offset = HexExtend($3); + $lib = $4; + $lib =~ s|\\|/|g; # turn windows-style paths into unix-style paths } # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in # function procfs_doprocmap (sys/fs/procfs/procfs_map.c) diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/build-aux/config.guess b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/build-aux/config.guess new file mode 100755 index 0000000..2e9ad7f --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/build-aux/config.guess @@ -0,0 +1,1462 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright 1992-2016 Free Software Foundation, Inc. + +timestamp='2016-10-02' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see <http://www.gnu.org/licenses/>. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). +# +# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. +# +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess +# +# Please send patches to <config-patches@gnu.org>. + + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on.
+ +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to <config-patches@gnu.org>." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright 1992-2016 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. + +set_cc_for_build=' trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; : ${TMPDIR=/tmp} ; { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; dummy=$tmp/dummy ; tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; case $CC_FOR_BUILD,$HOST_CC,$CC in ,,) echo "int x;" > $dummy.c ; for c in cc gcc c89 c99 ; do if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then CC_FOR_BUILD="$c"; break ; fi ; done ; if test x"$CC_FOR_BUILD" = x ; then CC_FOR_BUILD=no_compiler_found ; fi ;; ,,*) CC_FOR_BUILD=$CC ;; ,*,*) CC_FOR_BUILD=$HOST_CC ;; esac ; set_cc_for_build= ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +case "${UNAME_SYSTEM}" in +Linux|GNU|GNU/*) + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + LIBC=gnu + + eval $set_cc_for_build + cat <<-EOF > $dummy.c + #include <features.h> + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #else + LIBC=gnu + #endif + EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` + ;; +esac + +# Note: order is significant - the case branches are not exclusive.
+ +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". + sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ + /sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || \ + echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + earmv*) + arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'` + endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'` + machine=${arch}${endian}-unknown + ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently (or will in the future) and ABI. + case "${UNAME_MACHINE_ARCH}" in + earm*) + os=netbsdelf + ;; + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # Determine ABI tags. + case "${UNAME_MACHINE_ARCH}" in + earm*) + expr='s/^earmv[0-9]/-eabi/;s/eb$//' + abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"` + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. 
+ echo "${machine}-${os}${release}${abi}" + exit ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + exit ;; + *:LibertyBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-libertybsd${UNAME_RELEASE} + exit ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit ;; + *:SolidBSD:*:*) + echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:Sortix:*:*) + echo ${UNAME_MACHINE}-unknown-sortix + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. + ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE=alpha ;; + "EV4.5 (21064)") + UNAME_MACHINE=alpha ;; + "LCA4 (21066/21068)") + UNAME_MACHINE=alpha ;; + "EV5 (21164)") + UNAME_MACHINE=alphaev5 ;; + "EV5.6 (21164A)") + UNAME_MACHINE=alphaev56 ;; + "EV5.6 (21164PC)") + UNAME_MACHINE=alphapca56 ;; + "EV5.7 (21164PC)") + UNAME_MACHINE=alphapca57 ;; + "EV6 (21264)") + UNAME_MACHINE=alphaev6 ;; + "EV6.7 (21264A)") + UNAME_MACHINE=alphaev67 ;; + "EV6.8CB (21264C)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8AL (21264B)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8CX (21264D)") + UNAME_MACHINE=alphaev68 ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE=alphaev69 ;; + "EV7 (21364)") + UNAME_MACHINE=alphaev7 ;; + "EV7.9 (21364A)") + UNAME_MACHINE=alphaev79 ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? 
+ echo alpha-pc-interix + exit ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit ;; + arm*:riscos:*:*|arm*:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux${UNAME_RELEASE} + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + eval $set_cc_for_build + SUN_ARCH=i386 + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH=x86_64 + fi + fi + echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = x && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). 
Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix${UNAME_RELEASE} + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && + dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`$dummy $dummyarg` && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos${UNAME_RELEASE} + exit ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + then + if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + exit ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + then + echo "$SYSTEM_NAME" + else + echo rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit ;; + *:AIX:*:[4567]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/lslpp ] ; then + IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | + awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${IBM_ARCH}-ibm-aix${IBM_REV} + exit ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit ;; + ibmrt:4.4BSD:*|romp-ibm:BSD:*) + echo romp-ibm-bsd4.4 + exit ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + exit ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + case "${UNAME_MACHINE}" in + 9000/31? ) HP_ARCH=m68000 ;; + 9000/[34]?? ) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "${sc_cpu_version}" in + 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 + 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "${sc_kernel_bits}" in + 32) HP_ARCH=hppa2.0n ;; + 64) HP_ARCH=hppa2.0w ;; + '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "${HP_ARCH}" = "" ]; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS="" $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ ${HP_ARCH} = hppa2.0w ] + then + eval $set_cc_for_build + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. 
hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH=hppa2.0w + else + HP_ARCH=hppa64 + fi + fi + echo ${HP_ARCH}-hp-hpux${HPUX_REV} + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux${HPUX_REV} + exit ;; + 3050*:HI-UX:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. */ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr 
ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=`/usr/bin/uname -p` + case ${UNAME_PROCESSOR} in + amd64) + echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + *) + echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + esac + exit ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit ;; + *:MINGW64*:*) + echo ${UNAME_MACHINE}-pc-mingw64 + exit ;; + *:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit ;; + *:MSYS*:*) + echo ${UNAME_MACHINE}-pc-msys + exit ;; + i*:windows32*:*) + # uname -m includes "-pc" on this system. + echo ${UNAME_MACHINE}-mingw32 + exit ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit ;; + *:Interix*:*) + case ${UNAME_MACHINE} in + x86) + echo i586-pc-interix${UNAME_RELEASE} + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix${UNAME_RELEASE} + exit ;; + IA64) + echo ia64-unknown-interix${UNAME_RELEASE} + exit ;; + esac ;; + [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit ;; + 8664:Windows_NT:*) + echo x86_64-pc-mks + exit ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? + echo i586-pc-interix + exit ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} + exit ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit ;; + aarch64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arm*:Linux:*:*) + eval $set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi + else + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + cris:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + crisv32:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + e2k:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + frv:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + hexagon:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:Linux:*:*) + echo ${UNAME_MACHINE}-pc-linux-${LIBC} + exit ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + k1om:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef ${UNAME_MACHINE} + #undef ${UNAME_MACHINE}el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=${UNAME_MACHINE}el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=${UNAME_MACHINE} + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } + ;; + mips64el:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + openrisc*:Linux:*:*) + echo or1k-unknown-linux-${LIBC} + exit ;; + or32:Linux:*:* | or1k*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-${LIBC} + exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-${LIBC} + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; + PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; + *) echo hppa-unknown-linux-${LIBC} ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-${LIBC} + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-${LIBC} + exit ;; + ppc64le:Linux:*:*) + echo powerpc64le-unknown-linux-${LIBC} + exit ;; + ppcle:Linux:*:*) + echo powerpcle-unknown-linux-${LIBC} + exit ;; + riscv32:Linux:*:* | riscv64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux-${LIBC} + exit ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + tile*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + vax:Linux:*:*) + echo ${UNAME_MACHINE}-dec-linux-${LIBC} + exit ;; + x86_64:Linux:*:*) + echo ${UNAME_MACHINE}-pc-linux-${LIBC} + exit ;; + 
xtensa*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + exit ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. + echo ${UNAME_MACHINE}-pc-os2-emx + exit ;; + i*86:XTS-300:*:STOP) + echo ${UNAME_MACHINE}-unknown-stop + exit ;; + i*86:atheos:*:*) + echo ${UNAME_MACHINE}-unknown-atheos + exit ;; + i*86:syllable:*:*) + echo ${UNAME_MACHINE}-pc-syllable + exit ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) + echo i386-unknown-lynxos${UNAME_RELEASE} + exit ;; + i*86:*DOS:*:*) + echo ${UNAME_MACHINE}-pc-msdosdjgpp + exit ;; + i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) + UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + else + echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + fi + exit ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + exit ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name` + echo ${UNAME_MACHINE}-pc-isc$UNAME_REL + elif /bin/uname -X 2>/dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + else + echo ${UNAME_MACHINE}-pc-sysv32 + fi + exit ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i586. + # Note: whatever this is, it MUST be the same as what config.sub + # prints for the "djgpp" host, or else GDB configure will decide that + # this is a cross-build. + echo i586-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says <Richard.M.Bartel@ccMail.Census.GOV> + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes <hewes@openmarket.com>. + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo ${UNAME_MACHINE}-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
+ echo i586-pc-haiku + exit ;; + x86_64:Haiku:*:*) + echo x86_64-unknown-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux${UNAME_RELEASE} + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux${UNAME_RELEASE} + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux${UNAME_RELEASE} + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux${UNAME_RELEASE} + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux${UNAME_RELEASE} + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux${UNAME_RELEASE} + exit ;; + SX-ACE:SUPER-UX:*:*) + echo sxace-nec-superux${UNAME_RELEASE} + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Rhapsody:*:*) + echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + eval $set_cc_for_build + if test "$UNAME_PROCESSOR" = unknown ; then + UNAME_PROCESSOR=powerpc + fi + if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + fi + elif test "$UNAME_PROCESSOR" = i386 ; then + # Avoid executing cc on OS X 10.9, as it ships with a stub + # that puts up a graphical alert prompting to install + # developer tools. Any system running Mac OS X 10.7 or + # later (Darwin 11 and later) is required to have a 64-bit + # processor. This is not true of the ARM version of Darwin + # that Apple uses in portable devices. + UNAME_PROCESSOR=x86_64 + fi + echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = x86; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-?:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk${UNAME_RELEASE} + exit ;; + NSE-*:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk${UNAME_RELEASE} + exit ;; + NSR-?:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk${UNAME_RELEASE} + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. 
+ if test "$cputype" = 386; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo ${UNAME_MACHINE}-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux${UNAME_RELEASE} + exit ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE} | sed -e 's/ .*$//'` + exit ;; + i*86:rdos:*:*) + echo ${UNAME_MACHINE}-pc-rdos + exit ;; + i*86:AROS:*:*) + echo ${UNAME_MACHINE}-pc-aros + exit ;; + x86_64:VMkernel:*:*) + echo ${UNAME_MACHINE}-unknown-esx + exit ;; + amd64:Isilon\ OneFS:*:*) + echo x86_64-unknown-onefs + exit ;; +esac + +cat >&2 </dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/build-aux/config.sub b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/build-aux/config.sub new file mode 100755 index 0000000..dd2ca93 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/build-aux/config.sub @@ -0,0 +1,1825 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright 1992-2016 Free Software Foundation, Inc. + +timestamp='2016-11-04' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . 
+# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). + + +# Please send patches to <config-patches@gnu.org>. +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS + +Canonicalize a configuration name. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to <config-patches@gnu.org>." + +version="\ +GNU config.sub ($timestamp) + +Copyright 1992-2016 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo $1 + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations.
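Before the splitting code that follows, two illustrative invocations show what this canonicalization means in practice (outputs are typical for config.sub snapshots of this vintage, quoted from memory rather than verified against this exact copy):

    $ sh config.sub x86_64-linux
    x86_64-pc-linux-gnu
    $ sh config.sub sun4
    sparc-sun-sunos4.1.1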
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ + linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ + knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \ + kopensolaris*-gnu* | cloudabi*-eabi* | \ + storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + android-linux) + os=-linux-android + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. + ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray | -microblaze*) + os= + basic_machine=$1 + ;; + -bluegene*) + os=-cnk + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco6) + os=-sco5v6 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*178) + os=-lynxos178 + ;; + -lynx*5) + os=-lynxos5 + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. 
+ 1750a | 580 \ + | a29k \ + | aarch64 | aarch64_be \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arceb \ + | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ + | avr | avr32 \ + | ba \ + | be32 | be64 \ + | bfin \ + | c4x | c8051 | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | e2k | epiphany \ + | fido | fr30 | frv | ft32 \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | hexagon \ + | i370 | i860 | i960 | ia64 \ + | ip2k | iq2000 \ + | k1om \ + | le32 | le64 \ + | lm32 \ + | m32c | m32r | m32rle | m68000 | m68k | m88k \ + | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64octeon | mips64octeonel \ + | mips64orion | mips64orionel \ + | mips64r5900 | mips64r5900el \ + | mips64vr | mips64vrel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mips64vr5900 | mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa32r6 | mipsisa32r6el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64r6 | mipsisa64r6el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipsr5900 | mipsr5900el \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | moxie \ + | mt \ + | msp430 \ + | nds32 | nds32le | nds32be \ + | nios | nios2 | nios2eb | nios2el \ + | ns16k | ns32k \ + | open8 | or1k | or1knd | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pru \ + | pyramid \ + | riscv32 | riscv64 \ + | rl78 | rx \ + | score \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ + | spu \ + | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ + | ubicom32 \ + | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ + | visium \ + | we32k \ + | x86 | xc16x | xstormy16 | xtensa \ + | z8k | z80) + basic_machine=$basic_machine-unknown + ;; + c54x) + basic_machine=tic54x-unknown + ;; + c55x) + basic_machine=tic55x-unknown + ;; + c6x) + basic_machine=tic6x-unknown + ;; + leon|leon[3-9]) + basic_machine=sparc-$basic_machine + ;; + m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + ms1) + basic_machine=mt-unknown + ;; + + strongarm | thumb | xscale) + basic_machine=arm-unknown + ;; + xgate) + basic_machine=$basic_machine-unknown + os=-none + ;; + xscaleeb) + basic_machine=armeb-unknown + ;; + + xscaleel) + basic_machine=armel-unknown + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. + *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. 
+ 580-* \ + | a29k-* \ + | aarch64-* | aarch64_be-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* | avr32-* \ + | ba-* \ + | be32-* | be64-* \ + | bfin-* | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* \ + | c8051-* | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | e2k-* | elxsi-* \ + | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | hexagon-* \ + | i*86-* | i860-* | i960-* | ia64-* \ + | ip2k-* | iq2000-* \ + | k1om-* \ + | le32-* | le64-* \ + | lm32-* \ + | m32c-* | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ + | microblaze-* | microblazeel-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64octeon-* | mips64octeonel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64r5900-* | mips64r5900el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mips64vr5900-* | mips64vr5900el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa32r6-* | mipsisa32r6el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64r6-* | mipsisa64r6el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipsr5900-* | mipsr5900el-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | mt-* \ + | msp430-* \ + | nds32-* | nds32le-* | nds32be-* \ + | nios-* | nios2-* | nios2eb-* | nios2el-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | open8-* \ + | or1k*-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pru-* \ + | pyramid-* \ + | riscv32-* | riscv64-* \ + | rl78-* | romp-* | rs6000-* | rx-* \ + | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ + | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \ + | tahoe-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tile*-* \ + | tron-* \ + | ubicom32-* \ + | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ + | vax-* \ + | visium-* \ + | we32k-* \ + | x86-* | x86_64-* | xc16x-* | xps100-* \ + | xstormy16-* | xtensa*-* \ + | ymp-* \ + | z8k-* | z80-*) + ;; + # Recognize the basic CPU types without company name, with glob match. + xtensa*) + basic_machine=$basic_machine-unknown + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. 
+ 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aros) + basic_machine=i386-pc + os=-aros + ;; + asmjs) + basic_machine=asmjs-unknown + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + blackfin) + basic_machine=bfin-unknown + os=-linux + ;; + blackfin-*) + basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + bluegene*) + basic_machine=powerpc-ibm + os=-cnk + ;; + c54x-*) + basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c55x-*) + basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c6x-*) + basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + cegcc) + basic_machine=arm-unknown + os=-cegcc + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16 | cr16-*) + basic_machine=cr16-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + dicos) + basic_machine=i686-pc + os=-dicos + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + e500v[12]) + basic_machine=powerpc-unknown + os=$os"spe" + ;; + e500v[12]-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + os=$os"spe" + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + 
;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + leon-*|leon[3-9]-*) + basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'` + ;; + m68knommu) + basic_machine=m68k-unknown + os=-linux + ;; + m68knommu-*) + basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + microblaze*) + basic_machine=microblaze-xilinx + ;; + mingw64) + basic_machine=x86_64-pc + os=-mingw64 + ;; + mingw32) + basic_machine=i686-pc + os=-mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + os=-mingw32ce + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + moxiebox) + basic_machine=moxie-unknown + os=-moxiebox + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + ms1-*) + basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` + ;; + msys) + basic_machine=i686-pc + os=-msys + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + nacl) + basic_machine=le32-unknown + os=-nacl + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + 
os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + -ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + neo-tandem) + basic_machine=neo-tandem + ;; + nse-tandem) + basic_machine=nse-tandem + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + openrisc | openrisc-*) + basic_machine=or32-unknown + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + parisc) + basic_machine=hppa-unknown + os=-linux + ;; + parisc-*) + basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pc98) + basic_machine=i386-pc + ;; + pc98-*) + basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc | ppcbe) basic_machine=powerpc-unknown + ;; + ppc-* | ppcbe-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rdos | rdos64) + basic_machine=x86_64-pc + os=-rdos + ;; + rdos32) + basic_machine=i386-pc + os=-rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + basic_machine=s390-ibm + ;; + s390x | s390x-*) 
+ basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sde) + basic_machine=mipsisa32-sde + os=-elf + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh5el) + basic_machine=sh5le-unknown + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + strongarm-* | thumb-*) + basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tile*) + basic_machine=$basic_machine-unknown + os=-linux-gnu + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + xscale-* | xscalee[bl]-*) + basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + z80-*-coff) + basic_machine=z80-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. 
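As an example of this defaulting (illustrative, not verified against this exact copy): a bare "mmix" is expanded by the arm below to mmix-knuth, and the OS-defaulting table later in the script adds -mmixware, so:

    $ sh config.sub mmix
    mmix-knuth-mmixware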
+ w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. + -auroraux) + os=-auroraux + ;; + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. + # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. 
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ + | -sym* | -kopensolaris* | -plan9* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* | -aros* | -cloudabi* | -sortix* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ + | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* | -cegcc* \ + | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ + | -linux-newlib* | -linux-musl* | -linux-uclibc* \ + | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \ + | -onefs* | -tirtos* | -phoenix* | -fuchsia*) + # Remember, each alternative MUST END IN *, to match a version number. + ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -zvmoe) + os=-zvmoe + ;; + -dicos*) + os=-dicos + ;; + -nacl*) + ;; + -ios) + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. 
+ os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. +# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. + +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + score-*) + os=-elf + ;; + spu-*) + os=-elf + ;; + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + c8051-*) + os=-elf + ;; + hexagon-*) + os=-elf + ;; + tic54x-*) + os=-coff + ;; + tic55x-*) + os=-coff + ;; + tic6x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + ;; + m68*-cisco) + os=-aout + ;; + mep-*) + os=-elf + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. + os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + *-be) + os=-beos + ;; + *-haiku) + os=-haiku + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
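For instance (illustrative output, assuming the arms below behave as the comment describes): "powerpc-aix" parses as CPU powerpc with an unknown vendor plus OS -aix, and the -cnk*|-aix* arm picks ibm:

    $ sh config.sub powerpc-aix
    powerpc-ibm-aix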
+vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -cnk*|-aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/install-sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/build-aux/install-sh similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/install-sh rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/build-aux/install-sh diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/config.guess b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/config.guess similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/config.guess rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/config.guess diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/config.stamp.in b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/config.stamp.in similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/config.stamp.in rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/config.stamp.in diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/config.sub b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/config.sub similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/config.sub rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/config.sub diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/configure.ac b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/configure.ac similarity index 59% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/configure.ac rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/configure.ac index 7a1290e..a6a08db 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/configure.ac +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/configure.ac @@ -1,34 +1,99 @@ dnl Process this file with autoconf to produce a configure script. +AC_PREREQ(2.68) AC_INIT([Makefile.in]) +AC_CONFIG_AUX_DIR([build-aux]) + dnl ============================================================================ dnl Custom macro definitions. -dnl JE_CFLAGS_APPEND(cflag) -AC_DEFUN([JE_CFLAGS_APPEND], -[ -AC_MSG_CHECKING([whether compiler supports $1]) -TCFLAGS="${CFLAGS}" -if test "x${CFLAGS}" = "x" ; then - CFLAGS="$1" +dnl JE_CONCAT_VVV(r, a, b) +dnl +dnl Set $r to the concatenation of $a and $b, with a space separating them iff +dnl both $a and $b are non-empty. 
+AC_DEFUN([JE_CONCAT_VVV], +if test "x[$]{$2}" = "x" -o "x[$]{$3}" = "x" ; then + $1="[$]{$2}[$]{$3}" else - CFLAGS="${CFLAGS} $1" + $1="[$]{$2} [$]{$3}" fi +) + +dnl JE_APPEND_VS(a, b) +dnl +dnl Set $a to the concatenation of $a and b, with a space separating them iff +dnl both $a and b are non-empty. +AC_DEFUN([JE_APPEND_VS], + T_APPEND_V=$2 + JE_CONCAT_VVV($1, $1, T_APPEND_V) +) + +CONFIGURE_CFLAGS= +SPECIFIED_CFLAGS="${CFLAGS}" +dnl JE_CFLAGS_ADD(cflag) +dnl +dnl CFLAGS is the concatenation of CONFIGURE_CFLAGS and SPECIFIED_CFLAGS +dnl (ignoring EXTRA_CFLAGS, which does not impact configure tests. This macro +dnl appends to CONFIGURE_CFLAGS and regenerates CFLAGS. +AC_DEFUN([JE_CFLAGS_ADD], +[ +AC_MSG_CHECKING([whether compiler supports $1]) +T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +JE_APPEND_VS(CONFIGURE_CFLAGS, $1) +JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS) AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [[ ]], [[ return 0; ]])], - [je_cv_cflags_appended=$1] + [je_cv_cflags_added=$1] AC_MSG_RESULT([yes]), - [je_cv_cflags_appended=] + [je_cv_cflags_added=] AC_MSG_RESULT([no]) - [CFLAGS="${TCFLAGS}"] + [CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"] ) +JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS) +]) + +dnl JE_CFLAGS_SAVE() +dnl JE_CFLAGS_RESTORE() +dnl +dnl Save/restore CFLAGS. Nesting is not supported. +AC_DEFUN([JE_CFLAGS_SAVE], +SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}" +) +AC_DEFUN([JE_CFLAGS_RESTORE], +CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}" +JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS) +) + +CONFIGURE_CXXFLAGS= +SPECIFIED_CXXFLAGS="${CXXFLAGS}" +dnl JE_CXXFLAGS_ADD(cxxflag) +AC_DEFUN([JE_CXXFLAGS_ADD], +[ +AC_MSG_CHECKING([whether compiler supports $1]) +T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}" +JE_APPEND_VS(CONFIGURE_CXXFLAGS, $1) +JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS) +AC_LANG_PUSH([C++]) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM( +[[ +]], [[ + return 0; +]])], + [je_cv_cxxflags_added=$1] + AC_MSG_RESULT([yes]), + [je_cv_cxxflags_added=] + AC_MSG_RESULT([no]) + [CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"] +) +AC_LANG_POP([C++]) +JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS) ]) dnl JE_COMPILABLE(label, hcode, mcode, rvar) -dnl +dnl dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors dnl cause failure. AC_DEFUN([JE_COMPILABLE], @@ -116,6 +181,7 @@ dnl If CFLAGS isn't defined, set CFLAGS to something reasonable. Otherwise, dnl just prevent autoconf from molesting CFLAGS. 
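The JE_CONCAT_VVV/JE_APPEND_VS machinery added above reduces to one plain-sh rule, sketched here outside m4 (the helper is hypothetical, not part of the patch): join two values with a single space only when both are non-empty, so the regenerated CFLAGS never accumulates stray blanks as flags are probed and rolled back.

    concat_vvv () {
        # Mirrors JE_CONCAT_VVV: space-separated iff both args are non-empty.
        if test "x$1" = "x" -o "x$2" = "x"; then
            echo "$1$2"
        else
            echo "$1 $2"
        fi
    }
    # CFLAGS is always regenerated from the two halves, as in JE_CFLAGS_ADD:
    CFLAGS=`concat_vvv "$CONFIGURE_CFLAGS" "$SPECIFIED_CFLAGS"`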
CFLAGS=$CFLAGS AC_PROG_CC + if test "x$GCC" != "xyes" ; then AC_CACHE_CHECK([whether compiler is MSVC], [je_cv_msvc], @@ -129,31 +195,122 @@ if test "x$GCC" != "xyes" ; then [je_cv_msvc=no])]) fi -if test "x$CFLAGS" = "x" ; then - no_CFLAGS="yes" - if test "x$GCC" = "xyes" ; then - JE_CFLAGS_APPEND([-std=gnu99]) - if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then +dnl check if a cray prgenv wrapper compiler is being used +je_cv_cray_prgenv_wrapper="" +if test "x${PE_ENV}" != "x" ; then + case "${CC}" in + CC|cc) + je_cv_cray_prgenv_wrapper="yes" + ;; + *) + ;; + esac +fi + +AC_CACHE_CHECK([whether compiler is cray], + [je_cv_cray], + [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], + [ +#ifndef _CRAYC + int fail[-1]; +#endif +])], + [je_cv_cray=yes], + [je_cv_cray=no])]) + +if test "x${je_cv_cray}" = "xyes" ; then + AC_CACHE_CHECK([whether cray compiler version is 8.4], + [je_cv_cray_84], + [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], + [ +#if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4) + int fail[-1]; +#endif +])], + [je_cv_cray_84=yes], + [je_cv_cray_84=no])]) +fi + +if test "x$GCC" = "xyes" ; then + JE_CFLAGS_ADD([-std=gnu11]) + if test "x$je_cv_cflags_added" = "x-std=gnu11" ; then + AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT]) + else + JE_CFLAGS_ADD([-std=gnu99]) + if test "x$je_cv_cflags_added" = "x-std=gnu99" ; then AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT]) fi - JE_CFLAGS_APPEND([-Wall]) - JE_CFLAGS_APPEND([-Werror=declaration-after-statement]) - JE_CFLAGS_APPEND([-pipe]) - JE_CFLAGS_APPEND([-g3]) - elif test "x$je_cv_msvc" = "xyes" ; then - CC="$CC -nologo" - JE_CFLAGS_APPEND([-Zi]) - JE_CFLAGS_APPEND([-MT]) - JE_CFLAGS_APPEND([-W3]) - JE_CFLAGS_APPEND([-FS]) - CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat" fi + JE_CFLAGS_ADD([-Wall]) + JE_CFLAGS_ADD([-Wshorten-64-to-32]) + JE_CFLAGS_ADD([-Wsign-compare]) + JE_CFLAGS_ADD([-Wundef]) + JE_CFLAGS_ADD([-Wno-format-zero-length]) + JE_CFLAGS_ADD([-pipe]) + JE_CFLAGS_ADD([-g3]) +elif test "x$je_cv_msvc" = "xyes" ; then + CC="$CC -nologo" + JE_CFLAGS_ADD([-Zi]) + JE_CFLAGS_ADD([-MT]) + JE_CFLAGS_ADD([-W3]) + JE_CFLAGS_ADD([-FS]) + JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat) +fi +if test "x$je_cv_cray" = "xyes" ; then + dnl cray compiler 8.4 has an inlining bug + if test "x$je_cv_cray_84" = "xyes" ; then + JE_CFLAGS_ADD([-hipa2]) + JE_CFLAGS_ADD([-hnognu]) + fi + dnl ignore unreachable code warning + JE_CFLAGS_ADD([-hnomessage=128]) + dnl ignore redefinition of "malloc", "free", etc warning + JE_CFLAGS_ADD([-hnomessage=1357]) +fi +AC_SUBST([CONFIGURE_CFLAGS]) +AC_SUBST([SPECIFIED_CFLAGS]) +AC_SUBST([EXTRA_CFLAGS]) +AC_PROG_CPP + +AC_ARG_ENABLE([cxx], + [AS_HELP_STRING([--disable-cxx], [Disable C++ integration])], +if test "x$enable_cxx" = "xno" ; then + enable_cxx="0" +else + enable_cxx="1" fi -dnl Append EXTRA_CFLAGS to CFLAGS, if defined. -if test "x$EXTRA_CFLAGS" != "x" ; then - JE_CFLAGS_APPEND([$EXTRA_CFLAGS]) +, +enable_cxx="1" +) +if test "x$enable_cxx" = "x1" ; then + dnl Require at least c++14, which is the first version to support sized + dnl deallocation. C++ support is not compiled otherwise. 
+ m4_include([m4/ax_cxx_compile_stdcxx.m4]) + AX_CXX_COMPILE_STDCXX([14], [noext], [optional]) + if test "x${HAVE_CXX14}" = "x1" ; then + JE_CXXFLAGS_ADD([-Wall]) + JE_CXXFLAGS_ADD([-g3]) + + SAVED_LIBS="${LIBS}" + JE_APPEND_VS(LIBS, -lstdc++) + JE_COMPILABLE([libstdc++ linkage], [ +#include <stdlib.h> +], [[ + int *arr = (int *)malloc(sizeof(int) * 42); + if (arr == NULL) + return 1; +]], [je_cv_libstdcxx]) + if test "x${je_cv_libstdcxx}" = "xno" ; then + LIBS="${SAVED_LIBS}" + fi + else + enable_cxx="0" + fi fi -AC_PROG_CPP +AC_SUBST([enable_cxx]) +AC_SUBST([CONFIGURE_CXXFLAGS]) +AC_SUBST([SPECIFIED_CXXFLAGS]) +AC_SUBST([EXTRA_CXXFLAGS]) AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0]) if test "x${ac_cv_big_endian}" = "x1" ; then @@ -161,16 +318,21 @@ if test "x${ac_cv_big_endian}" = "x1" ; then fi if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then - CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat/C99" + JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat/C99) fi -AC_CHECK_SIZEOF([void *]) -if test "x${ac_cv_sizeof_void_p}" = "x8" ; then - LG_SIZEOF_PTR=3 -elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then - LG_SIZEOF_PTR=2 +if test "x${je_cv_msvc}" = "xyes" ; then + LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN + AC_MSG_RESULT([Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit]) else - AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}]) + AC_CHECK_SIZEOF([void *]) + if test "x${ac_cv_sizeof_void_p}" = "x8" ; then + LG_SIZEOF_PTR=3 + elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then + LG_SIZEOF_PTR=2 + else + AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}]) + fi fi AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR]) @@ -194,6 +356,16 @@ else fi AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG], [$LG_SIZEOF_LONG]) +AC_CHECK_SIZEOF([long long]) +if test "x${ac_cv_sizeof_long_long}" = "x8" ; then + LG_SIZEOF_LONG_LONG=3 +elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then + LG_SIZEOF_LONG_LONG=2 +else + AC_MSG_ERROR([Unsupported long long size: ${ac_cv_sizeof_long_long}]) +fi +AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG_LONG], [$LG_SIZEOF_LONG_LONG]) + AC_CHECK_SIZEOF([intmax_t]) if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then LG_SIZEOF_INTMAX_T=4 @@ -211,22 +383,119 @@ dnl CPU-specific settings.
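One note on the sizeof checks above before the CPU-specific settings begin: the LG_SIZEOF_* values are base-2 logarithms of a type's size in bytes (an 8-byte pointer gives 3, a 4-byte pointer gives 2), so the byte width is recovered with a shift. A tiny sh check of that encoding:

    LG_SIZEOF_PTR=3
    echo "sizeof(void *) = $((1 << LG_SIZEOF_PTR)) bytes"   # 8 on an LP64 host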
CPU_SPINWAIT="" case "${host_cpu}" in i686|x86_64) - AC_CACHE_VAL([je_cv_pause], - [JE_COMPILABLE([pause instruction], [], - [[__asm__ volatile("pause"); return 0;]], - [je_cv_pause])]) - if test "x${je_cv_pause}" = "xyes" ; then - CPU_SPINWAIT='__asm__ volatile("pause")' + HAVE_CPU_SPINWAIT=1 + if test "x${je_cv_msvc}" = "xyes" ; then + AC_CACHE_VAL([je_cv_pause_msvc], + [JE_COMPILABLE([pause instruction MSVC], [], + [[_mm_pause(); return 0;]], + [je_cv_pause_msvc])]) + if test "x${je_cv_pause_msvc}" = "xyes" ; then + CPU_SPINWAIT='_mm_pause()' + fi + else + AC_CACHE_VAL([je_cv_pause], + [JE_COMPILABLE([pause instruction], [], + [[__asm__ volatile("pause"); return 0;]], + [je_cv_pause])]) + if test "x${je_cv_pause}" = "xyes" ; then + CPU_SPINWAIT='__asm__ volatile("pause")' + fi fi ;; - powerpc) - AC_DEFINE_UNQUOTED([HAVE_ALTIVEC], [ ]) - ;; *) + HAVE_CPU_SPINWAIT=0 ;; esac +AC_DEFINE_UNQUOTED([HAVE_CPU_SPINWAIT], [$HAVE_CPU_SPINWAIT]) AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT]) +AC_ARG_WITH([lg_vaddr], + [AS_HELP_STRING([--with-lg-vaddr=<lg-vaddr>], [Number of significant virtual address bits])], + [LG_VADDR="$with_lg_vaddr"], [LG_VADDR="detect"]) + +case "${host_cpu}" in + aarch64) + if test "x$LG_VADDR" = "xdetect"; then + AC_MSG_CHECKING([number of significant virtual address bits]) + if test "x${LG_SIZEOF_PTR}" = "x2" ; then + #aarch64 ILP32 + LG_VADDR=32 + else + #aarch64 LP64 + LG_VADDR=48 + fi + AC_MSG_RESULT([$LG_VADDR]) + fi + ;; + x86_64) + if test "x$LG_VADDR" = "xdetect"; then + AC_CACHE_CHECK([number of significant virtual address bits], + [je_cv_lg_vaddr], + AC_RUN_IFELSE([AC_LANG_PROGRAM( +[[ +#include <stdio.h> +#ifdef _WIN32 +#include <limits.h> +#include <intrin.h> +typedef unsigned __int32 uint32_t; +#else +#include <stdint.h> +#endif +]], [[ + uint32_t r[[4]]; + uint32_t eax_in = 0x80000008U; +#ifdef _WIN32 + __cpuid((int *)r, (int)eax_in); +#else + asm volatile ("cpuid" + : "=a" (r[[0]]), "=b" (r[[1]]), "=c" (r[[2]]), "=d" (r[[3]]) + : "a" (eax_in), "c" (0) + ); +#endif + uint32_t eax_out = r[[0]]; + uint32_t vaddr = ((eax_out & 0x0000ff00U) >> 8); + FILE *f = fopen("conftest.out", "w"); + if (f == NULL) { + return 1; + } + if (vaddr > (sizeof(void *) << 3)) { + vaddr = sizeof(void *) << 3; + } + fprintf(f, "%u", vaddr); + fclose(f); + return 0; +]])], + [je_cv_lg_vaddr=`cat conftest.out`], + [je_cv_lg_vaddr=error], + [je_cv_lg_vaddr=57])) + if test "x${je_cv_lg_vaddr}" != "x" ; then + LG_VADDR="${je_cv_lg_vaddr}" + fi + if test "x${LG_VADDR}" != "xerror" ; then + AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR]) + else + AC_MSG_ERROR([cannot determine number of significant virtual address bits]) + fi + fi + ;; + *) + if test "x$LG_VADDR" = "xdetect"; then + AC_MSG_CHECKING([number of significant virtual address bits]) + if test "x${LG_SIZEOF_PTR}" = "x3" ; then + LG_VADDR=64 + elif test "x${LG_SIZEOF_PTR}" = "x2" ; then + LG_VADDR=32 + elif test "x${LG_SIZEOF_PTR}" = "xLG_SIZEOF_PTR_WIN" ; then + LG_VADDR="(1U << (LG_SIZEOF_PTR_WIN+3))" + else + AC_MSG_ERROR([Unsupported lg(pointer size): ${LG_SIZEOF_PTR}]) + fi + AC_MSG_RESULT([$LG_VADDR]) + fi + ;; +esac +AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR]) + LD_PRELOAD_VAR="LD_PRELOAD" so="so" importlib="${so}" @@ -234,36 +503,53 @@ o="$ac_objext" a="a" exe="$ac_exeext" libprefix="lib" +link_whole_archive="0" DSO_LDFLAGS='-shared -Wl,-soname,$(@F)' RPATH='-Wl,-rpath,$(1)' SOREV="${so}.${rev}" PIC_CFLAGS='-fPIC -DPIC' CTARGET='-o $@' LDTARGET='-o $@' +TEST_LD_MODE= EXTRA_LDFLAGS= ARFLAGS='crus' AROUT=' $@' CC_MM=1 +if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then +
TEST_LD_MODE='-dynamic' +fi + +if test "x${je_cv_cray}" = "xyes" ; then + CC_MM= +fi + AN_MAKEVAR([AR], [AC_PROG_AR]) AN_PROGRAM([ar], [AC_PROG_AR]) AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)]) AC_PROG_AR +AN_MAKEVAR([NM], [AC_PROG_NM]) +AN_PROGRAM([nm], [AC_PROG_NM]) +AC_DEFUN([AC_PROG_NM], [AC_CHECK_TOOL(NM, nm, :)]) +AC_PROG_NM + +AC_PROG_AWK + dnl Platform-specific settings. abi and RPATH can probably be determined dnl programmatically, but doing so is error-prone, which makes it generally dnl not worth the trouble. -dnl +dnl dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the dnl definitions need to be seen before any headers are included, which is a pain dnl to make happen otherwise. -default_munmap="1" +default_retain="0" maps_coalesce="1" +DUMP_SYMS="${NM} -a" +SYM_PREFIX="" case "${host}" in *-*-darwin* | *-*-ios*) - CFLAGS="$CFLAGS" abi="macho" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) RPATH="" LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES" so="dylib" @@ -272,38 +558,58 @@ case "${host}" in DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)' SOREV="${rev}.${so}" sbrk_deprecated="1" + SYM_PREFIX="_" ;; *-*-freebsd*) - CFLAGS="$CFLAGS" abi="elf" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) + AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ]) force_lazy_lock="1" ;; *-*-dragonfly*) - CFLAGS="$CFLAGS" abi="elf" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) ;; *-*-openbsd*) - CFLAGS="$CFLAGS" abi="elf" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) force_tls="0" ;; *-*-bitrig*) - CFLAGS="$CFLAGS" abi="elf" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) + ;; + *-*-linux-android) + dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE. + JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE) + abi="elf" + AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ]) + AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) + AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ]) + AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) + AC_DEFINE([JEMALLOC_C11_ATOMICS]) + force_tls="0" + if test "${LG_SIZEOF_PTR}" = "3"; then + default_retain="1" + fi ;; *-*-linux*) - CFLAGS="$CFLAGS" - CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" + dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE. + JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE) abi="elf" + AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ]) AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) - AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ]) + AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ]) + AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) + AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ]) + if test "${LG_SIZEOF_PTR}" = "3"; then + default_retain="1" + fi + ;; + *-*-kfreebsd*) + dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE. + JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE) + abi="elf" + AC_DEFINE([JEMALLOC_HAS_ALLOCA_H]) + AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ]) AC_DEFINE([JEMALLOC_THREADED_INIT], [ ]) AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ]) - default_munmap="0" ;; *-*-netbsd*) AC_MSG_CHECKING([ABI]) @@ -314,22 +620,19 @@ case "${host}" in #error aout #endif ]])], - [CFLAGS="$CFLAGS"; abi="elf"], + [abi="elf"], [abi="aout"]) AC_MSG_RESULT([$abi]) - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) ;; *-*-solaris2*) - CFLAGS="$CFLAGS" abi="elf" - AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) RPATH='-Wl,-R,$(1)' dnl Solaris needs this for sigwait(). 
- CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS" - LIBS="$LIBS -lposix4 -lsocket -lnsl" + JE_APPEND_VS(CPPFLAGS, -D_POSIX_PTHREAD_SEMANTICS) + JE_APPEND_VS(LIBS, -lposix4 -lsocket -lnsl) ;; *-ibm-aix*) - if "$LG_SIZEOF_PTR" = "8"; then + if test "${LG_SIZEOF_PTR}" = "3"; then dnl 64bit AIX LD_PRELOAD_VAR="LDR_PRELOAD64" else @@ -341,7 +644,6 @@ case "${host}" in *-*-mingw* | *-*-cygwin*) abi="pecoff" force_tls="0" - force_lazy_lock="1" maps_coalesce="0" RPATH="" so="dll" @@ -358,7 +660,15 @@ case "${host}" in else importlib="${so}" DSO_LDFLAGS="-shared" + link_whole_archive="1" fi + case "${host}" in + *-*-cygwin*) + DUMP_SYMS="dumpbin /SYMBOLS" + ;; + *) + ;; + esac a="lib" libprefix="" SOREV="${so}" @@ -395,17 +705,29 @@ AC_SUBST([o]) AC_SUBST([a]) AC_SUBST([exe]) AC_SUBST([libprefix]) +AC_SUBST([link_whole_archive]) AC_SUBST([DSO_LDFLAGS]) AC_SUBST([EXTRA_LDFLAGS]) AC_SUBST([SOREV]) AC_SUBST([PIC_CFLAGS]) AC_SUBST([CTARGET]) AC_SUBST([LDTARGET]) +AC_SUBST([TEST_LD_MODE]) AC_SUBST([MKLIB]) AC_SUBST([ARFLAGS]) AC_SUBST([AROUT]) +AC_SUBST([DUMP_SYMS]) AC_SUBST([CC_MM]) +dnl Determine whether libm must be linked to use e.g. log(3). +AC_SEARCH_LIBS([log], [m], , [AC_MSG_ERROR([Missing math functions])]) +if test "x$ac_cv_search_log" != "xnone required" ; then + LM="$ac_cv_search_log" +else + LM= +fi +AC_SUBST(LM) + JE_COMPILABLE([__attribute__ syntax], [static __attribute__((unused)) void foo(void){}], [], @@ -413,51 +735,53 @@ JE_COMPILABLE([__attribute__ syntax], if test "x${je_cv_attribute}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ]) if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then - JE_CFLAGS_APPEND([-fvisibility=hidden]) + JE_CFLAGS_ADD([-fvisibility=hidden]) + JE_CXXFLAGS_ADD([-fvisibility=hidden]) fi fi dnl Check for tls_model attribute support (clang 3.0 still lacks support). -SAVED_CFLAGS="${CFLAGS}" -JE_CFLAGS_APPEND([-Werror]) +JE_CFLAGS_SAVE() +JE_CFLAGS_ADD([-Werror]) +JE_CFLAGS_ADD([-herror_on_warning]) JE_COMPILABLE([tls_model attribute], [], [static __thread int __attribute__((tls_model("initial-exec"), unused)) foo; foo = 0;], [je_cv_tls_model]) -CFLAGS="${SAVED_CFLAGS}" -if test "x${je_cv_tls_model}" = "xyes" ; then - AC_DEFINE([JEMALLOC_TLS_MODEL], - [__attribute__((tls_model("initial-exec")))]) -else - AC_DEFINE([JEMALLOC_TLS_MODEL], [ ]) -fi +JE_CFLAGS_RESTORE() +dnl (Setting of JEMALLOC_TLS_MODEL is done later, after we've checked for +dnl --disable-initial-exec-tls) + dnl Check for alloc_size attribute support. -SAVED_CFLAGS="${CFLAGS}" -JE_CFLAGS_APPEND([-Werror]) +JE_CFLAGS_SAVE() +JE_CFLAGS_ADD([-Werror]) +JE_CFLAGS_ADD([-herror_on_warning]) JE_COMPILABLE([alloc_size attribute], [#include ], [void *foo(size_t size) __attribute__((alloc_size(1)));], [je_cv_alloc_size]) -CFLAGS="${SAVED_CFLAGS}" +JE_CFLAGS_RESTORE() if test "x${je_cv_alloc_size}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR_ALLOC_SIZE], [ ]) fi dnl Check for format(gnu_printf, ...) attribute support. -SAVED_CFLAGS="${CFLAGS}" -JE_CFLAGS_APPEND([-Werror]) +JE_CFLAGS_SAVE() +JE_CFLAGS_ADD([-Werror]) +JE_CFLAGS_ADD([-herror_on_warning]) JE_COMPILABLE([format(gnu_printf, ...) attribute], [#include ], [void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));], [je_cv_format_gnu_printf]) -CFLAGS="${SAVED_CFLAGS}" +JE_CFLAGS_RESTORE() if test "x${je_cv_format_gnu_printf}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF], [ ]) fi dnl Check for format(printf, ...) attribute support. 
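The alloc_size and format attribute probes in this part of the diff compile declarations like the ones below; when they succeed, jemalloc annotates its own prototypes the same way so compilers can flag misuse at call sites. A sketch with hypothetical function names, using GCC/Clang attribute syntax:

    #include <stddef.h>

    /* alloc_size(1): the returned buffer holds `size` bytes, which feeds
     * __builtin_object_size() and out-of-bounds warnings at call sites. */
    void *my_alloc(size_t size) __attribute__((alloc_size(1)));

    /* format(printf, 1, 2): argument 1 is the format string and variadic
     * arguments begin at position 2, enabling format/argument checking. */
    void my_log(const char *format, ...) __attribute__((format(printf, 1, 2)));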
-SAVED_CFLAGS="${CFLAGS}" -JE_CFLAGS_APPEND([-Werror]) +JE_CFLAGS_SAVE() +JE_CFLAGS_ADD([-Werror]) +JE_CFLAGS_ADD([-herror_on_warning]) JE_COMPILABLE([format(printf, ...) attribute], [#include <stdio.h>], [void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));], [je_cv_format_printf]) -CFLAGS="${SAVED_CFLAGS}" +JE_CFLAGS_RESTORE() if test "x${je_cv_format_printf}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ]) fi @@ -492,41 +816,6 @@ AC_PROG_RANLIB AC_PATH_PROG([LD], [ld], [false], [$PATH]) AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH]) -public_syms="malloc_conf malloc_message malloc calloc posix_memalign aligned_alloc realloc free mallocx rallocx xallocx sallocx dallocx sdallocx nallocx mallctl mallctlnametomib mallctlbymib malloc_stats_print malloc_usable_size" - -dnl Check for allocator-related functions that should be wrapped. -AC_CHECK_FUNC([memalign], - [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ]) - public_syms="${public_syms} memalign"]) -AC_CHECK_FUNC([valloc], - [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ]) - public_syms="${public_syms} valloc"]) - -dnl Do not compute test code coverage by default. -GCOV_FLAGS= -AC_ARG_ENABLE([code-coverage], - [AS_HELP_STRING([--enable-code-coverage], - [Enable code coverage])], -[if test "x$enable_code_coverage" = "xno" ; then - enable_code_coverage="0" -else - enable_code_coverage="1" -fi -], -[enable_code_coverage="0"] -) -if test "x$enable_code_coverage" = "x1" ; then - deoptimize="no" - echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || deoptimize="yes" - if test "x${deoptimize}" = "xyes" ; then - JE_CFLAGS_APPEND([-O0]) - fi - JE_CFLAGS_APPEND([-fprofile-arcs -ftest-coverage]) - EXTRA_LDFLAGS="$EXTRA_LDFLAGS -fprofile-arcs -ftest-coverage" - AC_DEFINE([JEMALLOC_CODE_COVERAGE], [ ]) -fi -AC_SUBST([enable_code_coverage]) - dnl Perform no name mangling by default. AC_ARG_WITH([mangling], [AS_HELP_STRING([--with-mangling=<map>], [Mangle symbols in <map>])], @@ -542,11 +831,14 @@ else JEMALLOC_PREFIX="je_" fi] ) -if test "x$JEMALLOC_PREFIX" != "x" ; then +if test "x$JEMALLOC_PREFIX" = "x" ; then + AC_DEFINE([JEMALLOC_IS_MALLOC]) +else JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"` AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"]) AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"]) fi +AC_SUBST([JEMALLOC_PREFIX]) AC_SUBST([JEMALLOC_CPREFIX]) AC_ARG_WITH([export], @@ -556,6 +848,49 @@ AC_ARG_WITH([export], fi] ) +public_syms="aligned_alloc calloc dallocx free mallctl mallctlbymib mallctlnametomib malloc malloc_conf malloc_message malloc_stats_print malloc_usable_size mallocx nallocx posix_memalign rallocx realloc sallocx sdallocx xallocx" +dnl Check for additional platform-specific public API functions. +AC_CHECK_FUNC([memalign], + [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ]) + public_syms="${public_syms} memalign"]) +AC_CHECK_FUNC([valloc], + [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ]) + public_syms="${public_syms} valloc"]) + +dnl Check for allocator-related functions that should be wrapped.
+wrap_syms= +if test "x${JEMALLOC_PREFIX}" = "x" ; then + AC_CHECK_FUNC([__libc_calloc], + [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_CALLOC], [ ]) + wrap_syms="${wrap_syms} __libc_calloc"]) + AC_CHECK_FUNC([__libc_free], + [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_FREE], [ ]) + wrap_syms="${wrap_syms} __libc_free"]) + AC_CHECK_FUNC([__libc_malloc], + [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MALLOC], [ ]) + wrap_syms="${wrap_syms} __libc_malloc"]) + AC_CHECK_FUNC([__libc_memalign], + [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MEMALIGN], [ ]) + wrap_syms="${wrap_syms} __libc_memalign"]) + AC_CHECK_FUNC([__libc_realloc], + [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_REALLOC], [ ]) + wrap_syms="${wrap_syms} __libc_realloc"]) + AC_CHECK_FUNC([__libc_valloc], + [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_VALLOC], [ ]) + wrap_syms="${wrap_syms} __libc_valloc"]) + AC_CHECK_FUNC([__posix_memalign], + [AC_DEFINE([JEMALLOC_OVERRIDE___POSIX_MEMALIGN], [ ]) + wrap_syms="${wrap_syms} __posix_memalign"]) +fi + +case "${host}" in + *-*-mingw* | *-*-cygwin*) + wrap_syms="${wrap_syms} tls_callback" + ;; + *) + ;; +esac + dnl Mangle library-private APIs. AC_ARG_WITH([private_namespace], [AS_HELP_STRING([--with-private-namespace=], [Prefix to prepend to all library-private APIs])], @@ -575,6 +910,15 @@ AC_ARG_WITH([install_suffix], install_suffix="$INSTALL_SUFFIX" AC_SUBST([install_suffix]) +dnl Specify default malloc_conf. +AC_ARG_WITH([malloc_conf], + [AS_HELP_STRING([--with-malloc-conf=], [config.malloc_conf options string])], + [JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf"], + [JEMALLOC_CONFIG_MALLOC_CONF=""] +) +config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF" +AC_DEFINE_UNQUOTED([JEMALLOC_CONFIG_MALLOC_CONF], ["$config_malloc_conf"]) + dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of dnl jemalloc_protos_jet.h easy. 
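The new --with-malloc-conf option bakes a default options string into the library, read before any of the run-time sources described in the man page later in this patch. An application can get a similar effect at link time by defining the malloc_conf global itself; a sketch, with illustrative option values rather than recommendations:

    /* Read by jemalloc before main() runs, so it must be a constant
     * initializer, not assigned at runtime. */
    const char *malloc_conf = "narenas:4,tcache:false";

With a non-empty --with-jemalloc-prefix the variable name is prefixed accordingly (e.g. je_malloc_conf).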
je_="je_" @@ -588,7 +932,7 @@ cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in" cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in" -cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_internal.h.in" +cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_preamble.h.in" cfgoutputs_in="${cfgoutputs_in} test/test.sh.in" cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in" @@ -600,7 +944,7 @@ cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h" cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h" -cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_internal.h" +cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_preamble.h" cfgoutputs_out="${cfgoutputs_out} test/test.sh" cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h" @@ -612,15 +956,14 @@ cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in" cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in" -cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_internal.h" +cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_preamble.h" cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in" cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in" cfghdrs_in="include/jemalloc/jemalloc_defs.h.in" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in" +cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh" -cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_unnamespace.sh" -cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.txt" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh" cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/size_classes.sh" @@ -631,8 +974,8 @@ cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in" cfghdrs_out="include/jemalloc/jemalloc_defs.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h" -cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_namespace.h" -cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_unnamespace.h" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols.awk" +cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols_jet.awk" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h" cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h" @@ -648,26 +991,10 @@ cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.i cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in" 
cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in" -dnl Silence irrelevant compiler warnings by default. -AC_ARG_ENABLE([cc-silence], - [AS_HELP_STRING([--disable-cc-silence], - [Do not silence irrelevant compiler warnings])], -[if test "x$enable_cc_silence" = "xno" ; then - enable_cc_silence="0" -else - enable_cc_silence="1" -fi -], -[enable_cc_silence="1"] -) -if test "x$enable_cc_silence" = "x1" ; then - AC_DEFINE([JEMALLOC_CC_SILENCE], [ ]) -fi - dnl Do not compile with debugging by default. AC_ARG_ENABLE([debug], [AS_HELP_STRING([--enable-debug], - [Build debugging code (implies --enable-ivsalloc)])], + [Build debugging code])], [if test "x$enable_debug" = "xno" ; then enable_debug="0" else @@ -681,40 +1008,21 @@ if test "x$enable_debug" = "x1" ; then fi if test "x$enable_debug" = "x1" ; then AC_DEFINE([JEMALLOC_DEBUG], [ ]) - enable_ivsalloc="1" fi AC_SUBST([enable_debug]) -dnl Do not validate pointers by default. -AC_ARG_ENABLE([ivsalloc], - [AS_HELP_STRING([--enable-ivsalloc], - [Validate pointers passed through the public API])], -[if test "x$enable_ivsalloc" = "xno" ; then - enable_ivsalloc="0" -else - enable_ivsalloc="1" -fi -], -[enable_ivsalloc="0"] -) -if test "x$enable_ivsalloc" = "x1" ; then - AC_DEFINE([JEMALLOC_IVSALLOC], [ ]) -fi - dnl Only optimize if not debugging. -if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then - dnl Make sure that an optimization flag was not specified in EXTRA_CFLAGS. - optimize="no" - echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || optimize="yes" - if test "x${optimize}" = "xyes" ; then - if test "x$GCC" = "xyes" ; then - JE_CFLAGS_APPEND([-O3]) - JE_CFLAGS_APPEND([-funroll-loops]) - elif test "x$je_cv_msvc" = "xyes" ; then - JE_CFLAGS_APPEND([-O2]) - else - JE_CFLAGS_APPEND([-O]) - fi +if test "x$enable_debug" = "x0" ; then + if test "x$GCC" = "xyes" ; then + JE_CFLAGS_ADD([-O3]) + JE_CXXFLAGS_ADD([-O3]) + JE_CFLAGS_ADD([-funroll-loops]) + elif test "x$je_cv_msvc" = "xyes" ; then + JE_CFLAGS_ADD([-O2]) + JE_CXXFLAGS_ADD([-O2]) + else + JE_CFLAGS_ADD([-O]) + JE_CXXFLAGS_ADD([-O]) fi fi @@ -778,10 +1086,10 @@ fi, if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then AC_CHECK_HEADERS([libunwind.h], , [enable_prof_libunwind="0"]) if test "x$LUNWIND" = "x-lunwind" ; then - AC_CHECK_LIB([unwind], [unw_backtrace], [LIBS="$LIBS $LUNWIND"], + AC_CHECK_LIB([unwind], [unw_backtrace], [JE_APPEND_VS(LIBS, $LUNWIND)], [enable_prof_libunwind="0"]) else - LIBS="$LIBS $LUNWIND" + JE_APPEND_VS(LIBS, $LUNWIND) fi if test "x${enable_prof_libunwind}" = "x1" ; then backtrace_method="libunwind" @@ -803,7 +1111,9 @@ fi if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \ -a "x$GCC" = "xyes" ; then AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"]) - AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [LIBS="$LIBS -lgcc"], [enable_prof_libgcc="0"]) + if test "x${enable_prof_libgcc}" = "x1" ; then + AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [JE_APPEND_VS(LIBS, -lgcc)], [enable_prof_libgcc="0"]) + fi if test "x${enable_prof_libgcc}" = "x1" ; then backtrace_method="libgcc" AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ]) @@ -825,7 +1135,7 @@ fi ) if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \ -a "x$GCC" = "xyes" ; then - JE_CFLAGS_APPEND([-fno-omit-frame-pointer]) + JE_CFLAGS_ADD([-fno-omit-frame-pointer]) backtrace_method="gcc intrinsics" AC_DEFINE([JEMALLOC_PROF_GCC], [ ]) else @@ -839,52 +1149,23 @@ fi AC_MSG_CHECKING([configured backtracing 
method]) AC_MSG_RESULT([$backtrace_method]) if test "x$enable_prof" = "x1" ; then - if test "x$abi" != "xpecoff"; then - dnl Heap profiling uses the log(3) function. - LIBS="$LIBS -lm" - fi + dnl Heap profiling uses the log(3) function. + JE_APPEND_VS(LIBS, $LM) AC_DEFINE([JEMALLOC_PROF], [ ]) fi AC_SUBST([enable_prof]) -dnl Enable thread-specific caching by default. -AC_ARG_ENABLE([tcache], - [AS_HELP_STRING([--disable-tcache], [Disable per thread caches])], -[if test "x$enable_tcache" = "xno" ; then - enable_tcache="0" -else - enable_tcache="1" -fi -], -[enable_tcache="1"] -) -if test "x$enable_tcache" = "x1" ; then - AC_DEFINE([JEMALLOC_TCACHE], [ ]) -fi -AC_SUBST([enable_tcache]) - dnl Indicate whether adjacent virtual memory mappings automatically coalesce dnl (and fragment on demand). if test "x${maps_coalesce}" = "x1" ; then AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ]) fi -dnl Enable VM deallocation via munmap() by default. -AC_ARG_ENABLE([munmap], - [AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])], -[if test "x$enable_munmap" = "xno" ; then - enable_munmap="0" -else - enable_munmap="1" +dnl Indicate whether to retain memory (rather than using munmap()) by default. +if test "x$default_retain" = "x1" ; then + AC_DEFINE([JEMALLOC_RETAIN], [ ]) fi -], -[enable_munmap="${default_munmap}"] -) -if test "x$enable_munmap" = "x1" ; then - AC_DEFINE([JEMALLOC_MUNMAP], [ ]) -fi -AC_SUBST([enable_munmap]) dnl Enable allocation from DSS if supported by the OS. have_dss="1" @@ -905,8 +1186,7 @@ fi dnl Support the junk/zero filling option by default. AC_ARG_ENABLE([fill], - [AS_HELP_STRING([--disable-fill], - [Disable support for junk/zero filling, quarantine, and redzones])], + [AS_HELP_STRING([--disable-fill], [Disable support for junk/zero filling])], [if test "x$enable_fill" = "xno" ; then enable_fill="0" else @@ -948,35 +1228,6 @@ if test "x$enable_utrace" = "x1" ; then fi AC_SUBST([enable_utrace]) -dnl Support Valgrind by default. -AC_ARG_ENABLE([valgrind], - [AS_HELP_STRING([--disable-valgrind], [Disable support for Valgrind])], -[if test "x$enable_valgrind" = "xno" ; then - enable_valgrind="0" -else - enable_valgrind="1" -fi -], -[enable_valgrind="1"] -) -if test "x$enable_valgrind" = "x1" ; then - JE_COMPILABLE([valgrind], [ -#include -#include - -#if !defined(VALGRIND_RESIZEINPLACE_BLOCK) -# error "Incompatible Valgrind version" -#endif -], [], [je_cv_valgrind]) - if test "x${je_cv_valgrind}" = "xno" ; then - enable_valgrind="0" - fi - if test "x$enable_valgrind" = "x1" ; then - AC_DEFINE([JEMALLOC_VALGRIND], [ ]) - fi -fi -AC_SUBST([enable_valgrind]) - dnl Do not support the xmalloc option by default. AC_ARG_ENABLE([xmalloc], [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])], @@ -1010,11 +1261,43 @@ if test "x$enable_cache_oblivious" = "x1" ; then fi AC_SUBST([enable_cache_oblivious]) +dnl Do not log by default. 
+AC_ARG_ENABLE([log], + [AS_HELP_STRING([--enable-log], [Support debug logging])], +[if test "x$enable_log" = "xno" ; then + enable_log="0" +else + enable_log="1" +fi +], +[enable_log="0"] +) +if test "x$enable_log" = "x1" ; then + AC_DEFINE([JEMALLOC_LOG], [ ]) +fi +AC_SUBST([enable_log]) + + +JE_COMPILABLE([a program using __builtin_unreachable], [ +void foo (void) { + __builtin_unreachable(); +} +], [ + { + foo(); + } +], [je_cv_gcc_builtin_unreachable]) +if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then + AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [__builtin_unreachable]) +else + AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [abort]) +fi + dnl ============================================================================ dnl Check for __builtin_ffsl(), then ffsl(3), and fail if neither are found. dnl One of those two functions should (theoretically) exist on all platforms dnl that jemalloc currently has a chance of functioning on without modification. -dnl We additionally assume ffs() or __builtin_ffs() are defined if +dnl We additionally assume ffs[ll]() or __builtin_ffs[ll]() are defined if dnl ffsl() or __builtin_ffsl() are defined, respectively. JE_COMPILABLE([a program using __builtin_ffsl], [ #include @@ -1027,6 +1310,7 @@ JE_COMPILABLE([a program using __builtin_ffsl], [ } ], [je_cv_gcc_builtin_ffsl]) if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then + AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [__builtin_ffsll]) AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [__builtin_ffsl]) AC_DEFINE([JEMALLOC_INTERNAL_FFS], [__builtin_ffs]) else @@ -1041,6 +1325,7 @@ else } ], [je_cv_function_ffsl]) if test "x${je_cv_function_ffsl}" = "xyes" ; then + AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [ffsll]) AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [ffsl]) AC_DEFINE([JEMALLOC_INTERNAL_FFS], [ffs]) else @@ -1048,13 +1333,6 @@ else fi fi -AC_ARG_WITH([lg_tiny_min], - [AS_HELP_STRING([--with-lg-tiny-min=], - [Base 2 log of minimum tiny size class to support])], - [LG_TINY_MIN="$with_lg_tiny_min"], - [LG_TINY_MIN="3"]) -AC_DEFINE_UNQUOTED([LG_TINY_MIN], [$LG_TINY_MIN]) - AC_ARG_WITH([lg_quantum], [AS_HELP_STRING([--with-lg-quantum=], [Base 2 log of minimum allocation alignment])], @@ -1100,7 +1378,7 @@ if test "x$LG_PAGE" = "xdetect"; then if (f == NULL) { return 1; } - fprintf(f, "%d\n", result); + fprintf(f, "%d", result); fclose(f); return 0; @@ -1118,42 +1396,83 @@ else AC_MSG_ERROR([cannot determine value for LG_PAGE]) fi +AC_ARG_WITH([lg_hugepage], + [AS_HELP_STRING([--with-lg-hugepage=], + [Base 2 log of system huge page size])], + [je_cv_lg_hugepage="${with_lg_hugepage}"], + [je_cv_lg_hugepage=""]) +if test "x${je_cv_lg_hugepage}" = "x" ; then + dnl Look in /proc/meminfo (Linux-specific) for information on the default huge + dnl page size, if any. The relevant line looks like: + dnl + dnl Hugepagesize: 2048 kB + if test -e "/proc/meminfo" ; then + hpsk=[`cat /proc/meminfo 2>/dev/null | \ + grep -e '^Hugepagesize:[[:space:]]\+[0-9]\+[[:space:]]kB$' | \ + awk '{print $2}'`] + if test "x${hpsk}" != "x" ; then + je_cv_lg_hugepage=10 + while test "${hpsk}" -gt 1 ; do + hpsk="$((hpsk / 2))" + je_cv_lg_hugepage="$((je_cv_lg_hugepage + 1))" + done + fi + fi + + dnl Set default if unable to automatically configure. 
+ if test "x${je_cv_lg_hugepage}" = "x" ; then + je_cv_lg_hugepage=21 + fi +fi +if test "x${LG_PAGE}" != "xundefined" -a \ + "${je_cv_lg_hugepage}" -lt "${LG_PAGE}" ; then + AC_MSG_ERROR([Huge page size (2^${je_cv_lg_hugepage}) must be at least page size (2^${LG_PAGE})]) +fi +AC_DEFINE_UNQUOTED([LG_HUGEPAGE], [${je_cv_lg_hugepage}]) + AC_ARG_WITH([lg_page_sizes], [AS_HELP_STRING([--with-lg-page-sizes=], [Base 2 logs of system page sizes to support])], [LG_PAGE_SIZES="$with_lg_page_sizes"], [LG_PAGE_SIZES="$LG_PAGE"]) -AC_ARG_WITH([lg_size_class_group], - [AS_HELP_STRING([--with-lg-size-class-group=], - [Base 2 log of size classes per doubling])], - [LG_SIZE_CLASS_GROUP="$with_lg_size_class_group"], - [LG_SIZE_CLASS_GROUP="2"]) - dnl ============================================================================ dnl jemalloc configuration. -dnl - -dnl Set VERSION if source directory is inside a git repository. -if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then - dnl Pattern globs aren't powerful enough to match both single- and - dnl double-digit version numbers, so iterate over patterns to support up to - dnl version 99.99.99 without any accidental matches. - rm -f "${objroot}VERSION" - for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \ - '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \ - '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \ - '[0-9][0-9].[0-9][0-9].[0-9]' \ - '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do - if test ! -e "${objroot}VERSION" ; then - (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null - if test $? -eq 0 ; then - mv "${objroot}VERSION.tmp" "${objroot}VERSION" - break +dnl + +AC_ARG_WITH([version], + [AS_HELP_STRING([--with-version=..--g], + [Version string])], + [ + echo "${with_version}" | grep ['^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$'] 2>&1 1>/dev/null + if test $? -eq 0 ; then + echo "$with_version" > "${objroot}VERSION" + else + echo "${with_version}" | grep ['^VERSION$'] 2>&1 1>/dev/null + if test $? -ne 0 ; then + AC_MSG_ERROR([${with_version} does not match ..--g or VERSION]) fi fi - done -fi -rm -f "${objroot}VERSION.tmp" + ], [ + dnl Set VERSION if source directory is inside a git repository. + if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then + dnl Pattern globs aren't powerful enough to match both single- and + dnl double-digit version numbers, so iterate over patterns to support up + dnl to version 99.99.99 without any accidental matches. + for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \ + '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \ + '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \ + '[0-9][0-9].[0-9][0-9].[0-9]' \ + '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do + (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null + if test $? -eq 0 ; then + mv "${objroot}VERSION.tmp" "${objroot}VERSION" + break + fi + done + fi + rm -f "${objroot}VERSION.tmp" + ]) + if test ! -e "${objroot}VERSION" ; then if test ! -e "${srcroot}VERSION" ; then AC_MSG_RESULT( @@ -1180,23 +1499,128 @@ dnl ============================================================================ dnl Configure pthreads. 
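The /proc/meminfo probe above halves the reported kB figure until it reaches 1, counting doublings upward from lg(1024) = 10. An equivalent standalone, Linux-only C sketch (assumes a "Hugepagesize:" line is present, and falls back to 21, i.e. 2 MiB, as the patch does):

    #include <stdio.h>

    int main(void) {
        FILE *f = fopen("/proc/meminfo", "r");
        char line[256];
        unsigned long kb;
        int lg = 21; /* fallback default: 2 MiB */
        if (f != NULL) {
            while (fgets(line, sizeof line, f) != NULL) {
                if (sscanf(line, "Hugepagesize: %lu kB", &kb) == 1) {
                    /* Value is in kB, so start at lg(1024) = 10. */
                    for (lg = 10; kb > 1; kb /= 2) {
                        lg++;
                    }
                    break;
                }
            }
            fclose(f);
        }
        printf("LG_HUGEPAGE=%d\n", lg);
        return 0;
    }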
if test "x$abi" != "xpecoff" ; then + AC_DEFINE([JEMALLOC_HAVE_PTHREAD], [ ]) AC_CHECK_HEADERS([pthread.h], , [AC_MSG_ERROR([pthread.h is missing])]) dnl Some systems may embed pthreads functionality in libc; check for libpthread dnl first, but try libc too before failing. - AC_CHECK_LIB([pthread], [pthread_create], [LIBS="$LIBS -lpthread"], + AC_CHECK_LIB([pthread], [pthread_create], [JE_APPEND_VS(LIBS, -lpthread)], [AC_SEARCH_LIBS([pthread_create], , , AC_MSG_ERROR([libpthread is missing]))]) + wrap_syms="${wrap_syms} pthread_create" + have_pthread="1" + dnl Check if we have dlsym support. + have_dlsym="1" + AC_CHECK_HEADERS([dlfcn.h], + AC_CHECK_FUNC([dlsym], [], + [AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"], [have_dlsym="0"])]), + [have_dlsym="0"]) + if test "x$have_dlsym" = "x1" ; then + AC_DEFINE([JEMALLOC_HAVE_DLSYM], [ ]) + fi + JE_COMPILABLE([pthread_atfork(3)], [ +#include +], [ + pthread_atfork((void *)0, (void *)0, (void *)0); +], [je_cv_pthread_atfork]) + if test "x${je_cv_pthread_atfork}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_PTHREAD_ATFORK], [ ]) + fi + dnl Check if pthread_setname_np is available with the expected API. + JE_COMPILABLE([pthread_setname_np(3)], [ +#include +], [ + pthread_setname_np(pthread_self(), "setname_test"); +], [je_cv_pthread_setname_np]) + if test "x${je_cv_pthread_setname_np}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_PTHREAD_SETNAME_NP], [ ]) + fi +fi + +JE_APPEND_VS(CPPFLAGS, -D_REENTRANT) + +dnl Check whether clock_gettime(2) is in libc or librt. +AC_SEARCH_LIBS([clock_gettime], [rt]) + +dnl Cray wrapper compiler often adds `-lrt` when using `-static`. Check with +dnl `-dynamic` as well in case a user tries to dynamically link in jemalloc +if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then + if test "$ac_cv_search_clock_gettime" != "-lrt"; then + JE_CFLAGS_SAVE() + + unset ac_cv_search_clock_gettime + JE_CFLAGS_ADD([-dynamic]) + AC_SEARCH_LIBS([clock_gettime], [rt]) + + JE_CFLAGS_RESTORE() + fi +fi + +dnl check for CLOCK_MONOTONIC_COARSE (Linux-specific). +JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC_COARSE, ...)], [ +#include +], [ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); +], [je_cv_clock_monotonic_coarse]) +if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE]) +fi + +dnl check for CLOCK_MONOTONIC. +JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC, ...)], [ +#include +#include +], [ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); +#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0 +# error _POSIX_MONOTONIC_CLOCK missing/invalid +#endif +], [je_cv_clock_monotonic]) +if test "x${je_cv_clock_monotonic}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC]) fi -CPPFLAGS="$CPPFLAGS -D_REENTRANT" +dnl Check for mach_absolute_time(). +JE_COMPILABLE([mach_absolute_time()], [ +#include +], [ + mach_absolute_time(); +], [je_cv_mach_absolute_time]) +if test "x${je_cv_mach_absolute_time}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_MACH_ABSOLUTE_TIME]) +fi -dnl Check whether clock_gettime(2) is in libc or librt. This function is only -dnl used in test code, so save the result to TESTLIBS to avoid poluting LIBS. -SAVED_LIBS="${LIBS}" -LIBS= -AC_SEARCH_LIBS([clock_gettime], [rt], [TESTLIBS="${LIBS}"]) -AC_SUBST([TESTLIBS]) -LIBS="${SAVED_LIBS}" +dnl Use syscall(2) (if available) by default. 
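CLOCK_MONOTONIC_COARSE trades resolution for a much cheaper read, which suits the allocator's periodic housekeeping; the probes above let it prefer the coarse clock on Linux and fall back elsewhere. A hedged sketch of that preference order:

    #include <stdio.h>
    #include <time.h>

    int main(void) {
        struct timespec ts;
    #ifdef CLOCK_MONOTONIC_COARSE
        clockid_t id = CLOCK_MONOTONIC_COARSE; /* Linux: cheap, lower resolution */
    #else
        clockid_t id = CLOCK_MONOTONIC;        /* portable fallback */
    #endif
        if (clock_gettime(id, &ts) != 0) {
            perror("clock_gettime");
            return 1;
        }
        printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
        return 0;
    }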
+AC_ARG_ENABLE([syscall], + [AS_HELP_STRING([--disable-syscall], [Disable use of syscall(2)])], +[if test "x$enable_syscall" = "xno" ; then + enable_syscall="0" +else + enable_syscall="1" +fi +], +[enable_syscall="1"] +) +if test "x$enable_syscall" = "x1" ; then + dnl Check if syscall(2) is usable. Treat warnings as errors, so that e.g. OS + dnl X 10.12's deprecation warning prevents use. + JE_CFLAGS_SAVE() + JE_CFLAGS_ADD([-Werror]) + JE_COMPILABLE([syscall(2)], [ +#include +#include +], [ + syscall(SYS_write, 2, "hello", 5); +], + [je_cv_syscall]) + JE_CFLAGS_RESTORE() + if test "x$je_cv_syscall" = "xyes" ; then + AC_DEFINE([JEMALLOC_USE_SYSCALL], [ ]) + fi +fi dnl Check if the GNU-specific secure_getenv function exists. AC_CHECK_FUNC([secure_getenv], @@ -1207,6 +1631,24 @@ if test "x$have_secure_getenv" = "x1" ; then AC_DEFINE([JEMALLOC_HAVE_SECURE_GETENV], [ ]) fi +dnl Check if the GNU-specific sched_getcpu function exists. +AC_CHECK_FUNC([sched_getcpu], + [have_sched_getcpu="1"], + [have_sched_getcpu="0"] + ) +if test "x$have_sched_getcpu" = "x1" ; then + AC_DEFINE([JEMALLOC_HAVE_SCHED_GETCPU], [ ]) +fi + +dnl Check if the GNU-specific sched_setaffinity function exists. +AC_CHECK_FUNC([sched_setaffinity], + [have_sched_setaffinity="1"], + [have_sched_setaffinity="0"] + ) +if test "x$have_sched_setaffinity" = "x1" ; then + AC_DEFINE([JEMALLOC_HAVE_SCHED_SETAFFINITY], [ ]) +fi + dnl Check if the Solaris/BSD issetugid function exists. AC_CHECK_FUNC([issetugid], [have_issetugid="1"], @@ -1226,6 +1668,7 @@ AC_CHECK_FUNC([_malloc_thread_cleanup], ) if test "x$have__malloc_thread_cleanup" = "x1" ; then AC_DEFINE([JEMALLOC_MALLOC_THREAD_CLEANUP], [ ]) + wrap_syms="${wrap_syms} _malloc_thread_cleanup" force_tls="1" fi @@ -1238,6 +1681,7 @@ AC_CHECK_FUNC([_pthread_mutex_init_calloc_cb], ) if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then AC_DEFINE([JEMALLOC_MUTEX_INIT_CB]) + wrap_syms="${wrap_syms} _malloc_prefork _malloc_postfork" fi dnl Disable lazy locking by default. @@ -1252,45 +1696,35 @@ fi ], [enable_lazy_lock=""] ) -if test "x$enable_lazy_lock" = "x" -a "x${force_lazy_lock}" = "x1" ; then - AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues]) - enable_lazy_lock="1" +if test "x${enable_lazy_lock}" = "x" ; then + if test "x${force_lazy_lock}" = "x1" ; then + AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues]) + enable_lazy_lock="1" + else + enable_lazy_lock="0" + fi +fi +if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then + AC_MSG_RESULT([Forcing no lazy-lock because thread creation monitoring is unimplemented]) + enable_lazy_lock="0" fi if test "x$enable_lazy_lock" = "x1" ; then - if test "x$abi" != "xpecoff" ; then - AC_CHECK_HEADERS([dlfcn.h], , [AC_MSG_ERROR([dlfcn.h is missing])]) - AC_CHECK_FUNC([dlsym], [], - [AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"], - [AC_MSG_ERROR([libdl is missing])]) - ]) + if test "x$have_dlsym" = "x1" ; then + AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ]) + else + AC_MSG_ERROR([Missing dlsym support: lazy-lock cannot be enabled.]) fi - AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ]) -else - enable_lazy_lock="0" fi AC_SUBST([enable_lazy_lock]) -AC_ARG_ENABLE([tls], - [AS_HELP_STRING([--disable-tls], [Disable thread-local storage (__thread keyword)])], -if test "x$enable_tls" = "xno" ; then +dnl Automatically configure TLS. 
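secure_getenv(3) returns NULL in setuid/setgid processes, which is why it is preferred over getenv(3) for reading configuration such as MALLOC_CONF. A sketch of the fallback pattern these checks enable; JEMALLOC_HAVE_SECURE_GETENV is the macro defined above, while the function name here is illustrative:

    #define _GNU_SOURCE
    #include <stdlib.h>

    static const char *env_read(const char *name) {
    #ifdef JEMALLOC_HAVE_SECURE_GETENV
        /* Refuses to leak values into privilege-elevated processes. */
        return secure_getenv(name);
    #else
        return getenv(name);
    #endif
    }

    int main(void) {
        return env_read("MALLOC_CONF") == NULL;
    }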
+if test "x${force_tls}" = "x1" ; then + enable_tls="1" +elif test "x${force_tls}" = "x0" ; then enable_tls="0" else enable_tls="1" fi -, -enable_tls="" -) -if test "x${enable_tls}" = "x" ; then - if test "x${force_tls}" = "x1" ; then - AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues]) - enable_tls="1" - elif test "x${force_tls}" = "x0" ; then - AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues]) - enable_tls="0" - else - enable_tls="1" - fi -fi if test "x${enable_tls}" = "x1" ; then AC_MSG_CHECKING([for TLS]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM( @@ -1309,12 +1743,7 @@ else fi AC_SUBST([enable_tls]) if test "x${enable_tls}" = "x1" ; then - if test "x${force_tls}" = "x0" ; then - AC_MSG_WARN([TLS enabled despite being marked unusable on this platform]) - fi AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ]) -elif test "x${force_tls}" = "x1" ; then - AC_MSG_WARN([TLS disabled despite being marked critical on this platform]) fi dnl ============================================================================ @@ -1332,37 +1761,45 @@ JE_COMPILABLE([C11 atomics], [ uint64_t x = 1; volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; uint64_t r = atomic_fetch_add(a, x) + x; - return (r == 0); -], [je_cv_c11atomics]) -if test "x${je_cv_c11atomics}" = "xyes" ; then - AC_DEFINE([JEMALLOC_C11ATOMICS]) + return r == 0; +], [je_cv_c11_atomics]) +if test "x${je_cv_c11_atomics}" = "xyes" ; then + AC_DEFINE([JEMALLOC_C11_ATOMICS]) fi dnl ============================================================================ -dnl Check for atomic(9) operations as provided on FreeBSD. +dnl Check for GCC-style __atomic atomics. -JE_COMPILABLE([atomic(9)], [ -#include -#include -#include +JE_COMPILABLE([GCC __atomic atomics], [ ], [ - { - uint32_t x32 = 0; - volatile uint32_t *x32p = &x32; - atomic_fetchadd_32(x32p, 1); - } - { - unsigned long xlong = 0; - volatile unsigned long *xlongp = &xlong; - atomic_fetchadd_long(xlongp, 1); - } -], [je_cv_atomic9]) -if test "x${je_cv_atomic9}" = "xyes" ; then - AC_DEFINE([JEMALLOC_ATOMIC9]) + int x = 0; + int val = 1; + int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED); + int after_add = x; + return after_add == 1; +], [je_cv_gcc_atomic_atomics]) +if test "x${je_cv_gcc_atomic_atomics}" = "xyes" ; then + AC_DEFINE([JEMALLOC_GCC_ATOMIC_ATOMICS]) +fi + +dnl ============================================================================ +dnl Check for GCC-style __sync atomics. + +JE_COMPILABLE([GCC __sync atomics], [ +], [ + int x = 0; + int before_add = __sync_fetch_and_add(&x, 1); + int after_add = x; + return (before_add == 0) && (after_add == 1); +], [je_cv_gcc_sync_atomics]) +if test "x${je_cv_gcc_sync_atomics}" = "xyes" ; then + AC_DEFINE([JEMALLOC_GCC_SYNC_ATOMICS]) fi dnl ============================================================================ dnl Check for atomic(3) operations as provided on Darwin. +dnl We need this not for the atomic operations (which are provided above), but +dnl rather for the OSSpinLock type it exposes. JE_COMPILABLE([Darwin OSAtomic*()], [ #include @@ -1389,12 +1826,67 @@ dnl Check for madvise(2). JE_COMPILABLE([madvise(2)], [ #include ], [ - { - madvise((void *)0, 0, 0); - } + madvise((void *)0, 0, 0); ], [je_cv_madvise]) if test "x${je_cv_madvise}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_MADVISE], [ ]) + + dnl Check for madvise(..., MADV_FREE). 
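The C11, __atomic, and __sync probes above all exercise the same operation, an atomic fetch-add; configure records whichever flavors compile so the allocator can pick the best available. The three spellings side by side (the __atomic and __sync builtins are GCC/Clang extensions):

    #include <stdatomic.h>

    int main(void) {
        atomic_int a = 0;   /* C11: _Atomic object plus <stdatomic.h> */
        atomic_fetch_add(&a, 1);

        int b = 0;          /* __atomic: plain int, explicit memory order */
        __atomic_fetch_add(&b, 1, __ATOMIC_RELAXED);

        int c = 0;          /* __sync: legacy builtin, always a full barrier */
        __sync_fetch_and_add(&c, 1);

        return 0;
    }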
+ JE_COMPILABLE([madvise(..., MADV_FREE)], [ +#include +], [ + madvise((void *)0, 0, MADV_FREE); +], [je_cv_madv_free]) + if test "x${je_cv_madv_free}" = "xyes" ; then + AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) + elif test "x${je_cv_madvise}" = "xyes" ; then + case "${host_cpu}" in i686|x86_64) + case "${host}" in *-*-linux*) + AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ]) + AC_DEFINE([JEMALLOC_DEFINE_MADVISE_FREE], [ ]) + ;; + esac + ;; + esac + fi + + dnl Check for madvise(..., MADV_DONTNEED). + JE_COMPILABLE([madvise(..., MADV_DONTNEED)], [ +#include +], [ + madvise((void *)0, 0, MADV_DONTNEED); +], [je_cv_madv_dontneed]) + if test "x${je_cv_madv_dontneed}" = "xyes" ; then + AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ]) + fi + + dnl Check for madvise(..., MADV_DO[NT]DUMP). + JE_COMPILABLE([madvise(..., MADV_DO[[NT]]DUMP)], [ +#include +], [ + madvise((void *)0, 0, MADV_DONTDUMP); + madvise((void *)0, 0, MADV_DODUMP); +], [je_cv_madv_dontdump]) + if test "x${je_cv_madv_dontdump}" = "xyes" ; then + AC_DEFINE([JEMALLOC_MADVISE_DONTDUMP], [ ]) + fi + + dnl Check for madvise(..., MADV_[NO]HUGEPAGE). + JE_COMPILABLE([madvise(..., MADV_[[NO]]HUGEPAGE)], [ +#include +], [ + madvise((void *)0, 0, MADV_HUGEPAGE); + madvise((void *)0, 0, MADV_NOHUGEPAGE); +], [je_cv_thp]) +case "${host_cpu}" in + arm*) + ;; + *) + if test "x${je_cv_thp}" = "xyes" ; then + AC_DEFINE([JEMALLOC_HAVE_MADVISE_HUGE], [ ]) + fi + ;; +esac fi dnl ============================================================================ @@ -1454,6 +1946,25 @@ if test "x${je_cv_builtin_clz}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_BUILTIN_CLZ], [ ]) fi +dnl ============================================================================ +dnl Check for os_unfair_lock operations as provided on Darwin. + +JE_COMPILABLE([Darwin os_unfair_lock_*()], [ +#include +#include +], [ + #if MAC_OS_X_VERSION_MIN_REQUIRED < 101200 + #error "os_unfair_lock is not supported" + #else + os_unfair_lock lock = OS_UNFAIR_LOCK_INIT; + os_unfair_lock_lock(&lock); + os_unfair_lock_unlock(&lock); + #endif +], [je_cv_os_unfair_lock]) +if test "x${je_cv_os_unfair_lock}" = "xyes" ; then + AC_DEFINE([JEMALLOC_OS_UNFAIR_LOCK], [ ]) +fi + dnl ============================================================================ dnl Check for spinlock(3) operations as provided on Darwin. @@ -1493,37 +2004,38 @@ if test "x${enable_zone_allocator}" = "x1" ; then AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin]) fi AC_DEFINE([JEMALLOC_ZONE], [ ]) +fi - dnl The szone version jumped from 3 to 6 between the OS X 10.5.x and 10.6 - dnl releases. malloc_zone_t and malloc_introspection_t have new fields in - dnl 10.6, which is the only source-level indication of the change. - AC_MSG_CHECKING([malloc zone version]) - AC_DEFUN([JE_ZONE_PROGRAM], - [AC_LANG_PROGRAM( - [#include ], - [static int foo[[sizeof($1) $2 sizeof(void *) * $3 ? 
1 : -1]]] - )]) - - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,14)],[JEMALLOC_ZONE_VERSION=3],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,15)],[JEMALLOC_ZONE_VERSION=5],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,16)],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_introspection_t,==,9)],[JEMALLOC_ZONE_VERSION=6],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_introspection_t,==,13)],[JEMALLOC_ZONE_VERSION=7],[JEMALLOC_ZONE_VERSION=] - )])],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,17)],[JEMALLOC_ZONE_VERSION=8],[ - AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,>,17)],[JEMALLOC_ZONE_VERSION=9],[JEMALLOC_ZONE_VERSION=] - )])])])]) - if test "x${JEMALLOC_ZONE_VERSION}" = "x"; then - AC_MSG_RESULT([unsupported]) - AC_MSG_ERROR([Unsupported malloc zone version]) - fi - if test "${JEMALLOC_ZONE_VERSION}" = 9; then - JEMALLOC_ZONE_VERSION=8 - AC_MSG_RESULT([> 8]) - else - AC_MSG_RESULT([$JEMALLOC_ZONE_VERSION]) - fi - AC_DEFINE_UNQUOTED(JEMALLOC_ZONE_VERSION, [$JEMALLOC_ZONE_VERSION]) +dnl ============================================================================ +dnl Use initial-exec TLS by default. +AC_ARG_ENABLE([initial-exec-tls], + [AS_HELP_STRING([--disable-initial-exec-tls], + [Disable the initial-exec tls model])], +[if test "x$enable_initial_exec_tls" = "xno" ; then + enable_initial_exec_tls="0" +else + enable_initial_exec_tls="1" +fi +], +[enable_initial_exec_tls="1"] +) +AC_SUBST([enable_initial_exec_tls]) + +if test "x${je_cv_tls_model}" = "xyes" -a \ + "x${enable_initial_exec_tls}" = "x1" ; then + AC_DEFINE([JEMALLOC_TLS_MODEL], + [__attribute__((tls_model("initial-exec")))]) +else + AC_DEFINE([JEMALLOC_TLS_MODEL], [ ]) +fi + +dnl ============================================================================ +dnl Enable background threads if possible. 
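Initial-exec TLS pins the variable's offset in the static TLS block, avoiding a __tls_get_addr call on every access, but it can prevent dlopen()ing the library on some platforms, hence the new opt-out. Roughly what the define expands to when enabled (the variable name below is hypothetical):

    /* With --enable-initial-exec-tls (the default when the tls_model
     * attribute works), JEMALLOC_TLS_MODEL adds the attribute; with
     * --disable-initial-exec-tls it expands to nothing. */
    #define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))

    static __thread int thread_arena_index JEMALLOC_TLS_MODEL;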
+ +if test "x${have_pthread}" = "x1" -a "x${have_dlsym}" = "x1" \ + -a "x${je_cv_os_unfair_lock}" != "xyes" \ + -a "x${je_cv_osspin}" != "xyes" ; then + AC_DEFINE([JEMALLOC_BACKGROUND_THREAD]) fi dnl ============================================================================ @@ -1542,7 +2054,10 @@ extern void *(* __realloc_hook)(void *ptr, size_t size); if (__free_hook && ptr) __free_hook(ptr); ], [je_cv_glibc_malloc_hook]) if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then - AC_DEFINE([JEMALLOC_GLIBC_MALLOC_HOOK], [ ]) + if test "x${JEMALLOC_PREFIX}" = "x" ; then + AC_DEFINE([JEMALLOC_GLIBC_MALLOC_HOOK], [ ]) + wrap_syms="${wrap_syms} __free_hook __malloc_hook __realloc_hook" + fi fi JE_COMPILABLE([glibc memalign hook], [ @@ -1554,7 +2069,10 @@ extern void *(* __memalign_hook)(size_t alignment, size_t size); if (__memalign_hook) ptr = __memalign_hook(16, 7); ], [je_cv_glibc_memalign_hook]) if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then - AC_DEFINE([JEMALLOC_GLIBC_MEMALIGN_HOOK], [ ]) + if test "x${JEMALLOC_PREFIX}" = "x" ; then + AC_DEFINE([JEMALLOC_GLIBC_MEMALIGN_HOOK], [ ]) + wrap_syms="${wrap_syms} __memalign_hook" + fi fi JE_COMPILABLE([pthreads adaptive mutexes], [ @@ -1569,6 +2087,25 @@ if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then AC_DEFINE([JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP], [ ]) fi +JE_CFLAGS_SAVE() +JE_CFLAGS_ADD([-D_GNU_SOURCE]) +JE_CFLAGS_ADD([-Werror]) +JE_CFLAGS_ADD([-herror_on_warning]) +JE_COMPILABLE([strerror_r returns char with gnu source], [ +#include +#include +#include +#include +], [ + char *buffer = (char *) malloc(100); + char *error = strerror_r(EINVAL, buffer, 100); + printf("%s\n", error); +], [je_cv_strerror_r_returns_char_with_gnu_source]) +JE_CFLAGS_RESTORE() +if test "x${je_cv_strerror_r_returns_char_with_gnu_source}" = "xyes" ; then + AC_DEFINE([JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE], [ ]) +fi + dnl ============================================================================ dnl Check for typedefs, structures, and compiler characteristics. AC_HEADER_STDBOOL @@ -1576,20 +2113,6 @@ AC_HEADER_STDBOOL dnl ============================================================================ dnl Define commands that generate output files. 
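Under _GNU_SOURCE, glibc's strerror_r returns char * (possibly pointing at a static message rather than the caller's buffer), while the POSIX variant returns int; the probe above records which form is in effect so error reporting can handle both. A hedged portability sketch:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        char buf[128];
    #ifdef __GLIBC__
        /* GNU form: use the returned pointer, which may not be buf. */
        const char *msg = strerror_r(EINVAL, buf, sizeof buf);
    #else
        /* POSIX form: returns 0 on success and fills buf. */
        const char *msg = (strerror_r(EINVAL, buf, sizeof buf) == 0) ? buf
                                                                     : "unknown";
    #endif
        printf("%s\n", msg);
        return 0;
    }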
-AC_CONFIG_COMMANDS([include/jemalloc/internal/private_namespace.h], [ - mkdir -p "${objroot}include/jemalloc/internal" - "${srcdir}/include/jemalloc/internal/private_namespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_namespace.h" -], [ - srcdir="${srcdir}" - objroot="${objroot}" -]) -AC_CONFIG_COMMANDS([include/jemalloc/internal/private_unnamespace.h], [ - mkdir -p "${objroot}include/jemalloc/internal" - "${srcdir}/include/jemalloc/internal/private_unnamespace.sh" "${srcdir}/include/jemalloc/internal/private_symbols.txt" > "${objroot}include/jemalloc/internal/private_unnamespace.h" -], [ - srcdir="${srcdir}" - objroot="${objroot}" -]) AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [ f="${objroot}include/jemalloc/internal/public_symbols.txt" mkdir -p "${objroot}include/jemalloc/internal" @@ -1613,6 +2136,31 @@ AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [ public_syms="${public_syms}" JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" ]) +AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols.awk], [ + f="${objroot}include/jemalloc/internal/private_symbols.awk" + mkdir -p "${objroot}include/jemalloc/internal" + export_syms=`for sym in ${public_syms}; do echo "${JEMALLOC_PREFIX}${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` + "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols.awk" +], [ + srcdir="${srcdir}" + objroot="${objroot}" + public_syms="${public_syms}" + wrap_syms="${wrap_syms}" + SYM_PREFIX="${SYM_PREFIX}" + JEMALLOC_PREFIX="${JEMALLOC_PREFIX}" +]) +AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols_jet.awk], [ + f="${objroot}include/jemalloc/internal/private_symbols_jet.awk" + mkdir -p "${objroot}include/jemalloc/internal" + export_syms=`for sym in ${public_syms}; do echo "jet_${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;` + "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols_jet.awk" +], [ + srcdir="${srcdir}" + objroot="${objroot}" + public_syms="${public_syms}" + wrap_syms="${wrap_syms}" + SYM_PREFIX="${SYM_PREFIX}" +]) AC_CONFIG_COMMANDS([include/jemalloc/internal/public_namespace.h], [ mkdir -p "${objroot}include/jemalloc/internal" "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h" @@ -1629,15 +2177,13 @@ AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [ ]) AC_CONFIG_COMMANDS([include/jemalloc/internal/size_classes.h], [ mkdir -p "${objroot}include/jemalloc/internal" - "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" ${LG_TINY_MIN} "${LG_PAGE_SIZES}" ${LG_SIZE_CLASS_GROUP} > "${objroot}include/jemalloc/internal/size_classes.h" + "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" 3 "${LG_PAGE_SIZES}" 2 > "${objroot}include/jemalloc/internal/size_classes.h" ], [ SHELL="${SHELL}" srcdir="${srcdir}" objroot="${objroot}" LG_QUANTA="${LG_QUANTA}" - LG_TINY_MIN=${LG_TINY_MIN} LG_PAGE_SIZES="${LG_PAGE_SIZES}" - LG_SIZE_CLASS_GROUP=${LG_SIZE_CLASS_GROUP} ]) AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [ mkdir -p "${objroot}include/jemalloc" @@ -1697,12 +2243,18 @@ AC_MSG_RESULT([library revision : ${rev}]) AC_MSG_RESULT([]) 
AC_MSG_RESULT([CONFIG : ${CONFIG}]) AC_MSG_RESULT([CC : ${CC}]) -AC_MSG_RESULT([CFLAGS : ${CFLAGS}]) +AC_MSG_RESULT([CONFIGURE_CFLAGS : ${CONFIGURE_CFLAGS}]) +AC_MSG_RESULT([SPECIFIED_CFLAGS : ${SPECIFIED_CFLAGS}]) +AC_MSG_RESULT([EXTRA_CFLAGS : ${EXTRA_CFLAGS}]) AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}]) +AC_MSG_RESULT([CXX : ${CXX}]) +AC_MSG_RESULT([CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}]) +AC_MSG_RESULT([SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}]) +AC_MSG_RESULT([EXTRA_CXXFLAGS : ${EXTRA_CXXFLAGS}]) AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}]) AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}]) +AC_MSG_RESULT([DSO_LDFLAGS : ${DSO_LDFLAGS}]) AC_MSG_RESULT([LIBS : ${LIBS}]) -AC_MSG_RESULT([TESTLIBS : ${TESTLIBS}]) AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}]) AC_MSG_RESULT([]) AC_MSG_RESULT([XSLTPROC : ${XSLTPROC}]) @@ -1724,22 +2276,19 @@ AC_MSG_RESULT([JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}]) AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE]) AC_MSG_RESULT([ : ${JEMALLOC_PRIVATE_NAMESPACE}]) AC_MSG_RESULT([install_suffix : ${install_suffix}]) +AC_MSG_RESULT([malloc_conf : ${config_malloc_conf}]) AC_MSG_RESULT([autogen : ${enable_autogen}]) -AC_MSG_RESULT([cc-silence : ${enable_cc_silence}]) AC_MSG_RESULT([debug : ${enable_debug}]) -AC_MSG_RESULT([code-coverage : ${enable_code_coverage}]) AC_MSG_RESULT([stats : ${enable_stats}]) AC_MSG_RESULT([prof : ${enable_prof}]) AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}]) AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}]) AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}]) -AC_MSG_RESULT([tcache : ${enable_tcache}]) AC_MSG_RESULT([fill : ${enable_fill}]) AC_MSG_RESULT([utrace : ${enable_utrace}]) -AC_MSG_RESULT([valgrind : ${enable_valgrind}]) AC_MSG_RESULT([xmalloc : ${enable_xmalloc}]) -AC_MSG_RESULT([munmap : ${enable_munmap}]) +AC_MSG_RESULT([log : ${enable_log}]) AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}]) -AC_MSG_RESULT([tls : ${enable_tls}]) AC_MSG_RESULT([cache-oblivious : ${enable_cache_oblivious}]) +AC_MSG_RESULT([cxx : ${enable_cxx}]) AC_MSG_RESULT([===============================================================================]) diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/coverage.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/coverage.sh similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/coverage.sh rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/coverage.sh diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/doc/html.xsl.in b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/doc/html.xsl.in similarity index 81% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/doc/html.xsl.in rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/doc/html.xsl.in index a91d974..ec4fa65 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/doc/html.xsl.in +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/doc/html.xsl.in @@ -1,4 +1,5 @@ + diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/doc/jemalloc.xml.in b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/doc/jemalloc.xml.in similarity index 58% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/doc/jemalloc.xml.in rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/doc/jemalloc.xml.in index 8fc774b..1e12fd3 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/doc/jemalloc.xml.in +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/doc/jemalloc.xml.in @@ -52,7 +52,7 @@ LIBRARY This manual 
describes jemalloc @jemalloc_version@. More information can be found at the jemalloc website. + url="http://jemalloc.net/">jemalloc website. SYNOPSIS @@ -180,20 +180,20 @@ Standard API - The malloc function allocates + The malloc() function allocates size bytes of uninitialized memory. The allocated space is suitably aligned (after possible pointer coercion) for storage of any type of object. - The calloc function allocates + The calloc() function allocates space for number objects, each size bytes in length. The result is identical to - calling malloc with an argument of + calling malloc() with an argument of number * size, with the exception that the allocated memory is explicitly initialized to zero bytes. - The posix_memalign function + The posix_memalign() function allocates size bytes of memory such that the allocation's base address is a multiple of alignment, and returns the allocation in the value @@ -201,7 +201,7 @@ alignment must be a power of 2 at least as large as sizeof(void *). - The aligned_alloc function + The aligned_alloc() function allocates size bytes of memory such that the allocation's base address is a multiple of alignment. The requested @@ -209,7 +209,7 @@ undefined if size is not an integral multiple of alignment. - The realloc function changes the + The realloc() function changes the size of the previously allocated memory referenced by ptr to size bytes. The contents of the memory are unchanged up to the lesser of the new and old @@ -217,26 +217,26 @@ portion of the memory are undefined. Upon success, the memory referenced by ptr is freed and a pointer to the newly allocated memory is returned. Note that - realloc may move the memory allocation, + realloc() may move the memory allocation, resulting in a different return value than ptr. If ptr is NULL, the - realloc function behaves identically to - malloc for the specified size. + realloc() function behaves identically to + malloc() for the specified size. - The free function causes the + The free() function causes the allocated memory referenced by ptr to be made available for future allocations. If ptr is NULL, no action occurs. Non-standard API - The mallocx, - rallocx, - xallocx, - sallocx, - dallocx, - sdallocx, and - nallocx functions all have a + The mallocx(), + rallocx(), + xallocx(), + sallocx(), + dallocx(), + sdallocx(), and + nallocx() functions all have a flags argument that can be used to specify options. The functions only check the options that are contextually relevant. Use bitwise or (|) operations to @@ -307,21 +307,19 @@ - The mallocx function allocates at + The mallocx() function allocates at least size bytes of memory, and returns a pointer to the base address of the allocation. Behavior is undefined if - size is 0, or if request size - overflows due to size class and/or alignment constraints. + size is 0. - The rallocx function resizes the + The rallocx() function resizes the allocation at ptr to be at least size bytes, and returns a pointer to the base address of the resulting allocation, which may or may not have moved from its original location. Behavior is undefined if - size is 0, or if request size - overflows due to size class and/or alignment constraints. + size is 0. - The xallocx function resizes the + The xallocx() function resizes the allocation at ptr in place to be at least size bytes, and returns the real size of the allocation. If extra is non-zero, an attempt is @@ -334,32 +332,32 @@ language="C">(size + extra > SIZE_T_MAX). 
- The sallocx function returns the + The sallocx() function returns the real size of the allocation at ptr. - The dallocx function causes the + The dallocx() function causes the memory referenced by ptr to be made available for future allocations. - The sdallocx function is an - extension of dallocx with a + The sdallocx() function is an + extension of dallocx() with a size parameter to allow the caller to pass in the allocation size as an optimization. The minimum valid input size is the original requested size of the allocation, and the maximum valid input size is the corresponding value returned by - nallocx or - sallocx. + nallocx() or + sallocx(). - The nallocx function allocates no + The nallocx() function allocates no memory, but it performs the same size computation as the - mallocx function, and returns the real + mallocx() function, and returns the real size of the allocation that would result from the equivalent - mallocx function call. Behavior is - undefined if size is 0, or if - request size overflows due to size class and/or alignment - constraints. + mallocx() function call, or + 0 if the inputs exceed the maximum supported size + class and/or alignment. Behavior is undefined if + size is 0. - The mallctl function provides a + The mallctl() function provides a general interface for introspecting the memory allocator, as well as setting modifiable parameters and triggering actions. The period-separated name argument specifies a @@ -374,12 +372,12 @@ newlen; otherwise pass NULL and 0. - The mallctlnametomib function + The mallctlnametomib() function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name - to a “Management Information Base” (MIB) that can be passed - repeatedly to mallctlbymib. Upon - successful return from mallctlnametomib, + to a Management Information Base (MIB) that can be passed + repeatedly to mallctlbymib(). Upon + successful return from mallctlnametomib(), mibp contains an array of *miblenp integers, where *miblenp is the lesser of the number of components @@ -408,43 +406,47 @@ for (i = 0; i < nbins; i++) { mib[2] = i; len = sizeof(bin_size); - mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0); + mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0); /* Do something with bin_size... */ }]]> - The malloc_stats_print function - writes human-readable summary statistics via the - write_cb callback function pointer and - cbopaque data passed to - write_cb, or - malloc_message if - write_cb is NULL. This - function can be called repeatedly. General information that never - changes during execution can be omitted by specifying "g" as a character + + + The malloc_stats_print() function writes + summary statistics via the write_cb callback + function pointer and cbopaque data passed to + write_cb, or malloc_message() + if write_cb is NULL. The + statistics are presented in human-readable form unless J is + specified as a character within the opts string, in + which case the statistics are presented in JSON format. This function can be + called repeatedly. General information that never changes during + execution can be omitted by specifying g as a character within the opts string. Note that - malloc_message uses the - mallctl* functions internally, so - inconsistent statistics can be reported if multiple threads use these - functions simultaneously. 
If is - specified during configuration, “m” and “a” can - be specified to omit merged arena and per arena statistics, respectively; - “b”, “l”, and “h” can be specified to - omit per size class statistics for bins, large objects, and huge objects, - respectively. Unrecognized characters are silently ignored. Note that + malloc_message() uses the + mallctl*() functions internally, so inconsistent + statistics can be reported if multiple threads use these functions + simultaneously. If is specified during + configuration, m, d, and a + can be specified to omit merged arena, destroyed merged arena, and per + arena statistics, respectively; b and l can + be specified to omit per size class statistics for bins and large objects, + respectively; x can be specified to omit all mutex + statistics. Unrecognized characters are silently ignored. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track - thread cache operations. - + thread cache operations. - The malloc_usable_size function + The malloc_usable_size() function returns the usable size of the allocation pointed to by ptr. The return value may be larger than the size that was requested during allocation. The - malloc_usable_size function is not a - mechanism for in-place realloc; rather + malloc_usable_size() function is not a + mechanism for in-place realloc(); rather it is provided solely as a tool for introspection purposes. Any discrepancy between the requested allocation size and the size reported - by malloc_usable_size should not be + by malloc_usable_size() should not be depended on, since such behavior is entirely implementation-dependent. @@ -455,19 +457,20 @@ for (i = 0; i < nbins; i++) { routines, the allocator initializes its internals based in part on various options that can be specified at compile- or run-time. - The string pointed to by the global variable - malloc_conf, the “name” of the file - referenced by the symbolic link named /etc/malloc.conf, and the value of the + The string specified via , the + string pointed to by the global variable malloc_conf, the + name of the file referenced by the symbolic link named + /etc/malloc.conf, and the value of the environment variable MALLOC_CONF, will be interpreted, in that order, from left to right as options. Note that malloc_conf may be read before - main is entered, so the declaration of + main() is entered, so the declaration of malloc_conf should specify an initializer that contains - the final value to be read by jemalloc. malloc_conf is - a compile-time setting, whereas /etc/malloc.conf and MALLOC_CONF - can be safely set any time prior to program invocation. + the final value to be read by jemalloc. + and malloc_conf are compile-time mechanisms, whereas + /etc/malloc.conf and + MALLOC_CONF can be safely set any time prior to program + invocation. An options string is a comma-separated list of option:value pairs. There is one key corresponding to each - In addition to multiple arenas, unless - is specified during configuration, this - allocator supports thread-specific caching for small and large objects, in - order to make it possible to completely avoid synchronization for most - allocation requests. Such caching allows very fast allocation in the - common case, but it increases memory usage and fragmentation, since a - bounded number of objects can remain allocated in each thread cache. 
- - Memory is conceptually broken into equal-sized chunks, where the - chunk size is a power of two that is greater than the page size. Chunks - are always aligned to multiples of the chunk size. This alignment makes it - possible to find metadata for user objects very quickly. - - User objects are broken into three categories according to size: - small, large, and huge. Small and large objects are managed entirely by - arenas; huge objects are additionally aggregated in a single data structure - that is shared by all threads. Huge objects are typically used by - applications infrequently enough that this single data structure is not a - scalability issue. - - Each chunk that is managed by an arena tracks its contents as runs of - contiguous pages (unused, backing a set of small objects, or backing one - large object). The combination of chunk alignment and chunk page maps - makes it possible to determine all metadata regarding small and large - allocations in constant time. - - Small objects are managed in groups by page runs. Each run maintains + In addition to multiple arenas, this allocator supports + thread-specific caching, in order to make it possible to completely avoid + synchronization for most allocation requests. Such caching allows very fast + allocation in the common case, but it increases memory usage and + fragmentation, since a bounded number of objects can remain allocated in + each thread cache. + + Memory is conceptually broken into extents. Extents are always + aligned to multiples of the page size. This alignment makes it possible to + find metadata for user objects quickly. User objects are broken into two + categories according to size: small and large. Contiguous small objects + comprise a slab, which resides within a single extent, whereas large objects + each have their own extents backing them. + + Small objects are managed in groups by slabs. Each slab maintains a bitmap to track which regions are in use. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least opt.lg_chunk option), and - huge size classes extend from the chunk size up to one size class less than - the full address space size. + are smaller than four times the page size, and large size classes extend + from four times the page size up to the largest size class that does not + exceed PTRDIFF_MAX. Allocations are packed tightly together, which can be an issue for multi-threaded applications. If you need to assure that allocations do not @@ -555,30 +544,28 @@ for (i = 0; i < nbins; i++) { nearest multiple of the cacheline size, or specify cacheline alignment when allocating. - The realloc, - rallocx, and - xallocx functions may resize allocations + The realloc(), + rallocx(), and + xallocx() functions may resize allocations without moving them under limited circumstances. Unlike the - *allocx API, the standard API does not + *allocx() API, the standard API does not officially round up the usable size of an allocation to the nearest size class, so technically it is necessary to call - realloc to grow e.g. a 9-byte allocation to + realloc() to grow e.g. a 9-byte allocation to 16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage trivially succeeds in place as long as the pre-size and post-size both round up to the same size class. 
No other API guarantees are made regarding in-place resizing, but the current implementation also tries to resize large - and huge allocations in place, as long as the pre-size and post-size are - both large or both huge. In such cases shrinkage always succeeds for large - size classes, but for huge size classes the chunk allocator must support - splitting (see arena.<i>.chunk_hooks). - Growth only succeeds if the trailing memory is currently available, and - additionally for huge size classes the chunk allocator must support - merging. - - Assuming 2 MiB chunks, 4 KiB pages, and a 16-byte quantum on a - 64-bit system, the size classes in each category are as shown in . + allocations in place, as long as the pre-size and post-size are both large. + For shrinkage to succeed, the extent allocator must support splitting (see + arena.<i>.extent_hooks). + Growth only succeeds if the trailing memory is currently available, and the + extent allocator supports merging. + + Assuming 4 KiB pages and a 16-byte quantum on a 64-bit system, the + size classes in each category are as shown in . Size classes @@ -632,7 +619,7 @@ for (i = 0; i < nbins; i++) { [10 KiB, 12 KiB, 14 KiB] - Large + Large 2 KiB [16 KiB] @@ -662,12 +649,7 @@ for (i = 0; i < nbins; i++) { 256 KiB - [1280 KiB, 1536 KiB, 1792 KiB] - - - Huge - 256 KiB - [2 MiB] + [1280 KiB, 1536 KiB, 1792 KiB, 2 MiB] 512 KiB @@ -693,6 +675,14 @@ for (i = 0; i < nbins; i++) { ... ... + + 512 PiB + [2560 PiB, 3 EiB, 3584 PiB, 4 EiB] + + + 1 EiB + [5 EiB, 6 EiB, 7 EiB] +
@@ -700,19 +690,32 @@ for (i = 0; i < nbins; i++) { MALLCTL NAMESPACE The following names are defined in the namespace accessible via the - mallctl* functions. Value types are - specified in parentheses, their readable/writable statuses are encoded as + mallctl*() functions. Value types are specified in + parentheses, their readable/writable statuses are encoded as rw, r-, -w, or --, and required build configuration flags follow, if any. A name element encoded as <i> or <j> indicates an integer component, where the integer varies from 0 to some upper value that must be determined via - introspection. In the case of stats.arenas.<i>.*, - <i> equal to arenas.narenas can be - used to access the summation of statistics from all arenas. Take special - note of the epoch mallctl, - which controls refreshing of cached dynamic statistics. + introspection. In the case of stats.arenas.<i>.* + and arena.<i>.{initialized,purge,decay,dss}, + <i> equal to + MALLCTL_ARENAS_ALL can be used to operate on all arenas + or access the summation of statistics from all arenas; similarly + <i> equal to + MALLCTL_ARENAS_DESTROYED can be used to access the + summation of statistics from all destroyed arenas. These constants can be + utilized either via mallctlnametomib() followed by + mallctlbymib(), or via code such as the following: + + Take special note of the epoch mallctl, which controls + refreshing of cached dynamic statistics.
@@ -731,11 +734,45 @@ for (i = 0; i < nbins; i++) { rw If a value is passed in, refresh the data from which - the mallctl* functions report values, + the mallctl*() functions report values, and increment the epoch. Return the current epoch. This is useful for detecting whether another thread caused a refresh. + + + background_thread + (bool) + rw + + Enable/disable internal background worker threads. When + set to true, background threads are created on demand (the number of + background threads will be no more than the number of CPUs or active + arenas). Threads run periodically, and handle purging asynchronously. When switching + off, background threads are terminated synchronously. Note that after + fork2 + function, the state in the child process will be disabled regardless + the state in parent process. See stats.background_thread + for related stats. opt.background_thread + can be used to set the default option. This option is only available on + selected pthread-based platforms. + + + + + max_background_threads + (size_t) + rw + + Maximum number of background worker threads that will + be created. This value is capped at opt.max_background_threads at + startup. + + config.cache_oblivious @@ -776,14 +813,15 @@ for (i = 0; i < nbins; i++) { during build configuration. - + - config.munmap - (bool) + config.malloc_conf + (const char *) r- - was specified during - build configuration. + Embedded configure-time-specified run-time options + string, empty unless was specified + during build configuration. @@ -826,68 +864,94 @@ for (i = 0; i < nbins; i++) { build configuration. - + + - config.tcache + config.utrace (bool) r- - was not specified - during build configuration. + was specified during + build configuration. - + - config.tls + config.xmalloc (bool) r- - was not specified during + was specified during build configuration. - + - config.utrace + opt.abort (bool) r- - was specified during - build configuration. + Abort-on-warning enabled/disabled. If true, most + warnings are fatal. Note that runtime option warnings are not included + (see opt.abort_conf for + that). The process will call + abort + 3 in these cases. This option is + disabled by default unless is + specified during configuration, in which case it is enabled by default. + - + - config.valgrind + opt.abort_conf (bool) r- - was specified during - build configuration. + Abort-on-invalid-configuration enabled/disabled. If + true, invalid runtime options are fatal. The process will call + abort + 3 in these cases. This option is + disabled by default unless is + specified during configuration, in which case it is enabled by default. + - + - config.xmalloc - (bool) + opt.metadata_thp + (const char *) r- - was specified during - build configuration. + Controls whether to allow jemalloc to use transparent + huge page (THP) for internal metadata (see stats.metadata). always + allows such usage. auto uses no THP initially, but may + begin to do so when metadata usage reaches certain level. The default + is disabled. - + - opt.abort + opt.retain (bool) r- - Abort-on-warning enabled/disabled. If true, most - warnings are fatal. The process will call - abort - 3 in these cases. This option is - disabled by default unless is - specified during configuration, in which case it is enabled by default. + If true, retain unused virtual memory for later reuse + rather than discarding it by calling + munmap + 2 or equivalent (see stats.retained for related details). 
+ This option is disabled by default unless discarding virtual memory is + known to trigger + platform-specific performance problems, e.g. for [64-bit] Linux, which + has a quirk in its virtual memory allocation algorithm that causes + semi-permanent VM map holes under normal jemalloc operation. Although + munmap + 2 causes issues on 32-bit Linux as + well, retaining virtual memory for 32-bit Linux is disabled by default + due to the practical possibility of address space exhaustion. @@ -904,61 +968,136 @@ for (i = 0; i < nbins; i++) { settings are supported if sbrk 2 is supported by the operating - system: “disabled”, “primary”, and - “secondary”; otherwise only “disabled” is - supported. The default is “secondary” if + system: disabled, primary, and + secondary; otherwise only disabled is + supported. The default is secondary if sbrk 2 is supported by the operating - system; “disabled” otherwise. + system; disabled otherwise. - + - opt.lg_chunk - (size_t) + opt.narenas + (unsigned) + r- + + Maximum number of arenas to use for automatic + multiplexing of threads and arenas. The default is four times the + number of CPUs, or one if there is a single CPU. + + + + + opt.percpu_arena + (const char *) r- - Virtual memory chunk size (log base 2). If a chunk - size outside the supported size range is specified, the size is - silently clipped to the minimum/maximum supported size. The default - chunk size is 2 MiB (2^21). + Per CPU arena mode. Use the percpu + setting to enable this feature, which uses number of CPUs to determine + number of arenas, and bind threads to arenas dynamically based on the + CPU the thread runs on currently. phycpu setting uses + one arena per physical CPU, which means the two hyper threads on the + same CPU share one arena. Note that no runtime checking regarding the + availability of hyper threading is done at the moment. When set to + disabled, narenas and thread to arena association will + not be impacted by this option. The default is disabled. - + - opt.narenas - (size_t) + opt.background_thread + (const bool) r- - Maximum number of arenas to use for automatic - multiplexing of threads and arenas. The default is four times the - number of CPUs, or one if there is a single CPU. + Internal background worker threads enabled/disabled. + Because of potential circular dependencies, enabling background thread + using this option may cause crash or deadlock during initialization. For + a reliable way to use this feature, see background_thread for dynamic control + options and details. This option is disabled by + default. + + + + + opt.max_background_threads + (const size_t) + r- + + Maximum number of background threads that will be created + if background_thread is set. + Defaults to number of cpus. - + - opt.lg_dirty_mult + opt.dirty_decay_ms (ssize_t) r- - Per-arena minimum ratio (log base 2) of active to dirty - pages. Some dirty unused pages may be allowed to accumulate, within - the limit set by the ratio (or one chunk worth of dirty pages, - whichever is greater), before informing the kernel about some of those - pages via madvise - 2 or a similar system call. This - provides the kernel with sufficient information to recycle dirty pages - if physical memory becomes scarce and the pages remain unused. The - default minimum ratio is 8:1 (2^3:1); an option value of -1 will - disable dirty page purging. 
See arenas.lg_dirty_mult + Approximate time in milliseconds from the creation of a + set of unused dirty pages until an equivalent set of unused dirty pages + is purged (i.e. converted to muzzy via e.g. + madvise(...MADV_FREE) + if supported by the operating system, or converted to clean otherwise) + and/or reused. Dirty pages are defined as previously having been + potentially written to by the application, and therefore consuming + physical memory, yet having no current use. The pages are incrementally + purged according to a sigmoidal decay curve that starts and ends with + zero purge rate. A decay time of 0 causes all unused dirty pages to be + purged immediately upon creation. A decay time of -1 disables purging. + The default decay time is 10 seconds. See arenas.dirty_decay_ms and arena.<i>.lg_dirty_mult + linkend="arena.i.dirty_decay_ms">arena.<i>.dirty_decay_ms + for related dynamic control options. See opt.muzzy_decay_ms + for a description of muzzy pages. + + + + + opt.muzzy_decay_ms + (ssize_t) + r- + + Approximate time in milliseconds from the creation of a + set of unused muzzy pages until an equivalent set of unused muzzy pages + is purged (i.e. converted to clean) and/or reused. Muzzy pages are + defined as previously having been unused dirty pages that were + subsequently purged in a manner that left them subject to the + reclamation whims of the operating system (e.g. + madvise(...MADV_FREE)), + and therefore in an indeterminate state. The pages are incrementally + purged according to a sigmoidal decay curve that starts and ends with + zero purge rate. A decay time of 0 causes all unused muzzy pages to be + purged immediately upon creation. A decay time of -1 disables purging. + The default decay time is 10 seconds. See arenas.muzzy_decay_ms + and arena.<i>.muzzy_decay_ms for related dynamic control options. + + + opt.lg_extent_max_active_fit + (size_t) + r- + + When reusing dirty extents, this determines the (log + base 2 of the) maximum ratio between the size of the active extent + selected (to split off from) and the size of the requested allocation. + This prevents the splitting of large active extents for smaller + allocations, which can reduce fragmentation over the long run + (especially for non-active extents). Lower value may reduce + fragmentation, at the cost of extra active extents. The default value + is 6, which gives a maximum ratio of 64 (2^6). + + opt.stats_print @@ -966,82 +1105,61 @@ for (i = 0; i < nbins; i++) { r- Enable/disable statistics printing at exit. If - enabled, the malloc_stats_print + enabled, the malloc_stats_print() function is called at program exit via an atexit - 3 function. If + 3 function. opt.stats_print_opts + can be combined to specify output options. If is specified during configuration, this has the potential to cause deadlock for a multi-threaded process that exits while one or more threads are executing in the memory allocation - functions. Furthermore, atexit may + functions. Furthermore, atexit() may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls - atexit, so this option is not - univerally usable (though the application can register its own - atexit function with equivalent + atexit(), so this option is not + universally usable (though the application can register its own + atexit() function with equivalent functionality). Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development. 
This option is disabled by default. - + - opt.junk + opt.stats_print_opts (const char *) r- - [] - Junk filling. If set to "alloc", each byte of - uninitialized allocated memory will be initialized to - 0xa5. If set to "free", all deallocated memory will - be initialized to 0x5a. If set to "true", both - allocated and deallocated memory will be initialized, and if set to - "false", junk filling be disabled entirely. This is intended for - debugging and will impact performance negatively. This option is - "false" by default unless is specified - during configuration, in which case it is "true" by default unless - running inside Valgrind. + Options (the opts string) to pass + to the malloc_stats_print() at exit (enabled + through opt.stats_print). See + available options in malloc_stats_print(). + Has no effect unless opt.stats_print is + enabled. The default is . - - - opt.quarantine - (size_t) - r- - [] - - Per thread quarantine size in bytes. If non-zero, each - thread maintains a FIFO object quarantine that stores up to the - specified number of bytes of memory. The quarantined memory is not - freed until it is released from quarantine, though it is immediately - junk-filled if the opt.junk option is - enabled. This feature is of particular use in combination with Valgrind, which can detect attempts - to access quarantined objects. This is intended for debugging and will - impact performance negatively. The default quarantine size is 0 unless - running inside Valgrind, in which case the default is 16 - MiB. - - - + - opt.redzone - (bool) + opt.junk + (const char *) r- [] - Redzones enabled/disabled. If enabled, small - allocations have redzones before and after them. Furthermore, if the - opt.junk option is - enabled, the redzones are checked for corruption during deallocation. - However, the primary intended purpose of this feature is to be used in - combination with Valgrind, - which needs redzones in order to do effective buffer overflow/underflow - detection. This option is intended for debugging and will impact - performance negatively. This option is disabled by - default unless running inside Valgrind. + Junk filling. If set to alloc, each byte + of uninitialized allocated memory will be initialized to + 0xa5. If set to free, all deallocated + memory will be initialized to 0x5a. If set to + true, both allocated and deallocated memory will be + initialized, and if set to false, junk filling be + disabled entirely. This is intended for debugging and will impact + performance negatively. This option is false by default + unless is specified during + configuration, in which case it is true by + default. @@ -1054,8 +1172,8 @@ for (i = 0; i < nbins; i++) { Zero filling enabled/disabled. If enabled, each byte of uninitialized allocated memory will be initialized to 0. Note that this initialization only happens once for each byte, so - realloc and - rallocx calls do not zero memory that + realloc() and + rallocx() calls do not zero memory that was previously allocated. This is intended for debugging and will impact performance negatively. This option is disabled by default. @@ -1099,7 +1217,6 @@ malloc_conf = "xmalloc:true";]]> opt.tcache (bool) r- - [] Thread-specific caching (tcache) enabled/disabled. When there are multiple threads, each thread uses a tcache for objects up to @@ -1108,9 +1225,7 @@ malloc_conf = "xmalloc:true";]]> increased memory use. See the opt.lg_tcache_max option for related tuning information. 
This option is enabled by - default unless running inside Valgrind, in which case it is - forcefully disabled. + default. @@ -1118,7 +1233,6 @@ malloc_conf = "xmalloc:true";]]> opt.lg_tcache_max (size_t) r- - [] Maximum size class (log base 2) to cache in the thread-specific cache (tcache). At a minimum, all small size classes @@ -1126,6 +1240,28 @@ malloc_conf = "xmalloc:true";]]> default maximum is 32 KiB (2^15). + + + opt.thp + (const char *) + r- + + Transparent hugepage (THP) mode. Settings "always", + "never" and "default" are available if THP is supported by the operating + system. The "always" setting enables transparent hugepage for all user + memory mappings with + MADV_HUGEPAGE; "never" + ensures no transparent hugepage with + MADV_NOHUGEPAGE; the default + setting "default" makes no changes. Note that: this option does not + affect THP for jemalloc internal metadata (see opt.metadata_thp); + in addition, for arenas with customized extent_hooks, + this option is bypassed as it is implemented as part of the default + extent hooks. + + opt.prof @@ -1150,7 +1286,8 @@ malloc_conf = "xmalloc:true";]]> the jeprof command, which is based on the pprof that is developed as part of the gperftools - package. + package. See HEAP PROFILE + FORMAT for heap profile format documentation. @@ -1277,11 +1414,11 @@ malloc_conf = "xmalloc:true";]]> <prefix>.<pid>.<seq>.f.heap, where <prefix> is controlled by the opt.prof_prefix - option. Note that atexit may allocate + option. Note that atexit() may allocate memory during application initialization and then deadlock internally - when jemalloc in turn calls atexit, so - this option is not univerally usable (though the application can - register its own atexit function with + when jemalloc in turn calls atexit(), so + this option is not universally usable (though the application can + register its own atexit() function with equivalent functionality). This option is disabled by default. @@ -1311,7 +1448,7 @@ malloc_conf = "xmalloc:true";]]> Get or set the arena associated with the calling thread. If the specified arena was not initialized beforehand (see the arenas.initialized + linkend="arena.i.initialized">arena.i.initialized mallctl), it will be automatically initialized as a side effect of calling this interface. @@ -1340,7 +1477,7 @@ malloc_conf = "xmalloc:true";]]> thread.allocated mallctl. This is useful for avoiding the overhead of repeated - mallctl* calls. + mallctl*() calls. @@ -1367,7 +1504,7 @@ malloc_conf = "xmalloc:true";]]> thread.deallocated mallctl. This is useful for avoiding the overhead of repeated - mallctl* calls. + mallctl*() calls. @@ -1375,7 +1512,6 @@ malloc_conf = "xmalloc:true";]]> thread.tcache.enabled (bool) rw - [] Enable/disable calling thread's tcache. The tcache is implicitly flushed as a side effect of becoming @@ -1389,7 +1525,6 @@ malloc_conf = "xmalloc:true";]]> thread.tcache.flush (void) -- - [] Flush calling thread's thread-specific cache (tcache). This interface releases all cached objects and internal data structures @@ -1418,8 +1553,8 @@ malloc_conf = "xmalloc:true";]]> can cause asynchronous string deallocation. Furthermore, each invocation of this interface can only read or write; simultaneous read/write is not supported due to string lifetime limitations. 
The - name string must nil-terminated and comprised only of characters in the - sets recognized + name string must be nil-terminated and comprised only of characters in + the sets recognized by isgraph 3 and isblank @@ -1445,7 +1580,6 @@ malloc_conf = "xmalloc:true";]]> tcache.create (unsigned) r- - [] Create an explicit thread-specific cache (tcache) and return an identifier that can be passed to the tcache.flush (unsigned) -w - [] Flush the specified thread-specific cache (tcache). The same considerations apply to this interface as to thread.tcache.flush, - except that the tcache will never be automatically be discarded. + except that the tcache will never be automatically discarded. @@ -1476,25 +1609,86 @@ malloc_conf = "xmalloc:true";]]> tcache.destroy (unsigned) -w - [] Flush the specified thread-specific cache (tcache) and make the identifier available for use during a future tcache creation. + + + arena.<i>.initialized + (bool) + r- + + Get whether the specified arena's statistics are + initialized (i.e. the arena was initialized prior to the current epoch). + This interface can also be nominally used to query whether the merged + statistics corresponding to MALLCTL_ARENAS_ALL are + initialized (always true). + + + + + arena.<i>.decay + (void) + -- + + Trigger decay-based purging of unused dirty/muzzy pages + for arena <i>, or for all arenas if <i> equals + MALLCTL_ARENAS_ALL. The proportion of unused + dirty/muzzy pages to be purged depends on the current time; see opt.dirty_decay_ms + and opt.muzy_decay_ms + for details. + + arena.<i>.purge (void) -- - Purge unused dirty pages for arena <i>, or for - all arenas if <i> equals arenas.narenas. + Purge all unused dirty pages for arena <i>, or for + all arenas if <i> equals MALLCTL_ARENAS_ALL. + + + arena.<i>.reset + (void) + -- + + Discard all of the arena's extant allocations. This + interface can only be used with arenas explicitly created via arenas.create. None + of the arena's discarded/cached allocations may accessed afterward. As + part of this requirement, all thread caches which were used to + allocate/deallocate in conjunction with the arena must be flushed + beforehand. + + + + + arena.<i>.destroy + (void) + -- + + Destroy the arena. Discard all of the arena's extant + allocations using the same mechanism as for arena.<i>.reset + (with all the same constraints and side effects), merge the arena stats + into those accessible at arena index + MALLCTL_ARENAS_DESTROYED, and then completely + discard all metadata associated with the arena. Future calls to arenas.create may + recycle the arena index. Destruction will fail if any threads are + currently associated with the arena as a result of calls to thread.arena. + + arena.<i>.dss @@ -1503,71 +1697,109 @@ malloc_conf = "xmalloc:true";]]> Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals - arenas.narenas. See - opt.dss for supported + MALLCTL_ARENAS_ALL. See opt.dss for supported settings. - + - arena.<i>.lg_dirty_mult + arena.<i>.dirty_decay_ms (ssize_t) rw - Current per-arena minimum ratio (log base 2) of active - to dirty pages for arena <i>. Each time this interface is set and - the ratio is increased, pages are synchronously purged as necessary to - impose the new ratio. See opt.lg_dirty_mult + Current per-arena approximate time in milliseconds from + the creation of a set of unused dirty pages until an equivalent set of + unused dirty pages is purged and/or reused. 
Each time this interface is + set, all currently unused dirty pages are considered to have fully + decayed, which causes immediate purging of all unused dirty pages unless + the decay time is set to -1 (i.e. purging disabled). See opt.dirty_decay_ms for additional information. - + - arena.<i>.chunk_hooks - (chunk_hooks_t) + arena.<i>.muzzy_decay_ms + (ssize_t) rw - Get or set the chunk management hook functions for arena - <i>. The functions must be capable of operating on all extant - chunks associated with arena <i>, usually by passing unknown - chunks to the replaced functions. In practice, it is feasible to - control allocation for arenas created via arenas.extend such - that all chunks originate from an application-supplied chunk allocator - (by setting custom chunk hook functions just after arena creation), but - the automatically created arenas may have already created chunks prior - to the application having an opportunity to take over chunk + Current per-arena approximate time in milliseconds from + the creation of a set of unused muzzy pages until an equivalent set of + unused muzzy pages is purged and/or reused. Each time this interface is + set, all currently unused muzzy pages are considered to have fully + decayed, which causes immediate purging of all unused muzzy pages unless + the decay time is set to -1 (i.e. purging disabled). See opt.muzzy_decay_ms + for additional information. + + + + + arena.<i>.retain_grow_limit + (size_t) + rw + + Maximum size to grow retained region (only relevant when + opt.retain is + enabled). This controls the maximum increment to expand virtual memory, + or allocation through arena.<i>extent_hooks. + In particular, if customized extent hooks reserve physical memory + (e.g. 1G huge pages), this is useful to control the allocation hook's + input size. The default is no limit. + + + + + arena.<i>.extent_hooks + (extent_hooks_t *) + rw + + Get or set the extent management hook functions for + arena <i>. The functions must be capable of operating on all + extant extents associated with arena <i>, usually by passing + unknown extents to the replaced functions. In practice, it is feasible + to control allocation for arenas explicitly created via arenas.create such + that all extents originate from an application-supplied extent allocator + (by specifying the custom extent hook functions during arena creation), + but the automatically created arenas will have already created extents + prior to the application having an opportunity to take over extent allocation. - The chunk_hooks_t structure comprises function +typedef extent_hooks_s extent_hooks_t; +struct extent_hooks_s { + extent_alloc_t *alloc; + extent_dalloc_t *dalloc; + extent_destroy_t *destroy; + extent_commit_t *commit; + extent_decommit_t *decommit; + extent_purge_t *purge_lazy; + extent_purge_t *purge_forced; + extent_split_t *split; + extent_merge_t *merge; +};]]> + The extent_hooks_t structure comprises function pointers which are described individually below. jemalloc uses these - functions to manage chunk lifetime, which starts off with allocation of + functions to manage extent lifetime, which starts off with allocation of mapped committed memory, in the simplest case followed by deallocation. - However, there are performance and platform reasons to retain chunks for - later reuse. 
Cleanup attempts cascade from deallocation to decommit to - purging, which gives the chunk management functions opportunities to - reject the most permanent cleanup operations in favor of less permanent - (and often less costly) operations. The chunk splitting and merging - operations can also be opted out of, but this is mainly intended to - support platforms on which virtual memory mappings provided by the - operating system kernel do not automatically coalesce and split, e.g. - Windows. + However, there are performance and platform reasons to retain extents + for later reuse. Cleanup attempts cascade from deallocation to decommit + to forced purging to lazy purging, which gives the extent management + functions opportunities to reject the most permanent cleanup operations + in favor of less permanent (and often less costly) operations. All + operations except allocation can be universally opted out of by setting + the hook pointers to NULL, or selectively opted out + of by returning failure. Note that once the extent hook is set, the + structure is accessed directly by the associated arenas, so it must + remain valid for the entire lifetime of the arenas. - typedef void *(chunk_alloc_t) - void *chunk + typedef void *(extent_alloc_t) + extent_hooks_t *extent_hooks + void *new_addr size_t size size_t alignment bool *zero @@ -1575,62 +1807,83 @@ typedef struct { unsigned arena_ind - A chunk allocation function conforms to the - chunk_alloc_t type and upon success returns a pointer to + An extent allocation function conforms to the + extent_alloc_t type and upon success returns a pointer to size bytes of mapped memory on behalf of arena - arena_ind such that the chunk's base address is a - multiple of alignment, as well as setting - *zero to indicate whether the chunk is zeroed and - *commit to indicate whether the chunk is + arena_ind such that the extent's base address is + a multiple of alignment, as well as setting + *zero to indicate whether the extent is zeroed + and *commit to indicate whether the extent is committed. Upon error the function returns NULL and leaves *zero and *commit unmodified. The - size parameter is always a multiple of the chunk + size parameter is always a multiple of the page size. The alignment parameter is always a power - of two at least as large as the chunk size. Zeroing is mandatory if + of two at least as large as the page size. Zeroing is mandatory if *zero is true upon function entry. Committing is mandatory if *commit is true upon function entry. - If chunk is not NULL, the - returned pointer must be chunk on success or + If new_addr is not NULL, the + returned pointer must be new_addr on success or NULL on error. Committed memory may be committed in absolute terms as on a system that does not overcommit, or in implicit terms as on a system that overcommits and satisfies physical memory needs on demand via soft page faults. Note that replacing the - default chunk allocation function makes the arena's arena.<i>.dss setting irrelevant. - typedef bool (chunk_dalloc_t) - void *chunk + typedef bool (extent_dalloc_t) + extent_hooks_t *extent_hooks + void *addr size_t size bool committed unsigned arena_ind - A chunk deallocation function conforms to the - chunk_dalloc_t type and deallocates a - chunk of given size with + An extent deallocation function conforms to the + extent_dalloc_t type and deallocates an extent at given + addr and size with committed/decommited memory as indicated, on behalf of arena arena_ind, returning false upon success. 
If the function returns true, this indicates opt-out from - deallocation; the virtual memory mapping associated with the chunk + deallocation; the virtual memory mapping associated with the extent remains mapped, in the same commit state, and available for future use, in which case it will be automatically retained for later reuse. - typedef bool (chunk_commit_t) - void *chunk + typedef void (extent_destroy_t) + extent_hooks_t *extent_hooks + void *addr + size_t size + bool committed + unsigned arena_ind + + + + An extent destruction function conforms to the + extent_destroy_t type and unconditionally destroys an + extent at given addr and + size with + committed/decommited memory as indicated, on + behalf of arena arena_ind. This function may be + called to destroy retained extents during arena destruction (see arena.<i>.destroy). + + + typedef bool (extent_commit_t) + extent_hooks_t *extent_hooks + void *addr size_t size size_t offset size_t length unsigned arena_ind - A chunk commit function conforms to the - chunk_commit_t type and commits zeroed physical memory to - back pages within a chunk of given + An extent commit function conforms to the + extent_commit_t type and commits zeroed physical memory to + back pages within an extent at given addr and size at offset bytes, extending for length on behalf of arena arena_ind, returning false upon success. @@ -1641,46 +1894,56 @@ typedef struct { physical memory to satisfy the request. - typedef bool (chunk_decommit_t) - void *chunk + typedef bool (extent_decommit_t) + extent_hooks_t *extent_hooks + void *addr size_t size size_t offset size_t length unsigned arena_ind - A chunk decommit function conforms to the - chunk_decommit_t type and decommits any physical memory - that is backing pages within a chunk of given - size at offset bytes, - extending for length on behalf of arena + An extent decommit function conforms to the + extent_decommit_t type and decommits any physical memory + that is backing pages within an extent at given + addr and size at + offset bytes, extending for + length on behalf of arena arena_ind, returning false upon success, in which - case the pages will be committed via the chunk commit function before + case the pages will be committed via the extent commit function before being reused. If the function returns true, this indicates opt-out from decommit; the memory remains committed and available for future use, in which case it will be automatically retained for later reuse. - typedef bool (chunk_purge_t) - void *chunk - size_tsize + typedef bool (extent_purge_t) + extent_hooks_t *extent_hooks + void *addr + size_t size size_t offset size_t length unsigned arena_ind - A chunk purge function conforms to the chunk_purge_t - type and optionally discards physical pages within the virtual memory - mapping associated with chunk of given - size at offset bytes, - extending for length on behalf of arena - arena_ind, returning false if pages within the - purged virtual memory range will be zero-filled the next time they are - accessed. + An extent purge function conforms to the + extent_purge_t type and discards physical pages + within the virtual memory mapping associated with an extent at given + addr and size at + offset bytes, extending for + length on behalf of arena + arena_ind. A lazy extent purge function (e.g. 
+ implemented via + madvise(...MADV_FREE)) + can delay purging indefinitely and leave the pages within the purged + virtual memory range in an indeterminite state, whereas a forced extent + purge function immediately purges, and the pages within the virtual + memory range will be zero-filled the next time they are accessed. If + the function returns true, this indicates failure to purge. - typedef bool (chunk_split_t) - void *chunk + typedef bool (extent_split_t) + extent_hooks_t *extent_hooks + void *addr size_t size size_t size_a size_t size_b @@ -1688,35 +1951,36 @@ typedef struct { unsigned arena_ind - A chunk split function conforms to the chunk_split_t - type and optionally splits chunk of given - size into two adjacent chunks, the first of - size_a bytes, and the second of - size_b bytes, operating on + An extent split function conforms to the + extent_split_t type and optionally splits an extent at + given addr and size into + two adjacent extents, the first of size_a bytes, + and the second of size_b bytes, operating on committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon - success. If the function returns true, this indicates that the chunk + success. If the function returns true, this indicates that the extent remains unsplit and therefore should continue to be operated on as a whole. - typedef bool (chunk_merge_t) - void *chunk_a + typedef bool (extent_merge_t) + extent_hooks_t *extent_hooks + void *addr_a size_t size_a - void *chunk_b + void *addr_b size_t size_b bool committed unsigned arena_ind - A chunk merge function conforms to the chunk_merge_t - type and optionally merges adjacent chunks, - chunk_a of given size_a - and chunk_b of given - size_b into one contiguous chunk, operating on + An extent merge function conforms to the + extent_merge_t type and optionally merges adjacent extents, + at given addr_a and size_a + with given addr_b and + size_b into one contiguous extent, operating on committed/decommitted memory as indicated, on behalf of arena arena_ind, returning false upon - success. If the function returns true, this indicates that the chunks + success. If the function returns true, this indicates that the extents remain distinct mappings and therefore should continue to be operated on independently. @@ -1731,29 +1995,35 @@ typedef struct { Current limit on number of arenas. - + - arenas.initialized - (bool *) - r- + arenas.dirty_decay_ms + (ssize_t) + rw - An array of arenas.narenas - booleans. Each boolean indicates whether the corresponding arena is - initialized. + Current default per-arena approximate time in + milliseconds from the creation of a set of unused dirty pages until an + equivalent set of unused dirty pages is purged and/or reused, used to + initialize arena.<i>.dirty_decay_ms + during arena creation. See opt.dirty_decay_ms + for additional information. - + - arenas.lg_dirty_mult + arenas.muzzy_decay_ms (ssize_t) rw - Current default per-arena minimum ratio (log base 2) of - active to dirty pages, used to initialize arena.<i>.lg_dirty_mult + Current default per-arena approximate time in + milliseconds from the creation of a set of unused muzzy pages until an + equivalent set of unused muzzy pages is purged and/or reused, used to + initialize arena.<i>.muzzy_decay_ms during arena creation. See opt.lg_dirty_mult + linkend="opt.muzzy_decay_ms">opt.muzzy_decay_ms for additional information. @@ -1780,7 +2050,6 @@ typedef struct { arenas.tcache_max (size_t) r- - [] Maximum thread-cached size class. 
@@ -1799,7 +2068,6 @@ typedef struct { arenas.nhbins (unsigned) r- - [] Total number of thread cache bin size classes. @@ -1820,30 +2088,30 @@ typedef struct { (uint32_t) r- - Number of regions per page run. + Number of regions per slab. - + - arenas.bin.<i>.run_size + arenas.bin.<i>.slab_size (size_t) r- - Number of bytes per page run. + Number of bytes per slab. - + - arenas.nlruns + arenas.nlextents (unsigned) r- Total number of large size classes. - + - arenas.lrun.<i>.size + arenas.lextent.<i>.size (size_t) r- @@ -1851,33 +2119,24 @@ typedef struct { class. - - - arenas.nhchunks - (unsigned) - r- - - Total number of huge size classes. - - - + - arenas.hchunk.<i>.size - (size_t) - r- + arenas.create + (unsigned, extent_hooks_t *) + rw - Maximum size supported by this huge size - class. + Explicitly create a new arena outside the range of + automatically managed arenas, with optionally specified extent hooks, + and return the new arena index. - + - arenas.extend - (unsigned) - r- + arenas.lookup + (unsigned, void*) + rw - Extend the array of arenas by appending a new arena, - and returning the new arena index. + Index of the arena to which an allocation belongs to. @@ -1976,30 +2235,12 @@ typedef struct { [] Average number of bytes allocated between - inverval-based profile dumps. See the + interval-based profile dumps. See the opt.lg_prof_interval option for additional information. - - - stats.cactive - (size_t *) - r- - [] - - Pointer to a counter that contains an approximate count - of the current number of bytes in active pages. The estimate may be - high, but never low, because each arena rounds up when computing its - contribution to the counter. Note that the epoch mallctl has no bearing - on this counter. Furthermore, counter consistency is maintained via - atomic operations, so it is necessary to use an atomic operation in - order to guarantee a consistent read when dereferencing the pointer. - - - stats.allocated @@ -2023,7 +2264,9 @@ typedef struct { equal to stats.allocated. This does not include - stats.arenas.<i>.pdirty, nor pages + stats.arenas.<i>.pdirty, + + stats.arenas.<i>.pmuzzy, nor pages entirely devoted to allocator metadata. @@ -2035,11 +2278,28 @@ typedef struct { [] Total number of bytes dedicated to metadata, which - comprise base allocations used for bootstrap-sensitive internal - allocator data structures, arena chunk headers (see stats.arenas.<i>.metadata.mapped), + comprise base allocations used for bootstrap-sensitive allocator + metadata structures (see stats.arenas.<i>.base) and internal allocations (see stats.arenas.<i>.metadata.allocated). + linkend="stats.arenas.i.internal">stats.arenas.<i>.internal). + Transparent huge page (enabled with opt.metadata_thp) usage is not + considered. + + + + + stats.metadata_thp + (size_t) + r- + [] + + Number of transparent huge pages (THP) used for + metadata. See stats.metadata and + opt.metadata_thp) for + details. @@ -2066,15 +2326,155 @@ typedef struct { r- [] - Total number of bytes in active chunks mapped by the - allocator. This is a multiple of the chunk size, and is larger than - stats.active. - This does not include inactive chunks, even those that contain unused - dirty pages, which means that there is no strict ordering between this - and Total number of bytes in active extents mapped by the + allocator. This is larger than stats.active. This + does not include inactive extents, even those that contain unused dirty + pages, which means that there is no strict ordering between this and + stats.resident. 
+ + + stats.retained + (size_t) + r- + [] + + Total number of bytes in virtual memory mappings that + were retained rather than being returned to the operating system via + e.g. munmap + 2 or similar. Retained virtual + memory is typically untouched, decommitted, or purged, so it has no + strongly associated physical memory (see extent hooks for details). + Retained memory is excluded from mapped memory statistics, e.g. stats.mapped. + + + + + + stats.background_thread.num_threads + (size_t) + r- + [] + + Number of background + threads running currently. + + + + + stats.background_thread.num_runs + (uint64_t) + r- + [] + + Total number of runs from all background threads. + + + + + stats.background_thread.run_interval + (uint64_t) + r- + [] + + Average run interval in nanoseconds of background threads. + + + + + stats.mutexes.ctl.{counter}; + (counter specific type) + r- + [] + + Statistics on ctl mutex (global + scope; mallctl related). {counter} is one of the + counters below: + + num_ops (uint64_t): + Total number of lock acquisition operations on this mutex. + + num_spin_acq (uint64_t): Number + of times the mutex was spin-acquired. When the mutex is currently + locked and cannot be acquired immediately, a short period of + spin-retry within jemalloc will be performed. Acquired through spin + generally means the contention was lightweight and not causing context + switches. + + num_wait (uint64_t): Number of + times the mutex was wait-acquired, which means the mutex contention + was not solved by spin-retry, and blocking operation was likely + involved in order to acquire the mutex. This event generally implies + higher cost / longer delay, and should be investigated if it happens + often. + + max_wait_time (uint64_t): + Maximum length of time in nanoseconds spent on a single wait-acquired + lock operation. Note that to avoid profiling overhead on the common + path, this does not consider spin-acquired cases. + + total_wait_time (uint64_t): + Cumulative time in nanoseconds spent on wait-acquired lock operations. + Similarly, spin-acquired cases are not considered. + + max_num_thds (uint32_t): Maximum + number of threads waiting on this mutex simultaneously. Similarly, + spin-acquired cases are not considered. + + num_owner_switch (uint64_t): + Number of times the current mutex owner is different from the previous + one. This event does not generally imply an issue; rather it is an + indicator of how often the protected data are accessed by different + threads. + + + + + + + + + stats.mutexes.background_thread.{counter} + (counter specific type) r- + [] + + Statistics on background_thread mutex + (global scope; background_thread + related). {counter} is one of the counters in mutex profiling + counters. + + + + + stats.mutexes.prof.{counter} + (counter specific type) r- + [] + + Statistics on prof mutex (global + scope; profiling related). {counter} is one of the + counters in mutex profiling + counters. + + + + + stats.mutexes.reset + (void) -- + [] + + Reset all mutex profile statistics, including global + mutexes, arena mutexes and bin mutexes. + + stats.arenas.<i>.dss @@ -2089,15 +2489,29 @@ typedef struct { - + - stats.arenas.<i>.lg_dirty_mult + stats.arenas.<i>.dirty_decay_ms (ssize_t) r- - Minimum ratio (log base 2) of active to dirty pages. - See opt.lg_dirty_mult + Approximate time in milliseconds from the creation of a + set of unused dirty pages until an equivalent set of unused dirty pages + is purged and/or reused. See opt.dirty_decay_ms + for details. 
+ + + + + stats.arenas.<i>.muzzy_decay_ms + (ssize_t) + r- + + Approximate time in milliseconds from the creation of a + set of unused muzzy pages until an equivalent set of unused muzzy pages + is purged and/or reused. See opt.muzzy_decay_ms for details. @@ -2111,13 +2525,25 @@ typedef struct { arena. + + + stats.arenas.<i>.uptime + (uint64_t) + r- + + Time elapsed (in nanoseconds) since the arena was + created. If <i> equals 0 or + MALLCTL_ARENAS_ALL, this is the uptime since malloc + initialization. + + stats.arenas.<i>.pactive (size_t) r- - Number of pages in active runs. + Number of pages in active extents. @@ -2126,10 +2552,23 @@ typedef struct { (size_t) r- - Number of pages within unused runs that are potentially - dirty, and for which madvise... - MADV_DONTNEED or - similar has not been called. + Number of pages within unused extents that are + potentially dirty, and for which madvise() or + similar has not been called. See opt.dirty_decay_ms + for a description of dirty pages. + + + + + stats.arenas.<i>.pmuzzy + (size_t) + r- + + Number of pages within unused extents that are muzzy. + See opt.muzzy_decay_ms + for a description of muzzy pages. @@ -2142,20 +2581,33 @@ typedef struct { Number of mapped bytes. - + + + stats.arenas.<i>.retained + (size_t) + r- + [] + + Number of retained bytes. See stats.retained for + details. + + + - stats.arenas.<i>.metadata.mapped + stats.arenas.<i>.base (size_t) r- [] - Number of mapped bytes in arena chunk headers, which - track the states of the non-metadata pages. + + Number of bytes dedicated to bootstrap-sensitive allocator metadata + structures. - + - stats.arenas.<i>.metadata.allocated + stats.arenas.<i>.internal (size_t) r- [] @@ -2163,180 +2615,199 @@ typedef struct { Number of bytes dedicated to internal allocations. Internal allocations differ from application-originated allocations in that they are for internal use, and that they are omitted from heap - profiles. This statistic is reported separately from stats.metadata and - stats.arenas.<i>.metadata.mapped - because it overlaps with e.g. the stats.allocated and - stats.active - statistics, whereas the other metadata statistics do - not. + profiles. - + - stats.arenas.<i>.npurge - (uint64_t) + stats.arenas.<i>.metadata_thp + (size_t) r- [] - Number of dirty page purge sweeps performed. - + Number of transparent huge pages (THP) used for + metadata. See opt.metadata_thp + for details. - + - stats.arenas.<i>.nmadvise - (uint64_t) + stats.arenas.<i>.resident + (size_t) r- [] - Number of madvise... - MADV_DONTNEED or - similar calls made to purge dirty pages. + Maximum number of bytes in physically resident data + pages mapped by the arena, comprising all pages dedicated to allocator + metadata, pages backing active allocations, and unused dirty pages. + This is a maximum rather than precise because pages may not actually be + physically resident if they correspond to demand-zeroed virtual memory + that has not yet been touched. This is a multiple of the page + size. - + - stats.arenas.<i>.purged + stats.arenas.<i>.dirty_npurge (uint64_t) r- [] - Number of pages purged. + Number of dirty page purge sweeps performed. + - + - stats.arenas.<i>.small.allocated - (size_t) + stats.arenas.<i>.dirty_nmadvise + (uint64_t) r- [] - Number of bytes currently allocated by small objects. - + Number of madvise() or similar + calls made to purge dirty pages. 
- + - stats.arenas.<i>.small.nmalloc + stats.arenas.<i>.dirty_purged (uint64_t) r- [] - Cumulative number of allocation requests served by - small bins. + Number of dirty pages purged. - + - stats.arenas.<i>.small.ndalloc + stats.arenas.<i>.muzzy_npurge (uint64_t) r- [] - Cumulative number of small objects returned to bins. + Number of muzzy page purge sweeps performed. - + - stats.arenas.<i>.small.nrequests + stats.arenas.<i>.muzzy_nmadvise (uint64_t) r- [] - Cumulative number of small allocation requests. - + Number of madvise() or similar + calls made to purge muzzy pages. - + - stats.arenas.<i>.large.allocated + stats.arenas.<i>.muzzy_purged + (uint64_t) + r- + [] + + Number of muzzy pages purged. + + + + + stats.arenas.<i>.small.allocated (size_t) r- [] - Number of bytes currently allocated by large objects. + Number of bytes currently allocated by small objects. - + - stats.arenas.<i>.large.nmalloc + stats.arenas.<i>.small.nmalloc (uint64_t) r- [] - Cumulative number of large allocation requests served - directly by the arena. + Cumulative number of times a small allocation was + requested from the arena's bins, whether to fill the relevant tcache if + opt.tcache is + enabled, or to directly satisfy an allocation request + otherwise. - + - stats.arenas.<i>.large.ndalloc + stats.arenas.<i>.small.ndalloc (uint64_t) r- [] - Cumulative number of large deallocation requests served - directly by the arena. + Cumulative number of times a small allocation was + returned to the arena's bins, whether to flush the relevant tcache if + opt.tcache is + enabled, or to directly deallocate an allocation + otherwise. - + - stats.arenas.<i>.large.nrequests + stats.arenas.<i>.small.nrequests (uint64_t) r- [] - Cumulative number of large allocation requests. - + Cumulative number of allocation requests satisfied by + all bin size classes. - + - stats.arenas.<i>.huge.allocated + stats.arenas.<i>.large.allocated (size_t) r- [] - Number of bytes currently allocated by huge objects. + Number of bytes currently allocated by large objects. - + - stats.arenas.<i>.huge.nmalloc + stats.arenas.<i>.large.nmalloc (uint64_t) r- [] - Cumulative number of huge allocation requests served - directly by the arena. + Cumulative number of times a large extent was allocated + from the arena, whether to fill the relevant tcache if opt.tcache is enabled and + the size class is within the range being cached, or to directly satisfy + an allocation request otherwise. - + - stats.arenas.<i>.huge.ndalloc + stats.arenas.<i>.large.ndalloc (uint64_t) r- [] - Cumulative number of huge deallocation requests served - directly by the arena. + Cumulative number of times a large extent was returned + to the arena, whether to flush the relevant tcache if opt.tcache is enabled and + the size class is within the range being cached, or to directly + deallocate an allocation otherwise. - + - stats.arenas.<i>.huge.nrequests + stats.arenas.<i>.large.nrequests (uint64_t) r- [] - Cumulative number of huge allocation requests. - + Cumulative number of allocation requests satisfied by + all large size classes. @@ -2346,8 +2817,11 @@ typedef struct { r- [] - Cumulative number of allocations served by bin. - + Cumulative number of times a bin region of the + corresponding size class was allocated from the arena, whether to fill + the relevant tcache if opt.tcache is enabled, or + to directly satisfy an allocation request otherwise. @@ -2357,8 +2831,11 @@ typedef struct { r- [] - Cumulative number of allocations returned to bin. 
- + Cumulative number of times a bin region of the + corresponding size class was returned to the arena, whether to flush the + relevant tcache if opt.tcache is enabled, or + to directly deallocate an allocation otherwise. @@ -2368,8 +2845,8 @@ typedef struct { r- [] - Cumulative number of allocation - requests. + Cumulative number of allocation requests satisfied by + bin regions of the corresponding size class. @@ -2388,7 +2865,6 @@ typedef struct { stats.arenas.<i>.bins.<j>.nfills (uint64_t) r- - [ ] Cumulative number of tcache fills. @@ -2398,131 +2874,273 @@ typedef struct { stats.arenas.<i>.bins.<j>.nflushes (uint64_t) r- - [ ] Cumulative number of tcache flushes. - + - stats.arenas.<i>.bins.<j>.nruns + stats.arenas.<i>.bins.<j>.nslabs (uint64_t) r- [] - Cumulative number of runs created. + Cumulative number of slabs created. - + - stats.arenas.<i>.bins.<j>.nreruns + stats.arenas.<i>.bins.<j>.nreslabs (uint64_t) r- [] - Cumulative number of times the current run from which + Cumulative number of times the current slab from which to allocate changed. - + - stats.arenas.<i>.bins.<j>.curruns + stats.arenas.<i>.bins.<j>.curslabs (size_t) r- [] - Current number of runs. + Current number of slabs. - + - stats.arenas.<i>.lruns.<j>.nmalloc + stats.arenas.<i>.bins.<j>.mutex.{counter} + (counter specific type) r- + [] + + Statistics on + arena.<i>.bins.<j> mutex (arena bin + scope; bin operation related). {counter} is one of + the counters in mutex profiling + counters. + + + + + stats.arenas.<i>.lextents.<j>.nmalloc (uint64_t) r- [] - Cumulative number of allocation requests for this size - class served directly by the arena. + Cumulative number of times a large extent of the + corresponding size class was allocated from the arena, whether to fill + the relevant tcache if opt.tcache is enabled and + the size class is within the range being cached, or to directly satisfy + an allocation request otherwise. - + - stats.arenas.<i>.lruns.<j>.ndalloc + stats.arenas.<i>.lextents.<j>.ndalloc (uint64_t) r- [] - Cumulative number of deallocation requests for this - size class served directly by the arena. + Cumulative number of times a large extent of the + corresponding size class was returned to the arena, whether to flush the + relevant tcache if opt.tcache is enabled and + the size class is within the range being cached, or to directly + deallocate an allocation otherwise. - + - stats.arenas.<i>.lruns.<j>.nrequests + stats.arenas.<i>.lextents.<j>.nrequests (uint64_t) r- [] - Cumulative number of allocation requests for this size - class. + Cumulative number of allocation requests satisfied by + large extents of the corresponding size class. - + - stats.arenas.<i>.lruns.<j>.curruns + stats.arenas.<i>.lextents.<j>.curlextents (size_t) r- [] - Current number of runs for this size class. + Current number of large allocations for this size class. - + - stats.arenas.<i>.hchunks.<j>.nmalloc - (uint64_t) - r- + stats.arenas.<i>.mutexes.large.{counter} + (counter specific type) r- [] - Cumulative number of allocation requests for this size - class served directly by the arena. + Statistics on arena.<i>.large + mutex (arena scope; large allocation related). + {counter} is one of the counters in mutex profiling + counters. - + - stats.arenas.<i>.hchunks.<j>.ndalloc - (uint64_t) - r- + stats.arenas.<i>.mutexes.extent_avail.{counter} + (counter specific type) r- [] - Cumulative number of deallocation requests for this - size class served directly by the arena. 
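+ The bin and large-extent counters above are updated as thread caches
+ fill and flush, so an instantaneous reading can lag. A small sketch
+ (illustrative, not from the original manual; assumes statistics are
+ enabled) that flushes the calling thread's cache before sampling:
+
+    /* Flush this thread's tcache so bin counters are current. */
+    mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
+
+    uint64_t epoch = 1;
+    size_t sz = sizeof(epoch);
+    mallctl("epoch", &epoch, &sz, &epoch, sz);
+
+    size_t curslabs;
+    sz = sizeof(curslabs);
+    /* Current slabs backing size class 0 of arena 0. */
+    mallctl("stats.arenas.0.bins.0.curslabs", &curslabs, &sz, NULL, 0);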
+ Statistics on arena.<i>.extent_avail
+ mutex (arena scope; extent avail related).
+ {counter} is one of the counters in mutex profiling
+ counters.
+
+
+
+ stats.arenas.<i>.mutexes.extents_dirty.{counter}
+ (counter specific type) r-
+ []
+
+ Statistics on arena.<i>.extents_dirty
+ mutex (arena scope; dirty extents related).
+ {counter} is one of the counters in mutex profiling
+ counters.
+
+
+
+ stats.arenas.<i>.mutexes.extents_muzzy.{counter}
+ (counter specific type) r-
+ []
+
+ Statistics on arena.<i>.extents_muzzy
+ mutex (arena scope; muzzy extents related).
+ {counter} is one of the counters in mutex profiling
+ counters.
+
+
+
+ stats.arenas.<i>.mutexes.extents_retained.{counter}
+ (counter specific type) r-
+ []
+
+ Statistics on arena.<i>.extents_retained
+ mutex (arena scope; retained extents related).
+ {counter} is one of the counters in mutex profiling
+ counters.
+
+
+
+ stats.arenas.<i>.mutexes.decay_dirty.{counter}
+ (counter specific type) r-
+ []
+
+ Statistics on arena.<i>.decay_dirty
+ mutex (arena scope; decay for dirty pages related).
+ {counter} is one of the counters in mutex profiling
+ counters.
+
+
+
+ stats.arenas.<i>.mutexes.decay_muzzy.{counter}
+ (counter specific type) r-
+ []
+
+ Statistics on arena.<i>.decay_muzzy
+ mutex (arena scope; decay for muzzy pages related).
+ {counter} is one of the counters in mutex profiling
+ counters.
+
+
+
+ stats.arenas.<i>.mutexes.base.{counter}
+ (counter specific type) r-
+ []
+
+ Statistics on arena.<i>.base
+ mutex (arena scope; base allocator related).
+ {counter} is one of the counters in mutex profiling
+ counters.
+
+
+
+ stats.arenas.<i>.mutexes.tcache_list.{counter}
+ (counter specific type) r-
+ []
+
+ Statistics on
+ arena.<i>.tcache_list mutex (arena scope;
+ tcache to arena association related). This mutex is expected to be
+ accessed less often. {counter} is one of the
+ counters in mutex profiling
+ counters.
+
+
+ HEAP PROFILE FORMAT
+ Although the heap profiling functionality was originally designed to
+ be compatible with the
+ pprof command that is developed as part of the gperftools
+ package, the addition of per thread heap profiling functionality
+ required a different heap profile format. The jeprof
+ command is derived from pprof, with enhancements to
+ support the heap profile format described here.
+
+ In the following hypothetical heap profile, [...]
+ indicates elision for the sake of compactness.
+ [hypothetical heap profile listing not recoverable from this copy]
+
+ The following matches the above heap profile, but most
+tokens are replaced with <description> to indicate
+descriptions of the corresponding fields.
+<heap_profile_format_version>/<mean_sample_interval>
+  <aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+  [...]
+  <thread_3_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+  [...]
+  <thread_99_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+  [...]
+@ <top_frame_return_address> [...] <leaf_frame_return_address> [...]
+  <aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+  <thread_3_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+  <thread_99_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+[...]
+
+MAPPED_LIBRARIES:
+</proc/<pid>/maps>]]>
+
+
 DEBUGGING MALLOC PROBLEMS
 When debugging, it is a good idea to configure/build jemalloc with
@@ -2532,7 +3150,7 @@ typedef struct {
 of run-time assertions that catch application errors such as
 double-free, write-after-free, etc.

- Programs often accidentally depend on “uninitialized”
+ Programs often accidentally depend on uninitialized
 memory actually being filled with zero bytes.
Junk filling (see the opt.junk option) tends to expose such bugs in the form of obviously incorrect @@ -2544,9 +3162,7 @@ typedef struct { This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information - would be prohibitive. However, jemalloc does integrate with the most - excellent Valgrind tool if the - configuration option is enabled. + would be prohibitive. DIAGNOSTIC MESSAGES @@ -2561,29 +3177,29 @@ typedef struct { to override the function which emits the text strings forming the errors and warnings if for some reason the STDERR_FILENO file descriptor is not suitable for this. - malloc_message takes the + malloc_message() takes the cbopaque pointer argument that is NULL unless overridden by the arguments in a call to - malloc_stats_print, followed by a string + malloc_stats_print(), followed by a string pointer. Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock. All messages are prefixed by - “<jemalloc>: ”. + <jemalloc>: . RETURN VALUES Standard API - The malloc and - calloc functions return a pointer to the + The malloc() and + calloc() functions return a pointer to the allocated memory if successful; otherwise a NULL pointer is returned and errno is set to ENOMEM. - The posix_memalign function + The posix_memalign() function returns the value 0 if successful; otherwise it returns an error value. - The posix_memalign function will fail + The posix_memalign() function will fail if: @@ -2602,11 +3218,11 @@ typedef struct { - The aligned_alloc function returns + The aligned_alloc() function returns a pointer to the allocated memory if successful; otherwise a NULL pointer is returned and errno is set. The - aligned_alloc function will fail if: + aligned_alloc() function will fail if: EINVAL @@ -2623,44 +3239,44 @@ typedef struct { - The realloc function returns a + The realloc() function returns a pointer, possibly identical to ptr, to the allocated memory if successful; otherwise a NULL pointer is returned, and errno is set to ENOMEM if the error was the result of an - allocation failure. The realloc + allocation failure. The realloc() function always leaves the original buffer intact when an error occurs. - The free function returns no + The free() function returns no value. Non-standard API - The mallocx and - rallocx functions return a pointer to + The mallocx() and + rallocx() functions return a pointer to the allocated memory if successful; otherwise a NULL pointer is returned to indicate insufficient contiguous memory was available to service the allocation request. - The xallocx function returns the + The xallocx() function returns the real size of the resulting resized allocation pointed to by ptr, which is a value less than size if the allocation could not be adequately grown in place. - The sallocx function returns the + The sallocx() function returns the real size of the allocation pointed to by ptr. - The nallocx returns the real size + The nallocx() returns the real size that would result from a successful equivalent - mallocx function call, or zero if + mallocx() function call, or zero if insufficient memory is available to perform the size computation. - The mallctl, - mallctlnametomib, and - mallctlbymib functions return 0 on + The mallctl(), + mallctlnametomib(), and + mallctlbymib() functions return 0 on success; otherwise they return an error value. 
The functions will fail if: @@ -2696,13 +3312,13 @@ typedef struct { EFAULT An interface with side effects failed in some way - not directly related to mallctl* + not directly related to mallctl*() read/write processing. - The malloc_usable_size function + The malloc_usable_size() function returns the usable size of the allocation pointed to by ptr. @@ -2727,9 +3343,10 @@ typedef struct { To dump core whenever a problem occurs: ln -s 'abort:true' /etc/malloc.conf - To specify in the source a chunk size that is 16 MiB: + To specify in the source that only one arena should be automatically + created: +malloc_conf = "narenas:1";]]> SEE ALSO @@ -2750,13 +3367,13 @@ malloc_conf = "lg_chunk:24";]]> STANDARDS - The malloc, - calloc, - realloc, and - free functions conform to ISO/IEC - 9899:1990 (“ISO C90”). - - The posix_memalign function conforms - to IEEE Std 1003.1-2001 (“POSIX.1”). + The malloc(), + calloc(), + realloc(), and + free() functions conform to ISO/IEC + 9899:1990 (ISO C90). + + The posix_memalign() function conforms + to IEEE Std 1003.1-2001 (POSIX.1). diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/doc/manpages.xsl.in b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/doc/manpages.xsl.in similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/doc/manpages.xsl.in rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/doc/manpages.xsl.in diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/doc/stylesheet.xsl b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/doc/stylesheet.xsl new file mode 100644 index 0000000..619365d --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/doc/stylesheet.xsl @@ -0,0 +1,10 @@ + + ansi + + + + + + + + diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/arena.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/arena.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena.h diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_externs.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_externs.h new file mode 100644 index 0000000..4b3732b --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_externs.h @@ -0,0 +1,94 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H +#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H + +#include "jemalloc/internal/bin.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats.h" + +extern ssize_t opt_dirty_decay_ms; +extern ssize_t opt_muzzy_decay_ms; + +extern percpu_arena_mode_t opt_percpu_arena; +extern const char *percpu_arena_mode_names[]; + +extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS]; +extern malloc_mutex_t arenas_lock; + +void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, + unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, + ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy); +void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, + size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, + bin_stats_t *bstats, 
arena_stats_large_t *lstats); +void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent); +#ifdef JEMALLOC_JET +size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr); +#endif +extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, + size_t usize, size_t alignment, bool *zero); +void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, + extent_t *extent); +void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, + extent_t *extent, size_t oldsize); +void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, + extent_t *extent, size_t oldsize); +ssize_t arena_dirty_decay_ms_get(arena_t *arena); +bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms); +ssize_t arena_muzzy_decay_ms_get(arena_t *arena); +bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms); +void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, + bool all); +void arena_reset(tsd_t *tsd, arena_t *arena); +void arena_destroy(tsd_t *tsd, arena_t *arena); +void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes); +void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, + bool zero); + +typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *); +extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small; + +void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, + szind_t ind, bool zero); +void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero, tcache_t *tcache); +void arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize); +void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, + bool slow_path); +void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, + extent_t *extent, void *ptr); +void arena_dalloc_small(tsdn_t *tsdn, void *ptr); +bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t extra, bool zero); +void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, + size_t size, size_t alignment, bool zero, tcache_t *tcache); +dss_prec_t arena_dss_prec_get(arena_t *arena); +bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); +ssize_t arena_dirty_decay_ms_default_get(void); +bool arena_dirty_decay_ms_default_set(ssize_t decay_ms); +ssize_t arena_muzzy_decay_ms_default_get(void); +bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms); +bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, + size_t *old_limit, size_t *new_limit); +unsigned arena_nthreads_get(arena_t *arena, bool internal); +void arena_nthreads_inc(arena_t *arena, bool internal); +void arena_nthreads_dec(arena_t *arena, bool internal); +size_t arena_extent_sn_next(arena_t *arena); +arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); +void arena_boot(void); +void arena_prefork0(tsdn_t *tsdn, arena_t *arena); +void arena_prefork1(tsdn_t *tsdn, arena_t *arena); +void arena_prefork2(tsdn_t *tsdn, arena_t *arena); +void arena_prefork3(tsdn_t *tsdn, arena_t *arena); +void arena_prefork4(tsdn_t *tsdn, arena_t *arena); +void arena_prefork5(tsdn_t *tsdn, arena_t *arena); +void arena_prefork6(tsdn_t *tsdn, arena_t *arena); +void arena_prefork7(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_child(tsdn_t *tsdn, arena_t *arena); + +#endif /* 
JEMALLOC_INTERNAL_ARENA_EXTERNS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_inlines_a.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_inlines_a.h new file mode 100644 index 0000000..9abf7f6 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_inlines_a.h @@ -0,0 +1,57 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H +#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H + +static inline unsigned +arena_ind_get(const arena_t *arena) { + return base_ind_get(arena->base); +} + +static inline void +arena_internal_add(arena_t *arena, size_t size) { + atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED); +} + +static inline void +arena_internal_sub(arena_t *arena, size_t size) { + atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED); +} + +static inline size_t +arena_internal_get(arena_t *arena) { + return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED); +} + +static inline bool +arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) { + cassert(config_prof); + + if (likely(prof_interval == 0 || !prof_active_get_unlocked())) { + return false; + } + + return prof_accum_add(tsdn, &arena->prof_accum, accumbytes); +} + +static inline void +percpu_arena_update(tsd_t *tsd, unsigned cpu) { + assert(have_percpu_arena); + arena_t *oldarena = tsd_arena_get(tsd); + assert(oldarena != NULL); + unsigned oldind = arena_ind_get(oldarena); + + if (oldind != cpu) { + unsigned newind = cpu; + arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true); + assert(newarena != NULL); + + /* Set new arena/tcache associations. */ + arena_migrate(tsd, oldind, newind); + tcache_t *tcache = tcache_get(tsd); + if (tcache != NULL) { + tcache_arena_reassociate(tsd_tsdn(tsd), tcache, + newarena); + } + } +} + +#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h new file mode 100644 index 0000000..2b7e77e --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_inlines_b.h @@ -0,0 +1,354 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H +#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H + +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/ticker.h" + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { + cassert(config_prof); + assert(ptr != NULL); + + /* Static check. */ + if (alloc_ctx == NULL) { + const extent_t *extent = iealloc(tsdn, ptr); + if (unlikely(!extent_slab_get(extent))) { + return large_prof_tctx_get(tsdn, extent); + } + } else { + if (unlikely(!alloc_ctx->slab)) { + return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr)); + } + } + return (prof_tctx_t *)(uintptr_t)1U; +} + +JEMALLOC_ALWAYS_INLINE void +arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, UNUSED size_t usize, + alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + + /* Static check. 
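+	 * (alloc_ctx is either always NULL or always non-NULL at a given
+	 * call site, so once this function is inlined the branch is
+	 * resolved at compile time.)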
*/ + if (alloc_ctx == NULL) { + extent_t *extent = iealloc(tsdn, ptr); + if (unlikely(!extent_slab_get(extent))) { + large_prof_tctx_set(tsdn, extent, tctx); + } + } else { + if (unlikely(!alloc_ctx->slab)) { + large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx); + } + } +} + +static inline void +arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, UNUSED prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + + extent_t *extent = iealloc(tsdn, ptr); + assert(!extent_slab_get(extent)); + + large_prof_tctx_reset(tsdn, extent); +} + +JEMALLOC_ALWAYS_INLINE void +arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) { + tsd_t *tsd; + ticker_t *decay_ticker; + + if (unlikely(tsdn_null(tsdn))) { + return; + } + tsd = tsdn_tsd(tsdn); + decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena)); + if (unlikely(decay_ticker == NULL)) { + return; + } + if (unlikely(ticker_ticks(decay_ticker, nticks))) { + arena_decay(tsdn, arena, false, false); + } +} + +JEMALLOC_ALWAYS_INLINE void +arena_decay_tick(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx); + + arena_decay_ticks(tsdn, arena, 1); +} + +JEMALLOC_ALWAYS_INLINE void * +arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, + tcache_t *tcache, bool slow_path) { + assert(!tsdn_null(tsdn) || tcache == NULL); + assert(size != 0); + + if (likely(tcache != NULL)) { + if (likely(size <= SMALL_MAXCLASS)) { + return tcache_alloc_small(tsdn_tsd(tsdn), arena, + tcache, size, ind, zero, slow_path); + } + if (likely(size <= tcache_maxclass)) { + return tcache_alloc_large(tsdn_tsd(tsdn), arena, + tcache, size, ind, zero, slow_path); + } + /* (size > tcache_maxclass) case falls through. */ + assert(size > tcache_maxclass); + } + + return arena_malloc_hard(tsdn, arena, size, ind, zero); +} + +JEMALLOC_ALWAYS_INLINE arena_t * +arena_aalloc(tsdn_t *tsdn, const void *ptr) { + return extent_arena_get(iealloc(tsdn, ptr)); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_salloc(tsdn_t *tsdn, const void *ptr) { + assert(ptr != NULL); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true); + assert(szind != NSIZES); + + return sz_index2size(szind); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_vsalloc(tsdn_t *tsdn, const void *ptr) { + /* + * Return 0 if ptr is not within an extent managed by jemalloc. This + * function has two extra costs relative to isalloc(): + * - The rtree calls cannot claim to be dependent lookups, which induces + * rtree lookup load dependencies. + * - The lookup may fail, so there is an extra branch to check for + * failure. + */ + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + extent_t *extent; + szind_t szind; + if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, false, &extent, &szind)) { + return 0; + } + + if (extent == NULL) { + return 0; + } + assert(extent_state_get(extent) == extent_state_active); + /* Only slab members should be looked up via interior pointers. 
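+	 * (For non-slab extents the pointer must be the extent base; an
+	 * interior pointer into a large allocation here would indicate a
+	 * caller bug.)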
*/ + assert(extent_addr_get(extent) == ptr || extent_slab_get(extent)); + + assert(szind != NSIZES); + + return sz_index2size(szind); +} + +static inline void +arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) { + assert(ptr != NULL); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + szind_t szind; + bool slab; + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, + true, &szind, &slab); + + if (config_debug) { + extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, + rtree_ctx, (uintptr_t)ptr, true); + assert(szind == extent_szind_get(extent)); + assert(szind < NSIZES); + assert(slab == extent_slab_get(extent)); + } + + if (likely(slab)) { + /* Small allocation. */ + arena_dalloc_small(tsdn, ptr); + } else { + extent_t *extent = iealloc(tsdn, ptr); + large_dalloc(tsdn, extent); + } +} + +JEMALLOC_ALWAYS_INLINE void +arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, + alloc_ctx_t *alloc_ctx, bool slow_path) { + assert(!tsdn_null(tsdn) || tcache == NULL); + assert(ptr != NULL); + + if (unlikely(tcache == NULL)) { + arena_dalloc_no_tcache(tsdn, ptr); + return; + } + + szind_t szind; + bool slab; + rtree_ctx_t *rtree_ctx; + if (alloc_ctx != NULL) { + szind = alloc_ctx->szind; + slab = alloc_ctx->slab; + assert(szind != NSIZES); + } else { + rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &szind, &slab); + } + + if (config_debug) { + rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); + extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, + rtree_ctx, (uintptr_t)ptr, true); + assert(szind == extent_szind_get(extent)); + assert(szind < NSIZES); + assert(slab == extent_slab_get(extent)); + } + + if (likely(slab)) { + /* Small allocation. */ + tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind, + slow_path); + } else { + if (szind < nhbins) { + if (config_prof && unlikely(szind < NBINS)) { + arena_dalloc_promoted(tsdn, ptr, tcache, + slow_path); + } else { + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, + szind, slow_path); + } + } else { + extent_t *extent = iealloc(tsdn, ptr); + large_dalloc(tsdn, extent); + } + } +} + +static inline void +arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) { + assert(ptr != NULL); + assert(size <= LARGE_MAXCLASS); + + szind_t szind; + bool slab; + if (!config_prof || !opt_prof) { + /* + * There is no risk of being confused by a promoted sampled + * object, so base szind and slab on the given size. + */ + szind = sz_size2index(size); + slab = (szind < NBINS); + } + + if ((config_prof && opt_prof) || config_debug) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, + &rtree_ctx_fallback); + + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &szind, &slab); + + assert(szind == sz_size2index(size)); + assert((config_prof && opt_prof) || slab == (szind < NBINS)); + + if (config_debug) { + extent_t *extent = rtree_extent_read(tsdn, + &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); + assert(szind == extent_szind_get(extent)); + assert(slab == extent_slab_get(extent)); + } + } + + if (likely(slab)) { + /* Small allocation. 
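+		 * Freed directly into its bin; no tcache is available on
+		 * this path.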
*/ + arena_dalloc_small(tsdn, ptr); + } else { + extent_t *extent = iealloc(tsdn, ptr); + large_dalloc(tsdn, extent); + } +} + +JEMALLOC_ALWAYS_INLINE void +arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + alloc_ctx_t *alloc_ctx, bool slow_path) { + assert(!tsdn_null(tsdn) || tcache == NULL); + assert(ptr != NULL); + assert(size <= LARGE_MAXCLASS); + + if (unlikely(tcache == NULL)) { + arena_sdalloc_no_tcache(tsdn, ptr, size); + return; + } + + szind_t szind; + bool slab; + UNUSED alloc_ctx_t local_ctx; + if (config_prof && opt_prof) { + if (alloc_ctx == NULL) { + /* Uncommon case and should be a static check. */ + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, + &rtree_ctx_fallback); + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &local_ctx.szind, + &local_ctx.slab); + assert(local_ctx.szind == sz_size2index(size)); + alloc_ctx = &local_ctx; + } + slab = alloc_ctx->slab; + szind = alloc_ctx->szind; + } else { + /* + * There is no risk of being confused by a promoted sampled + * object, so base szind and slab on the given size. + */ + szind = sz_size2index(size); + slab = (szind < NBINS); + } + + if (config_debug) { + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &szind, &slab); + extent_t *extent = rtree_extent_read(tsdn, + &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); + assert(szind == extent_szind_get(extent)); + assert(slab == extent_slab_get(extent)); + } + + if (likely(slab)) { + /* Small allocation. */ + tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind, + slow_path); + } else { + if (szind < nhbins) { + if (config_prof && unlikely(szind < NBINS)) { + arena_dalloc_promoted(tsdn, ptr, tcache, + slow_path); + } else { + tcache_dalloc_large(tsdn_tsd(tsdn), + tcache, ptr, szind, slow_path); + } + } else { + extent_t *extent = iealloc(tsdn, ptr); + large_dalloc(tsdn, extent); + } + } +} + +#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_stats.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_stats.h new file mode 100644 index 0000000..5f3dca8 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_stats.h @@ -0,0 +1,237 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H +#define JEMALLOC_INTERNAL_ARENA_STATS_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_prof.h" +#include "jemalloc/internal/size_classes.h" + +/* + * In those architectures that support 64-bit atomics, we use atomic updates for + * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize + * externally. + */ +#ifdef JEMALLOC_ATOMIC_U64 +typedef atomic_u64_t arena_stats_u64_t; +#else +/* Must hold the arena stats mutex while reading atomically. */ +typedef uint64_t arena_stats_u64_t; +#endif + +typedef struct arena_stats_large_s arena_stats_large_t; +struct arena_stats_large_s { + /* + * Total number of allocation/deallocation requests served directly by + * the arena. + */ + arena_stats_u64_t nmalloc; + arena_stats_u64_t ndalloc; + + /* + * Number of allocation requests that correspond to this size class. + * This includes requests served by tcache, though tcache only + * periodically merges into this counter. + */ + arena_stats_u64_t nrequests; /* Partially derived. 
*/ + + /* Current number of allocations of this size class. */ + size_t curlextents; /* Derived. */ +}; + +typedef struct arena_stats_decay_s arena_stats_decay_t; +struct arena_stats_decay_s { + /* Total number of purge sweeps. */ + arena_stats_u64_t npurge; + /* Total number of madvise calls made. */ + arena_stats_u64_t nmadvise; + /* Total number of pages purged. */ + arena_stats_u64_t purged; +}; + +/* + * Arena stats. Note that fields marked "derived" are not directly maintained + * within the arena code; rather their values are derived during stats merge + * requests. + */ +typedef struct arena_stats_s arena_stats_t; +struct arena_stats_s { +#ifndef JEMALLOC_ATOMIC_U64 + malloc_mutex_t mtx; +#endif + + /* Number of bytes currently mapped, excluding retained memory. */ + atomic_zu_t mapped; /* Partially derived. */ + + /* + * Number of unused virtual memory bytes currently retained. Retained + * bytes are technically mapped (though always decommitted or purged), + * but they are excluded from the mapped statistic (above). + */ + atomic_zu_t retained; /* Derived. */ + + arena_stats_decay_t decay_dirty; + arena_stats_decay_t decay_muzzy; + + atomic_zu_t base; /* Derived. */ + atomic_zu_t internal; + atomic_zu_t resident; /* Derived. */ + atomic_zu_t metadata_thp; + + atomic_zu_t allocated_large; /* Derived. */ + arena_stats_u64_t nmalloc_large; /* Derived. */ + arena_stats_u64_t ndalloc_large; /* Derived. */ + arena_stats_u64_t nrequests_large; /* Derived. */ + + /* Number of bytes cached in tcache associated with this arena. */ + atomic_zu_t tcache_bytes; /* Derived. */ + + mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]; + + /* One element for each large size class. */ + arena_stats_large_t lstats[NSIZES - NBINS]; + + /* Arena uptime. */ + nstime_t uptime; +}; + +static inline bool +arena_stats_init(UNUSED tsdn_t *tsdn, arena_stats_t *arena_stats) { + if (config_debug) { + for (size_t i = 0; i < sizeof(arena_stats_t); i++) { + assert(((char *)arena_stats)[i] == 0); + } + } +#ifndef JEMALLOC_ATOMIC_U64 + if (malloc_mutex_init(&arena_stats->mtx, "arena_stats", + WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) { + return true; + } +#endif + /* Memory is zeroed, so there is no need to clear stats. 
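+	 * (The caller hands in zeroed memory for arena_stats_t; the
+	 * config_debug loop above verifies that byte by byte.)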
 */
+	return false;
+}
+
+static inline void
+arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
+#ifndef JEMALLOC_ATOMIC_U64
+	malloc_mutex_lock(tsdn, &arena_stats->mtx);
+#endif
+}
+
+static inline void
+arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
+#ifndef JEMALLOC_ATOMIC_U64
+	malloc_mutex_unlock(tsdn, &arena_stats->mtx);
+#endif
+}
+
+static inline uint64_t
+arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+    arena_stats_u64_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+	return atomic_load_u64(p, ATOMIC_RELAXED);
+#else
+	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+	return *p;
+#endif
+}
+
+static inline void
+arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+    arena_stats_u64_t *p, uint64_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+	atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
+#else
+	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+	*p += x;
+#endif
+}
+
+UNUSED static inline void
+arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+    arena_stats_u64_t *p, uint64_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+	UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
+	assert(r - x <= r);
+#else
+	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+	*p -= x;
+	assert(*p + x >= *p);
+#endif
+}
+
+/*
+ * Non-atomically sets *dst += src. *dst needs external synchronization.
+ * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
+ * the types here are atomic).
+ */
+static inline void
+arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
+#ifdef JEMALLOC_ATOMIC_U64
+	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
+	atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
+#else
+	*dst += src;
+#endif
+}
+
+static inline size_t
+arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+	return atomic_load_zu(p, ATOMIC_RELAXED);
+#else
+	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+	return atomic_load_zu(p, ATOMIC_RELAXED);
+#endif
+}
+
+static inline void
+arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
+    size_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+	atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
+#else
+	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
+	atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
+#endif
+}
+
+static inline void
+arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
+    size_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+	UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
+	assert(r - x <= r);
+#else
+	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
+	atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
+#endif
+}
+
+/* Like the _u64 variant, needs an externally synchronized *dst.
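+ * A hypothetical use (identifiers invented for illustration): a stats
+ * merge path that owns the destination exclusively might do
+ *
+ *   arena_stats_accum_zu(&merged->mapped,
+ *       atomic_load_zu(&src->mapped, ATOMIC_RELAXED));
+ *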
*/ +static inline void +arena_stats_accum_zu(atomic_zu_t *dst, size_t src) { + size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); + atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED); +} + +static inline void +arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats, + szind_t szind, uint64_t nrequests) { + arena_stats_lock(tsdn, arena_stats); + arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind - + NBINS].nrequests, nrequests); + arena_stats_unlock(tsdn, arena_stats); +} + +static inline void +arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) { + arena_stats_lock(tsdn, arena_stats); + arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size); + arena_stats_unlock(tsdn, arena_stats); +} + + +#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_structs_a.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_structs_a.h new file mode 100644 index 0000000..46aa77c --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_structs_a.h @@ -0,0 +1,11 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H +#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H + +#include "jemalloc/internal/bitmap.h" + +struct arena_slab_data_s { + /* Per region allocated/deallocated bitmap. */ + bitmap_t bitmap[BITMAP_GROUPS_MAX]; +}; + +#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_structs_b.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_structs_b.h new file mode 100644 index 0000000..38bc959 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_structs_b.h @@ -0,0 +1,229 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H +#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H + +#include "jemalloc/internal/arena_stats.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/bin.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/smoothstep.h" +#include "jemalloc/internal/ticker.h" + +struct arena_decay_s { + /* Synchronizes all non-atomic fields. */ + malloc_mutex_t mtx; + /* + * True if a thread is currently purging the extents associated with + * this decay structure. + */ + bool purging; + /* + * Approximate time in milliseconds from the creation of a set of unused + * dirty pages until an equivalent set of unused dirty pages is purged + * and/or reused. + */ + atomic_zd_t time_ms; + /* time / SMOOTHSTEP_NSTEPS. */ + nstime_t interval; + /* + * Time at which the current decay interval logically started. We do + * not actually advance to a new epoch until sometime after it starts + * because of scheduling and computation delays, and it is even possible + * to completely skip epochs. In all cases, during epoch advancement we + * merge all relevant activity into the most recently recorded epoch. + */ + nstime_t epoch; + /* Deadline randomness generator. */ + uint64_t jitter_state; + /* + * Deadline for current epoch. This is the sum of interval and per + * epoch jitter which is a uniform random variable in [0..interval). 
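+	 * (That is, the deadline equals the epoch start plus interval plus
+	 * jitter.)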
+	 * Epochs always advance by precise multiples of interval, but we
+	 * randomize the deadline to reduce the likelihood of arenas purging in
+	 * lockstep.
+	 */
+	nstime_t deadline;
+	/*
+	 * Number of unpurged pages at beginning of current epoch. During epoch
+	 * advancement we use the delta between arena->decay_*.nunpurged and
+	 * extents_npages_get(&arena->extents_*) to determine how many dirty
+	 * pages, if any, were generated.
+	 */
+	size_t nunpurged;
+	/*
+	 * Trailing log of how many unused dirty pages were generated during
+	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
+	 * element is the most recent epoch. Corresponding epoch times are
+	 * relative to epoch.
+	 */
+	size_t backlog[SMOOTHSTEP_NSTEPS];
+
+	/*
+	 * Pointer to associated stats. These stats are embedded directly in
+	 * the arena's stats due to how stats structures are shared between the
+	 * arena and ctl code.
+	 *
+	 * Synchronization: Same as associated arena's stats field. */
+	arena_stats_decay_t *stats;
+	/* Peak number of pages in associated extents. Used for debug only. */
+	uint64_t ceil_npages;
+};
+
+struct arena_s {
+	/*
+	 * Number of threads currently assigned to this arena. Each thread has
+	 * two distinct assignments, one for application-serving allocation, and
+	 * the other for internal metadata allocation. Internal metadata must
+	 * not be allocated from arenas explicitly created via the arenas.create
+	 * mallctl, because the arena.<i>.reset mallctl indiscriminately
+	 * discards all allocations for the affected arena.
+	 *
+	 * 0: Application allocation.
+	 * 1: Internal metadata allocation.
+	 *
+	 * Synchronization: atomic.
+	 */
+	atomic_u_t nthreads[2];
+
+	/*
+	 * When percpu_arena is enabled, to amortize the cost of reading /
+	 * updating the current CPU id, track the most recent thread accessing
+	 * this arena, and only read CPU if there is a mismatch.
+	 */
+	tsdn_t *last_thd;
+
+	/* Synchronization: internal. */
+	arena_stats_t stats;
+
+	/*
+	 * Lists of tcaches and cache_bin_array_descriptors for extant threads
+	 * associated with this arena. Stats from these are merged
+	 * incrementally, and at exit if opt_stats_print is enabled.
+	 *
+	 * Synchronization: tcache_ql_mtx.
+	 */
+	ql_head(tcache_t) tcache_ql;
+	ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
+	malloc_mutex_t tcache_ql_mtx;
+
+	/* Synchronization: internal. */
+	prof_accum_t prof_accum;
+	uint64_t prof_accumbytes;
+
+	/*
+	 * PRNG state for cache index randomization of large allocation base
+	 * pointers.
+	 *
+	 * Synchronization: atomic.
+	 */
+	atomic_zu_t offset_state;
+
+	/*
+	 * Extent serial number generator state.
+	 *
+	 * Synchronization: atomic.
+	 */
+	atomic_zu_t extent_sn_next;
+
+	/*
+	 * Represents a dss_prec_t, but atomically.
+	 *
+	 * Synchronization: atomic.
+	 */
+	atomic_u_t dss_prec;
+
+	/*
+	 * Number of pages in active extents.
+	 *
+	 * Synchronization: atomic.
+	 */
+	atomic_zu_t nactive;
+
+	/*
+	 * Extant large allocations.
+	 *
+	 * Synchronization: large_mtx.
+	 */
+	extent_list_t large;
+	/* Synchronizes all large allocation/update/deallocation. */
+	malloc_mutex_t large_mtx;
+
+	/*
+	 * Collections of extents that were previously allocated. These are
+	 * used when allocating extents, in an attempt to re-use address space.
+	 *
+	 * Synchronization: internal.
+	 */
+	extents_t extents_dirty;
+	extents_t extents_muzzy;
+	extents_t extents_retained;
+
+	/*
+	 * Decay-based purging state, responsible for scheduling extent state
+	 * transitions.
+	 *
+	 * Synchronization: internal.
+ */ + arena_decay_t decay_dirty; /* dirty --> muzzy */ + arena_decay_t decay_muzzy; /* muzzy --> retained */ + + /* + * Next extent size class in a growing series to use when satisfying a + * request via the extent hooks (only if opt_retain). This limits the + * number of disjoint virtual memory ranges so that extent merging can + * be effective even if multiple arenas' extent allocation requests are + * highly interleaved. + * + * retain_grow_limit is the max allowed size ind to expand (unless the + * required size is greater). Default is no limit, and controlled + * through mallctl only. + * + * Synchronization: extent_grow_mtx + */ + pszind_t extent_grow_next; + pszind_t retain_grow_limit; + malloc_mutex_t extent_grow_mtx; + + /* + * Available extent structures that were allocated via + * base_alloc_extent(). + * + * Synchronization: extent_avail_mtx. + */ + extent_tree_t extent_avail; + malloc_mutex_t extent_avail_mtx; + + /* + * bins is used to store heaps of free regions. + * + * Synchronization: internal. + */ + bin_t bins[NBINS]; + + /* + * Base allocator, from which arena metadata are allocated. + * + * Synchronization: internal. + */ + base_t *base; + /* Used to determine uptime. Read-only after initialization. */ + nstime_t create_time; +}; + +/* Used in conjunction with tsd for fast arena-related context lookup. */ +struct arena_tdata_s { + ticker_t decay_ticker; +}; + +/* Used to pass rtree lookup context down the path. */ +struct alloc_ctx_s { + szind_t szind; + bool slab; +}; + +#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_types.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_types.h new file mode 100644 index 0000000..70001b5 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/arena_types.h @@ -0,0 +1,43 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H +#define JEMALLOC_INTERNAL_ARENA_TYPES_H + +/* Maximum number of regions in one slab. */ +#define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN) +#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS) + +/* Default decay times in milliseconds. */ +#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000) +#define MUZZY_DECAY_MS_DEFAULT ZD(10 * 1000) +/* Number of event ticks between time checks. */ +#define DECAY_NTICKS_PER_UPDATE 1000 + +typedef struct arena_slab_data_s arena_slab_data_t; +typedef struct arena_decay_s arena_decay_t; +typedef struct arena_s arena_t; +typedef struct arena_tdata_s arena_tdata_t; +typedef struct alloc_ctx_s alloc_ctx_t; + +typedef enum { + percpu_arena_mode_names_base = 0, /* Used for options processing. */ + + /* + * *_uninit are used only during bootstrapping, and must correspond + * to initialized variant plus percpu_arena_mode_enabled_base. + */ + percpu_arena_uninit = 0, + per_phycpu_arena_uninit = 1, + + /* All non-disabled modes must come after percpu_arena_disabled. */ + percpu_arena_disabled = 2, + + percpu_arena_mode_names_limit = 3, /* Used for options processing. */ + percpu_arena_mode_enabled_base = 3, + + percpu_arena = 3, + per_phycpu_arena = 4 /* Hyper threads share arena. 
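+	 * (i.e., threads on the same physical core share one arena.)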
 */
+} percpu_arena_mode_t;
+
+#define PERCPU_ARENA_ENABLED(m)	((m) >= percpu_arena_mode_enabled_base)
+#define PERCPU_ARENA_DEFAULT	percpu_arena_disabled
+
+#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/assert.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/assert.h
new file mode 100644
index 0000000..be4d45b
--- /dev/null
+++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/assert.h
@@ -0,0 +1,56 @@
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/util.h"
+
+/*
+ * Define a custom assert() in order to reduce the chances of deadlock during
+ * assertion failure.
+ */
+#ifndef assert
+#define assert(e) do {						\
+	if (unlikely(config_debug && !(e))) {			\
+		malloc_printf(					\
+		    "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n",	\
+		    __FILE__, __LINE__, #e);			\
+		abort();					\
+	}							\
+} while (0)
+#endif
+
+#ifndef not_reached
+#define not_reached() do {					\
+	if (config_debug) {					\
+		malloc_printf(					\
+		    "<jemalloc>: %s:%d: Unreachable code reached\n",	\
+		    __FILE__, __LINE__);			\
+		abort();					\
+	}							\
+	unreachable();						\
+} while (0)
+#endif
+
+#ifndef not_implemented
+#define not_implemented() do {					\
+	if (config_debug) {					\
+		malloc_printf("<jemalloc>: %s:%d: Not implemented\n",	\
+		    __FILE__, __LINE__);			\
+		abort();					\
+	}							\
+} while (0)
+#endif
+
+#ifndef assert_not_implemented
+#define assert_not_implemented(e) do {				\
+	if (unlikely(config_debug && !(e))) {			\
+		not_implemented();				\
+	}							\
+} while (0)
+#endif
+
+/* Use to assert a particular configuration, e.g., cassert(config_debug). */
+#ifndef cassert
+#define cassert(c) do {						\
+	if (unlikely(!(c))) {					\
+		not_reached();					\
+	}							\
+} while (0)
+#endif
diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic.h
new file mode 100644
index 0000000..adadb1a
--- /dev/null
+++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic.h
@@ -0,0 +1,77 @@
+#ifndef JEMALLOC_INTERNAL_ATOMIC_H
+#define JEMALLOC_INTERNAL_ATOMIC_H
+
+#define ATOMIC_INLINE static inline
+
+#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
+#  include "jemalloc/internal/atomic_gcc_atomic.h"
+#elif defined(JEMALLOC_GCC_SYNC_ATOMICS)
+#  include "jemalloc/internal/atomic_gcc_sync.h"
+#elif defined(_MSC_VER)
+#  include "jemalloc/internal/atomic_msvc.h"
+#elif defined(JEMALLOC_C11_ATOMICS)
+#  include "jemalloc/internal/atomic_c11.h"
+#else
+#  error "Don't have atomics implemented on this platform."
+#endif
+
+/*
+ * This header gives more or less a backport of C11 atomics. The user can write
+ * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate
+ * counterparts of the C11 atomic functions for type, as so:
+ *   JEMALLOC_GENERATE_ATOMICS(int *, pi, 3);
+ * and then write things like:
+ *   int *some_ptr;
+ *   atomic_pi_t atomic_ptr_to_int;
+ *   atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED);
+ *   int *prev_value = atomic_exchange_pi(&atomic_ptr_to_int, NULL, ATOMIC_ACQ_REL);
+ *   assert(some_ptr == prev_value);
+ * and expect things to work in the obvious way.
+ *
+ * Also included (with naming differences to avoid conflicts with the standard
+ * library):
+ *   atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence).
+ *   ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT).
+ */
+
+/*
+ * Pure convenience, so that we don't have to type "atomic_memory_order_"
+ * quite so often.
+ */
+#define ATOMIC_RELAXED atomic_memory_order_relaxed
+#define ATOMIC_ACQUIRE atomic_memory_order_acquire
+#define ATOMIC_RELEASE atomic_memory_order_release
+#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
+#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
+
+/*
+ * Not all platforms have 64-bit atomics. If we do, this #define exposes that
+ * fact.
+ */
+#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
+#  define JEMALLOC_ATOMIC_U64
+#endif
+
+JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
+
+/*
+ * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only
+ * platform that actually needs to know the size, MSVC.
+ */
+JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
+
+JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
+
+JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
+
+JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
+
+JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)
+
+#ifdef JEMALLOC_ATOMIC_U64
+JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
+#endif
+
+#undef ATOMIC_INLINE
+
+#endif /* JEMALLOC_INTERNAL_ATOMIC_H */
diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_c11.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_c11.h
new file mode 100644
index 0000000..a5f9313
--- /dev/null
+++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_c11.h
@@ -0,0 +1,97 @@
+#ifndef JEMALLOC_INTERNAL_ATOMIC_C11_H
+#define JEMALLOC_INTERNAL_ATOMIC_C11_H
+
+#include <stdatomic.h>
+
+#define ATOMIC_INIT(...) ATOMIC_VAR_INIT(__VA_ARGS__)
+
+#define atomic_memory_order_t memory_order
+#define atomic_memory_order_relaxed memory_order_relaxed
+#define atomic_memory_order_acquire memory_order_acquire
+#define atomic_memory_order_release memory_order_release
+#define atomic_memory_order_acq_rel memory_order_acq_rel
+#define atomic_memory_order_seq_cst memory_order_seq_cst
+
+#define atomic_fence atomic_thread_fence
+
+#define JEMALLOC_GENERATE_ATOMICS(type, short_type,		\
+    /* unused */ lg_size)					\
+typedef _Atomic(type) atomic_##short_type##_t;			\
+								\
+ATOMIC_INLINE type						\
+atomic_load_##short_type(const atomic_##short_type##_t *a,	\
+    atomic_memory_order_t mo) {					\
+	/*							\
+	 * A strict interpretation of the C standard prevents	\
+	 * atomic_load from taking a const argument, but it's	\
+	 * convenient for our purposes. This cast is a workaround.
\ + */ \ + atomic_##short_type##_t* a_nonconst = \ + (atomic_##short_type##_t*)a; \ + return atomic_load_explicit(a_nonconst, mo); \ +} \ + \ +ATOMIC_INLINE void \ +atomic_store_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + atomic_store_explicit(a, val, mo); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return atomic_exchange_explicit(a, val, mo); \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + return atomic_compare_exchange_weak_explicit(a, expected, \ + desired, success_mo, failure_mo); \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + return atomic_compare_exchange_strong_explicit(a, expected, \ + desired, success_mo, failure_mo); \ +} + +/* + * Integral types have some special operations available that non-integral ones + * lack. + */ +#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ + \ +ATOMIC_INLINE type \ +atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return atomic_fetch_add_explicit(a, val, mo); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return atomic_fetch_sub_explicit(a, val, mo); \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return atomic_fetch_and_explicit(a, val, mo); \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return atomic_fetch_or_explicit(a, val, mo); \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return atomic_fetch_xor_explicit(a, val, mo); \ +} + +#endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h new file mode 100644 index 0000000..6b73a14 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h @@ -0,0 +1,127 @@ +#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H +#define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H + +#include "jemalloc/internal/assert.h" + +#define ATOMIC_INIT(...) {__VA_ARGS__} + +typedef enum { + atomic_memory_order_relaxed, + atomic_memory_order_acquire, + atomic_memory_order_release, + atomic_memory_order_acq_rel, + atomic_memory_order_seq_cst +} atomic_memory_order_t; + +ATOMIC_INLINE int +atomic_enum_to_builtin(atomic_memory_order_t mo) { + switch (mo) { + case atomic_memory_order_relaxed: + return __ATOMIC_RELAXED; + case atomic_memory_order_acquire: + return __ATOMIC_ACQUIRE; + case atomic_memory_order_release: + return __ATOMIC_RELEASE; + case atomic_memory_order_acq_rel: + return __ATOMIC_ACQ_REL; + case atomic_memory_order_seq_cst: + return __ATOMIC_SEQ_CST; + } + /* Can't happen; the switch is exhaustive. 
*/ + not_reached(); +} + +ATOMIC_INLINE void +atomic_fence(atomic_memory_order_t mo) { + __atomic_thread_fence(atomic_enum_to_builtin(mo)); +} + +#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +typedef struct { \ + type repr; \ +} atomic_##short_type##_t; \ + \ +ATOMIC_INLINE type \ +atomic_load_##short_type(const atomic_##short_type##_t *a, \ + atomic_memory_order_t mo) { \ + type result; \ + __atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \ + return result; \ +} \ + \ +ATOMIC_INLINE void \ +atomic_store_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + __atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + type result; \ + __atomic_exchange(&a->repr, &val, &result, \ + atomic_enum_to_builtin(mo)); \ + return result; \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + return __atomic_compare_exchange(&a->repr, expected, &desired, \ + true, atomic_enum_to_builtin(success_mo), \ + atomic_enum_to_builtin(failure_mo)); \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + return __atomic_compare_exchange(&a->repr, expected, &desired, \ + false, \ + atomic_enum_to_builtin(success_mo), \ + atomic_enum_to_builtin(failure_mo)); \ +} + + +#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ + \ +ATOMIC_INLINE type \ +atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __atomic_fetch_add(&a->repr, val, \ + atomic_enum_to_builtin(mo)); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __atomic_fetch_sub(&a->repr, val, \ + atomic_enum_to_builtin(mo)); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __atomic_fetch_and(&a->repr, val, \ + atomic_enum_to_builtin(mo)); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __atomic_fetch_or(&a->repr, val, \ + atomic_enum_to_builtin(mo)); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __atomic_fetch_xor(&a->repr, val, \ + atomic_enum_to_builtin(mo)); \ +} + +#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h new file mode 100644 index 0000000..30846e4 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h @@ -0,0 +1,191 @@ +#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H +#define JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H + +#define ATOMIC_INIT(...) 
{__VA_ARGS__} + +typedef enum { + atomic_memory_order_relaxed, + atomic_memory_order_acquire, + atomic_memory_order_release, + atomic_memory_order_acq_rel, + atomic_memory_order_seq_cst +} atomic_memory_order_t; + +ATOMIC_INLINE void +atomic_fence(atomic_memory_order_t mo) { + /* Easy cases first: no barrier, and full barrier. */ + if (mo == atomic_memory_order_relaxed) { + asm volatile("" ::: "memory"); + return; + } + if (mo == atomic_memory_order_seq_cst) { + asm volatile("" ::: "memory"); + __sync_synchronize(); + asm volatile("" ::: "memory"); + return; + } + asm volatile("" ::: "memory"); +# if defined(__i386__) || defined(__x86_64__) + /* This is implicit on x86. */ +# elif defined(__ppc__) + asm volatile("lwsync"); +# elif defined(__sparc__) && defined(__arch64__) + if (mo == atomic_memory_order_acquire) { + asm volatile("membar #LoadLoad | #LoadStore"); + } else if (mo == atomic_memory_order_release) { + asm volatile("membar #LoadStore | #StoreStore"); + } else { + asm volatile("membar #LoadLoad | #LoadStore | #StoreStore"); + } +# else + __sync_synchronize(); +# endif + asm volatile("" ::: "memory"); +} + +/* + * A correct implementation of seq_cst loads and stores on weakly ordered + * architectures could do either of the following: + * 1. store() is weak-fence -> store -> strong fence, load() is load -> + * strong-fence. + * 2. store() is strong-fence -> store, load() is strong-fence -> load -> + * weak-fence. + * The tricky thing is, load() and store() above can be the load or store + * portions of a gcc __sync builtin, so we have to follow GCC's lead, which + * means going with strategy 2. + * On strongly ordered architectures, the natural strategy is to stick a strong + * fence after seq_cst stores, and have naked loads. So we want the strong + * fences in different places on different architectures. + * atomic_pre_sc_load_fence and atomic_post_sc_store_fence allow us to + * accomplish this. + */ + +ATOMIC_INLINE void +atomic_pre_sc_load_fence() { +# if defined(__i386__) || defined(__x86_64__) || \ + (defined(__sparc__) && defined(__arch64__)) + atomic_fence(atomic_memory_order_relaxed); +# else + atomic_fence(atomic_memory_order_seq_cst); +# endif +} + +ATOMIC_INLINE void +atomic_post_sc_store_fence() { +# if defined(__i386__) || defined(__x86_64__) || \ + (defined(__sparc__) && defined(__arch64__)) + atomic_fence(atomic_memory_order_seq_cst); +# else + atomic_fence(atomic_memory_order_relaxed); +# endif + +} + +#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +typedef struct { \ + type volatile repr; \ +} atomic_##short_type##_t; \ + \ +ATOMIC_INLINE type \ +atomic_load_##short_type(const atomic_##short_type##_t *a, \ + atomic_memory_order_t mo) { \ + if (mo == atomic_memory_order_seq_cst) { \ + atomic_pre_sc_load_fence(); \ + } \ + type result = a->repr; \ + if (mo != atomic_memory_order_relaxed) { \ + atomic_fence(atomic_memory_order_acquire); \ + } \ + return result; \ +} \ + \ +ATOMIC_INLINE void \ +atomic_store_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + if (mo != atomic_memory_order_relaxed) { \ + atomic_fence(atomic_memory_order_release); \ + } \ + a->repr = val; \ + if (mo == atomic_memory_order_seq_cst) { \ + atomic_post_sc_store_fence(); \ + } \ +} \ + \ +ATOMIC_INLINE type \ +atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + /* \ + * Because of FreeBSD, we care about gcc 4.2, which doesn't have\ + * an atomic exchange builtin. 
We fake it with a CAS loop. \ + */ \ + while (true) { \ + type old = a->repr; \ + if (__sync_bool_compare_and_swap(&a->repr, old, val)) { \ + return old; \ + } \ + } \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ + desired); \ + if (prev == *expected) { \ + return true; \ + } else { \ + *expected = prev; \ + return false; \ + } \ +} \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ + desired); \ + if (prev == *expected) { \ + return true; \ + } else { \ + *expected = prev; \ + return false; \ + } \ +} + +#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ + \ +ATOMIC_INLINE type \ +atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __sync_fetch_and_add(&a->repr, val); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __sync_fetch_and_sub(&a->repr, val); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __sync_fetch_and_and(&a->repr, val); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __sync_fetch_and_or(&a->repr, val); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __sync_fetch_and_xor(&a->repr, val); \ +} + +#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_msvc.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_msvc.h new file mode 100644 index 0000000..67057ce --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/atomic_msvc.h @@ -0,0 +1,158 @@ +#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H +#define JEMALLOC_INTERNAL_ATOMIC_MSVC_H + +#define ATOMIC_INIT(...) {__VA_ARGS__} + +typedef enum { + atomic_memory_order_relaxed, + atomic_memory_order_acquire, + atomic_memory_order_release, + atomic_memory_order_acq_rel, + atomic_memory_order_seq_cst +} atomic_memory_order_t; + +typedef char atomic_repr_0_t; +typedef short atomic_repr_1_t; +typedef long atomic_repr_2_t; +typedef __int64 atomic_repr_3_t; + +ATOMIC_INLINE void +atomic_fence(atomic_memory_order_t mo) { + _ReadWriteBarrier(); +# if defined(_M_ARM) || defined(_M_ARM64) + /* ARM needs a barrier for everything but relaxed. */ + if (mo != atomic_memory_order_relaxed) { + MemoryBarrier(); + } +# elif defined(_M_IX86) || defined (_M_X64) + /* x86 needs a barrier only for seq_cst. */ + if (mo == atomic_memory_order_seq_cst) { + MemoryBarrier(); + } +# else +# error "Don't know how to create atomics for this platform for MSVC." 
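+	/* Deliberately no fallback: failing the build beats miscompiling. */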
+# endif + _ReadWriteBarrier(); +} + +#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t + +#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b) +#define ATOMIC_RAW_CONCAT(a, b) a ## b + +#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \ + base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size)) + +#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \ + ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size) + +#define ATOMIC_INTERLOCKED_SUFFIX_0 8 +#define ATOMIC_INTERLOCKED_SUFFIX_1 16 +#define ATOMIC_INTERLOCKED_SUFFIX_2 +#define ATOMIC_INTERLOCKED_SUFFIX_3 64 + +#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \ +typedef struct { \ + ATOMIC_INTERLOCKED_REPR(lg_size) repr; \ +} atomic_##short_type##_t; \ + \ +ATOMIC_INLINE type \ +atomic_load_##short_type(const atomic_##short_type##_t *a, \ + atomic_memory_order_t mo) { \ + ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \ + if (mo != atomic_memory_order_relaxed) { \ + atomic_fence(atomic_memory_order_acquire); \ + } \ + return (type) ret; \ +} \ + \ +ATOMIC_INLINE void \ +atomic_store_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + if (mo != atomic_memory_order_relaxed) { \ + atomic_fence(atomic_memory_order_release); \ + } \ + a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \ + if (mo == atomic_memory_order_seq_cst) { \ + atomic_fence(atomic_memory_order_seq_cst); \ + } \ +} \ + \ +ATOMIC_INLINE type \ +atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \ + lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + ATOMIC_INTERLOCKED_REPR(lg_size) e = \ + (ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \ + ATOMIC_INTERLOCKED_REPR(lg_size) d = \ + (ATOMIC_INTERLOCKED_REPR(lg_size))desired; \ + ATOMIC_INTERLOCKED_REPR(lg_size) old = \ + ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \ + lg_size)(&a->repr, d, e); \ + if (old == e) { \ + return true; \ + } else { \ + *expected = (type)old; \ + return false; \ + } \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + /* We implement the weak version with strong semantics. */ \ + return atomic_compare_exchange_weak_##short_type(a, expected, \ + desired, success_mo, failure_mo); \ +} + + +#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \ +JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \ + \ +ATOMIC_INLINE type \ +atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, \ + lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + /* \ + * MSVC warns on negation of unsigned operands, but for us it \ + * gives exactly the right semantics (MAX_TYPE + 1 - operand). 
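+	 * For example, with a 32-bit type, -(uint32_t)1 equals \
+	 * 4294967295 (i.e. 2^32 - 1), so adding it subtracts 1 \
+	 * modulo 2^32. \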
\ + */ \ + __pragma(warning(push)) \ + __pragma(warning(disable: 4146)) \ + return atomic_fetch_add_##short_type(a, -val, mo); \ + __pragma(warning(pop)) \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)( \ + &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)( \ + &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)( \ + &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ +} + +#endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/background_thread_externs.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/background_thread_externs.h new file mode 100644 index 0000000..3209aa4 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/background_thread_externs.h @@ -0,0 +1,33 @@ +#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H +#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H + +extern bool opt_background_thread; +extern size_t opt_max_background_threads; +extern malloc_mutex_t background_thread_lock; +extern atomic_b_t background_thread_enabled_state; +extern size_t n_background_threads; +extern size_t max_background_threads; +extern background_thread_info_t *background_thread_info; +extern bool can_enable_background_thread; + +bool background_thread_create(tsd_t *tsd, unsigned arena_ind); +bool background_threads_enable(tsd_t *tsd); +bool background_threads_disable(tsd_t *tsd); +void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, size_t npages_new); +void background_thread_prefork0(tsdn_t *tsdn); +void background_thread_prefork1(tsdn_t *tsdn); +void background_thread_postfork_parent(tsdn_t *tsdn); +void background_thread_postfork_child(tsdn_t *tsdn); +bool background_thread_stats_read(tsdn_t *tsdn, + background_thread_stats_t *stats); +void background_thread_ctl_init(tsdn_t *tsdn); + +#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER +extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *, + void *(*)(void *), void *__restrict); +#endif +bool background_thread_boot0(void); +bool background_thread_boot1(tsdn_t *tsdn); + +#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/background_thread_inlines.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/background_thread_inlines.h new file mode 100644 index 0000000..ef50231 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/background_thread_inlines.h @@ -0,0 +1,57 @@ +#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H +#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H + +JEMALLOC_ALWAYS_INLINE bool +background_thread_enabled(void) { + return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED); +} + +JEMALLOC_ALWAYS_INLINE void +background_thread_enabled_set(tsdn_t *tsdn, bool state) { + 
malloc_mutex_assert_owner(tsdn, &background_thread_lock); + atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED); +} + +JEMALLOC_ALWAYS_INLINE background_thread_info_t * +arena_background_thread_info_get(arena_t *arena) { + unsigned arena_ind = arena_ind_get(arena); + return &background_thread_info[arena_ind % ncpus]; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +background_thread_wakeup_time_get(background_thread_info_t *info) { + uint64_t next_wakeup = nstime_ns(&info->next_wakeup); + assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) == + (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP)); + return next_wakeup; +} + +JEMALLOC_ALWAYS_INLINE void +background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info, + uint64_t wakeup_time) { + malloc_mutex_assert_owner(tsdn, &info->mtx); + atomic_store_b(&info->indefinite_sleep, + wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE); + nstime_init(&info->next_wakeup, wakeup_time); +} + +JEMALLOC_ALWAYS_INLINE bool +background_thread_indefinite_sleep(background_thread_info_t *info) { + return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE); +} + +JEMALLOC_ALWAYS_INLINE void +arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena, + bool is_background_thread) { + if (!background_thread_enabled() || is_background_thread) { + return; + } + background_thread_info_t *info = + arena_background_thread_info_get(arena); + if (background_thread_indefinite_sleep(info)) { + background_thread_interval_check(tsdn, arena, + &arena->decay_dirty, 0); + } +} + +#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/background_thread_structs.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/background_thread_structs.h new file mode 100644 index 0000000..c1107df --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/background_thread_structs.h @@ -0,0 +1,53 @@ +#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H +#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H + +/* This file really combines "structs" and "types", but only transitionally. */ + +#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK) +# define JEMALLOC_PTHREAD_CREATE_WRAPPER +#endif + +#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX +#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT + +typedef enum { + background_thread_stopped, + background_thread_started, + /* Thread waits on the global lock when paused (for arena_reset). */ + background_thread_paused, +} background_thread_state_t; + +struct background_thread_info_s { +#ifdef JEMALLOC_BACKGROUND_THREAD + /* Background thread is pthread specific. */ + pthread_t thread; + pthread_cond_t cond; +#endif + malloc_mutex_t mtx; + background_thread_state_t state; + /* When true, it means no wakeup scheduled. */ + atomic_b_t indefinite_sleep; + /* Next scheduled wakeup time (absolute time in ns). */ + nstime_t next_wakeup; + /* + * Since the last background thread run, newly added number of pages + * that need to be purged by the next wakeup. This is adjusted on + * epoch advance, and is used to determine whether we should signal the + * background thread to wake up earlier. + */ + size_t npages_to_purge_new; + /* Stats: total number of runs since started. */ + uint64_t tot_n_runs; + /* Stats: total sleep time since started. 
*/ + nstime_t tot_sleep_time; +}; +typedef struct background_thread_info_s background_thread_info_t; + +struct background_thread_stats_s { + size_t num_threads; + uint64_t num_runs; + nstime_t run_interval; +}; +typedef struct background_thread_stats_s background_thread_stats_t; + +#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/base.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/base.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base.h diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_externs.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_externs.h new file mode 100644 index 0000000..7b705c9 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_externs.h @@ -0,0 +1,22 @@ +#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H +#define JEMALLOC_INTERNAL_BASE_EXTERNS_H + +extern metadata_thp_mode_t opt_metadata_thp; +extern const char *metadata_thp_mode_names[]; + +base_t *b0get(void); +base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); +void base_delete(tsdn_t *tsdn, base_t *base); +extent_hooks_t *base_extent_hooks_get(base_t *base); +extent_hooks_t *base_extent_hooks_set(base_t *base, + extent_hooks_t *extent_hooks); +void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment); +extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base); +void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, + size_t *resident, size_t *mapped, size_t *n_thp); +void base_prefork(tsdn_t *tsdn, base_t *base); +void base_postfork_parent(tsdn_t *tsdn, base_t *base); +void base_postfork_child(tsdn_t *tsdn, base_t *base); +bool base_boot(tsdn_t *tsdn); + +#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_inlines.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_inlines.h new file mode 100644 index 0000000..aec0e2e --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_inlines.h @@ -0,0 +1,13 @@ +#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H +#define JEMALLOC_INTERNAL_BASE_INLINES_H + +static inline unsigned +base_ind_get(const base_t *base) { + return base->ind; +} + +static inline bool +metadata_thp_enabled(void) { + return (opt_metadata_thp != metadata_thp_disabled); +} +#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_structs.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_structs.h new file mode 100644 index 0000000..2102247 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_structs.h @@ -0,0 +1,59 @@ +#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H +#define JEMALLOC_INTERNAL_BASE_STRUCTS_H + +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/size_classes.h" + +/* Embedded at the beginning of every block of base-managed virtual memory. */ +struct base_block_s { + /* Total size of block's virtual memory mapping. 
*/ + size_t size; + + /* Next block in list of base's blocks. */ + base_block_t *next; + + /* Tracks unused trailing space. */ + extent_t extent; +}; + +struct base_s { + /* Associated arena's index within the arenas array. */ + unsigned ind; + + /* + * User-configurable extent hook functions. Points to an + * extent_hooks_t. + */ + atomic_p_t extent_hooks; + + /* Protects base_alloc() and base_stats_get() operations. */ + malloc_mutex_t mtx; + + /* Using THP when true (metadata_thp auto mode). */ + bool auto_thp_switched; + /* + * Most recent size class in the series of increasingly large base + * extents. Logarithmic spacing between subsequent allocations ensures + * that the total number of distinct mappings remains small. + */ + pszind_t pind_last; + + /* Serial number generation state. */ + size_t extent_sn_next; + + /* Chain of all blocks associated with base. */ + base_block_t *blocks; + + /* Heap of extents that track unused trailing space within blocks. */ + extent_heap_t avail[NSIZES]; + + /* Stats, only maintained if config_stats. */ + size_t allocated; + size_t resident; + size_t mapped; + /* Number of THP regions touched. */ + size_t n_thp; +}; + +#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_types.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_types.h new file mode 100644 index 0000000..b6db77d --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/base_types.h @@ -0,0 +1,33 @@ +#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H +#define JEMALLOC_INTERNAL_BASE_TYPES_H + +typedef struct base_block_s base_block_t; +typedef struct base_s base_t; + +#define METADATA_THP_DEFAULT metadata_thp_disabled + +/* + * In auto mode, arenas switch to huge pages for the base allocator on the + * second base block. a0 switches to thp on the 5th block (after 20 megabytes + * of metadata), since more metadata (e.g. rtree nodes) come from a0's base. + */ + +#define BASE_AUTO_THP_THRESHOLD 2 +#define BASE_AUTO_THP_THRESHOLD_A0 5 + +typedef enum { + metadata_thp_disabled = 0, + /* + * Lazily enable hugepage for metadata. To avoid high RSS caused by THP + * + low usage arena (i.e. THP becomes a significant percentage), the + * "auto" option only starts using THP after a base allocator used up + * the first THP region. Starting from the second hugepage (in a single + * arena), "auto" behaves the same as "always", i.e. madvise hugepage + * right away. + */ + metadata_thp_auto = 1, + metadata_thp_always = 2, + metadata_thp_mode_limit = 3 +} metadata_thp_mode_t; + +#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bin.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bin.h new file mode 100644 index 0000000..9b416ad --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bin.h @@ -0,0 +1,106 @@ +#ifndef JEMALLOC_INTERNAL_BIN_H +#define JEMALLOC_INTERNAL_BIN_H + +#include "jemalloc/internal/extent_types.h" +#include "jemalloc/internal/extent_structs.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/bin_stats.h" + +/* + * A bin contains a set of extents that are currently being used for slab + * allocations. 
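+ * (A hypothetical example: an 8-byte size class backed by 4 KiB slabs
+ * would have reg_size == 8, slab_size == 4096, and nregs == 512.)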
+ */ + +/* + * Read-only information associated with each element of arena_t's bins array + * is stored separately, partly to reduce memory usage (only one copy, rather + * than one per arena), but mainly to avoid false cacheline sharing. + * + * Each slab has the following layout: + * + * /--------------------\ + * | region 0 | + * |--------------------| + * | region 1 | + * |--------------------| + * | ... | + * | ... | + * | ... | + * |--------------------| + * | region nregs-1 | + * \--------------------/ + */ +typedef struct bin_info_s bin_info_t; +struct bin_info_s { + /* Size of regions in a slab for this bin's size class. */ + size_t reg_size; + + /* Total size of a slab for this bin's size class. */ + size_t slab_size; + + /* Total number of regions in a slab for this bin's size class. */ + uint32_t nregs; + + /* + * Metadata used to manipulate bitmaps for slabs associated with this + * bin. + */ + bitmap_info_t bitmap_info; +}; + +extern const bin_info_t bin_infos[NBINS]; + + +typedef struct bin_s bin_t; +struct bin_s { + /* All operations on bin_t fields require lock ownership. */ + malloc_mutex_t lock; + + /* + * Current slab being used to service allocations of this bin's size + * class. slabcur is independent of slabs_{nonfull,full}; whenever + * slabcur is reassigned, the previous slab must be deallocated or + * inserted into slabs_{nonfull,full}. + */ + extent_t *slabcur; + + /* + * Heap of non-full slabs. This heap is used to assure that new + * allocations come from the non-full slab that is oldest/lowest in + * memory. + */ + extent_heap_t slabs_nonfull; + + /* List used to track full slabs. */ + extent_list_t slabs_full; + + /* Bin statistics. */ + bin_stats_t stats; +}; + +/* Initializes a bin to empty. Returns true on error. */ +bool bin_init(bin_t *bin); + +/* Forking. */ +void bin_prefork(tsdn_t *tsdn, bin_t *bin); +void bin_postfork_parent(tsdn_t *tsdn, bin_t *bin); +void bin_postfork_child(tsdn_t *tsdn, bin_t *bin); + +/* Stats. */ +static inline void +bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) { + malloc_mutex_lock(tsdn, &bin->lock); + malloc_mutex_prof_read(tsdn, &dst_bin_stats->mutex_data, &bin->lock); + dst_bin_stats->nmalloc += bin->stats.nmalloc; + dst_bin_stats->ndalloc += bin->stats.ndalloc; + dst_bin_stats->nrequests += bin->stats.nrequests; + dst_bin_stats->curregs += bin->stats.curregs; + dst_bin_stats->nfills += bin->stats.nfills; + dst_bin_stats->nflushes += bin->stats.nflushes; + dst_bin_stats->nslabs += bin->stats.nslabs; + dst_bin_stats->reslabs += bin->stats.reslabs; + dst_bin_stats->curslabs += bin->stats.curslabs; + malloc_mutex_unlock(tsdn, &bin->lock); +} + +#endif /* JEMALLOC_INTERNAL_BIN_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bin_stats.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bin_stats.h new file mode 100644 index 0000000..86e673e --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bin_stats.h @@ -0,0 +1,51 @@ +#ifndef JEMALLOC_INTERNAL_BIN_STATS_H +#define JEMALLOC_INTERNAL_BIN_STATS_H + +#include "jemalloc/internal/mutex_prof.h" + +typedef struct bin_stats_s bin_stats_t; +struct bin_stats_s { + /* + * Total number of allocation/deallocation requests served directly by + * the bin. Note that tcache may allocate an object, then recycle it + * many times, resulting many increments to nrequests, but only one + * each to nmalloc and ndalloc. 
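+	 * A hypothetical trace of one object: a tcache fill is served
+	 * by the bin (nmalloc++); repeated malloc/free then recycle it
+	 * inside the tcache (nrequests grows, nmalloc/ndalloc
+	 * unchanged); a later tcache flush returns it to the bin
+	 * (ndalloc++).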
+ */ + uint64_t nmalloc; + uint64_t ndalloc; + + /* + * Number of allocation requests that correspond to the size of this + * bin. This includes requests served by tcache, though tcache only + * periodically merges into this counter. + */ + uint64_t nrequests; + + /* + * Current number of regions of this size class, including regions + * currently cached by tcache. + */ + size_t curregs; + + /* Number of tcache fills from this bin. */ + uint64_t nfills; + + /* Number of tcache flushes to this bin. */ + uint64_t nflushes; + + /* Total number of slabs created for this bin's size class. */ + uint64_t nslabs; + + /* + * Total number of slabs reused by extracting them from the slabs heap + * for this bin's size class. + */ + uint64_t reslabs; + + /* Current number of slabs in this bin. */ + size_t curslabs; + + mutex_prof_data_t mutex_data; +}; + +#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bit_util.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bit_util.h new file mode 100644 index 0000000..8d078a8 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bit_util.h @@ -0,0 +1,165 @@ +#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H +#define JEMALLOC_INTERNAL_BIT_UTIL_H + +#include "jemalloc/internal/assert.h" + +#define BIT_UTIL_INLINE static inline + +/* Sanity check. */ +#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \ + || !defined(JEMALLOC_INTERNAL_FFS) +# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure +#endif + + +BIT_UTIL_INLINE unsigned +ffs_llu(unsigned long long bitmap) { + return JEMALLOC_INTERNAL_FFSLL(bitmap); +} + +BIT_UTIL_INLINE unsigned +ffs_lu(unsigned long bitmap) { + return JEMALLOC_INTERNAL_FFSL(bitmap); +} + +BIT_UTIL_INLINE unsigned +ffs_u(unsigned bitmap) { + return JEMALLOC_INTERNAL_FFS(bitmap); +} + +BIT_UTIL_INLINE unsigned +ffs_zu(size_t bitmap) { +#if LG_SIZEOF_PTR == LG_SIZEOF_INT + return ffs_u(bitmap); +#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG + return ffs_lu(bitmap); +#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG + return ffs_llu(bitmap); +#else +#error No implementation for size_t ffs() +#endif +} + +BIT_UTIL_INLINE unsigned +ffs_u64(uint64_t bitmap) { +#if LG_SIZEOF_LONG == 3 + return ffs_lu(bitmap); +#elif LG_SIZEOF_LONG_LONG == 3 + return ffs_llu(bitmap); +#else +#error No implementation for 64-bit ffs() +#endif +} + +BIT_UTIL_INLINE unsigned +ffs_u32(uint32_t bitmap) { +#if LG_SIZEOF_INT == 2 + return ffs_u(bitmap); +#else +#error No implementation for 32-bit ffs() +#endif + return ffs_u(bitmap); +} + +BIT_UTIL_INLINE uint64_t +pow2_ceil_u64(uint64_t x) { + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x |= x >> 32; + x++; + return x; +} + +BIT_UTIL_INLINE uint32_t +pow2_ceil_u32(uint32_t x) { + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x++; + return x; +} + +/* Compute the smallest power of 2 that is >= x. */ +BIT_UTIL_INLINE size_t +pow2_ceil_zu(size_t x) { +#if (LG_SIZEOF_PTR == 3) + return pow2_ceil_u64(x); +#else + return pow2_ceil_u32(x); +#endif +} + +#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) +BIT_UTIL_INLINE unsigned +lg_floor(size_t x) { + size_t ret; + assert(x != 0); + + asm ("bsr %1, %0" + : "=r"(ret) // Outputs. + : "r"(x) // Inputs. 
+ ); + assert(ret < UINT_MAX); + return (unsigned)ret; +} +#elif (defined(_MSC_VER)) +BIT_UTIL_INLINE unsigned +lg_floor(size_t x) { + unsigned long ret; + + assert(x != 0); + +#if (LG_SIZEOF_PTR == 3) + _BitScanReverse64(&ret, x); +#elif (LG_SIZEOF_PTR == 2) + _BitScanReverse(&ret, x); +#else +# error "Unsupported type size for lg_floor()" +#endif + assert(ret < UINT_MAX); + return (unsigned)ret; +} +#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) +BIT_UTIL_INLINE unsigned +lg_floor(size_t x) { + assert(x != 0); + +#if (LG_SIZEOF_PTR == LG_SIZEOF_INT) + return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x); +#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) + return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x); +#else +# error "Unsupported type size for lg_floor()" +#endif +} +#else +BIT_UTIL_INLINE unsigned +lg_floor(size_t x) { + assert(x != 0); + + x |= (x >> 1); + x |= (x >> 2); + x |= (x >> 4); + x |= (x >> 8); + x |= (x >> 16); +#if (LG_SIZEOF_PTR == 3) + x |= (x >> 32); +#endif + if (x == SIZE_T_MAX) { + return (8 << LG_SIZEOF_PTR) - 1; + } + x++; + return ffs_zu(x) - 2; +} +#endif + +#undef BIT_UTIL_INLINE + +#endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bitmap.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bitmap.h new file mode 100644 index 0000000..ac99029 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/bitmap.h @@ -0,0 +1,369 @@ +#ifndef JEMALLOC_INTERNAL_BITMAP_H +#define JEMALLOC_INTERNAL_BITMAP_H + +#include "jemalloc/internal/arena_types.h" +#include "jemalloc/internal/bit_util.h" +#include "jemalloc/internal/size_classes.h" + +typedef unsigned long bitmap_t; +#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG + +/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ +#if LG_SLAB_MAXREGS > LG_CEIL_NSIZES +/* Maximum bitmap bit count is determined by maximum regions per slab. */ +# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS +#else +/* Maximum bitmap bit count is determined by number of extent size classes. */ +# define LG_BITMAP_MAXBITS LG_CEIL_NSIZES +#endif +#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) + +/* Number of bits per group. */ +#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) +#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS) +#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) + +/* + * Do some analysis on how big the bitmap is before we use a tree. For a brute + * force linear search, if we would have to call ffs_lu() more than 2^3 times, + * use a tree instead. + */ +#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 +# define BITMAP_USE_TREE +#endif + +/* Number of groups required to store a given number of bits. */ +#define BITMAP_BITS2GROUPS(nbits) \ + (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) + +/* + * Number of groups required at a particular level for a given number of bits. 
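+ * For example, assuming 64-bit groups (LG_BITMAP_GROUP_NBITS == 6) and
+ * nbits == 512: BITMAP_GROUPS_L0(512) == 8 and BITMAP_GROUPS_L1(512) == 1,
+ * so a two-level tree needs 8 + 1 == 9 groups in total.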
+ */ +#define BITMAP_GROUPS_L0(nbits) \ + BITMAP_BITS2GROUPS(nbits) +#define BITMAP_GROUPS_L1(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) +#define BITMAP_GROUPS_L2(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) +#define BITMAP_GROUPS_L3(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ + BITMAP_BITS2GROUPS((nbits))))) +#define BITMAP_GROUPS_L4(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))))) + +/* + * Assuming the number of levels, number of groups required for a given number + * of bits. + */ +#define BITMAP_GROUPS_1_LEVEL(nbits) \ + BITMAP_GROUPS_L0(nbits) +#define BITMAP_GROUPS_2_LEVEL(nbits) \ + (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) +#define BITMAP_GROUPS_3_LEVEL(nbits) \ + (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) +#define BITMAP_GROUPS_4_LEVEL(nbits) \ + (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) +#define BITMAP_GROUPS_5_LEVEL(nbits) \ + (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits)) + +/* + * Maximum number of groups required to support LG_BITMAP_MAXBITS. + */ +#ifdef BITMAP_USE_TREE + +#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS) +#else +# error "Unsupported bitmap size" +#endif + +/* + * Maximum number of levels possible. This could be statically computed based + * on LG_BITMAP_MAXBITS: + * + * #define BITMAP_MAX_LEVELS \ + * (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ + * + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) + * + * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so + * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the + * various cascading macros. The only additional cost this incurs is some + * unused trailing entries in bitmap_info_t structures; the bitmaps themselves + * are not impacted. + */ +#define BITMAP_MAX_LEVELS 5 + +#define BITMAP_INFO_INITIALIZER(nbits) { \ + /* nbits. */ \ + nbits, \ + /* nlevels. */ \ + (BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \ + (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \ + (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \ + (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \ + /* levels. 
*/ \ + { \ + {0}, \ + {BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \ + BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \ + BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \ + BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \ + + BITMAP_GROUPS_L0(nbits)} \ + } \ +} + +#else /* BITMAP_USE_TREE */ + +#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits) +#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS) + +#define BITMAP_INFO_INITIALIZER(nbits) { \ + /* nbits. */ \ + nbits, \ + /* ngroups. */ \ + BITMAP_BITS2GROUPS(nbits) \ +} + +#endif /* BITMAP_USE_TREE */ + +typedef struct bitmap_level_s { + /* Offset of this level's groups within the array of groups. */ + size_t group_offset; +} bitmap_level_t; + +typedef struct bitmap_info_s { + /* Logical number of bits in bitmap (stored at bottom level). */ + size_t nbits; + +#ifdef BITMAP_USE_TREE + /* Number of levels necessary for nbits. */ + unsigned nlevels; + + /* + * Only the first (nlevels+1) elements are used, and levels are ordered + * bottom to top (e.g. the bottom level is stored in levels[0]). + */ + bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; +#else /* BITMAP_USE_TREE */ + /* Number of groups necessary for nbits. */ + size_t ngroups; +#endif /* BITMAP_USE_TREE */ +} bitmap_info_t; + +void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); +void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill); +size_t bitmap_size(const bitmap_info_t *binfo); + +static inline bool +bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) { +#ifdef BITMAP_USE_TREE + size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; + bitmap_t rg = bitmap[rgoff]; + /* The bitmap is full iff the root group is 0. */ + return (rg == 0); +#else + size_t i; + + for (i = 0; i < binfo->ngroups; i++) { + if (bitmap[i] != 0) { + return false; + } + } + return true; +#endif +} + +static inline bool +bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { + size_t goff; + bitmap_t g; + + assert(bit < binfo->nbits); + goff = bit >> LG_BITMAP_GROUP_NBITS; + g = bitmap[goff]; + return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); +} + +static inline void +bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { + size_t goff; + bitmap_t *gp; + bitmap_t g; + + assert(bit < binfo->nbits); + assert(!bitmap_get(bitmap, binfo, bit)); + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[goff]; + g = *gp; + assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + assert(bitmap_get(bitmap, binfo, bit)); +#ifdef BITMAP_USE_TREE + /* Propagate group state transitions up the tree. */ + if (g == 0) { + unsigned i; + for (i = 1; i < binfo->nlevels; i++) { + bit = goff; + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[binfo->levels[i].group_offset + goff]; + g = *gp; + assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + if (g != 0) { + break; + } + } + } +#endif +} + +/* ffu: find first unset >= bit. 
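+ * For example, with group 0 == 0b10110 (a 1 bit is logically unset) and
+ * min_bit == 3, masking clears bits 0..2 and leaves 0b10000, so the
+ * first unset bit found is bit 4.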
*/ +static inline size_t +bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) { + assert(min_bit < binfo->nbits); + +#ifdef BITMAP_USE_TREE + size_t bit = 0; + for (unsigned level = binfo->nlevels; level--;) { + size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level + + 1)); + bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit + >> lg_bits_per_group)]; + unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit - + bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS)); + assert(group_nmask <= BITMAP_GROUP_NBITS); + bitmap_t group_mask = ~((1LU << group_nmask) - 1); + bitmap_t group_masked = group & group_mask; + if (group_masked == 0LU) { + if (group == 0LU) { + return binfo->nbits; + } + /* + * min_bit was preceded by one or more unset bits in + * this group, but there are no other unset bits in this + * group. Try again starting at the first bit of the + * next sibling. This will recurse at most once per + * non-root level. + */ + size_t sib_base = bit + (ZU(1) << lg_bits_per_group); + assert(sib_base > min_bit); + assert(sib_base > bit); + if (sib_base >= binfo->nbits) { + return binfo->nbits; + } + return bitmap_ffu(bitmap, binfo, sib_base); + } + bit += ((size_t)(ffs_lu(group_masked) - 1)) << + (lg_bits_per_group - LG_BITMAP_GROUP_NBITS); + } + assert(bit >= min_bit); + assert(bit < binfo->nbits); + return bit; +#else + size_t i = min_bit >> LG_BITMAP_GROUP_NBITS; + bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK)) + - 1); + size_t bit; + do { + bit = ffs_lu(g); + if (bit != 0) { + return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); + } + i++; + g = bitmap[i]; + } while (i < binfo->ngroups); + return binfo->nbits; +#endif +} + +/* sfu: set first unset. */ +static inline size_t +bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { + size_t bit; + bitmap_t g; + unsigned i; + + assert(!bitmap_full(bitmap, binfo)); + +#ifdef BITMAP_USE_TREE + i = binfo->nlevels - 1; + g = bitmap[binfo->levels[i].group_offset]; + bit = ffs_lu(g) - 1; + while (i > 0) { + i--; + g = bitmap[binfo->levels[i].group_offset + bit]; + bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1); + } +#else + i = 0; + g = bitmap[0]; + while ((bit = ffs_lu(g)) == 0) { + i++; + g = bitmap[i]; + } + bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); +#endif + bitmap_set(bitmap, binfo, bit); + return bit; +} + +static inline void +bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { + size_t goff; + bitmap_t *gp; + bitmap_t g; + UNUSED bool propagate; + + assert(bit < binfo->nbits); + assert(bitmap_get(bitmap, binfo, bit)); + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[goff]; + g = *gp; + propagate = (g == 0); + assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + assert(!bitmap_get(bitmap, binfo, bit)); +#ifdef BITMAP_USE_TREE + /* Propagate group state transitions up the tree. 
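+	 * Propagation continues only while the parent group was
+	 * previously zero (i.e. fully allocated): once a level already
+	 * advertises a free bit, higher levels are unaffected by this
+	 * unset.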
*/ + if (propagate) { + unsigned i; + for (i = 1; i < binfo->nlevels; i++) { + bit = goff; + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[binfo->levels[i].group_offset + goff]; + g = *gp; + propagate = (g == 0); + assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) + == 0); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + if (!propagate) { + break; + } + } + } +#endif /* BITMAP_USE_TREE */ +} + +#endif /* JEMALLOC_INTERNAL_BITMAP_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/cache_bin.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/cache_bin.h new file mode 100644 index 0000000..12f3ef2 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/cache_bin.h @@ -0,0 +1,114 @@ +#ifndef JEMALLOC_INTERNAL_CACHE_BIN_H +#define JEMALLOC_INTERNAL_CACHE_BIN_H + +#include "jemalloc/internal/ql.h" + +/* + * The cache_bins are the mechanism that the tcache and the arena use to + * communicate. The tcache fills from and flushes to the arena by passing a + * cache_bin_t to fill/flush. When the arena needs to pull stats from the + * tcaches associated with it, it does so by iterating over its + * cache_bin_array_descriptor_t objects and reading out per-bin stats it + * contains. This makes it so that the arena need not know about the existence + * of the tcache at all. + */ + + +/* + * The count of the number of cached allocations in a bin. We make this signed + * so that negative numbers can encode "invalid" states (e.g. a low water mark + * of -1 for a cache that has been depleted). + */ +typedef int32_t cache_bin_sz_t; + +typedef struct cache_bin_stats_s cache_bin_stats_t; +struct cache_bin_stats_s { + /* + * Number of allocation requests that corresponded to the size of this + * bin. + */ + uint64_t nrequests; +}; + +/* + * Read-only information associated with each element of tcache_t's tbins array + * is stored separately, mainly to reduce memory usage. + */ +typedef struct cache_bin_info_s cache_bin_info_t; +struct cache_bin_info_s { + /* Upper limit on ncached. */ + cache_bin_sz_t ncached_max; +}; + +typedef struct cache_bin_s cache_bin_t; +struct cache_bin_s { + /* Min # cached since last GC. */ + cache_bin_sz_t low_water; + /* # of cached objects. */ + cache_bin_sz_t ncached; + /* + * ncached and stats are both modified frequently. Let's keep them + * close so that they have a higher chance of being on the same + * cacheline, thus less write-backs. + */ + cache_bin_stats_t tstats; + /* + * Stack of available objects. + * + * To make use of adjacent cacheline prefetch, the items in the avail + * stack goes to higher address for newer allocations. avail points + * just above the available space, which means that + * avail[-ncached, ... -1] are available items and the lowest item will + * be allocated first. + */ + void **avail; +}; + +typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t; +struct cache_bin_array_descriptor_s { + /* + * The arena keeps a list of the cache bins associated with it, for + * stats collection. + */ + ql_elm(cache_bin_array_descriptor_t) link; + /* Pointers to the tcache bins. 
*/ + cache_bin_t *bins_small; + cache_bin_t *bins_large; +}; + +static inline void +cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor, + cache_bin_t *bins_small, cache_bin_t *bins_large) { + ql_elm_new(descriptor, link); + descriptor->bins_small = bins_small; + descriptor->bins_large = bins_large; +} + +JEMALLOC_ALWAYS_INLINE void * +cache_bin_alloc_easy(cache_bin_t *bin, bool *success) { + void *ret; + + if (unlikely(bin->ncached == 0)) { + bin->low_water = -1; + *success = false; + return NULL; + } + /* + * success (instead of ret) should be checked upon the return of this + * function. We avoid checking (ret == NULL) because there is never a + * null stored on the avail stack (which is unknown to the compiler), + * and eagerly checking ret would cause pipeline stall (waiting for the + * cacheline). + */ + *success = true; + ret = *(bin->avail - bin->ncached); + bin->ncached--; + + if (unlikely(bin->ncached < bin->low_water)) { + bin->low_water = bin->ncached; + } + + return ret; +} + +#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/chunk.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/chunk.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/chunk.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/chunk.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/chunk_dss.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/chunk_dss.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/chunk_dss.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/chunk_dss.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ckh.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ckh.h new file mode 100644 index 0000000..7b3850b --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ckh.h @@ -0,0 +1,101 @@ +#ifndef JEMALLOC_INTERNAL_CKH_H +#define JEMALLOC_INTERNAL_CKH_H + +#include "jemalloc/internal/tsd.h" + +/* Cuckoo hashing implementation. Skip to the end for the interface. */ + +/******************************************************************************/ +/* INTERNAL DEFINITIONS -- IGNORE */ +/******************************************************************************/ + +/* Maintain counters used to get an idea of performance. */ +/* #define CKH_COUNT */ +/* Print counter values in ckh_delete() (requires CKH_COUNT). */ +/* #define CKH_VERBOSE */ + +/* + * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit + * one bucket per L1 cache line. + */ +#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) + +/* Typedefs to allow easy function pointer passing. 
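+ * A minimal usage sketch (the initial capacity of 16 is arbitrary):
+ *
+ *   ckh_t ckh;
+ *   bool err = ckh_new(tsd, &ckh, 16, ckh_string_hash,
+ *       ckh_string_keycomp);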
*/ +typedef void ckh_hash_t (const void *, size_t[2]); +typedef bool ckh_keycomp_t (const void *, const void *); + +/* Hash table cell. */ +typedef struct { + const void *key; + const void *data; +} ckhc_t; + +/* The hash table itself. */ +typedef struct { +#ifdef CKH_COUNT + /* Counters used to get an idea of performance. */ + uint64_t ngrows; + uint64_t nshrinks; + uint64_t nshrinkfails; + uint64_t ninserts; + uint64_t nrelocs; +#endif + + /* Used for pseudo-random number generation. */ + uint64_t prng_state; + + /* Total number of items. */ + size_t count; + + /* + * Minimum and current number of hash table buckets. There are + * 2^LG_CKH_BUCKET_CELLS cells per bucket. + */ + unsigned lg_minbuckets; + unsigned lg_curbuckets; + + /* Hash and comparison functions. */ + ckh_hash_t *hash; + ckh_keycomp_t *keycomp; + + /* Hash table with 2^lg_curbuckets buckets. */ + ckhc_t *tab; +} ckh_t; + +/******************************************************************************/ +/* BEGIN PUBLIC API */ +/******************************************************************************/ + +/* Lifetime management. Minitems is the initial capacity. */ +bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, + ckh_keycomp_t *keycomp); +void ckh_delete(tsd_t *tsd, ckh_t *ckh); + +/* Get the number of elements in the set. */ +size_t ckh_count(ckh_t *ckh); + +/* + * To iterate over the elements in the table, initialize *tabind to 0 and call + * this function until it returns true. Each call that returns false will + * update *key and *data to the next element in the table, assuming the pointers + * are non-NULL. + */ +bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); + +/* + * Basic hash table operations -- insert, removal, lookup. For ckh_remove and + * ckh_search, key or data can be NULL. The hash-table only stores pointers to + * the key and value, and doesn't do any lifetime management. + */ +bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); +bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, + void **data); +bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); + +/* Some useful hash and comparison functions for strings and pointers. */ +void ckh_string_hash(const void *key, size_t r_hash[2]); +bool ckh_string_keycomp(const void *k1, const void *k2); +void ckh_pointer_hash(const void *key, size_t r_hash[2]); +bool ckh_pointer_keycomp(const void *k1, const void *k2); + +#endif /* JEMALLOC_INTERNAL_CKH_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ctl.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ctl.h new file mode 100644 index 0000000..d927d94 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ctl.h @@ -0,0 +1,131 @@ +#ifndef JEMALLOC_INTERNAL_CTL_H +#define JEMALLOC_INTERNAL_CTL_H + +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex_prof.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats.h" + +/* Maximum ctl tree depth. */ +#define CTL_MAX_DEPTH 7 + +typedef struct ctl_node_s { + bool named; +} ctl_node_t; + +typedef struct ctl_named_node_s { + ctl_node_t node; + const char *name; + /* If (nchildren == 0), this is a terminal node. 
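+	 * (Terminal nodes supply the ctl callback below; interior nodes
+	 * supply the children array instead.)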
*/ + size_t nchildren; + const ctl_node_t *children; + int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *, + size_t); +} ctl_named_node_t; + +typedef struct ctl_indexed_node_s { + struct ctl_node_s node; + const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, + size_t); +} ctl_indexed_node_t; + +typedef struct ctl_arena_stats_s { + arena_stats_t astats; + + /* Aggregate stats for small size classes, based on bin stats. */ + size_t allocated_small; + uint64_t nmalloc_small; + uint64_t ndalloc_small; + uint64_t nrequests_small; + + bin_stats_t bstats[NBINS]; + arena_stats_large_t lstats[NSIZES - NBINS]; +} ctl_arena_stats_t; + +typedef struct ctl_stats_s { + size_t allocated; + size_t active; + size_t metadata; + size_t metadata_thp; + size_t resident; + size_t mapped; + size_t retained; + + background_thread_stats_t background_thread; + mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes]; +} ctl_stats_t; + +typedef struct ctl_arena_s ctl_arena_t; +struct ctl_arena_s { + unsigned arena_ind; + bool initialized; + ql_elm(ctl_arena_t) destroyed_link; + + /* Basic stats, supported even if !config_stats. */ + unsigned nthreads; + const char *dss; + ssize_t dirty_decay_ms; + ssize_t muzzy_decay_ms; + size_t pactive; + size_t pdirty; + size_t pmuzzy; + + /* NULL if !config_stats. */ + ctl_arena_stats_t *astats; +}; + +typedef struct ctl_arenas_s { + uint64_t epoch; + unsigned narenas; + ql_head(ctl_arena_t) destroyed; + + /* + * Element 0 corresponds to merged stats for extant arenas (accessed via + * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for + * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the + * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas. + */ + ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT]; +} ctl_arenas_t; + +int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, + void *newp, size_t newlen); +int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp); + +int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen); +bool ctl_boot(void); +void ctl_prefork(tsdn_t *tsdn); +void ctl_postfork_parent(tsdn_t *tsdn); +void ctl_postfork_child(tsdn_t *tsdn); + +#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ + if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ + != 0) { \ + malloc_printf( \ + ": Failure in xmallctl(\"%s\", ...)\n", \ + name); \ + abort(); \ + } \ +} while (0) + +#define xmallctlnametomib(name, mibp, miblenp) do { \ + if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ + malloc_printf(": Failure in " \ + "xmallctlnametomib(\"%s\", ...)\n", name); \ + abort(); \ + } \ +} while (0) + +#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ + if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ + newlen) != 0) { \ + malloc_write( \ + ": Failure in xmallctlbymib()\n"); \ + abort(); \ + } \ +} while (0) + +#endif /* JEMALLOC_INTERNAL_CTL_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/div.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/div.h new file mode 100644 index 0000000..aebae93 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/div.h @@ -0,0 +1,41 @@ +#ifndef JEMALLOC_INTERNAL_DIV_H +#define JEMALLOC_INTERNAL_DIV_H + +#include "jemalloc/internal/assert.h" + +/* + * This module does the division that computes the index of a region in 
a slab, + * given its offset relative to the base. + * That is, given a divisor d and an n = i * d (all integers), we'll return i. + * We do some pre-computation to do this more quickly than a CPU division + * instruction. + * We bound n < 2^32, and don't support dividing by one. + */ + +typedef struct div_info_s div_info_t; +struct div_info_s { + uint32_t magic; +#ifdef JEMALLOC_DEBUG + size_t d; +#endif +}; + +void div_init(div_info_t *div_info, size_t divisor); + +static inline size_t +div_compute(div_info_t *div_info, size_t n) { + assert(n <= (uint32_t)-1); + /* + * This generates, e.g. mov; imul; shr on x86-64. On a 32-bit machine, + * the compilers I tried were all smart enough to turn this into the + * appropriate "get the high 32 bits of the result of a multiply" (e.g. + * mul; mov edx eax; on x86, umull on arm, etc.). + */ + size_t i = ((uint64_t)n * (uint64_t)div_info->magic) >> 32; +#ifdef JEMALLOC_DEBUG + assert(i * div_info->d == n); +#endif + return i; +} + +#endif /* JEMALLOC_INTERNAL_DIV_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/emitter.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/emitter.h new file mode 100644 index 0000000..3a2b2f7 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/emitter.h @@ -0,0 +1,435 @@ +#ifndef JEMALLOC_INTERNAL_EMITTER_H +#define JEMALLOC_INTERNAL_EMITTER_H + +#include "jemalloc/internal/ql.h" + +typedef enum emitter_output_e emitter_output_t; +enum emitter_output_e { + emitter_output_json, + emitter_output_table +}; + +typedef enum emitter_justify_e emitter_justify_t; +enum emitter_justify_e { + emitter_justify_left, + emitter_justify_right, + /* Not for users; just to pass to internal functions. */ + emitter_justify_none +}; + +typedef enum emitter_type_e emitter_type_t; +enum emitter_type_e { + emitter_type_bool, + emitter_type_int, + emitter_type_unsigned, + emitter_type_uint32, + emitter_type_uint64, + emitter_type_size, + emitter_type_ssize, + emitter_type_string, + /* + * A title is a column title in a table; it's just a string, but it's + * not quoted. + */ + emitter_type_title, +}; + +typedef struct emitter_col_s emitter_col_t; +struct emitter_col_s { + /* Filled in by the user. */ + emitter_justify_t justify; + int width; + emitter_type_t type; + union { + bool bool_val; + int int_val; + unsigned unsigned_val; + uint32_t uint32_val; + uint64_t uint64_val; + size_t size_val; + ssize_t ssize_val; + const char *str_val; + }; + + /* Filled in by initialization. */ + ql_elm(emitter_col_t) link; +}; + +typedef struct emitter_row_s emitter_row_t; +struct emitter_row_s { + ql_head(emitter_col_t) cols; +}; + +static inline void +emitter_row_init(emitter_row_t *row) { + ql_new(&row->cols); +} + +static inline void +emitter_col_init(emitter_col_t *col, emitter_row_t *row) { + ql_elm_new(col, link); + ql_tail_insert(&row->cols, col, link); +} + +typedef struct emitter_s emitter_t; +struct emitter_s { + emitter_output_t output; + /* The output information. */ + void (*write_cb)(void *, const char *); + void *cbopaque; + int nesting_depth;
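/*
 * Illustrative sketch, not part of the patch: a plausible div_init() for the
 * div.h module above, assuming magic = ceil(2^32 / divisor) (the real
 * initializer lives in src/div.c, outside this hunk). With that magic,
 * ((uint64_t)n * magic) >> 32 in div_compute() recovers i exactly for any
 * n = i * divisor with n < 2^32.
 */
void
div_init(div_info_t *div_info, size_t divisor) {
	/* d == 0 is nonsensical; d == 1 would need magic == 2^32, which does
	 * not fit in a uint32_t. */
	assert(divisor != 0 && divisor != 1);
	uint64_t two_to_32 = (uint64_t)1 << 32;
	uint32_t magic = (uint32_t)(two_to_32 / divisor);
	/* C integer division floors; round up unless the quotient was exact. */
	if (two_to_32 % divisor != 0) {
		magic++;
	}
	div_info->magic = magic;
#ifdef JEMALLOC_DEBUG
	div_info->d = divisor;
#endif
}

+ /* True if we've already emitted a value at the given depth.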
*/ + bool item_at_depth; +}; + +static inline void +emitter_init(emitter_t *emitter, emitter_output_t emitter_output, + void (*write_cb)(void *, const char *), void *cbopaque) { + emitter->output = emitter_output; + emitter->write_cb = write_cb; + emitter->cbopaque = cbopaque; + emitter->item_at_depth = false; + emitter->nesting_depth = 0; +} + +/* Internal convenience function. Write to the emitter the given string. */ +JEMALLOC_FORMAT_PRINTF(2, 3) +static inline void +emitter_printf(emitter_t *emitter, const char *format, ...) { + va_list ap; + + va_start(ap, format); + malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap); + va_end(ap); +} + +/* Write to the emitter the given string, but only in table mode. */ +JEMALLOC_FORMAT_PRINTF(2, 3) +static inline void +emitter_table_printf(emitter_t *emitter, const char *format, ...) { + if (emitter->output == emitter_output_table) { + va_list ap; + va_start(ap, format); + malloc_vcprintf(emitter->write_cb, emitter->cbopaque, format, ap); + va_end(ap); + } +} + +static inline void +emitter_gen_fmt(char *out_fmt, size_t out_size, const char *fmt_specifier, + emitter_justify_t justify, int width) { + size_t written; + if (justify == emitter_justify_none) { + written = malloc_snprintf(out_fmt, out_size, + "%%%s", fmt_specifier); + } else if (justify == emitter_justify_left) { + written = malloc_snprintf(out_fmt, out_size, + "%%-%d%s", width, fmt_specifier); + } else { + written = malloc_snprintf(out_fmt, out_size, + "%%%d%s", width, fmt_specifier); + } + /* Only happens in case of bad format string, which *we* choose. */ + assert(written < out_size); +} + +/* + * Internal. Emit the given value type in the relevant encoding (so that the + * bool true gets mapped to json "true", but the string "true" gets mapped to + * json "\"true\"", for instance. + * + * Width is ignored if justify is emitter_justify_none. + */ +static inline void +emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width, + emitter_type_t value_type, const void *value) { + size_t str_written; +#define BUF_SIZE 256 +#define FMT_SIZE 10 + /* + * We dynamically generate a format string to emit, to let us use the + * snprintf machinery. This is kinda hacky, but gets the job done + * quickly without having to think about the various snprintf edge + * cases. + */ + char fmt[FMT_SIZE]; + char buf[BUF_SIZE]; + +#define EMIT_SIMPLE(type, format) \ + emitter_gen_fmt(fmt, FMT_SIZE, format, justify, width); \ + emitter_printf(emitter, fmt, *(const type *)value); \ + + switch (value_type) { + case emitter_type_bool: + emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width); + emitter_printf(emitter, fmt, *(const bool *)value ? + "true" : "false"); + break; + case emitter_type_int: + EMIT_SIMPLE(int, "d") + break; + case emitter_type_unsigned: + EMIT_SIMPLE(unsigned, "u") + break; + case emitter_type_ssize: + EMIT_SIMPLE(ssize_t, "zd") + break; + case emitter_type_size: + EMIT_SIMPLE(size_t, "zu") + break; + case emitter_type_string: + str_written = malloc_snprintf(buf, BUF_SIZE, "\"%s\"", + *(const char *const *)value); + /* + * We control the strings we output; we shouldn't get anything + * anywhere near the fmt size. 
+ */ + assert(str_written < BUF_SIZE); + emitter_gen_fmt(fmt, FMT_SIZE, "s", justify, width); + emitter_printf(emitter, fmt, buf); + break; + case emitter_type_uint32: + EMIT_SIMPLE(uint32_t, FMTu32) + break; + case emitter_type_uint64: + EMIT_SIMPLE(uint64_t, FMTu64) + break; + case emitter_type_title: + EMIT_SIMPLE(char *const, "s"); + break; + default: + unreachable(); + } +#undef BUF_SIZE +#undef FMT_SIZE +} + + +/* Internal functions. In json mode, tracks nesting state. */ +static inline void +emitter_nest_inc(emitter_t *emitter) { + emitter->nesting_depth++; + emitter->item_at_depth = false; +} + +static inline void +emitter_nest_dec(emitter_t *emitter) { + emitter->nesting_depth--; + emitter->item_at_depth = true; +} + +static inline void +emitter_indent(emitter_t *emitter) { + int amount = emitter->nesting_depth; + const char *indent_str; + if (emitter->output == emitter_output_json) { + indent_str = "\t"; + } else { + amount *= 2; + indent_str = " "; + } + for (int i = 0; i < amount; i++) { + emitter_printf(emitter, "%s", indent_str); + } +} + +static inline void +emitter_json_key_prefix(emitter_t *emitter) { + emitter_printf(emitter, "%s\n", emitter->item_at_depth ? "," : ""); + emitter_indent(emitter); +} + +static inline void +emitter_begin(emitter_t *emitter) { + if (emitter->output == emitter_output_json) { + assert(emitter->nesting_depth == 0); + emitter_printf(emitter, "{"); + emitter_nest_inc(emitter); + } else { + // tabular init + emitter_printf(emitter, "%s", ""); + } +} + +static inline void +emitter_end(emitter_t *emitter) { + if (emitter->output == emitter_output_json) { + assert(emitter->nesting_depth == 1); + emitter_nest_dec(emitter); + emitter_printf(emitter, "\n}\n"); + } +} + +/* + * Note emits a different kv pair as well, but only in table mode. Omits the + * note if table_note_key is NULL. 
+ */ +static inline void +emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key, + emitter_type_t value_type, const void *value, + const char *table_note_key, emitter_type_t table_note_value_type, + const void *table_note_value) { + if (emitter->output == emitter_output_json) { + assert(emitter->nesting_depth > 0); + emitter_json_key_prefix(emitter); + emitter_printf(emitter, "\"%s\": ", json_key); + emitter_print_value(emitter, emitter_justify_none, -1, + value_type, value); + } else { + emitter_indent(emitter); + emitter_printf(emitter, "%s: ", table_key); + emitter_print_value(emitter, emitter_justify_none, -1, + value_type, value); + if (table_note_key != NULL) { + emitter_printf(emitter, " (%s: ", table_note_key); + emitter_print_value(emitter, emitter_justify_none, -1, + table_note_value_type, table_note_value); + emitter_printf(emitter, ")"); + } + emitter_printf(emitter, "\n"); + } + emitter->item_at_depth = true; +} + +static inline void +emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key, + emitter_type_t value_type, const void *value) { + emitter_kv_note(emitter, json_key, table_key, value_type, value, NULL, + emitter_type_bool, NULL); +} + +static inline void +emitter_json_kv(emitter_t *emitter, const char *json_key, + emitter_type_t value_type, const void *value) { + if (emitter->output == emitter_output_json) { + emitter_kv(emitter, json_key, NULL, value_type, value); + } +} + +static inline void +emitter_table_kv(emitter_t *emitter, const char *table_key, + emitter_type_t value_type, const void *value) { + if (emitter->output == emitter_output_table) { + emitter_kv(emitter, NULL, table_key, value_type, value); + } +} + +static inline void +emitter_dict_begin(emitter_t *emitter, const char *json_key, + const char *table_header) { + if (emitter->output == emitter_output_json) { + emitter_json_key_prefix(emitter); + emitter_printf(emitter, "\"%s\": {", json_key); + emitter_nest_inc(emitter); + } else { + emitter_indent(emitter); + emitter_printf(emitter, "%s\n", table_header); + emitter_nest_inc(emitter); + } +} + +static inline void +emitter_dict_end(emitter_t *emitter) { + if (emitter->output == emitter_output_json) { + assert(emitter->nesting_depth > 0); + emitter_nest_dec(emitter); + emitter_printf(emitter, "\n"); + emitter_indent(emitter); + emitter_printf(emitter, "}"); + } else { + emitter_nest_dec(emitter); + } +} + +static inline void +emitter_json_dict_begin(emitter_t *emitter, const char *json_key) { + if (emitter->output == emitter_output_json) { + emitter_dict_begin(emitter, json_key, NULL); + } +} + +static inline void +emitter_json_dict_end(emitter_t *emitter) { + if (emitter->output == emitter_output_json) { + emitter_dict_end(emitter); + } +} + +static inline void +emitter_table_dict_begin(emitter_t *emitter, const char *table_key) { + if (emitter->output == emitter_output_table) { + emitter_dict_begin(emitter, NULL, table_key); + } +} + +static inline void +emitter_table_dict_end(emitter_t *emitter) { + if (emitter->output == emitter_output_table) { + emitter_dict_end(emitter); + } +} + +static inline void +emitter_json_arr_begin(emitter_t *emitter, const char *json_key) { + if (emitter->output == emitter_output_json) { + emitter_json_key_prefix(emitter); + emitter_printf(emitter, "\"%s\": [", json_key); + emitter_nest_inc(emitter); + } +} + +static inline void +emitter_json_arr_end(emitter_t *emitter) { + if (emitter->output == emitter_output_json) { + assert(emitter->nesting_depth > 0); + 
emitter_nest_dec(emitter); + emitter_printf(emitter, "\n"); + emitter_indent(emitter); + emitter_printf(emitter, "]"); + } +} + +static inline void +emitter_json_arr_obj_begin(emitter_t *emitter) { + if (emitter->output == emitter_output_json) { + emitter_json_key_prefix(emitter); + emitter_printf(emitter, "{"); + emitter_nest_inc(emitter); + } +} + +static inline void +emitter_json_arr_obj_end(emitter_t *emitter) { + if (emitter->output == emitter_output_json) { + assert(emitter->nesting_depth > 0); + emitter_nest_dec(emitter); + emitter_printf(emitter, "\n"); + emitter_indent(emitter); + emitter_printf(emitter, "}"); + } +} + +static inline void +emitter_json_arr_value(emitter_t *emitter, emitter_type_t value_type, + const void *value) { + if (emitter->output == emitter_output_json) { + emitter_json_key_prefix(emitter); + emitter_print_value(emitter, emitter_justify_none, -1, + value_type, value); + } +} + +static inline void +emitter_table_row(emitter_t *emitter, emitter_row_t *row) { + if (emitter->output != emitter_output_table) { + return; + } + emitter_col_t *col; + ql_foreach(col, &row->cols, link) { + emitter_print_value(emitter, col->justify, col->width, + col->type, (const void *)&col->bool_val); + } + emitter_table_printf(emitter, "\n"); +} + +#endif /* JEMALLOC_INTERNAL_EMITTER_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/extent.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/extent.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent.h diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_dss.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_dss.h new file mode 100644 index 0000000..e8f02ce --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_dss.h @@ -0,0 +1,26 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H +#define JEMALLOC_INTERNAL_EXTENT_DSS_H + +typedef enum { + dss_prec_disabled = 0, + dss_prec_primary = 1, + dss_prec_secondary = 2, + + dss_prec_limit = 3 +} dss_prec_t; +#define DSS_PREC_DEFAULT dss_prec_secondary +#define DSS_DEFAULT "secondary" + +extern const char *dss_prec_names[]; + +extern const char *opt_dss; + +dss_prec_t extent_dss_prec_get(void); +bool extent_dss_prec_set(dss_prec_t dss_prec); +void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit); +bool extent_in_dss(void *addr); +bool extent_dss_mergeable(void *addr_a, void *addr_b); +void extent_dss_boot(void); + +#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_externs.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_externs.h new file mode 100644 index 0000000..b8a4d02 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_externs.h @@ -0,0 +1,73 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H +#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H + +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_pool.h" +#include "jemalloc/internal/ph.h" +#include "jemalloc/internal/rtree.h" + +extern size_t opt_lg_extent_max_active_fit; + +extern rtree_t extents_rtree; 
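/*
 * Usage sketch for the emitter API above (illustrative only, not part of the
 * patch): one sequence of calls renders either JSON or a plain table,
 * depending on the emitter_output_t chosen at init time; write_cb and
 * cbopaque stand in for whatever output sink the caller already has.
 */
static void
emitter_demo(void (*write_cb)(void *, const char *), void *cbopaque) {
	emitter_t emitter;
	unsigned narenas = 4;    /* Placeholder value. */
	bool bg_thread = false;  /* Placeholder value. */

	emitter_init(&emitter, emitter_output_json, write_cb, cbopaque);
	emitter_begin(&emitter); /* "{" in JSON mode; a no-op for tables. */
	emitter_kv(&emitter, "narenas", "Arenas", emitter_type_unsigned,
	    &narenas);
	emitter_kv(&emitter, "background_thread", "Background thread",
	    emitter_type_bool, &bg_thread);
	emitter_end(&emitter);   /* Closing "}" in JSON mode. */
}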
+extern const extent_hooks_t extent_hooks_default; +extern mutex_pool_t extent_mutex_pool; + +extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena); +void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent); + +extent_hooks_t *extent_hooks_get(arena_t *arena); +extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena, + extent_hooks_t *extent_hooks); + +#ifdef JEMALLOC_JET +size_t extent_size_quantize_floor(size_t size); +size_t extent_size_quantize_ceil(size_t size); +#endif + +rb_proto(, extent_avail_, extent_tree_t, extent_t) +ph_proto(, extent_heap_, extent_heap_t, extent_t) + +bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, + bool delay_coalesce); +extent_state_t extents_state_get(const extents_t *extents); +size_t extents_npages_get(extents_t *extents); +extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, + size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, + bool *zero, bool *commit); +void extents_dalloc(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent); +extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min); +void extents_prefork(tsdn_t *tsdn, extents_t *extents); +void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents); +void extents_postfork_child(tsdn_t *tsdn, extents_t *extents); +extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit); +void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent); +void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent); +void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent); +bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length); +bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length); +bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length); +bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length); +extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b); +bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b); + +bool extent_boot(void); + +#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_inlines.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_inlines.h new file mode 100644 index 0000000..77181df --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_inlines.h @@ -0,0 +1,433 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H +#define JEMALLOC_INTERNAL_EXTENT_INLINES_H + +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_pool.h" +#include 
"jemalloc/internal/pages.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/sz.h" + +static inline void +extent_lock(tsdn_t *tsdn, extent_t *extent) { + assert(extent != NULL); + mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent); +} + +static inline void +extent_unlock(tsdn_t *tsdn, extent_t *extent) { + assert(extent != NULL); + mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent); +} + +static inline void +extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) { + assert(extent1 != NULL && extent2 != NULL); + mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1, + (uintptr_t)extent2); +} + +static inline void +extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) { + assert(extent1 != NULL && extent2 != NULL); + mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1, + (uintptr_t)extent2); +} + +static inline arena_t * +extent_arena_get(const extent_t *extent) { + unsigned arena_ind = (unsigned)((extent->e_bits & + EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT); + /* + * The following check is omitted because we should never actually read + * a NULL arena pointer. + */ + if (false && arena_ind >= MALLOCX_ARENA_LIMIT) { + return NULL; + } + assert(arena_ind < MALLOCX_ARENA_LIMIT); + return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE); +} + +static inline szind_t +extent_szind_get_maybe_invalid(const extent_t *extent) { + szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >> + EXTENT_BITS_SZIND_SHIFT); + assert(szind <= NSIZES); + return szind; +} + +static inline szind_t +extent_szind_get(const extent_t *extent) { + szind_t szind = extent_szind_get_maybe_invalid(extent); + assert(szind < NSIZES); /* Never call when "invalid". 
*/ + return szind; +} + +static inline size_t +extent_usize_get(const extent_t *extent) { + return sz_index2size(extent_szind_get(extent)); +} + +static inline size_t +extent_sn_get(const extent_t *extent) { + return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >> + EXTENT_BITS_SN_SHIFT); +} + +static inline extent_state_t +extent_state_get(const extent_t *extent) { + return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >> + EXTENT_BITS_STATE_SHIFT); +} + +static inline bool +extent_zeroed_get(const extent_t *extent) { + return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >> + EXTENT_BITS_ZEROED_SHIFT); +} + +static inline bool +extent_committed_get(const extent_t *extent) { + return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >> + EXTENT_BITS_COMMITTED_SHIFT); +} + +static inline bool +extent_dumpable_get(const extent_t *extent) { + return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >> + EXTENT_BITS_DUMPABLE_SHIFT); +} + +static inline bool +extent_slab_get(const extent_t *extent) { + return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >> + EXTENT_BITS_SLAB_SHIFT); +} + +static inline unsigned +extent_nfree_get(const extent_t *extent) { + assert(extent_slab_get(extent)); + return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >> + EXTENT_BITS_NFREE_SHIFT); +} + +static inline void * +extent_base_get(const extent_t *extent) { + assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) || + !extent_slab_get(extent)); + return PAGE_ADDR2BASE(extent->e_addr); +} + +static inline void * +extent_addr_get(const extent_t *extent) { + assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) || + !extent_slab_get(extent)); + return extent->e_addr; +} + +static inline size_t +extent_size_get(const extent_t *extent) { + return (extent->e_size_esn & EXTENT_SIZE_MASK); +} + +static inline size_t +extent_esn_get(const extent_t *extent) { + return (extent->e_size_esn & EXTENT_ESN_MASK); +} + +static inline size_t +extent_bsize_get(const extent_t *extent) { + return extent->e_bsize; +} + +static inline void * +extent_before_get(const extent_t *extent) { + return (void *)((uintptr_t)extent_base_get(extent) - PAGE); +} + +static inline void * +extent_last_get(const extent_t *extent) { + return (void *)((uintptr_t)extent_base_get(extent) + + extent_size_get(extent) - PAGE); +} + +static inline void * +extent_past_get(const extent_t *extent) { + return (void *)((uintptr_t)extent_base_get(extent) + + extent_size_get(extent)); +} + +static inline arena_slab_data_t * +extent_slab_data_get(extent_t *extent) { + assert(extent_slab_get(extent)); + return &extent->e_slab_data; +} + +static inline const arena_slab_data_t * +extent_slab_data_get_const(const extent_t *extent) { + assert(extent_slab_get(extent)); + return &extent->e_slab_data; +} + +static inline prof_tctx_t * +extent_prof_tctx_get(const extent_t *extent) { + return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx, + ATOMIC_ACQUIRE); +} + +static inline void +extent_arena_set(extent_t *extent, arena_t *arena) { + unsigned arena_ind = (arena != NULL) ? 
arena_ind_get(arena) : ((1U << + MALLOCX_ARENA_BITS) - 1); + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) | + ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT); +} + +static inline void +extent_addr_set(extent_t *extent, void *addr) { + extent->e_addr = addr; +} + +static inline void +extent_addr_randomize(UNUSED tsdn_t *tsdn, extent_t *extent, size_t alignment) { + assert(extent_base_get(extent) == extent_addr_get(extent)); + + if (alignment < PAGE) { + unsigned lg_range = LG_PAGE - + lg_floor(CACHELINE_CEILING(alignment)); + size_t r; + if (!tsdn_null(tsdn)) { + tsd_t *tsd = tsdn_tsd(tsdn); + r = (size_t)prng_lg_range_u64( + tsd_offset_statep_get(tsd), lg_range); + } else { + r = prng_lg_range_zu( + &extent_arena_get(extent)->offset_state, + lg_range, true); + } + uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE - + lg_range); + extent->e_addr = (void *)((uintptr_t)extent->e_addr + + random_offset); + assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) == + extent->e_addr); + } +} + +static inline void +extent_size_set(extent_t *extent, size_t size) { + assert((size & ~EXTENT_SIZE_MASK) == 0); + extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK); +} + +static inline void +extent_esn_set(extent_t *extent, size_t esn) { + extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn & + EXTENT_ESN_MASK); +} + +static inline void +extent_bsize_set(extent_t *extent, size_t bsize) { + extent->e_bsize = bsize; +} + +static inline void +extent_szind_set(extent_t *extent, szind_t szind) { + assert(szind <= NSIZES); /* NSIZES means "invalid". */ + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) | + ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT); +} + +static inline void +extent_nfree_set(extent_t *extent, unsigned nfree) { + assert(extent_slab_get(extent)); + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) | + ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT); +} + +static inline void +extent_nfree_inc(extent_t *extent) { + assert(extent_slab_get(extent)); + extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT); +} + +static inline void +extent_nfree_dec(extent_t *extent) { + assert(extent_slab_get(extent)); + extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT); +} + +static inline void +extent_sn_set(extent_t *extent, size_t sn) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) | + ((uint64_t)sn << EXTENT_BITS_SN_SHIFT); +} + +static inline void +extent_state_set(extent_t *extent, extent_state_t state) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) | + ((uint64_t)state << EXTENT_BITS_STATE_SHIFT); +} + +static inline void +extent_zeroed_set(extent_t *extent, bool zeroed) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) | + ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT); +} + +static inline void +extent_committed_set(extent_t *extent, bool committed) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) | + ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT); +} + +static inline void +extent_dumpable_set(extent_t *extent, bool dumpable) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) | + ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT); +} + +static inline void +extent_slab_set(extent_t *extent, bool slab) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) | + ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT); +} + +static inline void +extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) { + atomic_store_p(&extent->e_prof_tctx, tctx, 
ATOMIC_RELEASE); +} + +static inline void +extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size, + bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed, + bool committed, bool dumpable) { + assert(addr == PAGE_ADDR2BASE(addr) || !slab); + + extent_arena_set(extent, arena); + extent_addr_set(extent, addr); + extent_size_set(extent, size); + extent_slab_set(extent, slab); + extent_szind_set(extent, szind); + extent_sn_set(extent, sn); + extent_state_set(extent, state); + extent_zeroed_set(extent, zeroed); + extent_committed_set(extent, committed); + extent_dumpable_set(extent, dumpable); + ql_elm_new(extent, ql_link); + if (config_prof) { + extent_prof_tctx_set(extent, NULL); + } +} + +static inline void +extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) { + extent_arena_set(extent, NULL); + extent_addr_set(extent, addr); + extent_bsize_set(extent, bsize); + extent_slab_set(extent, false); + extent_szind_set(extent, NSIZES); + extent_sn_set(extent, sn); + extent_state_set(extent, extent_state_active); + extent_zeroed_set(extent, true); + extent_committed_set(extent, true); + extent_dumpable_set(extent, true); +} + +static inline void +extent_list_init(extent_list_t *list) { + ql_new(list); +} + +static inline extent_t * +extent_list_first(const extent_list_t *list) { + return ql_first(list); +} + +static inline extent_t * +extent_list_last(const extent_list_t *list) { + return ql_last(list, ql_link); +} + +static inline void +extent_list_append(extent_list_t *list, extent_t *extent) { + ql_tail_insert(list, extent, ql_link); +} + +static inline void +extent_list_prepend(extent_list_t *list, extent_t *extent) { + ql_head_insert(list, extent, ql_link); +} + +static inline void +extent_list_replace(extent_list_t *list, extent_t *to_remove, + extent_t *to_insert) { + ql_after_insert(to_remove, to_insert, ql_link); + ql_remove(list, to_remove, ql_link); +} + +static inline void +extent_list_remove(extent_list_t *list, extent_t *extent) { + ql_remove(list, extent, ql_link); +} + +static inline int +extent_sn_comp(const extent_t *a, const extent_t *b) { + size_t a_sn = extent_sn_get(a); + size_t b_sn = extent_sn_get(b); + + return (a_sn > b_sn) - (a_sn < b_sn); +} + +static inline int +extent_esn_comp(const extent_t *a, const extent_t *b) { + size_t a_esn = extent_esn_get(a); + size_t b_esn = extent_esn_get(b); + + return (a_esn > b_esn) - (a_esn < b_esn); +} + +static inline int +extent_ad_comp(const extent_t *a, const extent_t *b) { + uintptr_t a_addr = (uintptr_t)extent_addr_get(a); + uintptr_t b_addr = (uintptr_t)extent_addr_get(b); + + return (a_addr > b_addr) - (a_addr < b_addr); +} + +static inline int +extent_ead_comp(const extent_t *a, const extent_t *b) { + uintptr_t a_eaddr = (uintptr_t)a; + uintptr_t b_eaddr = (uintptr_t)b; + + return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr); +} + +static inline int +extent_snad_comp(const extent_t *a, const extent_t *b) { + int ret; + + ret = extent_sn_comp(a, b); + if (ret != 0) { + return ret; + } + + ret = extent_ad_comp(a, b); + return ret; +} + +static inline int +extent_esnead_comp(const extent_t *a, const extent_t *b) { + int ret; + + ret = extent_esn_comp(a, b); + if (ret != 0) { + return ret; + } + + ret = extent_ead_comp(a, b); + return ret; +} + +#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_mmap.h 
b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_mmap.h new file mode 100644 index 0000000..55f17ee --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_mmap.h @@ -0,0 +1,10 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H +#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H + +extern bool opt_retain; + +void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, + bool *zero, bool *commit); +bool extent_dalloc_mmap(void *addr, size_t size); + +#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_structs.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_structs.h new file mode 100644 index 0000000..4873b9e --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_structs.h @@ -0,0 +1,219 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H +#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/ph.h" +#include "jemalloc/internal/size_classes.h" + +typedef enum { + extent_state_active = 0, + extent_state_dirty = 1, + extent_state_muzzy = 2, + extent_state_retained = 3 +} extent_state_t; + +/* Extent (span of pages). Use accessor functions for e_* fields. */ +struct extent_s { + /* + * Bitfield containing several fields: + * + * a: arena_ind + * b: slab + * c: committed + * d: dumpable + * z: zeroed + * t: state + * i: szind + * f: nfree + * n: sn + * + * nnnnnnnn ... nnnnffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa + * + * arena_ind: Arena from which this extent came, or all 1 bits if + * unassociated. + * + * slab: The slab flag indicates whether the extent is used for a slab + * of small regions. This helps differentiate small size classes, + * and it indicates whether interior pointers can be looked up via + * iealloc(). + * + * committed: The committed flag indicates whether physical memory is + * committed to the extent, whether explicitly or implicitly + * as on a system that overcommits and satisfies physical + * memory needs on demand via soft page faults. + * + * dumpable: The dumpable flag indicates whether or not we've set the + * memory in question to be dumpable. Note that this + * interacts somewhat subtly with user-specified extent hooks, + * since we don't know if *they* are fiddling with + * dumpability (in which case, we don't want to undo whatever + * they're doing). To deal with this scenario, we: + * - Make dumpable false only for memory allocated with the + * default hooks. + * - Only allow memory to go from non-dumpable to dumpable, + * and only once. + * - Never make the OS call to allow dumping when the + * dumpable bit is already set. + * These three constraints mean that we will never + * accidentally dump user memory that the user meant to set + * nondumpable with their extent hooks. + * + * + * zeroed: The zeroed flag is used by extent recycling code to track + * whether memory is zero-filled. + * + * state: The state flag is an extent_state_t. + * + * szind: The szind flag indicates usable size class index for + * allocations residing in this extent, regardless of whether the + * extent is a slab. 
Extent size and usable size often differ + * even for non-slabs, either due to sz_large_pad or promotion of + * sampled small regions. + * + * nfree: Number of free regions in slab. + * + * sn: Serial number (potentially non-unique). + * + * Serial numbers may wrap around if !opt_retain, but as long as + * comparison functions fall back on address comparison for equal + * serial numbers, stable (if imperfect) ordering is maintained. + * + * Serial numbers may not be unique even in the absence of + * wrap-around, e.g. when splitting an extent and assigning the same + * serial number to both resulting adjacent extents. + */ + uint64_t e_bits; +#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT)) + +#define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS +#define EXTENT_BITS_ARENA_SHIFT 0 +#define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT) + +#define EXTENT_BITS_SLAB_WIDTH 1 +#define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT) +#define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT) + +#define EXTENT_BITS_COMMITTED_WIDTH 1 +#define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT) +#define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT) + +#define EXTENT_BITS_DUMPABLE_WIDTH 1 +#define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT) +#define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT) + +#define EXTENT_BITS_ZEROED_WIDTH 1 +#define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT) +#define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT) + +#define EXTENT_BITS_STATE_WIDTH 2 +#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT) +#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT) + +#define EXTENT_BITS_SZIND_WIDTH LG_CEIL_NSIZES +#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT) +#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT) + +#define EXTENT_BITS_NFREE_WIDTH (LG_SLAB_MAXREGS + 1) +#define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT) +#define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT) + +#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT) +#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT) + + /* Pointer to the extent that this structure is responsible for. */ + void *e_addr; + + union { + /* + * Extent size and serial number associated with the extent + * structure (different than the serial number for the extent at + * e_addr). + * + * ssssssss [...] ssssssss ssssnnnn nnnnnnnn + */ + size_t e_size_esn; + #define EXTENT_SIZE_MASK ((size_t)~(PAGE-1)) + #define EXTENT_ESN_MASK ((size_t)PAGE-1) + /* Base extent size, which may not be a multiple of PAGE. */ + size_t e_bsize; + }; + + /* + * List linkage, used by a variety of lists: + * - bin_t's slabs_full + * - extents_t's LRU + * - stashed dirty extents + * - arena's large allocations + */ + ql_elm(extent_t) ql_link; + + /* + * Linkage for per size class sn/address-ordered heaps, and + * for extent_avail + */ + phn(extent_t) ph_link; + + union { + /* Small region slab metadata. 
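*/

/*
 * Worked example of the MASK()/shift packing defined above (an illustrative
 * sketch, not part of the patch): each e_bits field is written by clearing
 * its mask and OR-ing the new value into place, and read back by masking and
 * shifting down, exactly as the extent_*_set()/extent_*_get() inlines in
 * extent_inlines.h do.
 */
static inline bool
extent_demo_slab_roundtrip(void) {
	uint64_t e_bits = 0;
	/* Set: clear the field, then OR in the new value at its shift. */
	e_bits = (e_bits & ~EXTENT_BITS_SLAB_MASK) |
	    ((uint64_t)true << EXTENT_BITS_SLAB_SHIFT);
	/* Get: mask the field out, then shift it back down. */
	return (bool)((e_bits & EXTENT_BITS_SLAB_MASK) >>
	    EXTENT_BITS_SLAB_SHIFT);
}

/* Small region slab metadata.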
*/ + arena_slab_data_t e_slab_data; + + /* + * Profile counters, used for large objects. Points to a + * prof_tctx_t. + */ + atomic_p_t e_prof_tctx; + }; +}; +typedef ql_head(extent_t) extent_list_t; +typedef ph(extent_t) extent_tree_t; +typedef ph(extent_t) extent_heap_t; + +/* Quantized collection of extents, with built-in LRU queue. */ +struct extents_s { + malloc_mutex_t mtx; + + /* + * Quantized per size class heaps of extents. + * + * Synchronization: mtx. + */ + extent_heap_t heaps[NPSIZES+1]; + + /* + * Bitmap for which set bits correspond to non-empty heaps. + * + * Synchronization: mtx. + */ + bitmap_t bitmap[BITMAP_GROUPS(NPSIZES+1)]; + + /* + * LRU of all extents in heaps. + * + * Synchronization: mtx. + */ + extent_list_t lru; + + /* + * Page sum for all extents in heaps. + * + * The synchronization here is a little tricky. Modifications to npages + * must hold mtx, but reads need not (though a reader who sees npages + * without holding the mutex can't assume anything about the rest of the + * state of the extents_t). + */ + atomic_zu_t npages; + + /* All stored extents must be in the same state. */ + extent_state_t state; + + /* + * If true, delay coalescing until eviction; otherwise coalesce during + * deallocation. + */ + bool delay_coalesce; +}; + +#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_types.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_types.h new file mode 100644 index 0000000..c0561d9 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/extent_types.h @@ -0,0 +1,17 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H +#define JEMALLOC_INTERNAL_EXTENT_TYPES_H + +typedef struct extent_s extent_t; +typedef struct extents_s extents_t; + +#define EXTENT_HOOKS_INITIALIZER NULL + +#define EXTENT_GROW_MAX_PIND (NPSIZES - 1) + +/* + * When reusing (and splitting) an active extent, (1U << opt_lg_extent_max_active_fit) + * is the max ratio between the size of the active extent and the new extent. + */ +#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6 + +#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/hash.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/hash.h similarity index 67% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/hash.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/hash.h index bcead33..dcfc992 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/hash.h +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/hash.h @@ -1,93 +1,76 @@ +#ifndef JEMALLOC_INTERNAL_HASH_H +#define JEMALLOC_INTERNAL_HASH_H + +#include "jemalloc/internal/assert.h" + /* * The following hash function is based on MurmurHash3, placed into the public - * domain by Austin Appleby. See http://code.google.com/p/smhasher/ for + * domain by Austin Appleby. See https://github.com/aappleby/smhasher for * details.
*/ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -uint32_t hash_x86_32(const void *key, int len, uint32_t seed); -void hash_x86_128(const void *key, const int len, uint32_t seed, - uint64_t r_out[2]); -void hash_x64_128(const void *key, const int len, const uint32_t seed, - uint64_t r_out[2]); -void hash(const void *key, size_t len, const uint32_t seed, - size_t r_hash[2]); -#endif -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_)) /******************************************************************************/ /* Internal implementation. */ -JEMALLOC_INLINE uint32_t -hash_rotl_32(uint32_t x, int8_t r) -{ - +static inline uint32_t +hash_rotl_32(uint32_t x, int8_t r) { return ((x << r) | (x >> (32 - r))); } -JEMALLOC_INLINE uint64_t -hash_rotl_64(uint64_t x, int8_t r) -{ - +static inline uint64_t +hash_rotl_64(uint64_t x, int8_t r) { return ((x << r) | (x >> (64 - r))); } -JEMALLOC_INLINE uint32_t -hash_get_block_32(const uint32_t *p, int i) -{ +static inline uint32_t +hash_get_block_32(const uint32_t *p, int i) { + /* Handle unaligned read. */ + if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) { + uint32_t ret; - return (p[i]); + memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t)); + return ret; + } + + return p[i]; } -JEMALLOC_INLINE uint64_t -hash_get_block_64(const uint64_t *p, int i) -{ +static inline uint64_t +hash_get_block_64(const uint64_t *p, int i) { + /* Handle unaligned read. 
*/ + if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) { + uint64_t ret; - return (p[i]); -} + memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t)); + return ret; + } -JEMALLOC_INLINE uint32_t -hash_fmix_32(uint32_t h) -{ + return p[i]; +} +static inline uint32_t +hash_fmix_32(uint32_t h) { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; - return (h); + return h; } -JEMALLOC_INLINE uint64_t -hash_fmix_64(uint64_t k) -{ - +static inline uint64_t +hash_fmix_64(uint64_t k) { k ^= k >> 33; k *= KQU(0xff51afd7ed558ccd); k ^= k >> 33; k *= KQU(0xc4ceb9fe1a85ec53); k ^= k >> 33; - return (k); + return k; } -JEMALLOC_INLINE uint32_t -hash_x86_32(const void *key, int len, uint32_t seed) -{ +static inline uint32_t +hash_x86_32(const void *key, int len, uint32_t seed) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 4; @@ -133,13 +116,12 @@ hash_x86_32(const void *key, int len, uint32_t seed) h1 = hash_fmix_32(h1); - return (h1); + return h1; } -UNUSED JEMALLOC_INLINE void +UNUSED static inline void hash_x86_128(const void *key, const int len, uint32_t seed, - uint64_t r_out[2]) -{ + uint64_t r_out[2]) { const uint8_t * data = (const uint8_t *) key; const int nblocks = len / 16; @@ -238,10 +220,9 @@ hash_x86_128(const void *key, const int len, uint32_t seed, r_out[1] = (((uint64_t) h4) << 32) | h3; } -UNUSED JEMALLOC_INLINE void +UNUSED static inline void hash_x64_128(const void *key, const int len, const uint32_t seed, - uint64_t r_out[2]) -{ + uint64_t r_out[2]) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 16; @@ -279,22 +260,22 @@ hash_x64_128(const void *key, const int len, const uint32_t seed, uint64_t k2 = 0; switch (len & 15) { - case 15: k2 ^= ((uint64_t)(tail[14])) << 48; - case 14: k2 ^= ((uint64_t)(tail[13])) << 40; - case 13: k2 ^= ((uint64_t)(tail[12])) << 32; - case 12: k2 ^= ((uint64_t)(tail[11])) << 24; - case 11: k2 ^= ((uint64_t)(tail[10])) << 16; - case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; + case 15: k2 ^= ((uint64_t)(tail[14])) << 48; /* falls through */ + case 14: k2 ^= ((uint64_t)(tail[13])) << 40; /* falls through */ + case 13: k2 ^= ((uint64_t)(tail[12])) << 32; /* falls through */ + case 12: k2 ^= ((uint64_t)(tail[11])) << 24; /* falls through */ + case 11: k2 ^= ((uint64_t)(tail[10])) << 16; /* falls through */ + case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; /* falls through */ case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; - - case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; - case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; - case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; - case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; - case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; - case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; - case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; + /* falls through */ + case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; /* falls through */ + case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; /* falls through */ + case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; /* falls through */ + case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; /* falls through */ + case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; /* falls through */ + case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; /* falls through */ + case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; /* falls through */ case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; } @@ -318,19 +299,20 @@ hash_x64_128(const void *key, const int len, const uint32_t seed, 
/******************************************************************************/ /* API. */ -JEMALLOC_INLINE void -hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) -{ +static inline void +hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) { + assert(len <= INT_MAX); /* Unfortunate implementation limitation. */ + #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) - hash_x64_128(key, len, seed, (uint64_t *)r_hash); + hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash); #else - uint64_t hashes[2]; - hash_x86_128(key, len, seed, hashes); - r_hash[0] = (size_t)hashes[0]; - r_hash[1] = (size_t)hashes[1]; + { + uint64_t hashes[2]; + hash_x86_128(key, (int)len, seed, hashes); + r_hash[0] = (size_t)hashes[0]; + r_hash[1] = (size_t)hashes[1]; + } #endif } -#endif -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_HASH_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/hooks.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/hooks.h new file mode 100644 index 0000000..cd49afc --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/hooks.h @@ -0,0 +1,19 @@ +#ifndef JEMALLOC_INTERNAL_HOOKS_H +#define JEMALLOC_INTERNAL_HOOKS_H + +extern JEMALLOC_EXPORT void (*hooks_arena_new_hook)(); +extern JEMALLOC_EXPORT void (*hooks_libc_hook)(); + +#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn) + +#define open JEMALLOC_HOOK(open, hooks_libc_hook) +#define read JEMALLOC_HOOK(read, hooks_libc_hook) +#define write JEMALLOC_HOOK(write, hooks_libc_hook) +#define readlink JEMALLOC_HOOK(readlink, hooks_libc_hook) +#define close JEMALLOC_HOOK(close, hooks_libc_hook) +#define creat JEMALLOC_HOOK(creat, hooks_libc_hook) +#define secure_getenv JEMALLOC_HOOK(secure_getenv, hooks_libc_hook) +/* Note that this is undef'd and re-define'd in src/prof.c. 
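*/

/*
 * How a hooked call expands (an illustrative sketch, not part of the patch):
 * the comma operator in JEMALLOC_HOOK() fires the hook, if non-NULL, purely
 * for its side effect and then yields the real symbol. With the #define of
 * open above in effect:
 */
static int
hooked_open_demo(const char *path, int flags) {
	/*
	 * Preprocesses to
	 *   ((void)(hooks_libc_hook != NULL && (hooks_libc_hook(), 0)),
	 *       open)(path, flags);
	 * the inner open is not re-expanded, per the usual self-referential
	 * macro rule, so the real libc function is called.
	 */
	return open(path, flags);
}

/* Note that this is undef'd and re-define'd in src/prof.c.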
*/ +#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook) + +#endif /* JEMALLOC_INTERNAL_HOOKS_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/huge.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/huge.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/huge.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/huge.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h similarity index 60% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h index a601d6e..be70df5 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h @@ -1,11 +1,20 @@ #ifndef JEMALLOC_INTERNAL_DECLS_H -#define JEMALLOC_INTERNAL_DECLS_H +#define JEMALLOC_INTERNAL_DECLS_H #include #ifdef _WIN32 # include # include "msvc_compat/windows_extra.h" - +# ifdef _WIN64 +# if LG_VADDR <= 32 +# error Generate the headers using x64 vcargs +# endif +# else +# if LG_VADDR > 32 +# undef LG_VADDR +# define LG_VADDR 32 +# endif +# endif #else # include # include @@ -14,10 +23,27 @@ # if !defined(SYS_write) && defined(__NR_write) # define SYS_write __NR_write # endif +# if defined(SYS_open) && defined(__aarch64__) + /* Android headers may define SYS_open to __NR_open even though + * __NR_open may not exist on AArch64 (superseded by __NR_openat). 
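*/

/*
 * Sketch of the call-site pattern the undef above protects (an assumption
 * about code outside this hunk, e.g. src/pages.c): callers guard on SYS_open
 * and fall back to libc open() when it is absent, so removing the bogus
 * definition routes Android/AArch64 through the libc path rather than a
 * nonexistent syscall number.
 */
static int
open_demo(const char *path, int flags) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
	return (int)syscall(SYS_open, path, flags);
#else
	return open(path, flags);
#endif
}

/* Hence keep SYS_open undefined on this platform: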
*/ +# undef SYS_open +# endif # include # endif # include +# include +# ifdef JEMALLOC_OS_UNFAIR_LOCK +# include +# endif +# ifdef JEMALLOC_GLIBC_MALLOC_HOOK +# include +# endif # include +# include +# include +# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME +# include +# endif #endif #include @@ -25,6 +51,9 @@ #ifndef SIZE_T_MAX # define SIZE_T_MAX SIZE_MAX #endif +#ifndef SSIZE_MAX +# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1)) +#endif #include #include #include @@ -50,9 +79,7 @@ typedef intptr_t ssize_t; # pragma warning(disable: 4996) #if _MSC_VER < 1800 static int -isblank(int c) -{ - +isblank(int c) { return (c == '\t' || c == ' '); } #endif diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in similarity index 53% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in index b0f8caa..8dad9a1 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in @@ -1,5 +1,5 @@ #ifndef JEMALLOC_INTERNAL_DEFS_H_ -#define JEMALLOC_INTERNAL_DEFS_H_ +#define JEMALLOC_INTERNAL_DEFS_H_ /* * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all * public APIs to be prefixed. This makes it possible, with some care, to use @@ -8,6 +8,18 @@ #undef JEMALLOC_PREFIX #undef JEMALLOC_CPREFIX +/* + * Define overrides for non-standard allocator-related functions if they are + * present on the system. + */ +#undef JEMALLOC_OVERRIDE___LIBC_CALLOC +#undef JEMALLOC_OVERRIDE___LIBC_FREE +#undef JEMALLOC_OVERRIDE___LIBC_MALLOC +#undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN +#undef JEMALLOC_OVERRIDE___LIBC_REALLOC +#undef JEMALLOC_OVERRIDE___LIBC_VALLOC +#undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN + /* * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. * For shared libraries, symbol visibility mechanisms prevent these symbols @@ -21,18 +33,24 @@ * order to yield to another virtual CPU. */ #undef CPU_SPINWAIT +/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */ +#undef HAVE_CPU_SPINWAIT + +/* + * Number of significant bits in virtual addresses. This may be less than the + * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 + * bits are the same as bit 47. + */ +#undef LG_VADDR /* Defined if C11 atomics are available. */ -#undef JEMALLOC_C11ATOMICS +#undef JEMALLOC_C11_ATOMICS -/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */ -#undef JEMALLOC_ATOMIC9 +/* Defined if GCC __atomic atomics are available. */ +#undef JEMALLOC_GCC_ATOMIC_ATOMICS -/* - * Defined if OSAtomic*() functions are available, as provided by Darwin, and - * documented in the atomic(3) manual page. - */ -#undef JEMALLOC_OSATOMIC +/* Defined if GCC __sync atomics are available. */ +#undef JEMALLOC_GCC_SYNC_ATOMICS /* * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and @@ -56,9 +74,9 @@ #undef JEMALLOC_HAVE_BUILTIN_CLZ /* - * Defined if madvise(2) is available. + * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. 
*/ -#undef JEMALLOC_HAVE_MADVISE +#undef JEMALLOC_OS_UNFAIR_LOCK /* * Defined if OSSpin*() functions are available, as provided by Darwin, and @@ -66,6 +84,9 @@ */ #undef JEMALLOC_OSSPIN +/* Defined if syscall(2) is usable. */ +#undef JEMALLOC_USE_SYSCALL + /* * Defined if secure_getenv(3) is available. */ @@ -76,6 +97,27 @@ */ #undef JEMALLOC_HAVE_ISSETUGID +/* Defined if pthread_atfork(3) is available. */ +#undef JEMALLOC_HAVE_PTHREAD_ATFORK + +/* Defined if pthread_setname_np(3) is available. */ +#undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. + */ +#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. + */ +#undef JEMALLOC_HAVE_CLOCK_MONOTONIC + +/* + * Defined if mach_absolute_time() is available. + */ +#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME + /* * Defined if _malloc_thread_cleanup() exists. At least in the case of * FreeBSD, pthread_key_create() allocates, which if used during malloc @@ -102,12 +144,6 @@ /* Non-empty if the tls_model attribute is supported. */ #undef JEMALLOC_TLS_MODEL -/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ -#undef JEMALLOC_CC_SILENCE - -/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */ -#undef JEMALLOC_CODE_COVERAGE - /* * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables * inline functions. @@ -130,36 +166,23 @@ #undef JEMALLOC_PROF_GCC /* - * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. - * This makes it possible to allocate/deallocate objects without any locking - * when the cache is in the steady state. - */ -#undef JEMALLOC_TCACHE - -/* - * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage + * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage * segment (DSS). */ #undef JEMALLOC_DSS -/* Support memory filling (junk/zero/quarantine/redzone). */ +/* Support memory filling (junk/zero). */ #undef JEMALLOC_FILL /* Support utrace(2)-based tracing. */ #undef JEMALLOC_UTRACE -/* Support Valgrind. */ -#undef JEMALLOC_VALGRIND - /* Support optional abort() on OOM. */ #undef JEMALLOC_XMALLOC /* Support lazy locking (avoid locking unless a second thread is launched). */ #undef JEMALLOC_LAZY_LOCK -/* Minimum size class to support is 2^LG_TINY_MIN bytes. */ -#undef LG_TINY_MIN - /* * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size * classes). @@ -169,6 +192,13 @@ /* One page is 2^LG_PAGE bytes. */ #undef LG_PAGE +/* + * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the + * system does not explicitly support huge pages; system calls that require + * explicit huge page support are separately configured. + */ +#undef LG_HUGEPAGE + /* * If defined, adjacent virtual memory mappings with identical attributes * automatically coalesce, and they fragment when changes are made to subranges. @@ -179,27 +209,29 @@ #undef JEMALLOC_MAPS_COALESCE /* - * If defined, use munmap() to unmap freed chunks, rather than storing them for - * later reuse. This is disabled by default on Linux because common sequences - * of mmap()/munmap() calls will cause virtual memory map holes. + * If defined, retain memory for later reuse by default rather than using e.g. + * munmap() to unmap freed extents. This is enabled on 64-bit Linux because + * common sequences of mmap()/munmap() calls will cause virtual memory map + * holes. 
*/ -#undef JEMALLOC_MUNMAP +#undef JEMALLOC_RETAIN /* TLS is used to map arenas and magazine caches to threads. */ #undef JEMALLOC_TLS /* - * ffs()/ffsl() functions to use for bitmapping. Don't use these directly; - * instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h. + * Used to mark unreachable code to quiet "end of non-void" compiler warnings. + * Don't use this directly; instead use unreachable() from util.h */ -#undef JEMALLOC_INTERNAL_FFSL -#undef JEMALLOC_INTERNAL_FFS +#undef JEMALLOC_INTERNAL_UNREACHABLE /* - * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside - * within jemalloc-owned chunks before dereferencing them. + * ffs*() functions to use for bitmapping. Don't use these directly; instead, + * use ffs_*() from util.h. */ -#undef JEMALLOC_IVSALLOC +#undef JEMALLOC_INTERNAL_FFSLL +#undef JEMALLOC_INTERNAL_FFSL +#undef JEMALLOC_INTERNAL_FFS /* * If defined, explicitly attempt to more uniformly distribute large allocation @@ -207,24 +239,65 @@ */ #undef JEMALLOC_CACHE_OBLIVIOUS +/* + * If defined, enable logging facilities. We make this a configure option to + * avoid taking extra branches everywhere. + */ +#undef JEMALLOC_LOG + /* * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. */ #undef JEMALLOC_ZONE -#undef JEMALLOC_ZONE_VERSION + +/* + * Methods for determining whether the OS overcommits. + * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's + * /proc/sys/vm.overcommit_memory file. + * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. + */ +#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT +#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY + +/* Defined if madvise(2) is available. */ +#undef JEMALLOC_HAVE_MADVISE + +/* + * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE + * arguments to madvise(2). + */ +#undef JEMALLOC_HAVE_MADVISE_HUGE /* * Methods for purging unused pages differ between operating systems. * - * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages, + * madvise(..., MADV_FREE) : This marks pages as being unused, such that they + * will be discarded rather than swapped out. + * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is + * defined, this immediately discards pages, * such that new pages will be demand-zeroed if - * the address region is later touched. - * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being - * unused, such that they will be discarded rather - * than swapped out. + * the address region is later touched; + * otherwise this behaves similarly to + * MADV_FREE, though typically with higher + * system overhead. */ -#undef JEMALLOC_PURGE_MADVISE_DONTNEED #undef JEMALLOC_PURGE_MADVISE_FREE +#undef JEMALLOC_PURGE_MADVISE_DONTNEED +#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS + +/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */ +#undef JEMALLOC_DEFINE_MADVISE_FREE + +/* + * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise. + */ +#undef JEMALLOC_MADVISE_DONTDUMP + +/* + * Defined if transparent huge pages (THPs) are supported via the + * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled. + */ +#undef JEMALLOC_THP /* Define if operating system has alloca.h header. */ #undef JEMALLOC_HAS_ALLOCA_H @@ -241,6 +314,9 @@ /* sizeof(long) == 2^LG_SIZEOF_LONG. */ #undef LG_SIZEOF_LONG +/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ +#undef LG_SIZEOF_LONG_LONG + /* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. 
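 * (E.g. a platform with a 64-bit intmax_t configures this to 3, since
 * 1 << 3 == 8 bytes.)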
*/ #undef LG_SIZEOF_INTMAX_T @@ -250,13 +326,41 @@ /* glibc memalign hook. */ #undef JEMALLOC_GLIBC_MEMALIGN_HOOK +/* pthread support */ +#undef JEMALLOC_HAVE_PTHREAD + +/* dlsym() support */ +#undef JEMALLOC_HAVE_DLSYM + /* Adaptive mutex support in pthreads. */ #undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP +/* GNU specific sched_getcpu support */ +#undef JEMALLOC_HAVE_SCHED_GETCPU + +/* GNU specific sched_setaffinity support */ +#undef JEMALLOC_HAVE_SCHED_SETAFFINITY + +/* + * If defined, all the features necessary for background threads are present. + */ +#undef JEMALLOC_BACKGROUND_THREAD + /* * If defined, jemalloc symbols are not exported (doesn't work when * JEMALLOC_PREFIX is not defined). */ #undef JEMALLOC_EXPORT +/* config.malloc_conf options string. */ +#undef JEMALLOC_CONFIG_MALLOC_CONF + +/* If defined, jemalloc takes the malloc/free/etc. symbol names. */ +#undef JEMALLOC_IS_MALLOC + +/* + * Defined if strerror_r returns char * if _GNU_SOURCE is defined. + */ +#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE + #endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h new file mode 100644 index 0000000..e10fb27 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h @@ -0,0 +1,53 @@ +#ifndef JEMALLOC_INTERNAL_EXTERNS_H +#define JEMALLOC_INTERNAL_EXTERNS_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/tsd_types.h" + +/* TSD checks this to set thread local slow state accordingly. */ +extern bool malloc_slow; + +/* Run-time options. */ +extern bool opt_abort; +extern bool opt_abort_conf; +extern const char *opt_junk; +extern bool opt_junk_alloc; +extern bool opt_junk_free; +extern bool opt_utrace; +extern bool opt_xmalloc; +extern bool opt_zero; +extern unsigned opt_narenas; + +/* Number of CPUs. */ +extern unsigned ncpus; + +/* Number of arenas used for automatic multiplexing of threads and arenas. */ +extern unsigned narenas_auto; + +/* + * Arenas that are used to service external requests. Not all elements of the + * arenas array are necessarily used; arenas are created lazily as needed. 
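+ * (Lazy creation is visible in arena_get() in
+ * jemalloc_internal_inlines_a.h below: a NULL slot triggers arena_init()
+ * when init_if_missing is true.)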
+ */
+extern atomic_p_t arenas[];
+
+void *a0malloc(size_t size);
+void a0dalloc(void *ptr);
+void *bootstrap_malloc(size_t size);
+void *bootstrap_calloc(size_t num, size_t size);
+void bootstrap_free(void *ptr);
+void arena_set(unsigned ind, arena_t *arena);
+unsigned narenas_total_get(void);
+arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
+arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
+arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
+void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
+void iarena_cleanup(tsd_t *tsd);
+void arena_cleanup(tsd_t *tsd);
+void arenas_tdata_cleanup(tsd_t *tsd);
+void jemalloc_prefork(void);
+void jemalloc_postfork_parent(void);
+void jemalloc_postfork_child(void);
+bool malloc_initialized(void);
+
+#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
new file mode 100644
index 0000000..437eaa4
--- /dev/null
+++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
@@ -0,0 +1,94 @@
+#ifndef JEMALLOC_INTERNAL_INCLUDES_H
+#define JEMALLOC_INTERNAL_INCLUDES_H
+
+/*
+ * jemalloc can conceptually be broken into components (arena, tcache, etc.),
+ * but there are circular dependencies that cannot be broken without
+ * substantial performance degradation.
+ *
+ * Historically, we dealt with this by splitting each header into four sections
+ * (types, structs, externs, and inlines), and included each header file
+ * multiple times in this file, picking out the portion we want on each pass
+ * using the following #defines:
+ *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
+ *                        types.
+ *   JEMALLOC_H_STRUCTS : Data structures.
+ *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
+ *   JEMALLOC_H_INLINES : Inline functions.
+ *
+ * We're moving toward a world in which the dependencies are explicit; each
+ * file will #include the headers it depends on (rather than relying on them
+ * being implicitly available via this file including every header file in the
+ * project).
+ *
+ * We're now in an intermediate state: we've broken up the header files to
+ * avoid having to include each one multiple times, but have not yet moved the
+ * dependency information into the header files (i.e. we still rely on the
+ * ordering in this file to ensure all of a header's dependencies are available
+ * in its translation unit). Each component is now broken up into multiple
+ * header files, corresponding to the sections above (e.g. instead of "foo.h",
+ * we now have "foo_types.h", "foo_structs.h", "foo_externs.h",
+ * "foo_inlines.h").
+ *
+ * Those files which have been converted to explicitly include their
+ * inter-component dependencies are now in the initial HERMETIC HEADERS
+ * section. All headers may still rely on jemalloc_preamble.h (which, by fiat,
+ * must be included first in every translation unit) for system headers and
+ * global jemalloc definitions, however.
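+ *
+ * (For illustration, the historical multi-pass scheme described above
+ * looked roughly like:
+ *
+ *   #define JEMALLOC_H_TYPES
+ *   #include "jemalloc/internal/arena.h"   // picks up only the types
+ *   #undef JEMALLOC_H_TYPES
+ *   #define JEMALLOC_H_STRUCTS
+ *   #include "jemalloc/internal/arena.h"   // picks up only the structs
+ *   #undef JEMALLOC_H_STRUCTS
+ *
+ * and so on for externs and inlines; a sketch, not the literal old code.)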
+ */ + +/******************************************************************************/ +/* TYPES */ +/******************************************************************************/ + +#include "jemalloc/internal/extent_types.h" +#include "jemalloc/internal/base_types.h" +#include "jemalloc/internal/arena_types.h" +#include "jemalloc/internal/tcache_types.h" +#include "jemalloc/internal/prof_types.h" + +/******************************************************************************/ +/* STRUCTS */ +/******************************************************************************/ + +#include "jemalloc/internal/arena_structs_a.h" +#include "jemalloc/internal/extent_structs.h" +#include "jemalloc/internal/base_structs.h" +#include "jemalloc/internal/prof_structs.h" +#include "jemalloc/internal/arena_structs_b.h" +#include "jemalloc/internal/tcache_structs.h" +#include "jemalloc/internal/background_thread_structs.h" + +/******************************************************************************/ +/* EXTERNS */ +/******************************************************************************/ + +#include "jemalloc/internal/jemalloc_internal_externs.h" +#include "jemalloc/internal/extent_externs.h" +#include "jemalloc/internal/base_externs.h" +#include "jemalloc/internal/arena_externs.h" +#include "jemalloc/internal/large_externs.h" +#include "jemalloc/internal/tcache_externs.h" +#include "jemalloc/internal/prof_externs.h" +#include "jemalloc/internal/background_thread_externs.h" + +/******************************************************************************/ +/* INLINES */ +/******************************************************************************/ + +#include "jemalloc/internal/jemalloc_internal_inlines_a.h" +#include "jemalloc/internal/base_inlines.h" +/* + * Include portions of arena code interleaved with tcache code in order to + * resolve circular dependencies. + */ +#include "jemalloc/internal/prof_inlines_a.h" +#include "jemalloc/internal/arena_inlines_a.h" +#include "jemalloc/internal/extent_inlines.h" +#include "jemalloc/internal/jemalloc_internal_inlines_b.h" +#include "jemalloc/internal/tcache_inlines.h" +#include "jemalloc/internal/arena_inlines_b.h" +#include "jemalloc/internal/jemalloc_internal_inlines_c.h" +#include "jemalloc/internal/prof_inlines_b.h" +#include "jemalloc/internal/background_thread_inlines.h" + +#endif /* JEMALLOC_INTERNAL_INCLUDES_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h new file mode 100644 index 0000000..c6a1f7e --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h @@ -0,0 +1,172 @@ +#ifndef JEMALLOC_INTERNAL_INLINES_A_H +#define JEMALLOC_INTERNAL_INLINES_A_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/bit_util.h" +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/ticker.h" + +JEMALLOC_ALWAYS_INLINE malloc_cpuid_t +malloc_getcpu(void) { + assert(have_percpu_arena); +#if defined(JEMALLOC_HAVE_SCHED_GETCPU) + return (malloc_cpuid_t)sched_getcpu(); +#else + not_reached(); + return -1; +#endif +} + +/* Return the chosen arena index based on current cpu. 
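+ * (Sketch: in per-physical-CPU mode with 2-way hyperthreading and
+ * ncpus == 8, CPUs 0..3 map to arenas 0..3 and their SMT siblings 4..7
+ * map back to the same arenas via the ncpus / 2 arithmetic below.)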
*/ +JEMALLOC_ALWAYS_INLINE unsigned +percpu_arena_choose(void) { + assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)); + + malloc_cpuid_t cpuid = malloc_getcpu(); + assert(cpuid >= 0); + + unsigned arena_ind; + if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus / + 2)) { + arena_ind = cpuid; + } else { + assert(opt_percpu_arena == per_phycpu_arena); + /* Hyper threads on the same physical CPU share arena. */ + arena_ind = cpuid - ncpus / 2; + } + + return arena_ind; +} + +/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */ +JEMALLOC_ALWAYS_INLINE unsigned +percpu_arena_ind_limit(percpu_arena_mode_t mode) { + assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode)); + if (mode == per_phycpu_arena && ncpus > 1) { + if (ncpus % 2) { + /* This likely means a misconfig. */ + return ncpus / 2 + 1; + } + return ncpus / 2; + } else { + return ncpus; + } +} + +static inline arena_tdata_t * +arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) { + arena_tdata_t *tdata; + arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); + + if (unlikely(arenas_tdata == NULL)) { + /* arenas_tdata hasn't been initialized yet. */ + return arena_tdata_get_hard(tsd, ind); + } + if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) { + /* + * ind is invalid, cache is old (too small), or tdata to be + * initialized. + */ + return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) : + NULL); + } + + tdata = &arenas_tdata[ind]; + if (likely(tdata != NULL) || !refresh_if_missing) { + return tdata; + } + return arena_tdata_get_hard(tsd, ind); +} + +static inline arena_t * +arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) { + arena_t *ret; + + assert(ind < MALLOCX_ARENA_LIMIT); + + ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE); + if (unlikely(ret == NULL)) { + if (init_if_missing) { + ret = arena_init(tsdn, ind, + (extent_hooks_t *)&extent_hooks_default); + } + } + return ret; +} + +static inline ticker_t * +decay_ticker_get(tsd_t *tsd, unsigned ind) { + arena_tdata_t *tdata; + + tdata = arena_tdata_get(tsd, ind, true); + if (unlikely(tdata == NULL)) { + return NULL; + } + return &tdata->decay_ticker; +} + +JEMALLOC_ALWAYS_INLINE cache_bin_t * +tcache_small_bin_get(tcache_t *tcache, szind_t binind) { + assert(binind < NBINS); + return &tcache->bins_small[binind]; +} + +JEMALLOC_ALWAYS_INLINE cache_bin_t * +tcache_large_bin_get(tcache_t *tcache, szind_t binind) { + assert(binind >= NBINS &&binind < nhbins); + return &tcache->bins_large[binind - NBINS]; +} + +JEMALLOC_ALWAYS_INLINE bool +tcache_available(tsd_t *tsd) { + /* + * Thread specific auto tcache might be unavailable if: 1) during tcache + * initialization, or 2) disabled through thread.tcache.enabled mallctl + * or config options. This check covers all cases. + */ + if (likely(tsd_tcache_enabled_get(tsd))) { + /* Associated arena == NULL implies tcache init in progress. */ + assert(tsd_tcachep_get(tsd)->arena == NULL || + tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail != + NULL); + return true; + } + + return false; +} + +JEMALLOC_ALWAYS_INLINE tcache_t * +tcache_get(tsd_t *tsd) { + if (!tcache_available(tsd)) { + return NULL; + } + + return tsd_tcachep_get(tsd); +} + +static inline void +pre_reentrancy(tsd_t *tsd, arena_t *arena) { + /* arena is the current context. Reentry from a0 is not allowed. 
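+	 * Typical use, as a sketch with a hypothetical callback:
+	 *
+	 *   pre_reentrancy(tsd, arena);
+	 *   hook();              // external code; may call back into malloc
+	 *   post_reentrancy(tsd);
+	 *
+	 * Bumping the reentrancy level forces any nested allocation down the
+	 * slow, reentrancy-aware path.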
*/ + assert(arena != arena_get(tsd_tsdn(tsd), 0, false)); + + bool fast = tsd_fast(tsd); + assert(tsd_reentrancy_level_get(tsd) < INT8_MAX); + ++*tsd_reentrancy_levelp_get(tsd); + if (fast) { + /* Prepare slow path for reentrancy. */ + tsd_slow_update(tsd); + assert(tsd->state == tsd_state_nominal_slow); + } +} + +static inline void +post_reentrancy(tsd_t *tsd) { + int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd); + assert(*reentrancy_level > 0); + if (--*reentrancy_level == 0) { + tsd_slow_update(tsd); + } +} + +#endif /* JEMALLOC_INTERNAL_INLINES_A_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h new file mode 100644 index 0000000..2e76e5d --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h @@ -0,0 +1,86 @@ +#ifndef JEMALLOC_INTERNAL_INLINES_B_H +#define JEMALLOC_INTERNAL_INLINES_B_H + +#include "jemalloc/internal/rtree.h" + +/* Choose an arena based on a per-thread value. */ +static inline arena_t * +arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) { + arena_t *ret; + + if (arena != NULL) { + return arena; + } + + /* During reentrancy, arena 0 is the safest bet. */ + if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) { + return arena_get(tsd_tsdn(tsd), 0, true); + } + + ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd); + if (unlikely(ret == NULL)) { + ret = arena_choose_hard(tsd, internal); + assert(ret); + if (tcache_available(tsd)) { + tcache_t *tcache = tcache_get(tsd); + if (tcache->arena != NULL) { + /* See comments in tcache_data_init().*/ + assert(tcache->arena == + arena_get(tsd_tsdn(tsd), 0, false)); + if (tcache->arena != ret) { + tcache_arena_reassociate(tsd_tsdn(tsd), + tcache, ret); + } + } else { + tcache_arena_associate(tsd_tsdn(tsd), tcache, + ret); + } + } + } + + /* + * Note that for percpu arena, if the current arena is outside of the + * auto percpu arena range, (i.e. thread is assigned to a manually + * managed arena), then percpu arena is skipped. 
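+	 * (E.g. an arena created with mallctl("arenas.create", ...) and
+	 * bound via "thread.arena" has an index at or above the auto range,
+	 * so the CPU-based override below is skipped for that thread.)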
+ */ + if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) && + !internal && (arena_ind_get(ret) < + percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd != + tsd_tsdn(tsd))) { + unsigned ind = percpu_arena_choose(); + if (arena_ind_get(ret) != ind) { + percpu_arena_update(tsd, ind); + ret = tsd_arena_get(tsd); + } + ret->last_thd = tsd_tsdn(tsd); + } + + return ret; +} + +static inline arena_t * +arena_choose(tsd_t *tsd, arena_t *arena) { + return arena_choose_impl(tsd, arena, false); +} + +static inline arena_t * +arena_ichoose(tsd_t *tsd, arena_t *arena) { + return arena_choose_impl(tsd, arena, true); +} + +static inline bool +arena_is_auto(arena_t *arena) { + assert(narenas_auto > 0); + return (arena_ind_get(arena) < narenas_auto); +} + +JEMALLOC_ALWAYS_INLINE extent_t * +iealloc(tsdn_t *tsdn, const void *ptr) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true); +} + +#endif /* JEMALLOC_INTERNAL_INLINES_B_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h new file mode 100644 index 0000000..290e5cf --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h @@ -0,0 +1,246 @@ +#ifndef JEMALLOC_INTERNAL_INLINES_C_H +#define JEMALLOC_INTERNAL_INLINES_C_H + +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/witness.h" + +/* + * Translating the names of the 'i' functions: + * Abbreviations used in the first part of the function name (before + * alloc/dalloc) describe what that function accomplishes: + * a: arena (query) + * s: size (query, or sized deallocation) + * e: extent (query) + * p: aligned (allocates) + * vs: size (query, without knowing that the pointer is into the heap) + * r: rallocx implementation + * x: xallocx implementation + * Abbreviations used in the second part of the function name (after + * alloc/dalloc) describe the arguments it takes + * z: whether to return zeroed memory + * t: accepts a tcache_t * parameter + * m: accepts an arena_t * parameter + */ + +JEMALLOC_ALWAYS_INLINE arena_t * +iaalloc(tsdn_t *tsdn, const void *ptr) { + assert(ptr != NULL); + + return arena_aalloc(tsdn, ptr); +} + +JEMALLOC_ALWAYS_INLINE size_t +isalloc(tsdn_t *tsdn, const void *ptr) { + assert(ptr != NULL); + + return arena_salloc(tsdn, ptr); +} + +JEMALLOC_ALWAYS_INLINE void * +iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, + bool is_internal, arena_t *arena, bool slow_path) { + void *ret; + + assert(size != 0); + assert(!is_internal || tcache == NULL); + assert(!is_internal || arena == NULL || arena_is_auto(arena)); + if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + } + + ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path); + if (config_stats && is_internal && likely(ret != NULL)) { + arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret)); + } + return ret; +} + +JEMALLOC_ALWAYS_INLINE void * +ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) { + return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false, + NULL, 
slow_path); +} + +JEMALLOC_ALWAYS_INLINE void * +ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, bool is_internal, arena_t *arena) { + void *ret; + + assert(usize != 0); + assert(usize == sz_sa2u(usize, alignment)); + assert(!is_internal || tcache == NULL); + assert(!is_internal || arena == NULL || arena_is_auto(arena)); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache); + assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); + if (config_stats && is_internal && likely(ret != NULL)) { + arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret)); + } + return ret; +} + +JEMALLOC_ALWAYS_INLINE void * +ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena) { + return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena); +} + +JEMALLOC_ALWAYS_INLINE void * +ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) { + return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero, + tcache_get(tsd), false, NULL); +} + +JEMALLOC_ALWAYS_INLINE size_t +ivsalloc(tsdn_t *tsdn, const void *ptr) { + return arena_vsalloc(tsdn, ptr); +} + +JEMALLOC_ALWAYS_INLINE void +idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx, + bool is_internal, bool slow_path) { + assert(ptr != NULL); + assert(!is_internal || tcache == NULL); + assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr))); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + if (config_stats && is_internal) { + arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr)); + } + if (!is_internal && !tsdn_null(tsdn) && + tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) { + assert(tcache == NULL); + } + arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void +idalloc(tsd_t *tsd, void *ptr) { + idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true); +} + +JEMALLOC_ALWAYS_INLINE void +isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + alloc_ctx_t *alloc_ctx, bool slow_path) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t extra, size_t alignment, bool zero, tcache_t *tcache, + arena_t *arena) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + void *p; + size_t usize, copysize; + + usize = sz_sa2u(size + extra, alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + return NULL; + } + p = ipalloct(tsdn, usize, alignment, zero, tcache, arena); + if (p == NULL) { + if (extra == 0) { + return NULL; + } + /* Try again, without extra this time. */ + usize = sz_sa2u(size, alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + return NULL; + } + p = ipalloct(tsdn, usize, alignment, zero, tcache, arena); + if (p == NULL) { + return NULL; + } + } + /* + * Copy at most size bytes (not size+extra), since the caller has no + * expectation that the extra bytes will be reliably preserved. + */ + copysize = (size < oldsize) ? 
size : oldsize; + memcpy(p, ptr, copysize); + isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); + return p; +} + +JEMALLOC_ALWAYS_INLINE void * +iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment, + bool zero, tcache_t *tcache, arena_t *arena) { + assert(ptr != NULL); + assert(size != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) + != 0) { + /* + * Existing object alignment is inadequate; allocate new space + * and copy. + */ + return iralloct_realign(tsdn, ptr, oldsize, size, 0, alignment, + zero, tcache, arena); + } + + return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero, + tcache); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, + bool zero) { + return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero, + tcache_get(tsd), NULL); +} + +JEMALLOC_ALWAYS_INLINE bool +ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, + size_t alignment, bool zero) { + assert(ptr != NULL); + assert(size != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) + != 0) { + /* Existing object alignment is inadequate. */ + return true; + } + + return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero); +} + +JEMALLOC_ALWAYS_INLINE int +iget_defrag_hint(tsdn_t *tsdn, void* ptr, int *bin_util, int *run_util) { + int defrag = 0; + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + szind_t szind; + bool is_slab; + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, true, &szind, &is_slab); + if (likely(is_slab)) { + /* Small allocation. 
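+		 * The utilization figures computed below are 16.16 fixed
+		 * point; e.g. a half-full bin yields *bin_util == 1 << 15.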
+		 */
+		extent_t *slab = iealloc(tsdn, ptr);
+		arena_t *arena = extent_arena_get(slab);
+		szind_t binind = extent_szind_get(slab);
+		bin_t *bin = &arena->bins[binind];
+		malloc_mutex_lock(tsdn, &bin->lock);
+		/* don't bother moving allocations from the slab currently used for new allocations */
+		if (slab != bin->slabcur) {
+			const bin_info_t *bin_info = &bin_infos[binind];
+			size_t availregs = bin_info->nregs * bin->stats.curslabs;
+			*bin_util = ((long long)bin->stats.curregs<<16) / availregs;
+			*run_util = ((long long)(bin_info->nregs - extent_nfree_get(slab))<<16) / bin_info->nregs;
+			defrag = 1;
+		}
+		malloc_mutex_unlock(tsdn, &bin->lock);
+	}
+	return defrag;
+}
+
+#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
new file mode 100644
index 0000000..ed75d37
--- /dev/null
+++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
@@ -0,0 +1,43 @@
+#ifndef JEMALLOC_INTERNAL_MACROS_H
+#define JEMALLOC_INTERNAL_MACROS_H
+
+#ifdef JEMALLOC_DEBUG
+# define JEMALLOC_ALWAYS_INLINE static inline
+#else
+# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
+#endif
+#ifdef _MSC_VER
+# define inline _inline
+#endif
+
+#define UNUSED JEMALLOC_ATTR(unused)
+
+#define ZU(z) ((size_t)z)
+#define ZD(z) ((ssize_t)z)
+#define QU(q) ((uint64_t)q)
+#define QD(q) ((int64_t)q)
+
+#define KZU(z) ZU(z##ULL)
+#define KZD(z) ZD(z##LL)
+#define KQU(q) QU(q##ULL)
+#define KQD(q) QD(q##LL)
+
+#ifndef __DECONST
+# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
+#endif
+
+#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus)
+# define restrict
+#endif
+
+/* Various function pointers are static and immutable except during testing. */
+#ifdef JEMALLOC_JET
+# define JET_MUTABLE
+#else
+# define JET_MUTABLE const
+#endif
+
+#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
+#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__
+
+#endif /* JEMALLOC_INTERNAL_MACROS_H */
diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
new file mode 100644
index 0000000..1b750b1
--- /dev/null
+++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
@@ -0,0 +1,185 @@
+#ifndef JEMALLOC_INTERNAL_TYPES_H
+#define JEMALLOC_INTERNAL_TYPES_H
+
+/* Page size index type. */
+typedef unsigned pszind_t;
+
+/* Size class index type. */
+typedef unsigned szind_t;
+
+/* Processor / core id type. */
+typedef int malloc_cpuid_t;
+
+/*
+ * Flags bits:
+ *
+ * a: arena
+ * t: tcache
+ * 0: unused
+ * z: zero
+ * n: alignment
+ *
+ * aaaaaaaa aaaatttt tttttttt 0znnnnnn
+ */
+#define MALLOCX_ARENA_BITS 12
+#define MALLOCX_TCACHE_BITS 12
+#define MALLOCX_LG_ALIGN_BITS 6
+#define MALLOCX_ARENA_SHIFT 20
+#define MALLOCX_TCACHE_SHIFT 8
+#define MALLOCX_ARENA_MASK \
+	(((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT)
+/* NB: Arena index bias decreases the maximum number of arenas by 1.
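+ * (MALLOCX_ARENA(a) encodes a + 1 so that 0 can mean "unspecified"; with
+ * MALLOCX_ARENA_BITS == 12 that leaves (1 << 12) - 1 == 4095 usable
+ * indices, matching MALLOCX_ARENA_LIMIT below.)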
*/ +#define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1) +#define MALLOCX_TCACHE_MASK \ + (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT) +#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3) +#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1) +/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */ +#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \ + (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)) +#define MALLOCX_ALIGN_GET(flags) \ + (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1)) +#define MALLOCX_ZERO_GET(flags) \ + ((bool)(flags & MALLOCX_ZERO)) + +#define MALLOCX_TCACHE_GET(flags) \ + (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2) +#define MALLOCX_ARENA_GET(flags) \ + (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1) + +/* Smallest size class to support. */ +#define TINY_MIN (1U << LG_TINY_MIN) + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +#ifndef LG_QUANTUM +# if (defined(__i386__) || defined(_M_IX86)) +# define LG_QUANTUM 4 +# endif +# ifdef __ia64__ +# define LG_QUANTUM 4 +# endif +# ifdef __alpha__ +# define LG_QUANTUM 4 +# endif +# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) +# define LG_QUANTUM 4 +# endif +# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) +# define LG_QUANTUM 4 +# endif +# ifdef __arm__ +# define LG_QUANTUM 3 +# endif +# ifdef __aarch64__ +# define LG_QUANTUM 4 +# endif +# ifdef __hppa__ +# define LG_QUANTUM 4 +# endif +# ifdef __m68k__ +# define LG_QUANTUM 3 +# endif +# ifdef __mips__ +# define LG_QUANTUM 3 +# endif +# ifdef __nios2__ +# define LG_QUANTUM 3 +# endif +# ifdef __or1k__ +# define LG_QUANTUM 3 +# endif +# ifdef __powerpc__ +# define LG_QUANTUM 4 +# endif +# if defined(__riscv) || defined(__riscv__) +# define LG_QUANTUM 4 +# endif +# ifdef __s390__ +# define LG_QUANTUM 4 +# endif +# if (defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || \ + defined(__SH4_SINGLE_ONLY__)) +# define LG_QUANTUM 4 +# endif +# ifdef __tile__ +# define LG_QUANTUM 4 +# endif +# ifdef __le32__ +# define LG_QUANTUM 4 +# endif +# ifndef LG_QUANTUM +# error "Unknown minimum alignment for architecture; specify via " + "--with-lg-quantum" +# endif +#endif + +#define QUANTUM ((size_t)(1U << LG_QUANTUM)) +#define QUANTUM_MASK (QUANTUM - 1) + +/* Return the smallest quantum multiple that is >= a. */ +#define QUANTUM_CEILING(a) \ + (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) + +#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) +#define LONG_MASK (LONG - 1) + +/* Return the smallest long multiple that is >= a. */ +#define LONG_CEILING(a) \ + (((a) + LONG_MASK) & ~LONG_MASK) + +#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) +#define PTR_MASK (SIZEOF_PTR - 1) + +/* Return the smallest (void *) multiple that is >= a. */ +#define PTR_CEILING(a) \ + (((a) + PTR_MASK) & ~PTR_MASK) + +/* + * Maximum size of L1 cache line. This is used to avoid cache line aliasing. + * In addition, this controls the spacing of cacheline-spaced size classes. + * + * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can + * only handle raw constants. + */ +#define LG_CACHELINE 6 +#define CACHELINE 64 +#define CACHELINE_MASK (CACHELINE - 1) + +/* Return the smallest cacheline multiple that is >= s. */ +#define CACHELINE_CEILING(s) \ + (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) + +/* Return the nearest aligned address at or below a. 
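+ * E.g. ALIGNMENT_ADDR2BASE(0x1234, 0x100) == 0x1200; alignment must be a
+ * power of two for the ((~(alignment)) + 1) two's-complement mask to work.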
*/ +#define ALIGNMENT_ADDR2BASE(a, alignment) \ + ((void *)((uintptr_t)(a) & ((~(alignment)) + 1))) + +/* Return the offset between a and the nearest aligned address at or below a. */ +#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ + ((size_t)((uintptr_t)(a) & (alignment - 1))) + +/* Return the smallest alignment multiple that is >= s. */ +#define ALIGNMENT_CEILING(s, alignment) \ + (((s) + (alignment - 1)) & ((~(alignment)) + 1)) + +/* Declare a variable-length array. */ +#if __STDC_VERSION__ < 199901L +# ifdef _MSC_VER +# include +# define alloca _alloca +# else +# ifdef JEMALLOC_HAS_ALLOCA_H +# include +# else +# include +# endif +# endif +# define VARIABLE_ARRAY(type, name, count) \ + type *name = alloca(sizeof(type) * (count)) +#else +# define VARIABLE_ARRAY(type, name, count) type name[(count)] +#endif + +#endif /* JEMALLOC_INTERNAL_TYPES_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in new file mode 100644 index 0000000..e621fbc --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in @@ -0,0 +1,194 @@ +#ifndef JEMALLOC_PREAMBLE_H +#define JEMALLOC_PREAMBLE_H + +#include "jemalloc_internal_defs.h" +#include "jemalloc/internal/jemalloc_internal_decls.h" + +#ifdef JEMALLOC_UTRACE +#include +#endif + +#define JEMALLOC_NO_DEMANGLE +#ifdef JEMALLOC_JET +# undef JEMALLOC_IS_MALLOC +# define JEMALLOC_N(n) jet_##n +# include "jemalloc/internal/public_namespace.h" +# define JEMALLOC_NO_RENAME +# include "../jemalloc@install_suffix@.h" +# undef JEMALLOC_NO_RENAME +#else +# define JEMALLOC_N(n) @private_namespace@##n +# include "../jemalloc@install_suffix@.h" +#endif + +#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) +#include +#endif + +#ifdef JEMALLOC_ZONE +#include +#include +#include +#endif + +#include "jemalloc/internal/jemalloc_internal_macros.h" + +/* + * Note that the ordering matters here; the hook itself is name-mangled. We + * want the inclusion of hooks to happen early, so that we hook as much as + * possible. 
+ */ +#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE +# ifndef JEMALLOC_JET +# include "jemalloc/internal/private_namespace.h" +# else +# include "jemalloc/internal/private_namespace_jet.h" +# endif +#endif +#include "jemalloc/internal/hooks.h" + +#ifdef JEMALLOC_DEFINE_MADVISE_FREE +# define JEMALLOC_MADV_FREE 8 +#endif + +static const bool config_debug = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +static const bool have_dss = +#ifdef JEMALLOC_DSS + true +#else + false +#endif + ; +static const bool have_madvise_huge = +#ifdef JEMALLOC_HAVE_MADVISE_HUGE + true +#else + false +#endif + ; +static const bool config_fill = +#ifdef JEMALLOC_FILL + true +#else + false +#endif + ; +static const bool config_lazy_lock = +#ifdef JEMALLOC_LAZY_LOCK + true +#else + false +#endif + ; +static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF; +static const bool config_prof = +#ifdef JEMALLOC_PROF + true +#else + false +#endif + ; +static const bool config_prof_libgcc = +#ifdef JEMALLOC_PROF_LIBGCC + true +#else + false +#endif + ; +static const bool config_prof_libunwind = +#ifdef JEMALLOC_PROF_LIBUNWIND + true +#else + false +#endif + ; +static const bool maps_coalesce = +#ifdef JEMALLOC_MAPS_COALESCE + true +#else + false +#endif + ; +static const bool config_stats = +#ifdef JEMALLOC_STATS + true +#else + false +#endif + ; +static const bool config_tls = +#ifdef JEMALLOC_TLS + true +#else + false +#endif + ; +static const bool config_utrace = +#ifdef JEMALLOC_UTRACE + true +#else + false +#endif + ; +static const bool config_xmalloc = +#ifdef JEMALLOC_XMALLOC + true +#else + false +#endif + ; +static const bool config_cache_oblivious = +#ifdef JEMALLOC_CACHE_OBLIVIOUS + true +#else + false +#endif + ; +/* + * Undocumented, for jemalloc development use only at the moment. See the note + * in jemalloc/internal/log.h. + */ +static const bool config_log = +#ifdef JEMALLOC_LOG + true +#else + false +#endif + ; +#ifdef JEMALLOC_HAVE_SCHED_GETCPU +/* Currently percpu_arena depends on sched_getcpu. */ +#define JEMALLOC_PERCPU_ARENA +#endif +static const bool have_percpu_arena = +#ifdef JEMALLOC_PERCPU_ARENA + true +#else + false +#endif + ; +/* + * Undocumented, and not recommended; the application should take full + * responsibility for tracking provenance. 
+ */ +static const bool force_ivsalloc = +#ifdef JEMALLOC_FORCE_IVSALLOC + true +#else + false +#endif + ; +static const bool have_background_thread = +#ifdef JEMALLOC_BACKGROUND_THREAD + true +#else + false +#endif + ; + +#endif /* JEMALLOC_PREAMBLE_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/large_externs.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/large_externs.h new file mode 100644 index 0000000..3f36282 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/large_externs.h @@ -0,0 +1,26 @@ +#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H +#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H + +void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); +void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero); +bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, + size_t usize_max, bool zero); +void *large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, + size_t alignment, bool zero, tcache_t *tcache); + +typedef void (large_dalloc_junk_t)(void *, size_t); +extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk; + +typedef void (large_dalloc_maybe_junk_t)(void *, size_t); +extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk; + +void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent); +void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent); +void large_dalloc(tsdn_t *tsdn, extent_t *extent); +size_t large_salloc(tsdn_t *tsdn, const extent_t *extent); +prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent); +void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx); +void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent); + +#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/log.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/log.h new file mode 100644 index 0000000..6420858 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/log.h @@ -0,0 +1,115 @@ +#ifndef JEMALLOC_INTERNAL_LOG_H +#define JEMALLOC_INTERNAL_LOG_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex.h" + +#ifdef JEMALLOC_LOG +# define JEMALLOC_LOG_VAR_BUFSIZE 1000 +#else +# define JEMALLOC_LOG_VAR_BUFSIZE 1 +#endif + +#define JEMALLOC_LOG_BUFSIZE 4096 + +/* + * The log malloc_conf option is a '|'-delimited list of log_var name segments + * which should be logged. The names are themselves hierarchical, with '.' as + * the delimiter (a "segment" is just a prefix in the log namespace). So, if + * you have: + * + * log("arena", "log msg for arena"); // 1 + * log("arena.a", "log msg for arena.a"); // 2 + * log("arena.b", "log msg for arena.b"); // 3 + * log("arena.a.a", "log msg for arena.a.a"); // 4 + * log("extent.a", "log msg for extent.a"); // 5 + * log("extent.b", "log msg for extent.b"); // 6 + * + * And your malloc_conf option is "log=arena.a|extent", then lines 2, 4, 5, and + * 6 will print at runtime. You can enable logging from all log vars by + * writing "log=.". + * + * None of this should be regarded as a stable API for right now. It's intended + * as a debugging interface, to let us keep around some of our printf-debugging + * statements. 
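+ *
+ * (Usage sketch with the LOG macro defined at the bottom of this header:
+ *
+ *   LOG("arena.new", "arena %u created", ind);
+ *
+ * which, when that log_var is enabled, emits
+ * "arena.new: arena 1 created\n" (for ind == 1) via malloc_write().)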
+ */ + +extern char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE]; +extern atomic_b_t log_init_done; + +typedef struct log_var_s log_var_t; +struct log_var_s { + /* + * Lowest bit is "inited", second lowest is "enabled". Putting them in + * a single word lets us avoid any fences on weak architectures. + */ + atomic_u_t state; + const char *name; +}; + +#define LOG_NOT_INITIALIZED 0U +#define LOG_INITIALIZED_NOT_ENABLED 1U +#define LOG_ENABLED 2U + +#define LOG_VAR_INIT(name_str) {ATOMIC_INIT(LOG_NOT_INITIALIZED), name_str} + +/* + * Returns the value we should assume for state (which is not necessarily + * accurate; if logging is done before logging has finished initializing, then + * we default to doing the safe thing by logging everything). + */ +unsigned log_var_update_state(log_var_t *log_var); + +/* We factor out the metadata management to allow us to test more easily. */ +#define log_do_begin(log_var) \ +if (config_log) { \ + unsigned log_state = atomic_load_u(&(log_var).state, \ + ATOMIC_RELAXED); \ + if (unlikely(log_state == LOG_NOT_INITIALIZED)) { \ + log_state = log_var_update_state(&(log_var)); \ + assert(log_state != LOG_NOT_INITIALIZED); \ + } \ + if (log_state == LOG_ENABLED) { \ + { + /* User code executes here. */ +#define log_do_end(log_var) \ + } \ + } \ +} + +/* + * MSVC has some preprocessor bugs in its expansion of __VA_ARGS__ during + * preprocessing. To work around this, we take all potential extra arguments in + * a var-args functions. Since a varargs macro needs at least one argument in + * the "...", we accept the format string there, and require that the first + * argument in this "..." is a const char *. + */ +static inline void +log_impl_varargs(const char *name, ...) { + char buf[JEMALLOC_LOG_BUFSIZE]; + va_list ap; + + va_start(ap, name); + const char *format = va_arg(ap, const char *); + size_t dst_offset = 0; + dst_offset += malloc_snprintf(buf, JEMALLOC_LOG_BUFSIZE, "%s: ", name); + dst_offset += malloc_vsnprintf(buf + dst_offset, + JEMALLOC_LOG_BUFSIZE - dst_offset, format, ap); + dst_offset += malloc_snprintf(buf + dst_offset, + JEMALLOC_LOG_BUFSIZE - dst_offset, "\n"); + va_end(ap); + + malloc_write(buf); +} + +/* Call as log("log.var.str", "format_string %d", arg_for_format_string); */ +#define LOG(log_var_str, ...) 
\ +do { \ + static log_var_t log_var = LOG_VAR_INIT(log_var_str); \ + log_do_begin(log_var) \ + log_impl_varargs((log_var).name, __VA_ARGS__); \ + log_do_end(log_var) \ +} while (0) + +#endif /* JEMALLOC_INTERNAL_LOG_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/malloc_io.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/malloc_io.h new file mode 100644 index 0000000..bfe556b --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/malloc_io.h @@ -0,0 +1,102 @@ +#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H +#define JEMALLOC_INTERNAL_MALLOC_IO_H + +#ifdef _WIN32 +# ifdef _WIN64 +# define FMT64_PREFIX "ll" +# define FMTPTR_PREFIX "ll" +# else +# define FMT64_PREFIX "ll" +# define FMTPTR_PREFIX "" +# endif +# define FMTd32 "d" +# define FMTu32 "u" +# define FMTx32 "x" +# define FMTd64 FMT64_PREFIX "d" +# define FMTu64 FMT64_PREFIX "u" +# define FMTx64 FMT64_PREFIX "x" +# define FMTdPTR FMTPTR_PREFIX "d" +# define FMTuPTR FMTPTR_PREFIX "u" +# define FMTxPTR FMTPTR_PREFIX "x" +#else +# include +# define FMTd32 PRId32 +# define FMTu32 PRIu32 +# define FMTx32 PRIx32 +# define FMTd64 PRId64 +# define FMTu64 PRIu64 +# define FMTx64 PRIx64 +# define FMTdPTR PRIdPTR +# define FMTuPTR PRIuPTR +# define FMTxPTR PRIxPTR +#endif + +/* Size of stack-allocated buffer passed to buferror(). */ +#define BUFERROR_BUF 64 + +/* + * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be + * large enough for all possible uses within jemalloc. + */ +#define MALLOC_PRINTF_BUFSIZE 4096 + +int buferror(int err, char *buf, size_t buflen); +uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, + int base); +void malloc_write(const char *s); + +/* + * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating + * point math. + */ +size_t malloc_vsnprintf(char *str, size_t size, const char *format, + va_list ap); +size_t malloc_snprintf(char *str, size_t size, const char *format, ...) + JEMALLOC_FORMAT_PRINTF(3, 4); +/* + * The caller can set write_cb and cbopaque to null to choose to print with the + * je_malloc_message hook. + */ +void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, va_list ap); +void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); +void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); + +static inline ssize_t +malloc_write_fd(int fd, const void *buf, size_t count) { +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write) + /* + * Use syscall(2) rather than write(2) when possible in order to avoid + * the possibility of memory allocation within libc. This is necessary + * on FreeBSD; most operating systems do not have this problem though. + * + * syscall() returns long or int, depending on platform, so capture the + * result in the widest plausible type to avoid compiler warnings. 
+ */ + long result = syscall(SYS_write, fd, buf, count); +#else + ssize_t result = (ssize_t)write(fd, buf, +#ifdef _WIN32 + (unsigned int) +#endif + count); +#endif + return (ssize_t)result; +} + +static inline ssize_t +malloc_read_fd(int fd, void *buf, size_t count) { +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read) + long result = syscall(SYS_read, fd, buf, count); +#else + ssize_t result = read(fd, buf, +#ifdef _WIN32 + (unsigned int) +#endif + count); +#endif + return (ssize_t)result; +} + +#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/mb.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/mb.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/mb.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/mb.h diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/mutex.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/mutex.h new file mode 100644 index 0000000..6520c25 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/mutex.h @@ -0,0 +1,248 @@ +#ifndef JEMALLOC_INTERNAL_MUTEX_H +#define JEMALLOC_INTERNAL_MUTEX_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/mutex_prof.h" +#include "jemalloc/internal/tsd.h" +#include "jemalloc/internal/witness.h" + +typedef enum { + /* Can only acquire one mutex of a given witness rank at a time. */ + malloc_mutex_rank_exclusive, + /* + * Can acquire multiple mutexes of the same witness rank, but in + * address-ascending order only. + */ + malloc_mutex_address_ordered +} malloc_mutex_lock_order_t; + +typedef struct malloc_mutex_s malloc_mutex_t; +struct malloc_mutex_s { + union { + struct { + /* + * prof_data is defined first to reduce cacheline + * bouncing: the data is not touched by the mutex holder + * during unlocking, while might be modified by + * contenders. Having it before the mutex itself could + * avoid prefetching a modified cacheline (for the + * unlocking thread). + */ + mutex_prof_data_t prof_data; +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + SRWLOCK lock; +# else + CRITICAL_SECTION lock; +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock lock; +#elif (defined(JEMALLOC_OSSPIN)) + OSSpinLock lock; +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) + pthread_mutex_t lock; + malloc_mutex_t *postponed_next; +#else + pthread_mutex_t lock; +#endif + }; + /* + * We only touch witness when configured w/ debug. However we + * keep the field in a union when !debug so that we don't have + * to pollute the code base with #ifdefs, while avoid paying the + * memory cost. + */ +#if !defined(JEMALLOC_DEBUG) + witness_t witness; + malloc_mutex_lock_order_t lock_order; +#endif + }; + +#if defined(JEMALLOC_DEBUG) + witness_t witness; + malloc_mutex_lock_order_t lock_order; +#endif +}; + +/* + * Based on benchmark results, a fixed spin with this amount of retries works + * well for our critical sections. 
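+ * (The slow path can be pictured as:
+ *
+ *   for (int i = 0; i < MALLOC_MUTEX_MAX_SPIN; i++) {
+ *       CPU_SPINWAIT;
+ *       if (!malloc_mutex_trylock_final(mutex)) {
+ *           return;   // acquired without blocking
+ *       }
+ *   }
+ *   // give up and block on the OS primitive
+ *
+ * a sketch only; the real loop lives in malloc_mutex_lock_slow(), defined
+ * in mutex.c.)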
+ */ +#define MALLOC_MUTEX_MAX_SPIN 250 + +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 +# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock)) +# else +# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock)) +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) +# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock)) +#elif (defined(JEMALLOC_OSSPIN)) +# define MALLOC_MUTEX_LOCK(m) OSSpinLockLock(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) OSSpinLockUnlock(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock)) +#else +# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0) +#endif + +#define LOCK_PROF_DATA_INITIALIZER \ + {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \ + ATOMIC_INIT(0), 0, NULL, 0} + +#ifdef _WIN32 +# define MALLOC_MUTEX_INITIALIZER +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} +#elif (defined(JEMALLOC_OSSPIN)) +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, 0}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} +#else +# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} +#endif + +#ifdef JEMALLOC_LAZY_LOCK +extern bool isthreaded; +#else +# undef isthreaded /* Undo private_namespace.h definition. */ +# define isthreaded true +#endif + +bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, + witness_rank_t rank, malloc_mutex_lock_order_t lock_order); +void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); +bool malloc_mutex_boot(void); +void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex); + +void malloc_mutex_lock_slow(malloc_mutex_t *mutex); + +static inline void +malloc_mutex_lock_final(malloc_mutex_t *mutex) { + MALLOC_MUTEX_LOCK(mutex); +} + +static inline bool +malloc_mutex_trylock_final(malloc_mutex_t *mutex) { + return MALLOC_MUTEX_TRYLOCK(mutex); +} + +static inline void +mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) { + if (config_stats) { + mutex_prof_data_t *data = &mutex->prof_data; + data->n_lock_ops++; + if (data->prev_owner != tsdn) { + data->prev_owner = tsdn; + data->n_owner_switches++; + } + } +} + +/* Trylock: return false if the lock is successfully acquired. 
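+ * I.e. the return value is an error flag rather than a success flag:
+ *
+ *   if (!malloc_mutex_trylock(tsdn, mutex)) {
+ *       // lock is held here
+ *       malloc_mutex_unlock(tsdn, mutex);
+ *   }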
*/ +static inline bool +malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); + if (isthreaded) { + if (malloc_mutex_trylock_final(mutex)) { + return true; + } + mutex_owner_stats_update(tsdn, mutex); + } + witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); + + return false; +} + +/* Aggregate lock prof data. */ +static inline void +malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) { + nstime_add(&sum->tot_wait_time, &data->tot_wait_time); + if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) { + nstime_copy(&sum->max_wait_time, &data->max_wait_time); + } + + sum->n_wait_times += data->n_wait_times; + sum->n_spin_acquired += data->n_spin_acquired; + + if (sum->max_n_thds < data->max_n_thds) { + sum->max_n_thds = data->max_n_thds; + } + uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds, + ATOMIC_RELAXED); + uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32( + &data->n_waiting_thds, ATOMIC_RELAXED); + atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds, + ATOMIC_RELAXED); + sum->n_owner_switches += data->n_owner_switches; + sum->n_lock_ops += data->n_lock_ops; +} + +static inline void +malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); + if (isthreaded) { + if (malloc_mutex_trylock_final(mutex)) { + malloc_mutex_lock_slow(mutex); + } + mutex_owner_stats_update(tsdn, mutex); + } + witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); +} + +static inline void +malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); + if (isthreaded) { + MALLOC_MUTEX_UNLOCK(mutex); + } +} + +static inline void +malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); +} + +static inline void +malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); +} + +/* Copy the prof data from mutex for processing. */ +static inline void +malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data, + malloc_mutex_t *mutex) { + mutex_prof_data_t *source = &mutex->prof_data; + /* Can only read holding the mutex. */ + malloc_mutex_assert_owner(tsdn, mutex); + + /* + * Not *really* allowed (we shouldn't be doing non-atomic loads of + * atomic data), but the mutex protection makes this safe, and writing + * a member-for-member copy is tedious for this situation. + */ + *data = *source; + /* n_wait_thds is not reported (modified w/o locking). */ + atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED); +} + +#endif /* JEMALLOC_INTERNAL_MUTEX_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/mutex_pool.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/mutex_pool.h new file mode 100644 index 0000000..726cece --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/mutex_pool.h @@ -0,0 +1,94 @@ +#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H +#define JEMALLOC_INTERNAL_MUTEX_POOL_H + +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/witness.h" + +/* We do mod reductions by this value, so it should be kept a power of 2. 
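+ * Keeping it a power of two lets the modulo in mutex_pool_mutex() below
+ * reduce to a mask: x % 256 == x & 255.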
*/ +#define MUTEX_POOL_SIZE 256 + +typedef struct mutex_pool_s mutex_pool_t; +struct mutex_pool_s { + malloc_mutex_t mutexes[MUTEX_POOL_SIZE]; +}; + +bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank); + +/* Internal helper - not meant to be called outside this module. */ +static inline malloc_mutex_t * +mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) { + size_t hash_result[2]; + hash(&key, sizeof(key), 0xd50dcc1b, hash_result); + return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE]; +} + +static inline void +mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) { + for (int i = 0; i < MUTEX_POOL_SIZE; i++) { + malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]); + } +} + +/* + * Note that a mutex pool doesn't work exactly the way an embedded mutex would. + * You're not allowed to acquire mutexes in the pool one at a time. You have to + * acquire all the mutexes you'll need in a single function call, and then + * release them all in a single function call. + */ + +static inline void +mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { + mutex_pool_assert_not_held(tsdn, pool); + + malloc_mutex_t *mutex = mutex_pool_mutex(pool, key); + malloc_mutex_lock(tsdn, mutex); +} + +static inline void +mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { + malloc_mutex_t *mutex = mutex_pool_mutex(pool, key); + malloc_mutex_unlock(tsdn, mutex); + + mutex_pool_assert_not_held(tsdn, pool); +} + +static inline void +mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1, + uintptr_t key2) { + mutex_pool_assert_not_held(tsdn, pool); + + malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1); + malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2); + if ((uintptr_t)mutex1 < (uintptr_t)mutex2) { + malloc_mutex_lock(tsdn, mutex1); + malloc_mutex_lock(tsdn, mutex2); + } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) { + malloc_mutex_lock(tsdn, mutex1); + } else { + malloc_mutex_lock(tsdn, mutex2); + malloc_mutex_lock(tsdn, mutex1); + } +} + +static inline void +mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1, + uintptr_t key2) { + malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1); + malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2); + if (mutex1 == mutex2) { + malloc_mutex_unlock(tsdn, mutex1); + } else { + malloc_mutex_unlock(tsdn, mutex1); + malloc_mutex_unlock(tsdn, mutex2); + } + + mutex_pool_assert_not_held(tsdn, pool); +} + +static inline void +mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { + malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key)); +} + +#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/mutex_prof.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/mutex_prof.h new file mode 100644 index 0000000..ce183d3 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/mutex_prof.h @@ -0,0 +1,99 @@ +#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H +#define JEMALLOC_INTERNAL_MUTEX_PROF_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/tsd_types.h" + +#define MUTEX_PROF_GLOBAL_MUTEXES \ + OP(background_thread) \ + OP(ctl) \ + OP(prof) + +typedef enum { +#define OP(mtx) global_prof_mutex_##mtx, + MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + mutex_prof_num_global_mutexes +} mutex_prof_global_ind_t; + +#define MUTEX_PROF_ARENA_MUTEXES \ + OP(large) \ +
OP(extent_avail) \ + OP(extents_dirty) \ + OP(extents_muzzy) \ + OP(extents_retained) \ + OP(decay_dirty) \ + OP(decay_muzzy) \ + OP(base) \ + OP(tcache_list) + +typedef enum { +#define OP(mtx) arena_prof_mutex_##mtx, + MUTEX_PROF_ARENA_MUTEXES +#undef OP + mutex_prof_num_arena_mutexes +} mutex_prof_arena_ind_t; + +#define MUTEX_PROF_UINT64_COUNTERS \ + OP(num_ops, uint64_t, "n_lock_ops") \ + OP(num_wait, uint64_t, "n_waiting") \ + OP(num_spin_acq, uint64_t, "n_spin_acq") \ + OP(num_owner_switch, uint64_t, "n_owner_switch") \ + OP(total_wait_time, uint64_t, "total_wait_ns") \ + OP(max_wait_time, uint64_t, "max_wait_ns") + +#define MUTEX_PROF_UINT32_COUNTERS \ + OP(max_num_thds, uint32_t, "max_n_thds") + +#define MUTEX_PROF_COUNTERS \ + MUTEX_PROF_UINT64_COUNTERS \ + MUTEX_PROF_UINT32_COUNTERS + +#define OP(counter, type, human) mutex_counter_##counter, + +#define COUNTER_ENUM(counter_list, t) \ + typedef enum { \ + counter_list \ + mutex_prof_num_##t##_counters \ + } mutex_prof_##t##_counter_ind_t; + +COUNTER_ENUM(MUTEX_PROF_UINT64_COUNTERS, uint64_t) +COUNTER_ENUM(MUTEX_PROF_UINT32_COUNTERS, uint32_t) + +#undef COUNTER_ENUM +#undef OP + +typedef struct { + /* + * Counters touched on the slow path, i.e. when there is lock + * contention. We update them once we have the lock. + */ + /* Total time (in nano seconds) spent waiting on this mutex. */ + nstime_t tot_wait_time; + /* Max time (in nano seconds) spent on a single lock operation. */ + nstime_t max_wait_time; + /* # of times have to wait for this mutex (after spinning). */ + uint64_t n_wait_times; + /* # of times acquired the mutex through local spinning. */ + uint64_t n_spin_acquired; + /* Max # of threads waiting for the mutex at the same time. */ + uint32_t max_n_thds; + /* Current # of threads waiting on the lock. Atomic synced. */ + atomic_u32_t n_waiting_thds; + + /* + * Data touched on the fast path. These are modified right after we + * grab the lock, so it's placed closest to the end (i.e. right before + * the lock) so that we have a higher chance of them being on the same + * cacheline. + */ + /* # of times the mutex holder is different than the previous one. */ + uint64_t n_owner_switches; + /* Previous mutex holder, to facilitate n_owner_switches. */ + tsdn_t *prev_owner; + /* # of lock() operations in total. */ + uint64_t n_lock_ops; +} mutex_prof_data_t; + +#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/nstime.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/nstime.h new file mode 100644 index 0000000..17c177c --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/nstime.h @@ -0,0 +1,34 @@ +#ifndef JEMALLOC_INTERNAL_NSTIME_H +#define JEMALLOC_INTERNAL_NSTIME_H + +/* Maximum supported number of seconds (~584 years). 
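+ * Derivation, for reference: 2^64 ns / 10^9 ns/s ~= 1.84e10 s, and
+ * 1.84e10 s / 3.16e7 s/year ~= 584 years; NSTIME_SEC_MAX below is
+ * chosen just under 2^64 / 10^9 so that sec * 10^9 fits in the
+ * uint64_t nanosecond representation used by nstime_t.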
*/ +#define NSTIME_SEC_MAX KQU(18446744072) +#define NSTIME_ZERO_INITIALIZER {0} + +typedef struct { + uint64_t ns; +} nstime_t; + +void nstime_init(nstime_t *time, uint64_t ns); +void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec); +uint64_t nstime_ns(const nstime_t *time); +uint64_t nstime_sec(const nstime_t *time); +uint64_t nstime_msec(const nstime_t *time); +uint64_t nstime_nsec(const nstime_t *time); +void nstime_copy(nstime_t *time, const nstime_t *source); +int nstime_compare(const nstime_t *a, const nstime_t *b); +void nstime_add(nstime_t *time, const nstime_t *addend); +void nstime_iadd(nstime_t *time, uint64_t addend); +void nstime_subtract(nstime_t *time, const nstime_t *subtrahend); +void nstime_isubtract(nstime_t *time, uint64_t subtrahend); +void nstime_imultiply(nstime_t *time, uint64_t multiplier); +void nstime_idivide(nstime_t *time, uint64_t divisor); +uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor); + +typedef bool (nstime_monotonic_t)(void); +extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic; + +typedef bool (nstime_update_t)(nstime_t *); +extern nstime_update_t *JET_MUTABLE nstime_update; + +#endif /* JEMALLOC_INTERNAL_NSTIME_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/pages.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/pages.h new file mode 100644 index 0000000..7dae633 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/pages.h @@ -0,0 +1,88 @@ +#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H +#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H + +/* Page size. LG_PAGE is determined by the configure script. */ +#ifdef PAGE_MASK +# undef PAGE_MASK +#endif +#define PAGE ((size_t)(1U << LG_PAGE)) +#define PAGE_MASK ((size_t)(PAGE - 1)) +/* Return the page base address for the page containing address a. */ +#define PAGE_ADDR2BASE(a) \ + ((void *)((uintptr_t)(a) & ~PAGE_MASK)) +/* Return the smallest pagesize multiple that is >= s. */ +#define PAGE_CEILING(s) \ + (((s) + PAGE_MASK) & ~PAGE_MASK) + +/* Huge page size. LG_HUGEPAGE is determined by the configure script. */ +#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE)) +#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1)) +/* Return the huge page base address for the huge page containing address a. */ +#define HUGEPAGE_ADDR2BASE(a) \ + ((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK)) +/* Return the smallest pagesize multiple that is >= s. */ +#define HUGEPAGE_CEILING(s) \ + (((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK) + +/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */ +#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE) +# define PAGES_CAN_PURGE_LAZY +#endif +/* + * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported. + * + * The only supported way to hard-purge on Windows is to decommit and then + * re-commit, but doing so is racy, and if re-commit fails it's a pain to + * propagate the "poisoned" memory state. Since we typically decommit as the + * next step after purging on Windows anyway, there's no point in adding such + * complexity. 
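+ * For context (a behavioral sketch, not something defined in this
+ * header): on systems that support it, lazy purging corresponds to
+ * madvise(addr, size, MADV_FREE), which merely allows the kernel to
+ * reclaim the pages at its leisure, while forced purging corresponds
+ * to madvise(addr, size, MADV_DONTNEED), after which the next access
+ * is guaranteed to observe zero-filled pages.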
+ */ +#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ + defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \ + defined(JEMALLOC_MAPS_COALESCE)) +# define PAGES_CAN_PURGE_FORCED +#endif + +static const bool pages_can_purge_lazy = +#ifdef PAGES_CAN_PURGE_LAZY + true +#else + false +#endif + ; +static const bool pages_can_purge_forced = +#ifdef PAGES_CAN_PURGE_FORCED + true +#else + false +#endif + ; + +typedef enum { + thp_mode_default = 0, /* Do not change hugepage settings. */ + thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */ + thp_mode_never = 2, /* Always set MADV_NOHUGEPAGE. */ + + thp_mode_names_limit = 3, /* Used for option processing. */ + thp_mode_not_supported = 3 /* No THP support detected. */ +} thp_mode_t; + +#define THP_MODE_DEFAULT thp_mode_default +extern thp_mode_t opt_thp; +extern thp_mode_t init_system_thp_mode; /* Initial system wide state. */ +extern const char *thp_mode_names[]; + +void *pages_map(void *addr, size_t size, size_t alignment, bool *commit); +void pages_unmap(void *addr, size_t size); +bool pages_commit(void *addr, size_t size); +bool pages_decommit(void *addr, size_t size); +bool pages_purge_lazy(void *addr, size_t size); +bool pages_purge_forced(void *addr, size_t size); +bool pages_huge(void *addr, size_t size); +bool pages_nohuge(void *addr, size_t size); +bool pages_dontdump(void *addr, size_t size); +bool pages_dodump(void *addr, size_t size); +bool pages_boot(void); +void pages_set_thp_state (void *ptr, size_t size); + +#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ph.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ph.h new file mode 100644 index 0000000..84d6778 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ph.h @@ -0,0 +1,391 @@ +/* + * A Pairing Heap implementation. + * + * "The Pairing Heap: A New Form of Self-Adjusting Heap" + * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf + * + * With auxiliary twopass list, described in a follow on paper. + * + * "Pairing Heaps: Experiments and Analysis" + * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf + * + ******************************************************************************* + */ + +#ifndef PH_H_ +#define PH_H_ + +/* Node structure. */ +#define phn(a_type) \ +struct { \ + a_type *phn_prev; \ + a_type *phn_next; \ + a_type *phn_lchild; \ +} + +/* Root structure. */ +#define ph(a_type) \ +struct { \ + a_type *ph_root; \ +} + +/* Internal utility macros. 
*/ +#define phn_lchild_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_lchild) +#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \ + a_phn->a_field.phn_lchild = a_lchild; \ +} while (0) + +#define phn_next_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_next) +#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \ + a_phn->a_field.phn_prev = a_prev; \ +} while (0) + +#define phn_prev_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_prev) +#define phn_next_set(a_type, a_field, a_phn, a_next) do { \ + a_phn->a_field.phn_next = a_next; \ +} while (0) + +#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \ + a_type *phn0child; \ + \ + assert(a_phn0 != NULL); \ + assert(a_phn1 != NULL); \ + assert(a_cmp(a_phn0, a_phn1) <= 0); \ + \ + phn_prev_set(a_type, a_field, a_phn1, a_phn0); \ + phn0child = phn_lchild_get(a_type, a_field, a_phn0); \ + phn_next_set(a_type, a_field, a_phn1, phn0child); \ + if (phn0child != NULL) { \ + phn_prev_set(a_type, a_field, phn0child, a_phn1); \ + } \ + phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \ +} while (0) + +#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \ + if (a_phn0 == NULL) { \ + r_phn = a_phn1; \ + } else if (a_phn1 == NULL) { \ + r_phn = a_phn0; \ + } else if (a_cmp(a_phn0, a_phn1) < 0) { \ + phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \ + a_cmp); \ + r_phn = a_phn0; \ + } else { \ + phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \ + a_cmp); \ + r_phn = a_phn1; \ + } \ +} while (0) + +#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \ + a_type *head = NULL; \ + a_type *tail = NULL; \ + a_type *phn0 = a_phn; \ + a_type *phn1 = phn_next_get(a_type, a_field, phn0); \ + \ + /* \ + * Multipass merge, wherein the first two elements of a FIFO \ + * are repeatedly merged, and each result is appended to the \ + * singly linked FIFO, until the FIFO contains only a single \ + * element. We start with a sibling list but no reference to \ + * its tail, so we do a single pass over the sibling list to \ + * populate the FIFO. 
\ + */ \ + if (phn1 != NULL) { \ + a_type *phnrest = phn_next_get(a_type, a_field, phn1); \ + if (phnrest != NULL) { \ + phn_prev_set(a_type, a_field, phnrest, NULL); \ + } \ + phn_prev_set(a_type, a_field, phn0, NULL); \ + phn_next_set(a_type, a_field, phn0, NULL); \ + phn_prev_set(a_type, a_field, phn1, NULL); \ + phn_next_set(a_type, a_field, phn1, NULL); \ + phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \ + head = tail = phn0; \ + phn0 = phnrest; \ + while (phn0 != NULL) { \ + phn1 = phn_next_get(a_type, a_field, phn0); \ + if (phn1 != NULL) { \ + phnrest = phn_next_get(a_type, a_field, \ + phn1); \ + if (phnrest != NULL) { \ + phn_prev_set(a_type, a_field, \ + phnrest, NULL); \ + } \ + phn_prev_set(a_type, a_field, phn0, \ + NULL); \ + phn_next_set(a_type, a_field, phn0, \ + NULL); \ + phn_prev_set(a_type, a_field, phn1, \ + NULL); \ + phn_next_set(a_type, a_field, phn1, \ + NULL); \ + phn_merge(a_type, a_field, phn0, phn1, \ + a_cmp, phn0); \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = phnrest; \ + } else { \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = NULL; \ + } \ + } \ + phn0 = head; \ + phn1 = phn_next_get(a_type, a_field, phn0); \ + if (phn1 != NULL) { \ + while (true) { \ + head = phn_next_get(a_type, a_field, \ + phn1); \ + assert(phn_prev_get(a_type, a_field, \ + phn0) == NULL); \ + phn_next_set(a_type, a_field, phn0, \ + NULL); \ + assert(phn_prev_get(a_type, a_field, \ + phn1) == NULL); \ + phn_next_set(a_type, a_field, phn1, \ + NULL); \ + phn_merge(a_type, a_field, phn0, phn1, \ + a_cmp, phn0); \ + if (head == NULL) { \ + break; \ + } \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = head; \ + phn1 = phn_next_get(a_type, a_field, \ + phn0); \ + } \ + } \ + } \ + r_phn = phn0; \ +} while (0) + +#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \ + a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \ + if (phn != NULL) { \ + phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \ + phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \ + phn_prev_set(a_type, a_field, phn, NULL); \ + ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \ + assert(phn_next_get(a_type, a_field, phn) == NULL); \ + phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \ + a_ph->ph_root); \ + } \ +} while (0) + +#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \ + a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \ + if (lchild == NULL) { \ + r_phn = NULL; \ + } else { \ + ph_merge_siblings(a_type, a_field, lchild, a_cmp, \ + r_phn); \ + } \ +} while (0) + +/* + * The ph_proto() macro generates function prototypes that correspond to the + * functions generated by an equivalently parameterized call to ph_gen(). + */ +#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \ +a_attr void a_prefix##new(a_ph_type *ph); \ +a_attr bool a_prefix##empty(a_ph_type *ph); \ +a_attr a_type *a_prefix##first(a_ph_type *ph); \ +a_attr a_type *a_prefix##any(a_ph_type *ph); \ +a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \ +a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \ +a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \ +a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn); + +/* + * The ph_gen() macro generates a type-specific pairing heap implementation, + * based on the above cpp macros. 
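+ * A typical instantiation, with a hypothetical node type (the prefix,
+ * field, and comparator names are the caller's choice):
+ *
+ *   typedef struct node_s node_t;
+ *   struct node_s {
+ *           int key;
+ *           phn(node_t) link;
+ *   };
+ *   typedef ph(node_t) heap_t;
+ *   ph_proto(static, heap_, heap_t, node_t)
+ *   ph_gen(static, heap_, heap_t, node_t, link, node_cmp)
+ *
+ * where node_cmp(a, b) returns negative, zero, or positive.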
+ */ +#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \ +a_attr void \ +a_prefix##new(a_ph_type *ph) { \ + memset(ph, 0, sizeof(ph(a_type))); \ +} \ +a_attr bool \ +a_prefix##empty(a_ph_type *ph) { \ + return (ph->ph_root == NULL); \ +} \ +a_attr a_type * \ +a_prefix##first(a_ph_type *ph) { \ + if (ph->ph_root == NULL) { \ + return NULL; \ + } \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + return ph->ph_root; \ +} \ +a_attr a_type * \ +a_prefix##any(a_ph_type *ph) { \ + if (ph->ph_root == NULL) { \ + return NULL; \ + } \ + a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \ + if (aux != NULL) { \ + return aux; \ + } \ + return ph->ph_root; \ +} \ +a_attr void \ +a_prefix##insert(a_ph_type *ph, a_type *phn) { \ + memset(&phn->a_field, 0, sizeof(phn(a_type))); \ + \ + /* \ + * Treat the root as an aux list during insertion, and lazily \ + * merge during a_prefix##remove_first(). For elements that \ + * are inserted, then removed via a_prefix##remove() before the \ + * aux list is ever processed, this makes insert/remove \ + * constant-time, whereas eager merging would make insert \ + * O(log n). \ + */ \ + if (ph->ph_root == NULL) { \ + ph->ph_root = phn; \ + } else { \ + phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \ + a_field, ph->ph_root)); \ + if (phn_next_get(a_type, a_field, ph->ph_root) != \ + NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, ph->ph_root), \ + phn); \ + } \ + phn_prev_set(a_type, a_field, phn, ph->ph_root); \ + phn_next_set(a_type, a_field, ph->ph_root, phn); \ + } \ +} \ +a_attr a_type * \ +a_prefix##remove_first(a_ph_type *ph) { \ + a_type *ret; \ + \ + if (ph->ph_root == NULL) { \ + return NULL; \ + } \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + \ + ret = ph->ph_root; \ + \ + ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ + ph->ph_root); \ + \ + return ret; \ +} \ +a_attr a_type * \ +a_prefix##remove_any(a_ph_type *ph) { \ + /* \ + * Remove the most recently inserted aux list element, or the \ + * root if the aux list is empty. This has the effect of \ + * behaving as a LIFO (and insertion/removal is therefore \ + * constant-time) if a_prefix##[remove_]first() are never \ + * called. \ + */ \ + if (ph->ph_root == NULL) { \ + return NULL; \ + } \ + a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \ + if (ret != NULL) { \ + a_type *aux = phn_next_get(a_type, a_field, ret); \ + phn_next_set(a_type, a_field, ph->ph_root, aux); \ + if (aux != NULL) { \ + phn_prev_set(a_type, a_field, aux, \ + ph->ph_root); \ + } \ + return ret; \ + } \ + ret = ph->ph_root; \ + ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ + ph->ph_root); \ + return ret; \ +} \ +a_attr void \ +a_prefix##remove(a_ph_type *ph, a_type *phn) { \ + a_type *replace, *parent; \ + \ + if (ph->ph_root == phn) { \ + /* \ + * We can delete from aux list without merging it, but \ + * we need to merge if we are dealing with the root \ + * node and it has children. \ + */ \ + if (phn_lchild_get(a_type, a_field, phn) == NULL) { \ + ph->ph_root = phn_next_get(a_type, a_field, \ + phn); \ + if (ph->ph_root != NULL) { \ + phn_prev_set(a_type, a_field, \ + ph->ph_root, NULL); \ + } \ + return; \ + } \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + if (ph->ph_root == phn) { \ + ph_merge_children(a_type, a_field, ph->ph_root, \ + a_cmp, ph->ph_root); \ + return; \ + } \ + } \ + \ + /* Get parent (if phn is leftmost child) before mutating. 
*/ \ + if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \ + if (phn_lchild_get(a_type, a_field, parent) != phn) { \ + parent = NULL; \ + } \ + } \ + /* Find a possible replacement node, and link to parent. */ \ + ph_merge_children(a_type, a_field, phn, a_cmp, replace); \ + /* Set next/prev for sibling linked list. */ \ + if (replace != NULL) { \ + if (parent != NULL) { \ + phn_prev_set(a_type, a_field, replace, parent); \ + phn_lchild_set(a_type, a_field, parent, \ + replace); \ + } else { \ + phn_prev_set(a_type, a_field, replace, \ + phn_prev_get(a_type, a_field, phn)); \ + if (phn_prev_get(a_type, a_field, phn) != \ + NULL) { \ + phn_next_set(a_type, a_field, \ + phn_prev_get(a_type, a_field, phn), \ + replace); \ + } \ + } \ + phn_next_set(a_type, a_field, replace, \ + phn_next_get(a_type, a_field, phn)); \ + if (phn_next_get(a_type, a_field, phn) != NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, phn), \ + replace); \ + } \ + } else { \ + if (parent != NULL) { \ + a_type *next = phn_next_get(a_type, a_field, \ + phn); \ + phn_lchild_set(a_type, a_field, parent, next); \ + if (next != NULL) { \ + phn_prev_set(a_type, a_field, next, \ + parent); \ + } \ + } else { \ + assert(phn_prev_get(a_type, a_field, phn) != \ + NULL); \ + phn_next_set(a_type, a_field, \ + phn_prev_get(a_type, a_field, phn), \ + phn_next_get(a_type, a_field, phn)); \ + } \ + if (phn_next_get(a_type, a_field, phn) != NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, phn), \ + phn_prev_get(a_type, a_field, phn)); \ + } \ + } \ +} + +#endif /* PH_H_ */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/private_namespace.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/private_namespace.sh new file mode 100755 index 0000000..6ef1346 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/private_namespace.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +for symbol in `cat "$@"` ; do + echo "#define ${symbol} JEMALLOC_N(${symbol})" +done diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/private_symbols.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/private_symbols.sh new file mode 100755 index 0000000..442a259 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/private_symbols.sh @@ -0,0 +1,51 @@ +#!/bin/sh +# +# Generate private_symbols[_jet].awk. +# +# Usage: private_symbols.sh <sym_prefix> <sym>* +# +# <sym_prefix> is typically "" or "_". + +sym_prefix=$1 +shift + +cat <<EOF +#!/usr/bin/env awk -f + +BEGIN { + sym_prefix = "${sym_prefix}" + split("\ +EOF + +for symbol in `cat "$@"` ; do + echo " ${sym_prefix}${symbol} \\" +done + +cat <<"EOF" + ", exported_symbol_names) + # Store exported symbol names in exported_symbols. + for (i in exported_symbol_names) { + exported_symbols[exported_symbol_names[i]] = 1 + } +} + +# Process 'nm -a <c_source.o>' output. +# +# Handle lines like: +# 0000000000000008 D opt_junk +# 0000000000007574 T malloc_initialized +(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~ /^[A-Za-z0-9_]+$/) { + print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix)) +} + +# Process 'dumpbin /SYMBOLS <file.obj>' output.
+# +# Handle lines like: +# 353 00008098 SECT4 notype External | opt_junk +# 3F1 00000000 SECT7 notype () External | malloc_initialized +($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) { + print $NF +} +EOF diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/private_symbols.txt b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/private_symbols.txt similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/private_symbols.txt rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/private_symbols.txt diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/private_unnamespace.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/private_unnamespace.sh similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/private_unnamespace.sh rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/private_unnamespace.sh diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prng.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prng.h new file mode 100644 index 0000000..15cc2d1 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prng.h @@ -0,0 +1,185 @@ +#ifndef JEMALLOC_INTERNAL_PRNG_H +#define JEMALLOC_INTERNAL_PRNG_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/bit_util.h" + +/* + * Simple linear congruential pseudo-random number generator: + * + * prng(x) = (a*x + c) % m + * + * where the following constants ensure maximal period: + * + * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. + * c == Odd number (relatively prime to 2^n). + * m == 2^n (n == 32 or 64 for the variants below). + * + * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. + * + * This choice of m has the disadvantage that the quality of the bits is + * proportional to bit position. For example, the lowest bit has a cycle of 2, + * the next has a cycle of 4, etc. For this reason, we prefer to use the upper + * bits. + */ + +/******************************************************************************/ +/* INTERNAL DEFINITIONS -- IGNORE */ +/******************************************************************************/ +#define PRNG_A_32 UINT32_C(1103515241) +#define PRNG_C_32 UINT32_C(12347) + +#define PRNG_A_64 UINT64_C(6364136223846793005) +#define PRNG_C_64 UINT64_C(1442695040888963407) + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_state_next_u32(uint32_t state) { + return (state * PRNG_A_32) + PRNG_C_32; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_state_next_u64(uint64_t state) { + return (state * PRNG_A_64) + PRNG_C_64; +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_state_next_zu(size_t state) { +#if LG_SIZEOF_PTR == 2 + return (state * PRNG_A_32) + PRNG_C_32; +#elif LG_SIZEOF_PTR == 3 + return (state * PRNG_A_64) + PRNG_C_64; +#else +#error Unsupported pointer size +#endif +} + +/******************************************************************************/ +/* BEGIN PUBLIC API */ +/******************************************************************************/ + +/* + * The prng_lg_range functions give a uniform int in the half-open range [0, + * 2**lg_range). If atomic is true, they do so safely from multiple threads.
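+ * For example, with lg_range == 8 the 32-bit variant returns the top
+ * eight bits of the new state (state1 >> (32 - 8)), in keeping with
+ * the note above about preferring the upper bits.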
+ * Multithreaded 64-bit prngs aren't supported. + */ + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) { + uint32_t ret, state0, state1; + + assert(lg_range > 0); + assert(lg_range <= 32); + + state0 = atomic_load_u32(state, ATOMIC_RELAXED); + + if (atomic) { + do { + state1 = prng_state_next_u32(state0); + } while (!atomic_compare_exchange_weak_u32(state, &state0, + state1, ATOMIC_RELAXED, ATOMIC_RELAXED)); + } else { + state1 = prng_state_next_u32(state0); + atomic_store_u32(state, state1, ATOMIC_RELAXED); + } + ret = state1 >> (32 - lg_range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_lg_range_u64(uint64_t *state, unsigned lg_range) { + uint64_t ret, state1; + + assert(lg_range > 0); + assert(lg_range <= 64); + + state1 = prng_state_next_u64(*state); + *state = state1; + ret = state1 >> (64 - lg_range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) { + size_t ret, state0, state1; + + assert(lg_range > 0); + assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR)); + + state0 = atomic_load_zu(state, ATOMIC_RELAXED); + + if (atomic) { + do { + state1 = prng_state_next_zu(state0); + } while (!atomic_compare_exchange_weak_zu(state, &state0, + state1, ATOMIC_RELAXED, ATOMIC_RELAXED)); + } else { + state1 = prng_state_next_zu(state0); + atomic_store_zu(state, state1, ATOMIC_RELAXED); + } + ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range); + + return ret; +} + +/* + * The prng_range functions behave like the prng_lg_range, but return a result + * in [0, range) instead of [0, 2**lg_range). + */ + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) { + uint32_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u32(pow2_ceil_u32(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_u32(state, lg_range, atomic); + } while (ret >= range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_range_u64(uint64_t *state, uint64_t range) { + uint64_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_u64(state, lg_range); + } while (ret >= range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) { + size_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; + + /* Generate a result in [0..range) via repeated trial.
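+ * (Rejection sampling: pow2_ceil(range) < 2 * range, so each trial
+ * succeeds with probability > 1/2 and the expected number of trials
+ * is less than 2.)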
*/ + do { + ret = prng_lg_range_zu(state, lg_range, atomic); + } while (ret >= range); + + return ret; +} + +#endif /* JEMALLOC_INTERNAL_PRNG_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/prof.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/prof.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof.h diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_externs.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_externs.h new file mode 100644 index 0000000..0434869 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_externs.h @@ -0,0 +1,92 @@ +#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H +#define JEMALLOC_INTERNAL_PROF_EXTERNS_H + +#include "jemalloc/internal/mutex.h" + +extern malloc_mutex_t bt2gctx_mtx; + +extern bool opt_prof; +extern bool opt_prof_active; +extern bool opt_prof_thread_active_init; +extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ +extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ +extern bool opt_prof_gdump; /* High-water memory dumping. */ +extern bool opt_prof_final; /* Final profile dumping. */ +extern bool opt_prof_leak; /* Dump leak summary at exit. */ +extern bool opt_prof_accum; /* Report cumulative bytes. */ +extern char opt_prof_prefix[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; + +/* Accessed via prof_active_[gs]et{_unlocked,}(). */ +extern bool prof_active; + +/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ +extern bool prof_gdump_val; + +/* + * Profile dump interval, measured in bytes allocated. Each arena triggers a + * profile dump when it reaches this threshold. The effect is that the + * interval between profile dumps averages prof_interval, though the actual + * interval between dumps will tend to be sporadic, and the interval will be a + * maximum of approximately (prof_interval * narenas). + */ +extern uint64_t prof_interval; + +/* + * Initialized as opt_lg_prof_sample, and potentially modified during profiling + * resets. 
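+ * (The mean number of bytes between samples is 2^lg_prof_sample, so
+ * the default LG_PROF_SAMPLE_DEFAULT of 19 gives 2^19 = 512 KiB.)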
+ */ +extern size_t lg_prof_sample; + +void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); +void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx); +void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx); +void bt_init(prof_bt_t *bt, void **vec); +void prof_backtrace(prof_bt_t *bt); +prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); +#ifdef JEMALLOC_JET +size_t prof_tdata_count(void); +size_t prof_bt_count(void); +#endif +typedef int (prof_dump_open_t)(bool, const char *); +extern prof_dump_open_t *JET_MUTABLE prof_dump_open; + +typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *); +extern prof_dump_header_t *JET_MUTABLE prof_dump_header; +#ifdef JEMALLOC_JET +void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, + uint64_t *accumbytes); +#endif +bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum); +void prof_idump(tsdn_t *tsdn); +bool prof_mdump(tsd_t *tsd, const char *filename); +void prof_gdump(tsdn_t *tsdn); +prof_tdata_t *prof_tdata_init(tsd_t *tsd); +prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); +void prof_reset(tsd_t *tsd, size_t lg_sample); +void prof_tdata_cleanup(tsd_t *tsd); +bool prof_active_get(tsdn_t *tsdn); +bool prof_active_set(tsdn_t *tsdn, bool active); +const char *prof_thread_name_get(tsd_t *tsd); +int prof_thread_name_set(tsd_t *tsd, const char *thread_name); +bool prof_thread_active_get(tsd_t *tsd); +bool prof_thread_active_set(tsd_t *tsd, bool active); +bool prof_thread_active_init_get(tsdn_t *tsdn); +bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); +bool prof_gdump_get(tsdn_t *tsdn); +bool prof_gdump_set(tsdn_t *tsdn, bool active); +void prof_boot0(void); +void prof_boot1(void); +bool prof_boot2(tsd_t *tsd); +void prof_prefork0(tsdn_t *tsdn); +void prof_prefork1(tsdn_t *tsdn); +void prof_postfork_parent(tsdn_t *tsdn); +void prof_postfork_child(tsdn_t *tsdn); +void prof_sample_threshold_update(prof_tdata_t *tdata); + +#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_inlines_a.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_inlines_a.h new file mode 100644 index 0000000..a6efb48 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_inlines_a.h @@ -0,0 +1,83 @@ +#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H +#define JEMALLOC_INTERNAL_PROF_INLINES_A_H + +#include "jemalloc/internal/mutex.h" + +static inline bool +prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) { + cassert(config_prof); + + bool overflow; + uint64_t a0, a1; + + /* + * If the application allocates fast enough (and/or if idump is slow + * enough), extreme overflow here (a1 >= prof_interval * 2) can cause + * idump trigger coalescing. This is an intentional mechanism that + * avoids rate-limiting allocation. 
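+ * Concretely, with illustrative numbers: if prof_interval is 2^20 and
+ * a burst drives a1 to 2.5 * 2^20, the a1 %= prof_interval below
+ * leaves 0.5 * 2^20 accumulated and reports a single overflow, i.e.
+ * one idump where strict per-interval accounting would have produced
+ * two.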
+ */ +#ifdef JEMALLOC_ATOMIC_U64 + a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED); + do { + a1 = a0 + accumbytes; + assert(a1 >= a0); + overflow = (a1 >= prof_interval); + if (overflow) { + a1 %= prof_interval; + } + } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0, + a1, ATOMIC_RELAXED, ATOMIC_RELAXED)); +#else + malloc_mutex_lock(tsdn, &prof_accum->mtx); + a0 = prof_accum->accumbytes; + a1 = a0 + accumbytes; + overflow = (a1 >= prof_interval); + if (overflow) { + a1 %= prof_interval; + } + prof_accum->accumbytes = a1; + malloc_mutex_unlock(tsdn, &prof_accum->mtx); +#endif + return overflow; +} + +static inline void +prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) { + cassert(config_prof); + + /* + * Cancel out as much of the excessive prof_accumbytes increase as + * possible without underflowing. Interval-triggered dumps occur + * slightly more often than intended as a result of incomplete + * canceling. + */ + uint64_t a0, a1; +#ifdef JEMALLOC_ATOMIC_U64 + a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED); + do { + a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - + usize) : 0; + } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0, + a1, ATOMIC_RELAXED, ATOMIC_RELAXED)); +#else + malloc_mutex_lock(tsdn, &prof_accum->mtx); + a0 = prof_accum->accumbytes; + a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - usize) : + 0; + prof_accum->accumbytes = a1; + malloc_mutex_unlock(tsdn, &prof_accum->mtx); +#endif +} + +JEMALLOC_ALWAYS_INLINE bool +prof_active_get_unlocked(void) { + /* + * Even if opt_prof is true, sampling can be temporarily disabled by + * setting prof_active to false. No locking is used when reading + * prof_active in the fast path, so there are no guarantees regarding + * how long it will take for all threads to notice state changes. + */ + return prof_active; +} + +#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h new file mode 100644 index 0000000..6ff465a --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h @@ -0,0 +1,206 @@ +#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H +#define JEMALLOC_INTERNAL_PROF_INLINES_B_H + +#include "jemalloc/internal/sz.h" + +JEMALLOC_ALWAYS_INLINE bool +prof_gdump_get_unlocked(void) { + /* + * No locking is used when reading prof_gdump_val in the fast path, so + * there are no guarantees regarding how long it will take for all + * threads to notice state changes. 
+ */ + return prof_gdump_val; +} + +JEMALLOC_ALWAYS_INLINE prof_tdata_t * +prof_tdata_get(tsd_t *tsd, bool create) { + prof_tdata_t *tdata; + + cassert(config_prof); + + tdata = tsd_prof_tdata_get(tsd); + if (create) { + if (unlikely(tdata == NULL)) { + if (tsd_nominal(tsd)) { + tdata = prof_tdata_init(tsd); + tsd_prof_tdata_set(tsd, tdata); + } + } else if (unlikely(tdata->expired)) { + tdata = prof_tdata_reinit(tsd, tdata); + tsd_prof_tdata_set(tsd, tdata); + } + assert(tdata == NULL || tdata->attached); + } + + return tdata; +} + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { + cassert(config_prof); + assert(ptr != NULL); + + return arena_prof_tctx_get(tsdn, ptr, alloc_ctx); +} + +JEMALLOC_ALWAYS_INLINE void +prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, + alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + + arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx); +} + +JEMALLOC_ALWAYS_INLINE void +prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + + arena_prof_tctx_reset(tsdn, ptr, tctx); +} + +JEMALLOC_ALWAYS_INLINE bool +prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update, + prof_tdata_t **tdata_out) { + prof_tdata_t *tdata; + + cassert(config_prof); + + tdata = prof_tdata_get(tsd, true); + if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) { + tdata = NULL; + } + + if (tdata_out != NULL) { + *tdata_out = tdata; + } + + if (unlikely(tdata == NULL)) { + return true; + } + + if (likely(tdata->bytes_until_sample >= usize)) { + if (update) { + tdata->bytes_until_sample -= usize; + } + return true; + } else { + if (tsd_reentrancy_level_get(tsd) > 0) { + return true; + } + /* Compute new sample threshold. */ + if (update) { + prof_sample_threshold_update(tdata); + } + return !tdata->active; + } +} + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) { + prof_tctx_t *ret; + prof_tdata_t *tdata; + prof_bt_t bt; + + assert(usize == sz_s2u(usize)); + + if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update, + &tdata))) { + ret = (prof_tctx_t *)(uintptr_t)1U; + } else { + bt_init(&bt, tdata->vec); + prof_backtrace(&bt); + ret = prof_lookup(tsd, &bt); + } + + return ret; +} + +JEMALLOC_ALWAYS_INLINE void +prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx, + prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + assert(usize == isalloc(tsdn, ptr)); + + if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) { + prof_malloc_sample_object(tsdn, ptr, usize, tctx); + } else { + prof_tctx_set(tsdn, ptr, usize, alloc_ctx, + (prof_tctx_t *)(uintptr_t)1U); + } +} + +JEMALLOC_ALWAYS_INLINE void +prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, + bool prof_active, bool updated, const void *old_ptr, size_t old_usize, + prof_tctx_t *old_tctx) { + bool sampled, old_sampled, moved; + + cassert(config_prof); + assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); + + if (prof_active && !updated && ptr != NULL) { + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + if (prof_sample_accum_update(tsd, usize, true, NULL)) { + /* + * Don't sample. The usize passed to prof_alloc_prep() + * was larger than what actually got allocated, so a + * backtrace was captured for this allocation, even + * though its actual usize was insufficient to cross the + * sample threshold. 
+ */ + prof_alloc_rollback(tsd, tctx, true); + tctx = (prof_tctx_t *)(uintptr_t)1U; + } + } + + sampled = ((uintptr_t)tctx > (uintptr_t)1U); + old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); + moved = (ptr != old_ptr); + + if (unlikely(sampled)) { + prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx); + } else if (moved) { + prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL, + (prof_tctx_t *)(uintptr_t)1U); + } else if (unlikely(old_sampled)) { + /* + * prof_tctx_set() would work for the !moved case as well, but + * prof_tctx_reset() is slightly cheaper, and the proper thing + * to do here in the presence of explicit knowledge re: moved + * state. + */ + prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx); + } else { + assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) == + (uintptr_t)1U); + } + + /* + * The prof_free_sampled_object() call must come after the + * prof_malloc_sample_object() call, because tctx and old_tctx may be + * the same, in which case reversing the call order could cause the tctx + * to be prematurely destroyed as a side effect of momentarily zeroed + * counters. + */ + if (unlikely(old_sampled)) { + prof_free_sampled_object(tsd, old_usize, old_tctx); + } +} + +JEMALLOC_ALWAYS_INLINE void +prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) { + prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); + + cassert(config_prof); + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + + if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) { + prof_free_sampled_object(tsd, usize, tctx); + } +} + +#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_structs.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_structs.h new file mode 100644 index 0000000..0d58ae1 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_structs.h @@ -0,0 +1,201 @@ +#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H +#define JEMALLOC_INTERNAL_PROF_STRUCTS_H + +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/rb.h" + +struct prof_bt_s { + /* Backtrace, stored as len program counters. */ + void **vec; + unsigned len; +}; + +#ifdef JEMALLOC_PROF_LIBGCC +/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ +typedef struct { + prof_bt_t *bt; + unsigned max; +} prof_unwind_data_t; +#endif + +struct prof_accum_s { +#ifndef JEMALLOC_ATOMIC_U64 + malloc_mutex_t mtx; + uint64_t accumbytes; +#else + atomic_u64_t accumbytes; +#endif +}; + +struct prof_cnt_s { + /* Profiling counters. */ + uint64_t curobjs; + uint64_t curbytes; + uint64_t accumobjs; + uint64_t accumbytes; +}; + +typedef enum { + prof_tctx_state_initializing, + prof_tctx_state_nominal, + prof_tctx_state_dumping, + prof_tctx_state_purgatory /* Dumper must finish destroying. */ +} prof_tctx_state_t; + +struct prof_tctx_s { + /* Thread data for thread that performed the allocation. */ + prof_tdata_t *tdata; + + /* + * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be + * defunct during teardown. + */ + uint64_t thr_uid; + uint64_t thr_discrim; + + /* Profiling counters, protected by tdata->lock. */ + prof_cnt_t cnts; + + /* Associated global context. */ + prof_gctx_t *gctx; + + /* + * UID that distinguishes multiple tctx's created by the same thread, + * but coexisting in gctx->tctxs. 
There are two ways that such + * coexistence can occur: + * - A dumper thread can cause a tctx to be retained in the purgatory + * state. + * - Although a single "producer" thread must create all tctx's which + * share the same thr_uid, multiple "consumers" can each concurrently + * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only + * gets called once each time cnts.cur{objs,bytes} drop to 0, but this + * threshold can be hit again before the first consumer finishes + * executing prof_tctx_destroy(). + */ + uint64_t tctx_uid; + + /* Linkage into gctx's tctxs. */ + rb_node(prof_tctx_t) tctx_link; + + /* + * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents + * sample vs destroy race. + */ + bool prepared; + + /* Current dump-related state, protected by gctx->lock. */ + prof_tctx_state_t state; + + /* + * Copy of cnts snapshotted during early dump phase, protected by + * dump_mtx. + */ + prof_cnt_t dump_cnts; +}; +typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; + +struct prof_gctx_s { + /* Protects nlimbo, cnt_summed, and tctxs. */ + malloc_mutex_t *lock; + + /* + * Number of threads that currently cause this gctx to be in a state of + * limbo due to one of: + * - Initializing this gctx. + * - Initializing per thread counters associated with this gctx. + * - Preparing to destroy this gctx. + * - Dumping a heap profile that includes this gctx. + * nlimbo must be 1 (single destroyer) in order to safely destroy the + * gctx. + */ + unsigned nlimbo; + + /* + * Tree of profile counters, one for each thread that has allocated in + * this context. + */ + prof_tctx_tree_t tctxs; + + /* Linkage for tree of contexts to be dumped. */ + rb_node(prof_gctx_t) dump_link; + + /* Temporary storage for summation during dump. */ + prof_cnt_t cnt_summed; + + /* Associated backtrace. */ + prof_bt_t bt; + + /* Backtrace vector, variable size, referred to by bt. */ + void *vec[1]; +}; +typedef rb_tree(prof_gctx_t) prof_gctx_tree_t; + +struct prof_tdata_s { + malloc_mutex_t *lock; + + /* Monotonically increasing unique thread identifier. */ + uint64_t thr_uid; + + /* + * Monotonically increasing discriminator among tdata structures + * associated with the same thr_uid. + */ + uint64_t thr_discrim; + + /* Included in heap profile dumps if non-NULL. */ + char *thread_name; + + bool attached; + bool expired; + + rb_node(prof_tdata_t) tdata_link; + + /* + * Counter used to initialize prof_tctx_t's tctx_uid. No locking is + * necessary when incrementing this field, because only one thread ever + * does so. + */ + uint64_t tctx_uid_next; + + /* + * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks + * backtraces for which it has non-zero allocation/deallocation counters + * associated with thread-specific prof_tctx_t objects. Other threads + * may write to prof_tctx_t contents when freeing associated objects. + */ + ckh_t bt2tctx; + + /* Sampling state. */ + uint64_t prng_state; + uint64_t bytes_until_sample; + + /* State used to avoid dumping while operating on prof internals. */ + bool enq; + bool enq_idump; + bool enq_gdump; + + /* + * Set to true during an early dump phase for tdata's which are + * currently being dumped. New threads' tdata's have this initialized + * to false so that they aren't accidentally included in later dump + * phases. + */ + bool dumping; + + /* + * True if profiling is active for this tdata's thread + * (thread.prof.active mallctl). + */ + bool active; + + /* Temporary storage for summation during dump. 
*/ + prof_cnt_t cnt_summed; + + /* Backtrace vector, used for calls to prof_backtrace(). */ + void *vec[PROF_BT_MAX]; +}; +typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; + +#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_types.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_types.h new file mode 100644 index 0000000..1eff995 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/prof_types.h @@ -0,0 +1,56 @@ +#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H +#define JEMALLOC_INTERNAL_PROF_TYPES_H + +typedef struct prof_bt_s prof_bt_t; +typedef struct prof_accum_s prof_accum_t; +typedef struct prof_cnt_s prof_cnt_t; +typedef struct prof_tctx_s prof_tctx_t; +typedef struct prof_gctx_s prof_gctx_t; +typedef struct prof_tdata_s prof_tdata_t; + +/* Option defaults. */ +#ifdef JEMALLOC_PROF +# define PROF_PREFIX_DEFAULT "jeprof" +#else +# define PROF_PREFIX_DEFAULT "" +#endif +#define LG_PROF_SAMPLE_DEFAULT 19 +#define LG_PROF_INTERVAL_DEFAULT -1 + +/* + * Hard limit on stack backtrace depth. The version of prof_backtrace() that + * is based on __builtin_return_address() necessarily has a hard-coded number + * of backtrace frame handlers, and should be kept in sync with this setting. + */ +#define PROF_BT_MAX 128 + +/* Initial hash table size. */ +#define PROF_CKH_MINITEMS 64 + +/* Size of memory buffer to use when writing dump files. */ +#define PROF_DUMP_BUFSIZE 65536 + +/* Size of stack-allocated buffer used by prof_printf(). */ +#define PROF_PRINTF_BUFSIZE 128 + +/* + * Number of mutexes shared among all gctx's. No space is allocated for these + * unless profiling is enabled, so it's okay to over-provision. + */ +#define PROF_NCTX_LOCKS 1024 + +/* + * Number of mutexes shared among all tdata's. No space is allocated for these + * unless profiling is enabled, so it's okay to over-provision. + */ +#define PROF_NTDATA_LOCKS 256 + +/* + * prof_tdata pointers close to NULL are used to encode state information that + * is used for cleaning up during thread shutdown. 
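+ * Since no live allocation can sit at address 1 or 2, a single
+ * comparison distinguishes sentinels from real tdata pointers, as in
+ * prof_sample_accum_update():
+ *
+ *   if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
+ *           tdata = NULL;
+ *   }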
+ */ +#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) +#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) +#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY + +#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/public_namespace.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/public_namespace.sh similarity index 67% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/public_namespace.sh rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/public_namespace.sh index 362109f..4d415ba 100755 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/public_namespace.sh +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/public_namespace.sh @@ -2,5 +2,5 @@ for nm in `cat $1` ; do n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'` - echo "#define je_${n} JEMALLOC_N(${n})" + echo "#define je_${n} JEMALLOC_N(${n})" done diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/public_unnamespace.sh diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/ql.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ql.h similarity index 59% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/ql.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ql.h index 1834bb8..8029040 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/ql.h +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ql.h @@ -1,59 +1,64 @@ +#ifndef JEMALLOC_INTERNAL_QL_H +#define JEMALLOC_INTERNAL_QL_H + +#include "jemalloc/internal/qr.h" + /* List definitions. */ -#define ql_head(a_type) \ +#define ql_head(a_type) \ struct { \ a_type *qlh_first; \ } -#define ql_head_initializer(a_head) {NULL} +#define ql_head_initializer(a_head) {NULL} -#define ql_elm(a_type) qr(a_type) +#define ql_elm(a_type) qr(a_type) /* List functions. */ -#define ql_new(a_head) do { \ +#define ql_new(a_head) do { \ (a_head)->qlh_first = NULL; \ } while (0) -#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) +#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) -#define ql_first(a_head) ((a_head)->qlh_first) +#define ql_first(a_head) ((a_head)->qlh_first) -#define ql_last(a_head, a_field) \ +#define ql_last(a_head, a_field) \ ((ql_first(a_head) != NULL) \ ? qr_prev(ql_first(a_head), a_field) : NULL) -#define ql_next(a_head, a_elm, a_field) \ +#define ql_next(a_head, a_elm, a_field) \ ((ql_last(a_head, a_field) != (a_elm)) \ ? qr_next((a_elm), a_field) : NULL) -#define ql_prev(a_head, a_elm, a_field) \ +#define ql_prev(a_head, a_elm, a_field) \ ((ql_first(a_head) != (a_elm)) ? 
qr_prev((a_elm), a_field) \ : NULL) -#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ +#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ qr_before_insert((a_qlelm), (a_elm), a_field); \ if (ql_first(a_head) == (a_qlelm)) { \ ql_first(a_head) = (a_elm); \ } \ } while (0) -#define ql_after_insert(a_qlelm, a_elm, a_field) \ +#define ql_after_insert(a_qlelm, a_elm, a_field) \ qr_after_insert((a_qlelm), (a_elm), a_field) -#define ql_head_insert(a_head, a_elm, a_field) do { \ +#define ql_head_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = (a_elm); \ } while (0) -#define ql_tail_insert(a_head, a_elm, a_field) do { \ +#define ql_tail_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = qr_next((a_elm), a_field); \ } while (0) -#define ql_remove(a_head, a_elm, a_field) do { \ +#define ql_remove(a_head, a_elm, a_field) do { \ if (ql_first(a_head) == (a_elm)) { \ ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ } \ @@ -64,18 +69,20 @@ struct { \ } \ } while (0) -#define ql_head_remove(a_head, a_type, a_field) do { \ +#define ql_head_remove(a_head, a_type, a_field) do { \ a_type *t = ql_first(a_head); \ ql_remove((a_head), t, a_field); \ } while (0) -#define ql_tail_remove(a_head, a_type, a_field) do { \ +#define ql_tail_remove(a_head, a_type, a_field) do { \ a_type *t = ql_last(a_head, a_field); \ ql_remove((a_head), t, a_field); \ } while (0) -#define ql_foreach(a_var, a_head, a_field) \ +#define ql_foreach(a_var, a_head, a_field) \ qr_foreach((a_var), ql_first(a_head), a_field) -#define ql_reverse_foreach(a_var, a_head, a_field) \ +#define ql_reverse_foreach(a_var, a_head, a_field) \ qr_reverse_foreach((a_var), ql_first(a_head), a_field) + +#endif /* JEMALLOC_INTERNAL_QL_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/qr.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/qr.h similarity index 68% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/qr.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/qr.h index 0fbaec2..1e1056b 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/qr.h +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/qr.h @@ -1,38 +1,39 @@ +#ifndef JEMALLOC_INTERNAL_QR_H +#define JEMALLOC_INTERNAL_QR_H + /* Ring definitions. */ -#define qr(a_type) \ +#define qr(a_type) \ struct { \ a_type *qre_next; \ a_type *qre_prev; \ } /* Ring functions. 
*/ -#define qr_new(a_qr, a_field) do { \ +#define qr_new(a_qr, a_field) do { \ (a_qr)->a_field.qre_next = (a_qr); \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) -#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) +#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) -#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) +#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) -#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ +#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ (a_qr)->a_field.qre_next = (a_qrelm); \ (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ (a_qrelm)->a_field.qre_prev = (a_qr); \ } while (0) -#define qr_after_insert(a_qrelm, a_qr, a_field) \ - do \ - { \ +#define qr_after_insert(a_qrelm, a_qr, a_field) do { \ (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ (a_qr)->a_field.qre_prev = (a_qrelm); \ (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ (a_qrelm)->a_field.qre_next = (a_qr); \ - } while (0) +} while (0) -#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ - void *t; \ +#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \ + a_type *t; \ (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ t = (a_qr_a)->a_field.qre_prev; \ @@ -44,10 +45,10 @@ struct { \ * qr_meld() and qr_split() are functionally equivalent, so there's no need to * have two copies of the code. */ -#define qr_split(a_qr_a, a_qr_b, a_field) \ - qr_meld((a_qr_a), (a_qr_b), a_field) +#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \ + qr_meld((a_qr_a), (a_qr_b), a_type, a_field) -#define qr_remove(a_qr, a_field) do { \ +#define qr_remove(a_qr, a_field) do { \ (a_qr)->a_field.qre_prev->a_field.qre_next \ = (a_qr)->a_field.qre_next; \ (a_qr)->a_field.qre_next->a_field.qre_prev \ @@ -56,14 +57,16 @@ struct { \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) -#define qr_foreach(var, a_qr, a_field) \ +#define qr_foreach(var, a_qr, a_field) \ for ((var) = (a_qr); \ (var) != NULL; \ (var) = (((var)->a_field.qre_next != (a_qr)) \ ? (var)->a_field.qre_next : NULL)) -#define qr_reverse_foreach(var, a_qr, a_field) \ +#define qr_reverse_foreach(var, a_qr, a_field) \ for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ (var) != NULL; \ (var) = (((var) != (a_qr)) \ ? 
(var)->a_field.qre_prev : NULL)) + +#endif /* JEMALLOC_INTERNAL_QR_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/quarantine.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/quarantine.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/quarantine.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/quarantine.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/rb.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/rb.h similarity index 80% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/rb.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/rb.h index 2ca8e59..47fa5ca 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/rb.h +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/rb.h @@ -20,17 +20,21 @@ */ #ifndef RB_H_ -#define RB_H_ +#define RB_H_ + +#ifndef __PGI +#define RB_COMPACT +#endif #ifdef RB_COMPACT /* Node structure. */ -#define rb_node(a_type) \ +#define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right_red; \ } #else -#define rb_node(a_type) \ +#define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right; \ @@ -39,111 +43,116 @@ struct { \ #endif /* Root structure. */ -#define rb_tree(a_type) \ +#define rb_tree(a_type) \ struct { \ a_type *rbt_root; \ - a_type rbt_nil; \ } /* Left accessors. */ -#define rbtn_left_get(a_type, a_field, a_node) \ +#define rbtn_left_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_left) -#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ +#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ (a_node)->a_field.rbn_left = a_left; \ } while (0) #ifdef RB_COMPACT /* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ +#define rbtn_right_get(a_type, a_field, a_node) \ ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ & ((ssize_t)-2))) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ } while (0) /* Color accessors. */ -#define rbtn_red_get(a_type, a_field, a_node) \ +#define rbtn_red_get(a_type, a_field, a_node) \ ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ & ((size_t)1))) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ | ((ssize_t)a_red)); \ } while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ +#define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ } while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ +#define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ } while (0) + +/* Node initializer. 
*/ +#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ + /* Bookkeeping bit cannot be used by node pointer. */ \ + assert(((uintptr_t)(a_node) & 0x1) == 0); \ + rbtn_left_set(a_type, a_field, (a_node), NULL); \ + rbtn_right_set(a_type, a_field, (a_node), NULL); \ + rbtn_red_set(a_type, a_field, (a_node)); \ +} while (0) #else /* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ +#define rbtn_right_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_right) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right = a_right; \ } while (0) /* Color accessors. */ -#define rbtn_red_get(a_type, a_field, a_node) \ +#define rbtn_red_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_red) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_red = (a_red); \ } while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ +#define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = true; \ } while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ +#define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = false; \ } while (0) -#endif /* Node initializer. */ -#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ - rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ - rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ +#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ + rbtn_left_set(a_type, a_field, (a_node), NULL); \ + rbtn_right_set(a_type, a_field, (a_node), NULL); \ rbtn_red_set(a_type, a_field, (a_node)); \ } while (0) +#endif /* Tree initializer. */ -#define rb_new(a_type, a_field, a_rbt) do { \ - (a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \ - rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \ - rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \ +#define rb_new(a_type, a_field, a_rbt) do { \ + (a_rbt)->rbt_root = NULL; \ } while (0) /* Internal utility macros. 
*/ -#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ +#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ - if ((r_node) != &(a_rbt)->rbt_nil) { \ + if ((r_node) != NULL) { \ for (; \ - rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\ + rbtn_left_get(a_type, a_field, (r_node)) != NULL; \ (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) -#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ +#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ - if ((r_node) != &(a_rbt)->rbt_nil) { \ - for (; rbtn_right_get(a_type, a_field, (r_node)) != \ - &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \ - (r_node))) { \ + if ((r_node) != NULL) { \ + for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \ + (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) -#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ +#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ rbtn_right_set(a_type, a_field, (a_node), \ rbtn_left_get(a_type, a_field, (r_node))); \ rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ } while (0) -#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ +#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ rbtn_left_set(a_type, a_field, (a_node), \ rbtn_right_get(a_type, a_field, (r_node))); \ @@ -155,7 +164,7 @@ struct { \ * functions generated by an equivalently parameterized call to rb_gen(). */ -#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ +#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree); \ a_attr bool \ @@ -169,11 +178,11 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, a_type *key); \ +a_prefix##search(a_rbt_type *rbtree, const a_type *key); \ a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \ +a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \ a_attr a_type * \ -a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \ +a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ a_attr void \ @@ -183,7 +192,10 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ a_rbt_type *, a_type *, void *), void *arg); \ a_attr a_type * \ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \ +a_attr void \ +a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ + void *arg); /* * The rb_gen() macro generates a type-specific red-black tree implementation, @@ -254,7 +266,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ * last/first. * * static ex_node_t * - * ex_search(ex_t *tree, ex_node_t *key); + * ex_search(ex_t *tree, const ex_node_t *key); * Description: Search for node that matches key. * Args: * tree: Pointer to an initialized red-black tree object. @@ -262,9 +274,9 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ * Ret: Node in tree that matches key, or NULL if no match. 
* * static ex_node_t * - * ex_nsearch(ex_t *tree, ex_node_t *key); + * ex_nsearch(ex_t *tree, const ex_node_t *key); * static ex_node_t * - * ex_psearch(ex_t *tree, ex_node_t *key); + * ex_psearch(ex_t *tree, const ex_node_t *key); * Description: Search for node that matches key. If no match is found, * return what would be key's successor/predecessor, were * key in tree. @@ -312,44 +324,52 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ * arg : Opaque pointer passed to cb(). * Ret: NULL if iteration completed, or the non-NULL callback return value * that caused termination of the iteration. + * + * static void + * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg); + * Description: Iterate over the tree with post-order traversal, remove + * each node, and run the callback if non-null. This is + * used for destroying a tree without paying the cost to + * rebalance it. The tree must not be otherwise altered + * during traversal. + * Args: + * tree: Pointer to an initialized red-black tree object. + * cb : Callback function, which, if non-null, is called for each node + * during iteration. There is no way to stop iteration once it + * has begun. + * arg : Opaque pointer passed to cb(). */ -#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ +#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree) { \ rb_new(a_type, a_field, rbtree); \ } \ a_attr bool \ a_prefix##empty(a_rbt_type *rbtree) { \ - return (rbtree->rbt_root == &rbtree->rbt_nil); \ + return (rbtree->rbt_root == NULL); \ } \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ a_prefix##last(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ - if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ + if (rbtn_right_get(a_type, a_field, node) != NULL) { \ rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ - assert(tnode != &rbtree->rbt_nil); \ - ret = &rbtree->rbt_nil; \ + assert(tnode != NULL); \ + ret = NULL; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ @@ -360,24 +380,21 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ } else { \ break; \ } \ - assert(tnode != &rbtree->rbt_nil); \ + assert(tnode != NULL); \ } \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ - if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ + if (rbtn_left_get(a_type, a_field, node) != NULL) { \ rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ - assert(tnode != &rbtree->rbt_nil); \ - ret = &rbtree->rbt_nil; \ + assert(tnode != NULL); \ + ret = NULL; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ @@ -388,20 +405,17 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ } else { \ break; \ } \ - assert(tnode != &rbtree->rbt_nil); \ + assert(tnode != NULL); \ } 
\ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, a_type *key) { \ +a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ int cmp; \ ret = rbtree->rbt_root; \ - while (ret != &rbtree->rbt_nil \ + while (ret != NULL \ && (cmp = (a_cmp)(key, ret)) != 0) { \ if (cmp < 0) { \ ret = rbtn_left_get(a_type, a_field, ret); \ @@ -409,17 +423,14 @@ a_prefix##search(a_rbt_type *rbtree, a_type *key) { \ ret = rbtn_right_get(a_type, a_field, ret); \ } \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \ +a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ - ret = &rbtree->rbt_nil; \ - while (tnode != &rbtree->rbt_nil) { \ + ret = NULL; \ + while (tnode != NULL) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ ret = tnode; \ @@ -431,17 +442,14 @@ a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \ break; \ } \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ -a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \ +a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ - ret = &rbtree->rbt_nil; \ - while (tnode != &rbtree->rbt_nil) { \ + ret = NULL; \ + while (tnode != NULL) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ tnode = rbtn_left_get(a_type, a_field, tnode); \ @@ -453,10 +461,7 @@ a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \ break; \ } \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ + return ret; \ } \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ @@ -467,7 +472,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ rbt_node_new(a_type, a_field, rbtree, node); \ /* Wind. */ \ path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ + for (pathp = path; pathp->node != NULL; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ assert(cmp != 0); \ if (cmp < 0) { \ @@ -487,7 +492,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ rbtn_left_set(a_type, a_field, cnode, left); \ if (rbtn_red_get(a_type, a_field, left)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ /* Fix up 4-node. */ \ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ @@ -502,7 +508,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ rbtn_right_set(a_type, a_field, cnode, right); \ if (rbtn_red_get(a_type, a_field, right)) { \ a_type *left = rbtn_left_get(a_type, a_field, cnode); \ - if (rbtn_red_get(a_type, a_field, left)) { \ + if (left != NULL && rbtn_red_get(a_type, a_field, \ + left)) { \ /* Split 4-node. */ \ rbtn_black_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, right); \ @@ -535,7 +542,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ /* Wind. */ \ nodep = NULL; /* Silence compiler warning. 
*/ \ path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ + for (pathp = path; pathp->node != NULL; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ if (cmp < 0) { \ pathp[1].node = rbtn_left_get(a_type, a_field, \ @@ -547,8 +554,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ /* Find node's successor, in preparation for swap. */ \ pathp->cmp = 1; \ nodep = pathp; \ - for (pathp++; pathp->node != &rbtree->rbt_nil; \ - pathp++) { \ + for (pathp++; pathp->node != NULL; pathp++) { \ pathp->cmp = -1; \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ @@ -590,7 +596,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ } \ } else { \ a_type *left = rbtn_left_get(a_type, a_field, node); \ - if (left != &rbtree->rbt_nil) { \ + if (left != NULL) { \ /* node has no successor, but it has a left child. */\ /* Splice node out, without losing the left child. */\ assert(!rbtn_red_get(a_type, a_field, node)); \ @@ -610,33 +616,32 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ return; \ } else if (pathp == path) { \ /* The tree only contained one node. */ \ - rbtree->rbt_root = &rbtree->rbt_nil; \ + rbtree->rbt_root = NULL; \ return; \ } \ } \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ /* Prune red node, which requires no fixup. */ \ assert(pathp[-1].cmp < 0); \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - &rbtree->rbt_nil); \ + rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \ return; \ } \ /* The node to be pruned is black, so unwind until balance is */\ /* restored. */\ - pathp->node = &rbtree->rbt_nil; \ + pathp->node = NULL; \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ assert(pathp->cmp != 0); \ if (pathp->cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp->node, \ pathp[1].node); \ - assert(!rbtn_red_get(a_type, a_field, pathp[1].node)); \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *right = rbtn_right_get(a_type, a_field, \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ a_type *tnode; \ - if (rbtn_red_get(a_type, a_field, rightleft)) { \ + if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ + rightleft)) { \ /* In the following diagrams, ||, //, and \\ */\ /* indicate the path to the removed node. 
*/\ /* */\ @@ -679,7 +684,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ - if (rbtn_red_get(a_type, a_field, rightleft)) { \ + if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ + rightleft)) { \ /* || */\ /* pathp(b) */\ /* // \ */\ @@ -733,7 +739,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ left); \ a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ leftright); \ - if (rbtn_red_get(a_type, a_field, leftrightleft)) { \ + if (leftrightleft != NULL && rbtn_red_get(a_type, \ + a_field, leftrightleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ @@ -759,7 +766,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ /* (b) */\ /* / */\ /* (b) */\ - assert(leftright != &rbtree->rbt_nil); \ + assert(leftright != NULL); \ rbtn_red_set(a_type, a_field, leftright); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ @@ -782,7 +789,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ return; \ } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ /* || */\ /* pathp(r) */\ /* / \\ */\ @@ -820,7 +828,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ } \ } else { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ @@ -866,17 +875,17 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ a_attr a_type * \ a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == &rbtree->rbt_nil) { \ - return (&rbtree->rbt_nil); \ + if (node == NULL) { \ + return NULL; \ } else { \ a_type *ret; \ if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ - a_field, node), cb, arg)) != &rbtree->rbt_nil \ - || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \ + arg)) != NULL) { \ + return ret; \ } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ + return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ @@ -886,22 +895,22 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ if (cmp < 0) { \ a_type *ret; \ if ((ret = a_prefix##iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return ret; \ } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ + return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg); \ } else if (cmp > 0) { \ - return (a_prefix##iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)); \ + return a_prefix##iter_start(rbtree, start, \ + rbtn_right_get(a_type, a_field, node), cb, arg); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + return ret; \ } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ + 
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ @@ -914,25 +923,22 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ } else { \ ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == &rbtree->rbt_nil) { \ - return (&rbtree->rbt_nil); \ + if (node == NULL) { \ + return NULL; \ } else { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return ret; \ } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ + return a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ @@ -943,22 +949,22 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ if (cmp > 0) { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return ret; \ } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ + return a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ } else if (cmp < 0) { \ - return (a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ + return a_prefix##reverse_iter_start(rbtree, start, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + return ret; \ } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ + return a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ @@ -972,10 +978,29 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ cb, arg); \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ + return ret; \ +} \ +a_attr void \ +a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \ + a_type *, void *), void *arg) { \ + if (node == NULL) { \ + return; \ } \ - return (ret); \ + a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \ + node), cb, arg); \ + rbtn_left_set(a_type, a_field, (node), NULL); \ + a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \ + node), cb, arg); \ + rbtn_right_set(a_type, a_field, (node), NULL); \ + if (cb) { \ + cb(node, arg); \ + } \ +} \ +a_attr void \ +a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ + void *arg) { \ + a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \ + rbtree->rbt_root = NULL; \ } #endif /* RB_H_ */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/rtree.h 
b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/rtree.h new file mode 100644 index 0000000..b59d33a --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/rtree.h @@ -0,0 +1,492 @@ +#ifndef JEMALLOC_INTERNAL_RTREE_H +#define JEMALLOC_INTERNAL_RTREE_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree_tsd.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/tsd.h" + +/* + * This radix tree implementation is tailored to the singular purpose of + * associating metadata with extents that are currently owned by jemalloc. + * + ******************************************************************************* + */ + +/* Number of high insignificant bits. */ +#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR) +/* Number of low insignificant bits. */ +#define RTREE_NLIB LG_PAGE +/* Number of significant bits. */ +#define RTREE_NSB (LG_VADDR - RTREE_NLIB) +/* Number of levels in radix tree. */ +#if RTREE_NSB <= 10 +# define RTREE_HEIGHT 1 +#elif RTREE_NSB <= 36 +# define RTREE_HEIGHT 2 +#elif RTREE_NSB <= 52 +# define RTREE_HEIGHT 3 +#else +# error Unsupported number of significant virtual address bits +#endif +/* Use compact leaf representation if virtual address encoding allows. */ +#if RTREE_NHIB >= LG_CEIL_NSIZES +# define RTREE_LEAF_COMPACT +#endif + +/* Needed for initialization only. */ +#define RTREE_LEAFKEY_INVALID ((uintptr_t)1) + +typedef struct rtree_node_elm_s rtree_node_elm_t; +struct rtree_node_elm_s { + atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */ +}; + +struct rtree_leaf_elm_s { +#ifdef RTREE_LEAF_COMPACT + /* + * Single pointer-width field containing all three leaf element fields. + * For example, on a 64-bit x64 system with 48 significant virtual + * memory address bits, the index, extent, and slab fields are packed as + * such: + * + * x: index + * e: extent + * b: slab + * + * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b + */ + atomic_p_t le_bits; +#else + atomic_p_t le_extent; /* (extent_t *) */ + atomic_u_t le_szind; /* (szind_t) */ + atomic_b_t le_slab; /* (bool) */ +#endif +}; + +typedef struct rtree_level_s rtree_level_t; +struct rtree_level_s { + /* Number of key bits distinguished by this level. */ + unsigned bits; + /* + * Cumulative number of key bits distinguished by traversing to + * corresponding tree level. + */ + unsigned cumbits; +}; + +typedef struct rtree_s rtree_t; +struct rtree_s { + malloc_mutex_t init_lock; + /* Number of elements based on rtree_levels[0].bits. */ +#if RTREE_HEIGHT > 1 + rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)]; +#else + rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)]; +#endif +}; + +/* + * Split the bits into one to three partitions depending on number of + * significant bits. If the number of bits does not divide evenly into the + * number of levels, place one remainder bit per level starting at the leaf + * level.
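For orientation, the arithmetic works out as follows on a typical 64-bit Linux build (a hedged illustration assuming LG_SIZEOF_PTR == 3, LG_VADDR == 48, LG_PAGE == 12, none of which this patch changes): RTREE_NHIB = 64 - 48 = 16, RTREE_NSB = 48 - 12 = 36, so RTREE_HEIGHT == 2 with 18 key bits per level. The standalone sketch below reproduces the rtree_subkey() shift/mask math for that configuration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        const unsigned ptrbits = 64, lg_vaddr = 48, lg_page = 12;
        const unsigned nhib = ptrbits - lg_vaddr;  /* 16 ignored high bits */
        const unsigned nsb = lg_vaddr - lg_page;   /* 36 significant bits */
        const unsigned bits[2] = {nsb / 2, nsb / 2 + nsb % 2};     /* {18, 18} */
        const unsigned cumbits[2] = {nhib + bits[0], nhib + nsb};  /* {34, 52} */

        uintptr_t key = (uintptr_t)0x00007f1234567000u; /* page-aligned address */
        for (unsigned lvl = 0; lvl < 2; lvl++) {
            unsigned shift = ptrbits - cumbits[lvl];
            uintptr_t mask = (((uintptr_t)1) << bits[lvl]) - 1;
            printf("level %u subkey: 0x%05lx\n",
                lvl, (unsigned long)((key >> shift) & mask));
        }
        return 0;
    }

At the leaf level the shift comes out equal to LG_PAGE (64 - 52 = 12), i.e. the low insignificant bits fall away exactly as RTREE_NLIB promises.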
+ */ +static const rtree_level_t rtree_levels[] = { +#if RTREE_HEIGHT == 1 + {RTREE_NSB, RTREE_NHIB + RTREE_NSB} +#elif RTREE_HEIGHT == 2 + {RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2}, + {RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB} +#elif RTREE_HEIGHT == 3 + {RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3}, + {RTREE_NSB/3 + RTREE_NSB%3/2, + RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2}, + {RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB} +#else +# error Unsupported rtree height +#endif +}; + +bool rtree_new(rtree_t *rtree, bool zeroed); + +typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t); +extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc; + +typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t); +extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc; + +typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *); +extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc; + +typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *); +extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc; +#ifdef JEMALLOC_JET +void rtree_delete(tsdn_t *tsdn, rtree_t *rtree); +#endif +rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, + rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing); + +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_leafkey(uintptr_t key) { + unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); + unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits - + rtree_levels[RTREE_HEIGHT-1].bits); + unsigned maskbits = ptrbits - cumbits; + uintptr_t mask = ~((ZU(1) << maskbits) - 1); + return (key & mask); +} + +JEMALLOC_ALWAYS_INLINE size_t +rtree_cache_direct_map(uintptr_t key) { + unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); + unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits - + rtree_levels[RTREE_HEIGHT-1].bits); + unsigned maskbits = ptrbits - cumbits; + return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1)); +} + +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_subkey(uintptr_t key, unsigned level) { + unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); + unsigned cumbits = rtree_levels[level].cumbits; + unsigned shiftbits = ptrbits - cumbits; + unsigned maskbits = rtree_levels[level].bits; + uintptr_t mask = (ZU(1) << maskbits) - 1; + return ((key >> shiftbits) & mask); +} + +/* + * Atomic getters. + * + * dependent: Reading a value on behalf of a pointer to a valid allocation + * is guaranteed to be a clean read even without synchronization, + * because the rtree update became visible in memory before the + * pointer came into existence. + * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be + * dependent on a previous rtree write, which means a stale read + * could result if synchronization were omitted here. + */ +# ifdef RTREE_LEAF_COMPACT +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + bool dependent) { + return (uintptr_t)atomic_load_p(&elm->le_bits, dependent + ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); +} + +JEMALLOC_ALWAYS_INLINE extent_t * +rtree_leaf_elm_bits_extent_get(uintptr_t bits) { +# ifdef __aarch64__ + /* + * aarch64 doesn't sign extend the highest virtual address bit to set + * the higher ones. Instead, the high bits gets zeroed. + */ + uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1; + /* Mask off the slab bit. 
*/ + uintptr_t low_bit_mask = ~(uintptr_t)1; + uintptr_t mask = high_bit_mask & low_bit_mask; + return (extent_t *)(bits & mask); +# else + /* Restore sign-extended high bits, mask slab bit. */ + return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >> + RTREE_NHIB) & ~((uintptr_t)0x1)); +# endif +} + +JEMALLOC_ALWAYS_INLINE szind_t +rtree_leaf_elm_bits_szind_get(uintptr_t bits) { + return (szind_t)(bits >> LG_VADDR); +} + +JEMALLOC_ALWAYS_INLINE bool +rtree_leaf_elm_bits_slab_get(uintptr_t bits) { + return (bool)(bits & (uintptr_t)0x1); +} + +# endif + +JEMALLOC_ALWAYS_INLINE extent_t * +rtree_leaf_elm_extent_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, + rtree_leaf_elm_t *elm, bool dependent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); + return rtree_leaf_elm_bits_extent_get(bits); +#else + extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent + ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); + return extent; +#endif +} + +JEMALLOC_ALWAYS_INLINE szind_t +rtree_leaf_elm_szind_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, + rtree_leaf_elm_t *elm, bool dependent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); + return rtree_leaf_elm_bits_szind_get(bits); +#else + return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED + : ATOMIC_ACQUIRE); +#endif +} + +JEMALLOC_ALWAYS_INLINE bool +rtree_leaf_elm_slab_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, + rtree_leaf_elm_t *elm, bool dependent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); + return rtree_leaf_elm_bits_slab_get(bits); +#else + return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED : + ATOMIC_ACQUIRE); +#endif +} + +static inline void +rtree_leaf_elm_extent_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, + rtree_leaf_elm_t *elm, extent_t *extent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true); + uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) << + LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) + | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits)); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); +#else + atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE); +#endif +} + +static inline void +rtree_leaf_elm_szind_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, + rtree_leaf_elm_t *elm, szind_t szind) { + assert(szind <= NSIZES); + +#ifdef RTREE_LEAF_COMPACT + uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, + true); + uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | + ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) & + (((uintptr_t)0x1 << LG_VADDR) - 1)) | + ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits)); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); +#else + atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE); +#endif +} + +static inline void +rtree_leaf_elm_slab_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, + rtree_leaf_elm_t *elm, bool slab) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, + true); + uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) << + LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) & + (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); +#else + atomic_store_b(&elm->le_slab, slab, 
ATOMIC_RELEASE); +#endif +} + +static inline void +rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + extent_t *extent, szind_t szind, bool slab) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | + ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) | + ((uintptr_t)slab); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); +#else + rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); + rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind); + /* + * Write extent last, since the element is atomically considered valid + * as soon as the extent field is non-NULL. + */ + rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent); +#endif +} + +static inline void +rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, szind_t szind, bool slab) { + assert(!slab || szind < NBINS); + + /* + * The caller implicitly assures that it is the only writer to the szind + * and slab fields, and that the extent field cannot currently change. + */ + rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); + rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind); +} + +JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * +rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, bool init_missing) { + assert(key != 0); + assert(!dependent || !init_missing); + + size_t slot = rtree_cache_direct_map(key); + uintptr_t leafkey = rtree_leafkey(key); + assert(leafkey != RTREE_LEAFKEY_INVALID); + + /* Fast path: L1 direct mapped cache. */ + if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) { + rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf; + assert(leaf != NULL); + uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); + return &leaf[subkey]; + } + /* + * Search the L2 LRU cache. On hit, swap the matching element into the + * slot in L1 cache, and move the position in L2 up by 1. + */ +#define RTREE_CACHE_CHECK_L2(i) do { \ + if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \ + rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \ + assert(leaf != NULL); \ + if (i > 0) { \ + /* Bubble up by one. */ \ + rtree_ctx->l2_cache[i].leafkey = \ + rtree_ctx->l2_cache[i - 1].leafkey; \ + rtree_ctx->l2_cache[i].leaf = \ + rtree_ctx->l2_cache[i - 1].leaf; \ + rtree_ctx->l2_cache[i - 1].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[i - 1].leaf = \ + rtree_ctx->cache[slot].leaf; \ + } else { \ + rtree_ctx->l2_cache[0].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[0].leaf = \ + rtree_ctx->cache[slot].leaf; \ + } \ + rtree_ctx->cache[slot].leafkey = leafkey; \ + rtree_ctx->cache[slot].leaf = leaf; \ + uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \ + return &leaf[subkey]; \ + } \ +} while (0) + /* Check the first cache entry. */ + RTREE_CACHE_CHECK_L2(0); + /* Search the remaining cache elements. */ + for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) { + RTREE_CACHE_CHECK_L2(i); + } +#undef RTREE_CACHE_CHECK_L2 + + return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key, + dependent, init_missing); +} + +static inline bool +rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, + extent_t *extent, szind_t szind, bool slab) { + /* Use rtree_clear() to set the extent to NULL. 
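Since rtree_leaf_elm_lookup()'s two-level cache is the hot path here, a self-contained sketch may help reviewers check the promotion logic. This is a hypothetical miniature (integer keys, no atomics, invented names), not jemalloc API; it mirrors the direct-mapped L1 probe and the bubble-up-by-one L2 promotion that RTREE_CACHE_CHECK_L2 performs above:

    #include <stddef.h>
    #include <stdint.h>

    #define NCACHE    16  /* direct-mapped L1, cf. RTREE_CTX_NCACHE */
    #define NCACHE_L2 8   /* LRU L2, cf. RTREE_CTX_NCACHE_L2 */

    typedef struct { uintptr_t key; void *leaf; } elm_t;
    static elm_t l1[NCACHE];
    static elm_t l2[NCACHE_L2];

    static void *
    cache_lookup(uintptr_t key) {
        size_t slot = key & (NCACHE - 1);
        if (l1[slot].key == key) {
            return l1[slot].leaf;      /* L1 hit: the cheap path */
        }
        for (unsigned i = 0; i < NCACHE_L2; i++) {
            if (l2[i].key != key) {
                continue;
            }
            elm_t hit = l2[i];
            if (i > 0) {
                l2[i] = l2[i - 1];     /* bubble the hit up by one */
                l2[i - 1] = l1[slot];  /* demote the L1 occupant */
            } else {
                l2[0] = l1[slot];
            }
            l1[slot] = hit;            /* promote the hit into L1 */
            return l1[slot].leaf;
        }
        return NULL;                   /* miss: caller walks the tree */
    }

On a miss the real code falls through to rtree_leaf_elm_lookup_hard(), declared above and defined in rtree.c, which walks the tree and refreshes the caches.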
*/ + assert(extent != NULL); + + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, + key, false, true); + if (elm == NULL) { + return true; + } + + assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL); + rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab); + + return false; +} + +JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * +rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, + bool dependent) { + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, + key, dependent, false); + if (!dependent && elm == NULL) { + return NULL; + } + assert(elm != NULL); + return elm; +} + +JEMALLOC_ALWAYS_INLINE extent_t * +rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return NULL; + } + return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent); +} + +JEMALLOC_ALWAYS_INLINE szind_t +rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return NSIZES; + } + return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); +} + +/* + * rtree_slab_read() is intentionally omitted because slab is always read in + * conjunction with szind, which makes rtree_szind_slab_read() a better choice. + */ + +JEMALLOC_ALWAYS_INLINE bool +rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return true; + } + *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent); + *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); + return false; +} + +JEMALLOC_ALWAYS_INLINE bool +rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return true; + } +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); + *r_szind = rtree_leaf_elm_bits_szind_get(bits); + *r_slab = rtree_leaf_elm_bits_slab_get(bits); +#else + *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); + *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent); +#endif + return false; +} + +static inline void +rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, szind_t szind, bool slab) { + assert(!slab || szind < NBINS); + + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true); + rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab); +} + +static inline void +rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true); + assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) != + NULL); + rtree_leaf_elm_write(tsdn, rtree, elm, NULL, NSIZES, false); +} + +#endif /* JEMALLOC_INTERNAL_RTREE_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/rtree_tsd.h 
b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/rtree_tsd.h new file mode 100644 index 0000000..93a7517 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/rtree_tsd.h @@ -0,0 +1,50 @@ +#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H +#define JEMALLOC_INTERNAL_RTREE_CTX_H + +/* + * Number of leafkey/leaf pairs to cache in L1 and L2 level respectively. Each + * entry supports an entire leaf, so the cache hit rate is typically high even + * with a small number of entries. In rare cases extent activity will straddle + * the boundary between two leaf nodes. Furthermore, an arena may use a + * combination of dss and mmap. Note that as memory usage grows past the amount + * that this cache can directly cover, the cache will become less effective if + * locality of reference is low, but the consequence is merely cache misses + * while traversing the tree nodes. + * + * The L1 direct mapped cache offers consistent and low cost on cache hit. + * However collision could affect hit rate negatively. This is resolved by + * combining with a L2 LRU cache, which requires linear search and re-ordering + * on access but suffers no collision. Note that, the cache will itself suffer + * cache misses if made overly large, plus the cost of linear search in the LRU + * cache. + */ +#define RTREE_CTX_LG_NCACHE 4 +#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE) +#define RTREE_CTX_NCACHE_L2 8 + +/* + * Zero initializer required for tsd initialization only. Proper initialization + * done via rtree_ctx_data_init(). + */ +#define RTREE_CTX_ZERO_INITIALIZER {{{0}}, {{0}}} + + +typedef struct rtree_leaf_elm_s rtree_leaf_elm_t; + +typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t; +struct rtree_ctx_cache_elm_s { + uintptr_t leafkey; + rtree_leaf_elm_t *leaf; +}; + +typedef struct rtree_ctx_s rtree_ctx_t; +struct rtree_ctx_s { + /* Direct mapped cache. */ + rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE]; + /* L2 LRU cache. */ + rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2]; +}; + +void rtree_ctx_data_init(rtree_ctx_t *ctx); + +#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/size_classes.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/size_classes.sh similarity index 58% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/size_classes.sh rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/size_classes.sh index fc82036..998994d 100755 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/size_classes.sh +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/size_classes.sh @@ -40,6 +40,54 @@ lg() { done } +lg_ceil() { + y=$1 + lg ${y}; lg_floor=${lg_result} + pow2 ${lg_floor}; pow2_floor=${pow2_result} + if [ ${pow2_floor} -lt ${y} ] ; then + lg_ceil_result=$((${lg_floor} + 1)) + else + lg_ceil_result=${lg_floor} + fi +} + +reg_size_compute() { + lg_grp=$1 + lg_delta=$2 + ndelta=$3 + + pow2 ${lg_grp}; grp=${pow2_result} + pow2 ${lg_delta}; delta=${pow2_result} + reg_size=$((${grp} + ${delta}*${ndelta})) +} + +slab_size() { + lg_p=$1 + lg_grp=$2 + lg_delta=$3 + ndelta=$4 + + pow2 ${lg_p}; p=${pow2_result} + reg_size_compute ${lg_grp} ${lg_delta} ${ndelta} + + # Compute smallest slab size that is an integer multiple of reg_size. 
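A worked illustration of this search (editor's numbers, not generated output): with 4 KiB pages and the 96-byte class (lg_grp=6, lg_delta=5, ndelta=1), the loop tries 4096 bytes (42 regs, 42*96 = 4032: remainder), then 8192 (85 regs, 85*96 = 8160: remainder), and accepts 12288, since 128*96 = 12288 exactly, giving slab_size_pgs=3.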
+ try_slab_size=${p} + try_nregs=$((${try_slab_size} / ${reg_size})) + perfect=0 + while [ ${perfect} -eq 0 ] ; do + perfect_slab_size=${try_slab_size} + perfect_nregs=${try_nregs} + + try_slab_size=$((${try_slab_size} + ${p})) + try_nregs=$((${try_slab_size} / ${reg_size})) + if [ ${perfect_slab_size} -eq $((${perfect_nregs} * ${reg_size})) ] ; then + perfect=1 + fi + done + + slab_size_pgs=$((${perfect_slab_size} / ${p})) +} + size_class() { index=$1 lg_grp=$2 @@ -48,6 +96,21 @@ size_class() { lg_p=$5 lg_kmax=$6 + if [ ${lg_delta} -ge ${lg_p} ] ; then + psz="yes" + else + pow2 ${lg_p}; p=${pow2_result} + pow2 ${lg_grp}; grp=${pow2_result} + pow2 ${lg_delta}; delta=${pow2_result} + sz=$((${grp} + ${delta} * ${ndelta})) + npgs=$((${sz} / ${p})) + if [ ${sz} -eq $((${npgs} * ${p})) ] ; then + psz="yes" + else + psz="no" + fi + fi + lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta} if [ ${pow2_result} -lt ${ndelta} ] ; then rem="yes" @@ -65,8 +128,10 @@ size_class() { if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then bin="yes" + slab_size ${lg_p} ${lg_grp} ${lg_delta} ${ndelta}; pgs=${slab_size_pgs} else bin="no" + pgs=0 fi if [ ${lg_size} -lt ${lg_kmax} \ -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then @@ -74,14 +139,16 @@ size_class() { else lg_delta_lookup="no" fi - printf ' SC(%3d, %6d, %8d, %6d, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${bin} ${lg_delta_lookup} + printf ' SC(%3d, %6d, %8d, %6d, %3s, %3s, %3d, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${pgs} ${lg_delta_lookup} # Defined upon return: - # - lg_delta_lookup (${lg_delta} or "no") + # - psz ("yes" or "no") # - bin ("yes" or "no") + # - pgs + # - lg_delta_lookup (${lg_delta} or "no") } sep_line() { - echo " \\" + echo " \\" } size_classes() { @@ -94,13 +161,14 @@ size_classes() { pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result} pow2 ${lg_g}; g=${pow2_result} - echo "#define SIZE_CLASSES \\" - echo " /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \\" + echo "#define SIZE_CLASSES \\" + echo " /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \\" ntbins=0 nlbins=0 lg_tiny_maxclass='"NA"' nbins=0 + npsizes=0 # Tiny size classes. ndelta=0 @@ -112,6 +180,9 @@ size_classes() { if [ ${lg_delta_lookup} != "no" ] ; then nlbins=$((${index} + 1)) fi + if [ ${psz} = "yes" ] ; then + npsizes=$((${npsizes} + 1)) + fi if [ ${bin} != "no" ] ; then nbins=$((${index} + 1)) fi @@ -133,19 +204,25 @@ size_classes() { index=$((${index} + 1)) lg_grp=$((${lg_grp} + 1)) lg_delta=$((${lg_delta} + 1)) + if [ ${psz} = "yes" ] ; then + npsizes=$((${npsizes} + 1)) + fi fi while [ ${ndelta} -lt ${g} ] ; do size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax} index=$((${index} + 1)) ndelta=$((${ndelta} + 1)) + if [ ${psz} = "yes" ] ; then + npsizes=$((${npsizes} + 1)) + fi done # All remaining groups. 
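For example, with lg_p=12 the new psz test classifies the 20 KiB class (lg_grp=14, lg_delta=12, ndelta=1) as page-aligned via the lg_delta >= lg_p shortcut, whereas the 1280-byte class (lg_grp=10, lg_delta=8, ndelta=1) computes npgs=0 and 0*4096 != 1280, so psz="no" and it does not count toward npsizes.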
lg_grp=$((${lg_grp} + ${lg_g})) - while [ ${lg_grp} -lt ${ptr_bits} ] ; do + while [ ${lg_grp} -lt $((${ptr_bits} - 1)) ] ; do sep_line ndelta=1 - if [ ${lg_grp} -eq $((${ptr_bits} - 1)) ] ; then + if [ ${lg_grp} -eq $((${ptr_bits} - 2)) ] ; then ndelta_limit=$((${g} - 1)) else ndelta_limit=${g} @@ -157,6 +234,9 @@ size_classes() { # Final written value is correct: lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" fi + if [ ${psz} = "yes" ] ; then + npsizes=$((${npsizes} + 1)) + fi if [ ${bin} != "no" ] ; then nbins=$((${index} + 1)) # Final written value is correct: @@ -168,7 +248,7 @@ size_classes() { fi fi # Final written value is correct: - huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" + large_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))" index=$((${index} + 1)) ndelta=$((${ndelta} + 1)) done @@ -177,51 +257,61 @@ size_classes() { done echo nsizes=${index} + lg_ceil ${nsizes}; lg_ceil_nsizes=${lg_ceil_result} # Defined upon completion: # - ntbins # - nlbins # - nbins # - nsizes + # - lg_ceil_nsizes + # - npsizes # - lg_tiny_maxclass # - lookup_maxclass # - small_maxclass # - lg_large_minclass - # - huge_maxclass + # - large_maxclass } cat < 255) +#if (NBINS > 256) # error "Too many small size classes" #endif -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_SIZE_CLASSES_H */ EOF diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/smoothstep.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/smoothstep.h new file mode 100644 index 0000000..2e14430 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/smoothstep.h @@ -0,0 +1,232 @@ +#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H +#define JEMALLOC_INTERNAL_SMOOTHSTEP_H + +/* + * This file was generated by the following command: + * sh smoothstep.sh smoother 200 24 3 15 + */ +/******************************************************************************/ + +/* + * This header defines a precomputed table based on the smoothstep family of + * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0 + * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so + * that floating point math can be avoided. 
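A quick way to sanity-check the encoding against the table that follows: each entry's h column stores y scaled by 2^SMOOTHSTEP_BFP = 2^24 and truncated toward zero, so the midpoint y = 0.5 at STEP 100 is exactly 0x0000000000800000, and the tiny first entry (0x14, i.e. y of about 1.2e-6) reflects smootherstep's zero first and second derivatives at the endpoints.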
+ * + * 3 2 + * smoothstep(x) = -2x + 3x + * + * 5 4 3 + * smootherstep(x) = 6x - 15x + 10x + * + * 7 6 5 4 + * smootheststep(x) = -20x + 70x - 84x + 35x + */ + +#define SMOOTHSTEP_VARIANT "smoother" +#define SMOOTHSTEP_NSTEPS 200 +#define SMOOTHSTEP_BFP 24 +#define SMOOTHSTEP \ + /* STEP(step, h, x, y) */ \ + STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \ + STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \ + STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \ + STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \ + STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \ + STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \ + STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \ + STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \ + STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \ + STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \ + STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \ + STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \ + STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \ + STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \ + STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \ + STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \ + STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \ + STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \ + STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \ + STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \ + STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \ + STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \ + STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \ + STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \ + STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \ + STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \ + STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \ + STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \ + STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \ + STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \ + STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \ + STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \ + STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \ + STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \ + STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \ + STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \ + STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \ + STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \ + STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \ + STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \ + STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \ + STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \ + STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \ + STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \ + STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \ + STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \ + STEP( 47, 
UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \ + STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \ + STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \ + STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \ + STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \ + STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \ + STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \ + STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \ + STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \ + STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \ + STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \ + STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \ + STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \ + STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \ + STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \ + STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \ + STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \ + STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \ + STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \ + STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \ + STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \ + STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \ + STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \ + STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \ + STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \ + STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \ + STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \ + STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \ + STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \ + STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \ + STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \ + STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \ + STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \ + STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \ + STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \ + STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \ + STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \ + STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \ + STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \ + STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \ + STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \ + STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \ + STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \ + STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \ + STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \ + STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \ + STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \ + STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \ + STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \ + STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \ + STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 
0.471891870443750) \ + STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \ + STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \ + STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \ + STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \ + STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \ + STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \ + STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \ + STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \ + STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \ + STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \ + STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \ + STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \ + STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \ + STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \ + STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \ + STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \ + STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \ + STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \ + STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \ + STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \ + STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \ + STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \ + STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \ + STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \ + STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \ + STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \ + STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \ + STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \ + STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \ + STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \ + STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \ + STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \ + STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \ + STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \ + STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \ + STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \ + STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \ + STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \ + STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \ + STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \ + STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \ + STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \ + STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \ + STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \ + STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \ + STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \ + STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \ + STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \ + STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \ + STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 
0.880035843881250) \ + STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \ + STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \ + STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \ + STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \ + STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \ + STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \ + STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \ + STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \ + STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \ + STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \ + STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \ + STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \ + STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \ + STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \ + STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \ + STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \ + STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \ + STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \ + STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \ + STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \ + STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \ + STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \ + STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \ + STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \ + STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \ + STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \ + STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \ + STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \ + STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \ + STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \ + STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \ + STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \ + STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \ + STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \ + STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \ + STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \ + STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \ + STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \ + STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \ + STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \ + STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \ + STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \ + STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \ + STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \ + STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \ + STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \ + STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \ + STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \ + STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \ + STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 
0.999967004818750) \ + STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \ + STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \ + STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \ + +#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/smoothstep.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/smoothstep.sh new file mode 100755 index 0000000..65de97b --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/smoothstep.sh @@ -0,0 +1,101 @@ +#!/bin/sh +# +# Generate a discrete lookup table for a sigmoid function in the smoothstep +# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table +# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode +# the entries using a binary fixed point representation. +# +# Usage: smoothstep.sh +# +# is in {smooth, smoother, smoothest}. +# must be greater than zero. +# must be in [0..62]; reasonable values are roughly [10..30]. +# is x decimal precision. +# is y decimal precision. + +#set -x + +cmd="sh smoothstep.sh $*" +variant=$1 +nsteps=$2 +bfp=$3 +xprec=$4 +yprec=$5 + +case "${variant}" in + smooth) + ;; + smoother) + ;; + smoothest) + ;; + *) + echo "Unsupported variant" + exit 1 + ;; +esac + +smooth() { + step=$1 + y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` + h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` +} + +smoother() { + step=$1 + y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` + h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` +} + +smoothest() { + step=$1 + y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'` + h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' ` +} + +cat <iteration < 5) { + for (i = 0; i < (1U << spin->iteration); i++) { + spin_cpu_spinwait(); + } + spin->iteration++; + } else { +#ifdef _WIN32 + SwitchToThread(); +#else + sched_yield(); +#endif + } +} + +#undef SPIN_INLINE + +#endif /* JEMALLOC_INTERNAL_SPIN_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/stats.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/stats.h new file mode 100644 index 0000000..852e342 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/stats.h @@ -0,0 +1,30 @@ +#ifndef JEMALLOC_INTERNAL_STATS_H +#define JEMALLOC_INTERNAL_STATS_H + +/* OPTION(opt, var_name, default, set_value_to) */ +#define STATS_PRINT_OPTIONS \ + OPTION('J', json, false, true) \ + OPTION('g', general, true, false) \ + OPTION('m', merged, config_stats, false) \ + OPTION('d', destroyed, config_stats, false) \ + OPTION('a', unmerged, config_stats, false) \ + OPTION('b', bins, true, false) \ + OPTION('l', large, true, false) \ + OPTION('x', mutex, true, false) + +enum { +#define OPTION(o, v, d, s) stats_print_option_num_##v, + STATS_PRINT_OPTIONS +#undef OPTION + stats_print_tot_num_options +}; + +/* Options for stats_print. 
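 * [Editor's example — not part of the patch] These are the characters
 * accepted in the opts string of malloc_stats_print(); passing a
 * character sets var_name to set_value_to, so "J" switches the output
 * to JSON while "gbl" suppresses the general, bins, and large
 * sections. A minimal caller:
 *
 *   #include <jemalloc/jemalloc.h>
 *   malloc_stats_print(NULL, NULL, "J");  // JSON stats via malloc_message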
*/ +extern bool opt_stats_print; +extern char opt_stats_print_opts[stats_print_tot_num_options+1]; + +/* Implements je_malloc_stats_print. */ +void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts); + +#endif /* JEMALLOC_INTERNAL_STATS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/sz.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/sz.h new file mode 100644 index 0000000..9794628 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/sz.h @@ -0,0 +1,317 @@ +#ifndef JEMALLOC_INTERNAL_SIZE_H +#define JEMALLOC_INTERNAL_SIZE_H + +#include "jemalloc/internal/bit_util.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/util.h" + +/* + * sz module: Size computations. + * + * Some abbreviations used here: + * p: Page + * ind: Index + * s, sz: Size + * u: Usable size + * a: Aligned + * + * These are not always used completely consistently, but should be enough to + * interpret function names. E.g. sz_psz2ind converts page size to page size + * index; sz_sa2u converts a (size, alignment) allocation request to the usable + * size that would result from such an allocation. + */ + +/* + * sz_pind2sz_tab encodes the same information as could be computed by + * sz_pind2sz_compute(). + */ +extern size_t const sz_pind2sz_tab[NPSIZES+1]; +/* + * sz_index2size_tab encodes the same information as could be computed (at + * unacceptable cost in some code paths) by sz_index2size_compute(). + */ +extern size_t const sz_index2size_tab[NSIZES]; +/* + * sz_size2index_tab is a compact lookup table that rounds request sizes up to + * size classes. In order to reduce cache footprint, the table is compressed, + * and all accesses are via sz_size2index(). + */ +extern uint8_t const sz_size2index_tab[]; + +static const size_t sz_large_pad = +#ifdef JEMALLOC_CACHE_OBLIVIOUS + PAGE +#else + 0 +#endif + ; + +JEMALLOC_ALWAYS_INLINE pszind_t +sz_psz2ind(size_t psz) { + if (unlikely(psz > LARGE_MAXCLASS)) { + return NPSIZES; + } + { + pszind_t x = lg_floor((psz<<1)-1); + pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x - + (LG_SIZE_CLASS_GROUP + LG_PAGE); + pszind_t grp = shift << LG_SIZE_CLASS_GROUP; + + pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? + LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; + + size_t delta_inverse_mask = ZU(-1) << lg_delta; + pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) & + ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + pszind_t ind = grp + mod; + return ind; + } +} + +static inline size_t +sz_pind2sz_compute(pszind_t pind) { + if (unlikely(pind == NPSIZES)) { + return LARGE_MAXCLASS + PAGE; + } + { + size_t grp = pind >> LG_SIZE_CLASS_GROUP; + size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + size_t grp_size_mask = ~((!!grp)-1); + size_t grp_size = ((ZU(1) << (LG_PAGE + + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; + + size_t shift = (grp == 0) ? 
1 : grp; + size_t lg_delta = shift + (LG_PAGE-1); + size_t mod_size = (mod+1) << lg_delta; + + size_t sz = grp_size + mod_size; + return sz; + } +} + +static inline size_t +sz_pind2sz_lookup(pszind_t pind) { + size_t ret = (size_t)sz_pind2sz_tab[pind]; + assert(ret == sz_pind2sz_compute(pind)); + return ret; +} + +static inline size_t +sz_pind2sz(pszind_t pind) { + assert(pind < NPSIZES+1); + return sz_pind2sz_lookup(pind); +} + +static inline size_t +sz_psz2u(size_t psz) { + if (unlikely(psz > LARGE_MAXCLASS)) { + return LARGE_MAXCLASS + PAGE; + } + { + size_t x = lg_floor((psz<<1)-1); + size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? + LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; + size_t delta = ZU(1) << lg_delta; + size_t delta_mask = delta - 1; + size_t usize = (psz + delta_mask) & ~delta_mask; + return usize; + } +} + +static inline szind_t +sz_size2index_compute(size_t size) { + if (unlikely(size > LARGE_MAXCLASS)) { + return NSIZES; + } +#if (NTBINS != 0) + if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { + szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; + szind_t lg_ceil = lg_floor(pow2_ceil_zu(size)); + return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin); + } +#endif + { + szind_t x = lg_floor((size<<1)-1); + szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 : + x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM); + szind_t grp = shift << LG_SIZE_CLASS_GROUP; + + szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) + ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; + + size_t delta_inverse_mask = ZU(-1) << lg_delta; + szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) & + ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + szind_t index = NTBINS + grp + mod; + return index; + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +sz_size2index_lookup(size_t size) { + assert(size <= LOOKUP_MAXCLASS); + { + szind_t ret = (sz_size2index_tab[(size-1) >> LG_TINY_MIN]); + assert(ret == sz_size2index_compute(size)); + return ret; + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +sz_size2index(size_t size) { + assert(size > 0); + if (likely(size <= LOOKUP_MAXCLASS)) { + return sz_size2index_lookup(size); + } + return sz_size2index_compute(size); +} + +static inline size_t +sz_index2size_compute(szind_t index) { +#if (NTBINS > 0) + if (index < NTBINS) { + return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index)); + } +#endif + { + size_t reduced_index = index - NTBINS; + size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP; + size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) - + 1); + + size_t grp_size_mask = ~((!!grp)-1); + size_t grp_size = ((ZU(1) << (LG_QUANTUM + + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; + + size_t shift = (grp == 0) ? 1 : grp; + size_t lg_delta = shift + (LG_QUANTUM-1); + size_t mod_size = (mod+1) << lg_delta; + + size_t usize = grp_size + mod_size; + return usize; + } +} + +JEMALLOC_ALWAYS_INLINE size_t +sz_index2size_lookup(szind_t index) { + size_t ret = (size_t)sz_index2size_tab[index]; + assert(ret == sz_index2size_compute(index)); + return ret; +} + +JEMALLOC_ALWAYS_INLINE size_t +sz_index2size(szind_t index) { + assert(index < NSIZES); + return sz_index2size_lookup(index); +} + +JEMALLOC_ALWAYS_INLINE size_t +sz_s2u_compute(size_t size) { + if (unlikely(size > LARGE_MAXCLASS)) { + return 0; + } +#if (NTBINS > 0) + if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { + size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; + size_t lg_ceil = lg_floor(pow2_ceil_zu(size)); + return (lg_ceil < lg_tmin ? 
(ZU(1) << lg_tmin) : + (ZU(1) << lg_ceil)); + } +#endif + { + size_t x = lg_floor((size<<1)-1); + size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) + ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; + size_t delta = ZU(1) << lg_delta; + size_t delta_mask = delta - 1; + size_t usize = (size + delta_mask) & ~delta_mask; + return usize; + } +} + +JEMALLOC_ALWAYS_INLINE size_t +sz_s2u_lookup(size_t size) { + size_t ret = sz_index2size_lookup(sz_size2index_lookup(size)); + + assert(ret == sz_s2u_compute(size)); + return ret; +} + +/* + * Compute usable size that would result from allocating an object with the + * specified size. + */ +JEMALLOC_ALWAYS_INLINE size_t +sz_s2u(size_t size) { + assert(size > 0); + if (likely(size <= LOOKUP_MAXCLASS)) { + return sz_s2u_lookup(size); + } + return sz_s2u_compute(size); +} + +/* + * Compute usable size that would result from allocating an object with the + * specified size and alignment. + */ +JEMALLOC_ALWAYS_INLINE size_t +sz_sa2u(size_t size, size_t alignment) { + size_t usize; + + assert(alignment != 0 && ((alignment - 1) & alignment) == 0); + + /* Try for a small size class. */ + if (size <= SMALL_MAXCLASS && alignment < PAGE) { + /* + * Round size up to the nearest multiple of alignment. + * + * This done, we can take advantage of the fact that for each + * small size class, every object is aligned at the smallest + * power of two that is non-zero in the base two representation + * of the size. For example: + * + * Size | Base 2 | Minimum alignment + * -----+----------+------------------ + * 96 | 1100000 | 32 + * 144 | 10100000 | 32 + * 192 | 11000000 | 64 + */ + usize = sz_s2u(ALIGNMENT_CEILING(size, alignment)); + if (usize < LARGE_MINCLASS) { + return usize; + } + } + + /* Large size class. Beware of overflow. */ + + if (unlikely(alignment > LARGE_MAXCLASS)) { + return 0; + } + + /* Make sure result is a large size class. */ + if (size <= LARGE_MINCLASS) { + usize = LARGE_MINCLASS; + } else { + usize = sz_s2u(size); + if (usize < size) { + /* size_t overflow. */ + return 0; + } + } + + /* + * Calculate the multi-page mapping that large_palloc() would need in + * order to guarantee the alignment. + */ + if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) { + /* size_t overflow. */ + return 0; + } + return usize; +} + +#endif /* JEMALLOC_INTERNAL_SIZE_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/tcache.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/tcache.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache.h diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_externs.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_externs.h new file mode 100644 index 0000000..790367b --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_externs.h @@ -0,0 +1,55 @@ +#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H +#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H + +#include "jemalloc/internal/size_classes.h" + +extern bool opt_tcache; +extern ssize_t opt_lg_tcache_max; + +extern cache_bin_info_t *tcache_bin_info; + +/* + * Number of tcache bins. There are NBINS small-object bins, plus 0 or more + * large-object bins. 
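 * [Editor's note — assumption, based on tcache_boot() in tcache.c]
 * nhbins is derived from tcache_maxclass, which is in turn clamped
 * from opt_lg_tcache_max; with the default LG_TCACHE_MAXCLASS_DEFAULT
 * == 15 the tcache covers every size class up to 32 KiB:
 *
 *   tcache_maxclass = ZU(1) << opt_lg_tcache_max;  // 32768
 *   nhbins = sz_size2index(tcache_maxclass) + 1;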
+ */ +extern unsigned nhbins; + +/* Maximum cached size class. */ +extern size_t tcache_maxclass; + +/* + * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and + * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are + * completely disjoint from this data structure. tcaches starts off as a sparse + * array, so it has no physical memory footprint until individual pages are + * touched. This allows the entire array to be allocated the first time an + * explicit tcache is created without a disproportionate impact on memory usage. + */ +extern tcaches_t *tcaches; + +size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); +void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); +void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + cache_bin_t *tbin, szind_t binind, bool *tcache_success); +void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, + szind_t binind, unsigned rem); +void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, + unsigned rem, tcache_t *tcache); +void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, + arena_t *arena); +tcache_t *tcache_create_explicit(tsd_t *tsd); +void tcache_cleanup(tsd_t *tsd); +void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); +bool tcaches_create(tsd_t *tsd, unsigned *r_ind); +void tcaches_flush(tsd_t *tsd, unsigned ind); +void tcaches_destroy(tsd_t *tsd, unsigned ind); +bool tcache_boot(tsdn_t *tsdn); +void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); +void tcache_prefork(tsdn_t *tsdn); +void tcache_postfork_parent(tsdn_t *tsdn); +void tcache_postfork_child(tsdn_t *tsdn); +void tcache_flush(tsd_t *tsd); +bool tsd_tcache_data_init(tsd_t *tsd); +bool tsd_tcache_enabled_data_init(tsd_t *tsd); + +#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_inlines.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_inlines.h new file mode 100644 index 0000000..0f6ab8c --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_inlines.h @@ -0,0 +1,223 @@ +#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H +#define JEMALLOC_INTERNAL_TCACHE_INLINES_H + +#include "jemalloc/internal/bin.h" +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/util.h" + +static inline bool +tcache_enabled_get(tsd_t *tsd) { + return tsd_tcache_enabled_get(tsd); +} + +static inline void +tcache_enabled_set(tsd_t *tsd, bool enabled) { + bool was_enabled = tsd_tcache_enabled_get(tsd); + + if (!was_enabled && enabled) { + tsd_tcache_data_init(tsd); + } else if (was_enabled && !enabled) { + tcache_cleanup(tsd); + } + /* Commit the state last. Above calls check current state. 
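 * [Editor's example — not part of the patch] This toggle ultimately
 * services the public "thread.tcache.enabled" mallctl; e.g. an
 * application can disable caching on the current thread with:
 *
 *   bool enabled = false;
 *   mallctl("thread.tcache.enabled", NULL, NULL,
 *       &enabled, sizeof(enabled));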
*/ + tsd_tcache_enabled_set(tsd, enabled); + tsd_slow_update(tsd); +} + +JEMALLOC_ALWAYS_INLINE void +tcache_event(tsd_t *tsd, tcache_t *tcache) { + if (TCACHE_GC_INCR == 0) { + return; + } + + if (unlikely(ticker_tick(&tcache->gc_ticker))) { + tcache_event_hard(tsd, tcache); + } +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, + UNUSED size_t size, szind_t binind, bool zero, bool slow_path) { + void *ret; + cache_bin_t *bin; + bool tcache_success; + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + + assert(binind < NBINS); + bin = tcache_small_bin_get(tcache, binind); + ret = cache_bin_alloc_easy(bin, &tcache_success); + assert(tcache_success == (ret != NULL)); + if (unlikely(!tcache_success)) { + bool tcache_hard_success; + arena = arena_choose(tsd, arena); + if (unlikely(arena == NULL)) { + return NULL; + } + + ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, + bin, binind, &tcache_hard_success); + if (tcache_hard_success == false) { + return NULL; + } + } + + assert(ret); + /* + * Only compute usize if required. The checks in the following if + * statement are all static. + */ + if (config_prof || (slow_path && config_fill) || unlikely(zero)) { + usize = sz_index2size(binind); + assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize); + } + + if (likely(!zero)) { + if (slow_path && config_fill) { + if (unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, &bin_infos[binind], + false); + } else if (unlikely(opt_zero)) { + memset(ret, 0, usize); + } + } + } else { + if (slow_path && config_fill && unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, &bin_infos[binind], true); + } + memset(ret, 0, usize); + } + + if (config_stats) { + bin->tstats.nrequests++; + } + if (config_prof) { + tcache->prof_accumbytes += usize; + } + tcache_event(tsd, tcache); + return ret; +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, + szind_t binind, bool zero, bool slow_path) { + void *ret; + cache_bin_t *bin; + bool tcache_success; + + assert(binind >= NBINS &&binind < nhbins); + bin = tcache_large_bin_get(tcache, binind); + ret = cache_bin_alloc_easy(bin, &tcache_success); + assert(tcache_success == (ret != NULL)); + if (unlikely(!tcache_success)) { + /* + * Only allocate one large object at a time, because it's quite + * expensive to create one and not use it. 
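 * [Editor's note] Contrast this with tcache_alloc_small() above: a
 * small-bin miss has tcache_alloc_small_hard() refill the bin with a
 * batch of objects, whereas a large-bin miss falls through to a single
 * direct large_malloc() call below and leaves the cache bin empty.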
+ */ + arena = arena_choose(tsd, arena); + if (unlikely(arena == NULL)) { + return NULL; + } + + ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero); + if (ret == NULL) { + return NULL; + } + } else { + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + + /* Only compute usize on demand */ + if (config_prof || (slow_path && config_fill) || + unlikely(zero)) { + usize = sz_index2size(binind); + assert(usize <= tcache_maxclass); + } + + if (likely(!zero)) { + if (slow_path && config_fill) { + if (unlikely(opt_junk_alloc)) { + memset(ret, JEMALLOC_ALLOC_JUNK, + usize); + } else if (unlikely(opt_zero)) { + memset(ret, 0, usize); + } + } + } else { + memset(ret, 0, usize); + } + + if (config_stats) { + bin->tstats.nrequests++; + } + if (config_prof) { + tcache->prof_accumbytes += usize; + } + } + + tcache_event(tsd, tcache); + return ret; +} + +JEMALLOC_ALWAYS_INLINE void +tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, + bool slow_path) { + cache_bin_t *bin; + cache_bin_info_t *bin_info; + + assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS); + + if (slow_path && config_fill && unlikely(opt_junk_free)) { + arena_dalloc_junk_small(ptr, &bin_infos[binind]); + } + + bin = tcache_small_bin_get(tcache, binind); + bin_info = &tcache_bin_info[binind]; + if (unlikely(bin->ncached == bin_info->ncached_max)) { + tcache_bin_flush_small(tsd, tcache, bin, binind, + (bin_info->ncached_max >> 1)); + } + assert(bin->ncached < bin_info->ncached_max); + bin->ncached++; + *(bin->avail - bin->ncached) = ptr; + + tcache_event(tsd, tcache); +} + +JEMALLOC_ALWAYS_INLINE void +tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, + bool slow_path) { + cache_bin_t *bin; + cache_bin_info_t *bin_info; + + assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS); + assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); + + if (slow_path && config_fill && unlikely(opt_junk_free)) { + large_dalloc_junk(ptr, sz_index2size(binind)); + } + + bin = tcache_large_bin_get(tcache, binind); + bin_info = &tcache_bin_info[binind]; + if (unlikely(bin->ncached == bin_info->ncached_max)) { + tcache_bin_flush_large(tsd, bin, binind, + (bin_info->ncached_max >> 1), tcache); + } + assert(bin->ncached < bin_info->ncached_max); + bin->ncached++; + *(bin->avail - bin->ncached) = ptr; + + tcache_event(tsd, tcache); +} + +JEMALLOC_ALWAYS_INLINE tcache_t * +tcaches_get(tsd_t *tsd, unsigned ind) { + tcaches_t *elm = &tcaches[ind]; + if (unlikely(elm->tcache == NULL)) { + elm->tcache = tcache_create_explicit(tsd); + } + return elm->tcache; +} + +#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_structs.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_structs.h new file mode 100644 index 0000000..07b7387 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_structs.h @@ -0,0 +1,61 @@ +#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H +#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H + +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/cache_bin.h" +#include "jemalloc/internal/ticker.h" + +struct tcache_s { + /* + * To minimize our cache-footprint, we put the frequently accessed data + * together at the start of this struct. + */ + + /* Cleared after arena_prof_accum(). */ + uint64_t prof_accumbytes; + /* Drives incremental GC. 
*/ + ticker_t gc_ticker; + /* + * The pointer stacks associated with bins follow as a contiguous array. + * During tcache initialization, the avail pointer in each element of + * tbins is initialized to point to the proper offset within this array. + */ + cache_bin_t bins_small[NBINS]; + + /* + * This data is less hot; we can be a little less careful with our + * footprint here. + */ + /* Lets us track all the tcaches in an arena. */ + ql_elm(tcache_t) link; + /* + * The descriptor lets the arena find our cache bins without seeing the + * tcache definition. This enables arenas to aggregate stats across + * tcaches without having a tcache dependency. + */ + cache_bin_array_descriptor_t cache_bin_array_descriptor; + + /* The arena this tcache is associated with. */ + arena_t *arena; + /* Next bin to GC. */ + szind_t next_gc_bin; + /* For small bins, fill (ncached_max >> lg_fill_div). */ + uint8_t lg_fill_div[NBINS]; + /* + * We put the cache bins for large size classes at the end of the + * struct, since some of them might not get used. This might end up + * letting us avoid touching an extra page if we don't have to. + */ + cache_bin_t bins_large[NSIZES-NBINS]; +}; + +/* Linkage for list of available (previously used) explicit tcache IDs. */ +struct tcaches_s { + union { + tcache_t *tcache; + tcaches_t *next; + }; +}; + +#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_types.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_types.h new file mode 100644 index 0000000..e49bc9d --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tcache_types.h @@ -0,0 +1,56 @@ +#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H +#define JEMALLOC_INTERNAL_TCACHE_TYPES_H + +#include "jemalloc/internal/size_classes.h" + +typedef struct tcache_s tcache_t; +typedef struct tcaches_s tcaches_t; + +/* + * tcache pointers close to NULL are used to encode state information that is + * used for two purposes: preventing thread caching on a per thread basis and + * cleaning up during thread shutdown. + */ +#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) +#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) +#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) +#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY + +/* + * Absolute minimum number of cache slots for each small bin. + */ +#define TCACHE_NSLOTS_SMALL_MIN 20 + +/* + * Absolute maximum number of cache slots for each small bin in the thread + * cache. This is an additional constraint beyond that imposed as: twice the + * number of regions per slab for this size class. + * + * This constant must be an even number. + */ +#define TCACHE_NSLOTS_SMALL_MAX 200 + +/* Number of cache slots for large size classes. */ +#define TCACHE_NSLOTS_LARGE 20 + +/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ +#define LG_TCACHE_MAXCLASS_DEFAULT 15 + +/* + * TCACHE_GC_SWEEP is the approximate number of allocation events between + * full GC sweeps. Integer rounding may cause the actual number to be + * slightly higher, since GC is performed incrementally. + */ +#define TCACHE_GC_SWEEP 8192 + +/* Number of tcache allocation/deallocation events between incremental GCs. */ +#define TCACHE_GC_INCR \ + ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) + +/* Used in TSD static initializer only. Real init in tcache_data_init(). 
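 * [Editor's note] Worked example for TCACHE_GC_INCR above, assuming a
 * common 64-bit build where NBINS == 36: 8192 / 36 == 227 by integer
 * division, plus 1 because the quotient is nonzero, so an incremental
 * GC pass fires every 228 tcache events and a sweep over all bins
 * completes in roughly TCACHE_GC_SWEEP events.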
*/ +#define TCACHE_ZERO_INITIALIZER {0} + +/* Used in TSD static initializer only. Will be initialized to opt_tcache. */ +#define TCACHE_ENABLED_ZERO_INITIALIZER false + +#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ticker.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ticker.h new file mode 100644 index 0000000..4b36047 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/ticker.h @@ -0,0 +1,78 @@ +#ifndef JEMALLOC_INTERNAL_TICKER_H +#define JEMALLOC_INTERNAL_TICKER_H + +#include "jemalloc/internal/util.h" + +/** + * A ticker makes it easy to count-down events until some limit. You + * ticker_init the ticker to trigger every nticks events. You then notify it + * that an event has occurred with calls to ticker_tick (or that nticks events + * have occurred with a call to ticker_ticks), which will return true (and reset + * the counter) if the countdown hit zero. + */ + +typedef struct { + int32_t tick; + int32_t nticks; +} ticker_t; + +static inline void +ticker_init(ticker_t *ticker, int32_t nticks) { + ticker->tick = nticks; + ticker->nticks = nticks; +} + +static inline void +ticker_copy(ticker_t *ticker, const ticker_t *other) { + *ticker = *other; +} + +static inline int32_t +ticker_read(const ticker_t *ticker) { + return ticker->tick; +} + +/* + * Not intended to be a public API. Unfortunately, on x86, neither gcc nor + * clang seems smart enough to turn + * ticker->tick -= nticks; + * if (unlikely(ticker->tick < 0)) { + * fixup ticker + * return true; + * } + * return false; + * into + * subq %nticks_reg, (%ticker_reg) + * js fixup ticker + * + * unless we force "fixup ticker" out of line. In that case, gcc gets it right, + * but clang now does worse than before. So, on x86 with gcc, we force it out + * of line, but otherwise let the inlining occur. Ordinarily this wouldn't be + * worth the hassle, but this is on the fast path of both malloc and free (via + * tcache_event). 
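 * [Editor's example — not part of the patch] Typical usage of this
 * ticker API (ticker_tick() is defined just below), mirroring how
 * tcache_event() drives incremental GC:
 *
 *   ticker_t t;
 *   ticker_init(&t, 228);        // e.g. TCACHE_GC_INCR
 *   ...
 *   if (ticker_tick(&t)) {
 *       // fires once per 228 events; do the periodic work
 *   }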
+ */ +#if defined(__GNUC__) && !defined(__clang__) \ + && (defined(__x86_64__) || defined(__i386__)) +JEMALLOC_NOINLINE +#endif +static bool +ticker_fixup(ticker_t *ticker) { + ticker->tick = ticker->nticks; + return true; +} + +static inline bool +ticker_ticks(ticker_t *ticker, int32_t nticks) { + ticker->tick -= nticks; + if (unlikely(ticker->tick < 0)) { + return ticker_fixup(ticker); + } + return false; +} + +static inline bool +ticker_tick(ticker_t *ticker) { + return ticker_ticks(ticker, 1); +} + +#endif /* JEMALLOC_INTERNAL_TICKER_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd.h new file mode 100644 index 0000000..0b9841a --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd.h @@ -0,0 +1,326 @@ +#ifndef JEMALLOC_INTERNAL_TSD_H +#define JEMALLOC_INTERNAL_TSD_H + +#include "jemalloc/internal/arena_types.h" +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/jemalloc_internal_externs.h" +#include "jemalloc/internal/prof_types.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/rtree_tsd.h" +#include "jemalloc/internal/tcache_types.h" +#include "jemalloc/internal/tcache_structs.h" +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/witness.h" + +/* + * Thread-Specific-Data layout + * --- data accessed on tcache fast path: state, rtree_ctx, stats, prof --- + * s: state + * e: tcache_enabled + * m: thread_allocated (config_stats) + * f: thread_deallocated (config_stats) + * p: prof_tdata (config_prof) + * c: rtree_ctx (rtree cache accessed on deallocation) + * t: tcache + * --- data not accessed on tcache fast path: arena-related fields --- + * d: arenas_tdata_bypass + * r: reentrancy_level + * x: narenas_tdata + * i: iarena + * a: arena + * o: arenas_tdata + * Loading TSD data is on the critical path of basically all malloc operations. + * In particular, tcache and rtree_ctx rely on hot CPU cache to be effective. + * Use a compact layout to reduce cache footprint. + * +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+ + * |---------------------------- 1st cacheline ----------------------------| + * | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] | + * |---------------------------- 2nd cacheline ----------------------------| + * | [c * 64 ........ ........ ........ ........ ........ ........ .......] | + * |---------------------------- 3nd cacheline ----------------------------| + * | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... | + * +-------------------------------------------------------------------------+ + * Note: the entire tcache is embedded into TSD and spans multiple cachelines. + * + * The last 3 members (i, a and o) before tcache isn't really needed on tcache + * fast path. However we have a number of unused tcache bins and witnesses + * (never touched unless config_debug) at the end of tcache, so we place them + * there to avoid breaking the cachelines and possibly paging in an extra page. 
+ */ +#ifdef JEMALLOC_JET +typedef void (*test_callback_t)(int *); +# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10 +# define MALLOC_TEST_TSD \ + O(test_data, int, int) \ + O(test_callback, test_callback_t, int) +# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL +#else +# define MALLOC_TEST_TSD +# define MALLOC_TEST_TSD_INITIALIZER +#endif + +/* O(name, type, nullable type */ +#define MALLOC_TSD \ + O(tcache_enabled, bool, bool) \ + O(arenas_tdata_bypass, bool, bool) \ + O(reentrancy_level, int8_t, int8_t) \ + O(narenas_tdata, uint32_t, uint32_t) \ + O(offset_state, uint64_t, uint64_t) \ + O(thread_allocated, uint64_t, uint64_t) \ + O(thread_deallocated, uint64_t, uint64_t) \ + O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \ + O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \ + O(iarena, arena_t *, arena_t *) \ + O(arena, arena_t *, arena_t *) \ + O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\ + O(tcache, tcache_t, tcache_t) \ + O(witness_tsd, witness_tsd_t, witness_tsdn_t) \ + MALLOC_TEST_TSD + +#define TSD_INITIALIZER { \ + tsd_state_uninitialized, \ + TCACHE_ENABLED_ZERO_INITIALIZER, \ + false, \ + 0, \ + 0, \ + 0, \ + 0, \ + 0, \ + NULL, \ + RTREE_CTX_ZERO_INITIALIZER, \ + NULL, \ + NULL, \ + NULL, \ + TCACHE_ZERO_INITIALIZER, \ + WITNESS_TSD_INITIALIZER \ + MALLOC_TEST_TSD_INITIALIZER \ +} + +enum { + tsd_state_nominal = 0, /* Common case --> jnz. */ + tsd_state_nominal_slow = 1, /* Initialized but on slow path. */ + /* the above 2 nominal states should be lower values. */ + tsd_state_nominal_max = 1, /* used for comparison only. */ + tsd_state_minimal_initialized = 2, + tsd_state_purgatory = 3, + tsd_state_reincarnated = 4, + tsd_state_uninitialized = 5 +}; + +/* Manually limit tsd_state_t to a single byte. */ +typedef uint8_t tsd_state_t; + +/* The actual tsd. */ +struct tsd_s { + /* + * The contents should be treated as totally opaque outside the tsd + * module. Access any thread-local state through the getters and + * setters below. + */ + tsd_state_t state; +#define O(n, t, nt) \ + t use_a_getter_or_setter_instead_##n; +MALLOC_TSD +#undef O +}; + +/* + * Wrapper around tsd_t that makes it possible to avoid implicit conversion + * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be + * explicitly converted to tsd_t, which is non-nullable. + */ +struct tsdn_s { + tsd_t tsd; +}; +#define TSDN_NULL ((tsdn_t *)0) +JEMALLOC_ALWAYS_INLINE tsdn_t * +tsd_tsdn(tsd_t *tsd) { + return (tsdn_t *)tsd; +} + +JEMALLOC_ALWAYS_INLINE bool +tsdn_null(const tsdn_t *tsdn) { + return tsdn == NULL; +} + +JEMALLOC_ALWAYS_INLINE tsd_t * +tsdn_tsd(tsdn_t *tsdn) { + assert(!tsdn_null(tsdn)); + + return &tsdn->tsd; +} + +void *malloc_tsd_malloc(size_t size); +void malloc_tsd_dalloc(void *wrapper); +void malloc_tsd_cleanup_register(bool (*f)(void)); +tsd_t *malloc_tsd_boot0(void); +void malloc_tsd_boot1(void); +void tsd_cleanup(void *arg); +tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal); +void tsd_slow_update(tsd_t *tsd); + +/* + * We put the platform-specific data declarations and inlines into their own + * header files to avoid cluttering this file. They define tsd_boot0, + * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set. 
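 * [Editor's note] For one row of MALLOC_TSD, e.g.
 * O(thread_allocated, uint64_t, uint64_t), the generator macros below
 * expand to accessors of the form:
 *
 *   uint64_t *tsd_thread_allocatedp_get(tsd_t *tsd);
 *   uint64_t tsd_thread_allocated_get(tsd_t *tsd);
 *   void tsd_thread_allocated_set(tsd_t *tsd, uint64_t val);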
+ */ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#include "jemalloc/internal/tsd_malloc_thread_cleanup.h" +#elif (defined(JEMALLOC_TLS)) +#include "jemalloc/internal/tsd_tls.h" +#elif (defined(_WIN32)) +#include "jemalloc/internal/tsd_win.h" +#else +#include "jemalloc/internal/tsd_generic.h" +#endif + +/* + * tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of + * foo. This omits some safety checks, and so can be used during tsd + * initialization and cleanup. + */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE t * \ +tsd_##n##p_get_unsafe(tsd_t *tsd) { \ + return &tsd->use_a_getter_or_setter_instead_##n; \ +} +MALLOC_TSD +#undef O + +/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE t * \ +tsd_##n##p_get(tsd_t *tsd) { \ + assert(tsd->state == tsd_state_nominal || \ + tsd->state == tsd_state_nominal_slow || \ + tsd->state == tsd_state_reincarnated || \ + tsd->state == tsd_state_minimal_initialized); \ + return tsd_##n##p_get_unsafe(tsd); \ +} +MALLOC_TSD +#undef O + +/* + * tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn + * isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type. + */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE nt * \ +tsdn_##n##p_get(tsdn_t *tsdn) { \ + if (tsdn_null(tsdn)) { \ + return NULL; \ + } \ + tsd_t *tsd = tsdn_tsd(tsdn); \ + return (nt *)tsd_##n##p_get(tsd); \ +} +MALLOC_TSD +#undef O + +/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE t \ +tsd_##n##_get(tsd_t *tsd) { \ + return *tsd_##n##p_get(tsd); \ +} +MALLOC_TSD +#undef O + +/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE void \ +tsd_##n##_set(tsd_t *tsd, t val) { \ + assert(tsd->state != tsd_state_reincarnated && \ + tsd->state != tsd_state_minimal_initialized); \ + *tsd_##n##p_get(tsd) = val; \ +} +MALLOC_TSD +#undef O + +JEMALLOC_ALWAYS_INLINE void +tsd_assert_fast(tsd_t *tsd) { + assert(!malloc_slow && tsd_tcache_enabled_get(tsd) && + tsd_reentrancy_level_get(tsd) == 0); +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_fast(tsd_t *tsd) { + bool fast = (tsd->state == tsd_state_nominal); + if (fast) { + tsd_assert_fast(tsd); + } + + return fast; +} + +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch_impl(bool init, bool minimal) { + tsd_t *tsd = tsd_get(init); + + if (!init && tsd_get_allocates() && tsd == NULL) { + return NULL; + } + assert(tsd != NULL); + + if (unlikely(tsd->state != tsd_state_nominal)) { + return tsd_fetch_slow(tsd, minimal); + } + assert(tsd_fast(tsd)); + tsd_assert_fast(tsd); + + return tsd; +} + +/* Get a minimal TSD that requires no cleanup. See comments in free(). */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch_min(void) { + return tsd_fetch_impl(true, true); +} + +/* For internal background threads use only. */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_internal_fetch(void) { + tsd_t *tsd = tsd_fetch_min(); + /* Use reincarnated state to prevent full initialization. 
*/ + tsd->state = tsd_state_reincarnated; + + return tsd; +} + +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch(void) { + return tsd_fetch_impl(true, false); +} + +static inline bool +tsd_nominal(tsd_t *tsd) { + return (tsd->state <= tsd_state_nominal_max); +} + +JEMALLOC_ALWAYS_INLINE tsdn_t * +tsdn_fetch(void) { + if (!tsd_booted_get()) { + return NULL; + } + + return tsd_tsdn(tsd_fetch_impl(false, false)); +} + +JEMALLOC_ALWAYS_INLINE rtree_ctx_t * +tsd_rtree_ctx(tsd_t *tsd) { + return tsd_rtree_ctxp_get(tsd); +} + +JEMALLOC_ALWAYS_INLINE rtree_ctx_t * +tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) { + /* + * If tsd cannot be accessed, initialize the fallback rtree_ctx and + * return a pointer to it. + */ + if (unlikely(tsdn_null(tsdn))) { + rtree_ctx_data_init(fallback); + return fallback; + } + return tsd_rtree_ctx(tsdn_tsd(tsdn)); +} + +#endif /* JEMALLOC_INTERNAL_TSD_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_generic.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_generic.h new file mode 100644 index 0000000..1e52ef7 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_generic.h @@ -0,0 +1,157 @@ +#ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H +#error This file should be included only once, by tsd.h. +#endif +#define JEMALLOC_INTERNAL_TSD_GENERIC_H + +typedef struct tsd_init_block_s tsd_init_block_t; +struct tsd_init_block_s { + ql_elm(tsd_init_block_t) link; + pthread_t thread; + void *data; +}; + +/* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */ +typedef struct tsd_init_head_s tsd_init_head_t; + +typedef struct { + bool initialized; + tsd_t val; +} tsd_wrapper_t; + +void *tsd_init_check_recursion(tsd_init_head_t *head, + tsd_init_block_t *block); +void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); + +extern pthread_key_t tsd_tsd; +extern tsd_init_head_t tsd_init_head; +extern tsd_wrapper_t tsd_boot_wrapper; +extern bool tsd_booted; + +/* Initialization/cleanup. */ +JEMALLOC_ALWAYS_INLINE void +tsd_cleanup_wrapper(void *arg) { + tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg; + + if (wrapper->initialized) { + wrapper->initialized = false; + tsd_cleanup(&wrapper->val); + if (wrapper->initialized) { + /* Trigger another cleanup round. 
*/ + if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) + { + malloc_write(": Error setting TSD\n"); + if (opt_abort) { + abort(); + } + } + return; + } + } + malloc_tsd_dalloc(wrapper); +} + +JEMALLOC_ALWAYS_INLINE void +tsd_wrapper_set(tsd_wrapper_t *wrapper) { + if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) { + malloc_write(": Error setting TSD\n"); + abort(); + } +} + +JEMALLOC_ALWAYS_INLINE tsd_wrapper_t * +tsd_wrapper_get(bool init) { + tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd); + + if (init && unlikely(wrapper == NULL)) { + tsd_init_block_t block; + wrapper = (tsd_wrapper_t *) + tsd_init_check_recursion(&tsd_init_head, &block); + if (wrapper) { + return wrapper; + } + wrapper = (tsd_wrapper_t *) + malloc_tsd_malloc(sizeof(tsd_wrapper_t)); + block.data = (void *)wrapper; + if (wrapper == NULL) { + malloc_write(": Error allocating TSD\n"); + abort(); + } else { + wrapper->initialized = false; + tsd_t initializer = TSD_INITIALIZER; + wrapper->val = initializer; + } + tsd_wrapper_set(wrapper); + tsd_init_finish(&tsd_init_head, &block); + } + return wrapper; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot0(void) { + if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) { + return true; + } + tsd_wrapper_set(&tsd_boot_wrapper); + tsd_booted = true; + return false; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_boot1(void) { + tsd_wrapper_t *wrapper; + wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t)); + if (wrapper == NULL) { + malloc_write(": Error allocating TSD\n"); + abort(); + } + tsd_boot_wrapper.initialized = false; + tsd_cleanup(&tsd_boot_wrapper.val); + wrapper->initialized = false; + tsd_t initializer = TSD_INITIALIZER; + wrapper->val = initializer; + tsd_wrapper_set(wrapper); +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot(void) { + if (tsd_boot0()) { + return true; + } + tsd_boot1(); + return false; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_booted_get(void) { + return tsd_booted; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_get_allocates(void) { + return true; +} + +/* Get/set. */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_get(bool init) { + tsd_wrapper_t *wrapper; + + assert(tsd_booted); + wrapper = tsd_wrapper_get(init); + if (tsd_get_allocates() && !init && wrapper == NULL) { + return NULL; + } + return &wrapper->val; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_set(tsd_t *val) { + tsd_wrapper_t *wrapper; + + assert(tsd_booted); + wrapper = tsd_wrapper_get(true); + if (likely(&wrapper->val != val)) { + wrapper->val = *(val); + } + wrapper->initialized = true; +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h new file mode 100644 index 0000000..beb467a --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h @@ -0,0 +1,60 @@ +#ifdef JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H +#error This file should be included only once, by tsd.h. +#endif +#define JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H + +extern __thread tsd_t tsd_tls; +extern __thread bool tsd_initialized; +extern bool tsd_booted; + +/* Initialization/cleanup. 
*/ +JEMALLOC_ALWAYS_INLINE bool +tsd_cleanup_wrapper(void) { + if (tsd_initialized) { + tsd_initialized = false; + tsd_cleanup(&tsd_tls); + } + return tsd_initialized; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot0(void) { + malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); + tsd_booted = true; + return false; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_boot1(void) { + /* Do nothing. */ +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot(void) { + return tsd_boot0(); +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_booted_get(void) { + return tsd_booted; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_get_allocates(void) { + return false; +} + +/* Get/set. */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_get(bool init) { + assert(tsd_booted); + return &tsd_tls; +} +JEMALLOC_ALWAYS_INLINE void +tsd_set(tsd_t *val) { + assert(tsd_booted); + if (likely(&tsd_tls != val)) { + tsd_tls = (*val); + } + tsd_initialized = true; +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_tls.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_tls.h new file mode 100644 index 0000000..0de64b7 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_tls.h @@ -0,0 +1,59 @@ +#ifdef JEMALLOC_INTERNAL_TSD_TLS_H +#error This file should be included only once, by tsd.h. +#endif +#define JEMALLOC_INTERNAL_TSD_TLS_H + +extern __thread tsd_t tsd_tls; +extern pthread_key_t tsd_tsd; +extern bool tsd_booted; + +/* Initialization/cleanup. */ +JEMALLOC_ALWAYS_INLINE bool +tsd_boot0(void) { + if (pthread_key_create(&tsd_tsd, &tsd_cleanup) != 0) { + return true; + } + tsd_booted = true; + return false; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_boot1(void) { + /* Do nothing. */ +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot(void) { + return tsd_boot0(); +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_booted_get(void) { + return tsd_booted; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_get_allocates(void) { + return false; +} + +/* Get/set. */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_get(UNUSED bool init) { + assert(tsd_booted); + return &tsd_tls; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_set(tsd_t *val) { + assert(tsd_booted); + if (likely(&tsd_tls != val)) { + tsd_tls = (*val); + } + if (pthread_setspecific(tsd_tsd, (void *)(&tsd_tls)) != 0) { + malloc_write(": Error setting tsd.\n"); + if (opt_abort) { + abort(); + } + } +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_types.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_types.h new file mode 100644 index 0000000..6200af6 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_types.h @@ -0,0 +1,10 @@ +#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H +#define JEMALLOC_INTERNAL_TSD_TYPES_H + +#define MALLOC_TSD_CLEANUPS_MAX 2 + +typedef struct tsd_s tsd_t; +typedef struct tsdn_s tsdn_t; +typedef bool (*malloc_tsd_cleanup_t)(void); + +#endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_win.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_win.h new file mode 100644 index 0000000..cf30d18 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/tsd_win.h @@ -0,0 +1,139 @@ +#ifdef JEMALLOC_INTERNAL_TSD_WIN_H +#error This file should be included only once, by tsd.h. 
+#endif +#define JEMALLOC_INTERNAL_TSD_WIN_H + +typedef struct { + bool initialized; + tsd_t val; +} tsd_wrapper_t; + +extern DWORD tsd_tsd; +extern tsd_wrapper_t tsd_boot_wrapper; +extern bool tsd_booted; + +/* Initialization/cleanup. */ +JEMALLOC_ALWAYS_INLINE bool +tsd_cleanup_wrapper(void) { + DWORD error = GetLastError(); + tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd); + SetLastError(error); + + if (wrapper == NULL) { + return false; + } + + if (wrapper->initialized) { + wrapper->initialized = false; + tsd_cleanup(&wrapper->val); + if (wrapper->initialized) { + /* Trigger another cleanup round. */ + return true; + } + } + malloc_tsd_dalloc(wrapper); + return false; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_wrapper_set(tsd_wrapper_t *wrapper) { + if (!TlsSetValue(tsd_tsd, (void *)wrapper)) { + malloc_write(": Error setting TSD\n"); + abort(); + } +} + +JEMALLOC_ALWAYS_INLINE tsd_wrapper_t * +tsd_wrapper_get(bool init) { + DWORD error = GetLastError(); + tsd_wrapper_t *wrapper = (tsd_wrapper_t *) TlsGetValue(tsd_tsd); + SetLastError(error); + + if (init && unlikely(wrapper == NULL)) { + wrapper = (tsd_wrapper_t *) + malloc_tsd_malloc(sizeof(tsd_wrapper_t)); + if (wrapper == NULL) { + malloc_write(": Error allocating TSD\n"); + abort(); + } else { + wrapper->initialized = false; + /* MSVC is finicky about aggregate initialization. */ + tsd_t tsd_initializer = TSD_INITIALIZER; + wrapper->val = tsd_initializer; + } + tsd_wrapper_set(wrapper); + } + return wrapper; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot0(void) { + tsd_tsd = TlsAlloc(); + if (tsd_tsd == TLS_OUT_OF_INDEXES) { + return true; + } + malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); + tsd_wrapper_set(&tsd_boot_wrapper); + tsd_booted = true; + return false; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_boot1(void) { + tsd_wrapper_t *wrapper; + wrapper = (tsd_wrapper_t *) + malloc_tsd_malloc(sizeof(tsd_wrapper_t)); + if (wrapper == NULL) { + malloc_write(": Error allocating TSD\n"); + abort(); + } + tsd_boot_wrapper.initialized = false; + tsd_cleanup(&tsd_boot_wrapper.val); + wrapper->initialized = false; + tsd_t initializer = TSD_INITIALIZER; + wrapper->val = initializer; + tsd_wrapper_set(wrapper); +} +JEMALLOC_ALWAYS_INLINE bool +tsd_boot(void) { + if (tsd_boot0()) { + return true; + } + tsd_boot1(); + return false; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_booted_get(void) { + return tsd_booted; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_get_allocates(void) { + return true; +} + +/* Get/set. */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_get(bool init) { + tsd_wrapper_t *wrapper; + + assert(tsd_booted); + wrapper = tsd_wrapper_get(init); + if (tsd_get_allocates() && !init && wrapper == NULL) { + return NULL; + } + return &wrapper->val; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_set(tsd_t *val) { + tsd_wrapper_t *wrapper; + + assert(tsd_booted); + wrapper = tsd_wrapper_get(true); + if (likely(&wrapper->val != val)) { + wrapper->val = *(val); + } + wrapper->initialized = true; +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/util.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/util.h new file mode 100644 index 0000000..304cb54 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/util.h @@ -0,0 +1,67 @@ +#ifndef JEMALLOC_INTERNAL_UTIL_H +#define JEMALLOC_INTERNAL_UTIL_H + +#define UTIL_INLINE static inline + +/* Junk fill patterns. 
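One subtlety in the Win32 backend above: TlsGetValue() is documented to call SetLastError(ERROR_SUCCESS) on success, so callers can distinguish a stored NULL from a failure, and that is why the code snapshots GetLastError() around it; an allocator must not disturb the caller's error state. The pattern in isolation (Win32-only sketch):

    #include <windows.h>
    #include <stdio.h>

    static DWORD tls_index;

    static void *tls_fetch(void) {
        DWORD saved = GetLastError();     /* preserve caller's last error */
        void *p = TlsGetValue(tls_index); /* resets last error on success */
        SetLastError(saved);
        return p;
    }

    int main(void) {
        static int payload = 42;
        tls_index = TlsAlloc();
        if (tls_index == TLS_OUT_OF_INDEXES)
            return 1;
        TlsSetValue(tls_index, &payload);
        printf("%d\n", *(int *)tls_fetch()); /* prints 42 */
        return 0;
    }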
*/ +#ifndef JEMALLOC_ALLOC_JUNK +# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) +#endif +#ifndef JEMALLOC_FREE_JUNK +# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) +#endif + +/* + * Wrap a cpp argument that contains commas such that it isn't broken up into + * multiple arguments. + */ +#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ + +/* cpp macro definition stringification. */ +#define STRINGIFY_HELPER(x) #x +#define STRINGIFY(x) STRINGIFY_HELPER(x) + +/* + * Silence compiler warnings due to uninitialized values. This is used + * wherever the compiler fails to recognize that the variable is never used + * uninitialized. + */ +#define JEMALLOC_CC_SILENCE_INIT(v) = v + +#ifdef __GNUC__ +# define likely(x) __builtin_expect(!!(x), 1) +# define unlikely(x) __builtin_expect(!!(x), 0) +#else +# define likely(x) !!(x) +# define unlikely(x) !!(x) +#endif + +#if !defined(JEMALLOC_INTERNAL_UNREACHABLE) +# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure +#endif + +#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() + +/* Set error code. */ +UTIL_INLINE void +set_errno(int errnum) { +#ifdef _WIN32 + SetLastError(errnum); +#else + errno = errnum; +#endif +} + +/* Get last error code. */ +UTIL_INLINE int +get_errno(void) { +#ifdef _WIN32 + return GetLastError(); +#else + return errno; +#endif +} + +#undef UTIL_INLINE + +#endif /* JEMALLOC_INTERNAL_UTIL_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/valgrind.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/valgrind.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/internal/valgrind.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/valgrind.h diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/witness.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/witness.h new file mode 100644 index 0000000..7ace8ae --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/internal/witness.h @@ -0,0 +1,346 @@ +#ifndef JEMALLOC_INTERNAL_WITNESS_H +#define JEMALLOC_INTERNAL_WITNESS_H + +#include "jemalloc/internal/ql.h" + +/******************************************************************************/ +/* LOCK RANKS */ +/******************************************************************************/ + +/* + * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness + * machinery. + */ + +#define WITNESS_RANK_OMIT 0U + +#define WITNESS_RANK_MIN 1U + +#define WITNESS_RANK_INIT 1U +#define WITNESS_RANK_CTL 1U +#define WITNESS_RANK_TCACHES 2U +#define WITNESS_RANK_ARENAS 3U + +#define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U + +#define WITNESS_RANK_PROF_DUMP 5U +#define WITNESS_RANK_PROF_BT2GCTX 6U +#define WITNESS_RANK_PROF_TDATAS 7U +#define WITNESS_RANK_PROF_TDATA 8U +#define WITNESS_RANK_PROF_GCTX 9U + +#define WITNESS_RANK_BACKGROUND_THREAD 10U + +/* + * Used as an argument to witness_assert_depth_to_rank() in order to validate + * depth excluding non-core locks with lower ranks. Since the rank argument to + * witness_assert_depth_to_rank() is inclusive rather than exclusive, this + * definition can have the same value as the minimally ranked core lock. 
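The STRINGIFY pair defined in util.h above is the standard two-level trick: the extra helper forces the preprocessor to expand the argument before # stringifies it, and the jemalloc_macros.h hunk later in this patch relies on exactly this for MALLCTL_ARENAS_ALL. A quick demonstration:

    #include <stdio.h>

    #define STRINGIFY_HELPER(x) #x
    #define STRINGIFY(x)        STRINGIFY_HELPER(x)

    #define MALLCTL_ARENAS_ALL 4096

    int main(void) {
        /* Argument expands first, then stringifies: */
        printf("%s\n", STRINGIFY(MALLCTL_ARENAS_ALL));        /* "4096" */
        /* Without the helper, no expansion happens: */
        printf("%s\n", STRINGIFY_HELPER(MALLCTL_ARENAS_ALL)); /* "MALLCTL_ARENAS_ALL" */
        return 0;
    }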
+ */ +#define WITNESS_RANK_CORE 11U + +#define WITNESS_RANK_DECAY 11U +#define WITNESS_RANK_TCACHE_QL 12U +#define WITNESS_RANK_EXTENT_GROW 13U +#define WITNESS_RANK_EXTENTS 14U +#define WITNESS_RANK_EXTENT_AVAIL 15U + +#define WITNESS_RANK_EXTENT_POOL 16U +#define WITNESS_RANK_RTREE 17U +#define WITNESS_RANK_BASE 18U +#define WITNESS_RANK_ARENA_LARGE 19U + +#define WITNESS_RANK_LEAF 0xffffffffU +#define WITNESS_RANK_BIN WITNESS_RANK_LEAF +#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF +#define WITNESS_RANK_DSS WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF + +/******************************************************************************/ +/* PER-WITNESS DATA */ +/******************************************************************************/ +#if defined(JEMALLOC_DEBUG) +# define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}} +#else +# define WITNESS_INITIALIZER(name, rank) +#endif + +typedef struct witness_s witness_t; +typedef unsigned witness_rank_t; +typedef ql_head(witness_t) witness_list_t; +typedef int witness_comp_t (const witness_t *, void *, const witness_t *, + void *); + +struct witness_s { + /* Name, used for printing lock order reversal messages. */ + const char *name; + + /* + * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses + * must be acquired in order of increasing rank. + */ + witness_rank_t rank; + + /* + * If two witnesses are of equal rank and they have the same comp + * function pointer, it is called as a last attempt to differentiate + * between witnesses of equal rank. + */ + witness_comp_t *comp; + + /* Opaque data, passed to comp(). */ + void *opaque; + + /* Linkage for thread's currently owned locks. 
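The rank table above exists so that, in debug builds, every lock acquisition can be checked against the locks a thread already holds: taking a lock whose rank is below the last one acquired indicates a potential deadlock cycle. A deliberately tiny model of that invariant (toy code; the real machinery keeps a per-thread list so unlock can be tracked too):

    #include <assert.h>

    static __thread unsigned highest_held_rank; /* toy state, not a list */

    static void toy_witness_lock(unsigned rank) {
        /* Equal ranks are allowed (cf. the comp tiebreaker); lower is not. */
        assert(rank >= highest_held_rank);
        highest_held_rank = rank;
    }

    int main(void) {
        toy_witness_lock(1);  /* e.g. WITNESS_RANK_INIT */
        toy_witness_lock(11); /* e.g. WITNESS_RANK_DECAY */
        /* toy_witness_lock(3); would assert: rank order reversal */
        return 0;
    }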
*/ + ql_elm(witness_t) link; +}; + +/******************************************************************************/ +/* PER-THREAD DATA */ +/******************************************************************************/ +typedef struct witness_tsd_s witness_tsd_t; +struct witness_tsd_s { + witness_list_t witnesses; + bool forking; +}; + +#define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false } +#define WITNESS_TSDN_NULL ((witness_tsdn_t *)0) + +/******************************************************************************/ +/* (PER-THREAD) NULLABILITY HELPERS */ +/******************************************************************************/ +typedef struct witness_tsdn_s witness_tsdn_t; +struct witness_tsdn_s { + witness_tsd_t witness_tsd; +}; + +JEMALLOC_ALWAYS_INLINE witness_tsdn_t * +witness_tsd_tsdn(witness_tsd_t *witness_tsd) { + return (witness_tsdn_t *)witness_tsd; +} + +JEMALLOC_ALWAYS_INLINE bool +witness_tsdn_null(witness_tsdn_t *witness_tsdn) { + return witness_tsdn == NULL; +} + +JEMALLOC_ALWAYS_INLINE witness_tsd_t * +witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) { + assert(!witness_tsdn_null(witness_tsdn)); + return &witness_tsdn->witness_tsd; +} + +/******************************************************************************/ +/* API */ +/******************************************************************************/ +void witness_init(witness_t *witness, const char *name, witness_rank_t rank, + witness_comp_t *comp, void *opaque); + +typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *); +extern witness_lock_error_t *JET_MUTABLE witness_lock_error; + +typedef void (witness_owner_error_t)(const witness_t *); +extern witness_owner_error_t *JET_MUTABLE witness_owner_error; + +typedef void (witness_not_owner_error_t)(const witness_t *); +extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error; + +typedef void (witness_depth_error_t)(const witness_list_t *, + witness_rank_t rank_inclusive, unsigned depth); +extern witness_depth_error_t *JET_MUTABLE witness_depth_error; + +void witnesses_cleanup(witness_tsd_t *witness_tsd); +void witness_prefork(witness_tsd_t *witness_tsd); +void witness_postfork_parent(witness_tsd_t *witness_tsd); +void witness_postfork_child(witness_tsd_t *witness_tsd); + +/* Helper, not intended for direct use. 
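witness_tsdn_t above is the nullable counterpart of witness_tsd_t: a NULL witness_tsdn_t explicitly means "no thread state, skip checking", and every conversion back asserts non-NULL instead of dereferencing blindly. The same wrap/unwrap pattern in miniature (hypothetical types, mirroring the casts above):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct { int depth; } tsd_t;
    typedef struct { tsd_t tsd; } tsdn_t; /* nullable view; first member is tsd_t */

    static tsdn_t *tsd_tsdn(tsd_t *tsd) { return (tsdn_t *)tsd; }
    static int tsdn_null(const tsdn_t *tsdn) { return tsdn == NULL; }

    static tsd_t *tsdn_tsd(tsdn_t *tsdn) {
        assert(!tsdn_null(tsdn)); /* callers must check for NULL first */
        return &tsdn->tsd;
    }

    int main(void) {
        tsd_t tsd = {0};
        tsdn_t *maybe = tsd_tsdn(&tsd);
        if (!tsdn_null(maybe))
            printf("%d\n", tsdn_tsd(maybe)->depth); /* prints 0 */
        return 0;
    }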
*/ +static inline bool +witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) { + witness_list_t *witnesses; + witness_t *w; + + cassert(config_debug); + + witnesses = &witness_tsd->witnesses; + ql_foreach(w, witnesses, link) { + if (w == witness) { + return true; + } + } + + return false; +} + +static inline void +witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) { + witness_tsd_t *witness_tsd; + + if (!config_debug) { + return; + } + + if (witness_tsdn_null(witness_tsdn)) { + return; + } + witness_tsd = witness_tsdn_tsd(witness_tsdn); + if (witness->rank == WITNESS_RANK_OMIT) { + return; + } + + if (witness_owner(witness_tsd, witness)) { + return; + } + witness_owner_error(witness); +} + +static inline void +witness_assert_not_owner(witness_tsdn_t *witness_tsdn, + const witness_t *witness) { + witness_tsd_t *witness_tsd; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) { + return; + } + + if (witness_tsdn_null(witness_tsdn)) { + return; + } + witness_tsd = witness_tsdn_tsd(witness_tsdn); + if (witness->rank == WITNESS_RANK_OMIT) { + return; + } + + witnesses = &witness_tsd->witnesses; + ql_foreach(w, witnesses, link) { + if (w == witness) { + witness_not_owner_error(witness); + } + } +} + +static inline void +witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn, + witness_rank_t rank_inclusive, unsigned depth) { + witness_tsd_t *witness_tsd; + unsigned d; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) { + return; + } + + if (witness_tsdn_null(witness_tsdn)) { + return; + } + witness_tsd = witness_tsdn_tsd(witness_tsdn); + + d = 0; + witnesses = &witness_tsd->witnesses; + w = ql_last(witnesses, link); + if (w != NULL) { + ql_reverse_foreach(w, witnesses, link) { + if (w->rank < rank_inclusive) { + break; + } + d++; + } + } + if (d != depth) { + witness_depth_error(witnesses, rank_inclusive, depth); + } +} + +static inline void +witness_assert_depth(witness_tsdn_t *witness_tsdn, unsigned depth) { + witness_assert_depth_to_rank(witness_tsdn, WITNESS_RANK_MIN, depth); +} + +static inline void +witness_assert_lockless(witness_tsdn_t *witness_tsdn) { + witness_assert_depth(witness_tsdn, 0); +} + +static inline void +witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) { + witness_tsd_t *witness_tsd; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) { + return; + } + + if (witness_tsdn_null(witness_tsdn)) { + return; + } + witness_tsd = witness_tsdn_tsd(witness_tsdn); + if (witness->rank == WITNESS_RANK_OMIT) { + return; + } + + witness_assert_not_owner(witness_tsdn, witness); + + witnesses = &witness_tsd->witnesses; + w = ql_last(witnesses, link); + if (w == NULL) { + /* No other locks; do nothing. */ + } else if (witness_tsd->forking && w->rank <= witness->rank) { + /* Forking, and relaxed ranking satisfied. */ + } else if (w->rank > witness->rank) { + /* Not forking, rank order reversal. */ + witness_lock_error(witnesses, witness); + } else if (w->rank == witness->rank && (w->comp == NULL || w->comp != + witness->comp || w->comp(w, w->opaque, witness, witness->opaque) > + 0)) { + /* + * Missing/incompatible comparison function, or comparison + * function indicates rank order reversal. 
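In witness_lock() above, a positive return from comp() (the already-held witness ordering after the new one) is reported as a rank order reversal, so equal-rank witnesses stay deadlock-safe as long as all threads agree on the comparator. One plausible comparator matching the witness_comp_t signature (a hypothetical address-order policy, not one this patch defines):

    #include <stdint.h>

    typedef struct witness_s witness_t; /* declared in witness.h above */

    /* Give equal-rank witnesses a total order via their addresses. */
    static int addr_comp(const witness_t *a, void *a_opaque,
        const witness_t *b, void *b_opaque) {
        uintptr_t ua = (uintptr_t)a, ub = (uintptr_t)b;
        (void)a_opaque;
        (void)b_opaque;
        return (ua < ub) ? -1 : ((ua > ub) ? 1 : 0);
    }

Registered via witness_init(&w, "name", rank, addr_comp, NULL), this would require every thread to acquire same-rank witnesses in ascending address order.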
+ */ + witness_lock_error(witnesses, witness); + } + + ql_elm_new(witness, link); + ql_tail_insert(witnesses, witness, link); +} + +static inline void +witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) { + witness_tsd_t *witness_tsd; + witness_list_t *witnesses; + + if (!config_debug) { + return; + } + + if (witness_tsdn_null(witness_tsdn)) { + return; + } + witness_tsd = witness_tsdn_tsd(witness_tsdn); + if (witness->rank == WITNESS_RANK_OMIT) { + return; + } + + /* + * Check whether owner before removal, rather than relying on + * witness_assert_owner() to abort, so that unit tests can test this + * function's failure mode without causing undefined behavior. + */ + if (witness_owner(witness_tsd, witness)) { + witnesses = &witness_tsd->witnesses; + ql_remove(witnesses, witness, link); + } else { + witness_assert_owner(witness_tsdn, witness); + } +} + +#endif /* JEMALLOC_INTERNAL_WITNESS_H */ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/jemalloc.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/jemalloc.sh similarity index 87% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/jemalloc.sh rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/jemalloc.sh index c085814..b19b154 100755 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/jemalloc.sh +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/jemalloc.sh @@ -4,7 +4,7 @@ objroot=$1 cat < #include -#define JEMALLOC_VERSION "@jemalloc_version@" -#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@ -#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@ -#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@ -#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@ -#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" +#define JEMALLOC_VERSION "@jemalloc_version@" +#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@ +#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@ +#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@ +#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@ +#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" -# define MALLOCX_LG_ALIGN(la) (la) -# if LG_SIZEOF_PTR == 2 -# define MALLOCX_ALIGN(a) (ffs(a)-1) -# else -# define MALLOCX_ALIGN(a) \ - ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) -# endif -# define MALLOCX_ZERO ((int)0x40) +#define MALLOCX_LG_ALIGN(la) ((int)(la)) +#if LG_SIZEOF_PTR == 2 +# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1)) +#else +# define MALLOCX_ALIGN(a) \ + ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \ + ffs((int)(((size_t)(a))>>32))+31)) +#endif +#define MALLOCX_ZERO ((int)0x40) /* * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 * encodes MALLOCX_TCACHE_NONE. */ -# define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) -# define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) +#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) +#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) /* * Bias arena index bits so that 0 encodes "use an automatically chosen arena". */ -# define MALLOCX_ARENA(a) ((int)(((a)+1) << 20)) +#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) + +/* + * Use as arena index in "arena..{purge,decay,dss}" and + * "stats.arenas..*" mallctl interfaces to select all arenas. This + * definition is intentionally specified in raw decimal format to support + * cpp-based string concatenation, e.g. 
+ * + * #define STRINGIFY_HELPER(x) #x + * #define STRINGIFY(x) STRINGIFY_HELPER(x) + * + * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL, + * 0); + */ +#define MALLCTL_ARENAS_ALL 4096 +/* + * Use as arena index in "stats.arenas..*" mallctl interfaces to select + * destroyed arenas. + */ +#define MALLCTL_ARENAS_DESTROYED 4097 #if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) # define JEMALLOC_CXX_THROW throw() @@ -36,32 +56,7 @@ # define JEMALLOC_CXX_THROW #endif -#ifdef JEMALLOC_HAVE_ATTR -# define JEMALLOC_ATTR(s) __attribute__((s)) -# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) -# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE -# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) -# else -# define JEMALLOC_ALLOC_SIZE(s) -# define JEMALLOC_ALLOC_SIZE2(s1, s2) -# endif -# ifndef JEMALLOC_EXPORT -# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) -# endif -# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF -# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) -# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) -# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) -# else -# define JEMALLOC_FORMAT_PRINTF(s, i) -# endif -# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) -# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) -# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) -# define JEMALLOC_RESTRICT_RETURN -# define JEMALLOC_ALLOCATOR -#elif _MSC_VER +#if defined(_MSC_VER) # define JEMALLOC_ATTR(s) # define JEMALLOC_ALIGNED(s) __declspec(align(s)) # define JEMALLOC_ALLOC_SIZE(s) @@ -87,6 +82,31 @@ # else # define JEMALLOC_ALLOCATOR # endif +#elif defined(JEMALLOC_HAVE_ATTR) +# define JEMALLOC_ATTR(s) __attribute__((s)) +# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) +# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE +# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) +# else +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# endif +# ifndef JEMALLOC_EXPORT +# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) +# endif +# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) +# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) +# else +# define JEMALLOC_FORMAT_PRINTF(s, i) +# endif +# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) +# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) +# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR #else # define JEMALLOC_ATTR(s) # define JEMALLOC_ALIGNED(s) diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh similarity index 98% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh index df328b7..c675bb4 100755 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/jemalloc_mangle.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/sh -eu public_symbols_txt=$1 symbol_prefix=$2 diff --git 
a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/jemalloc_protos.h.in b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/jemalloc_protos.h.in similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/jemalloc_protos.h.in rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/jemalloc_protos.h.in diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/jemalloc_rename.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/jemalloc_rename.sh similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/jemalloc/jemalloc_rename.sh rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/jemalloc_rename.sh diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in new file mode 100644 index 0000000..1a58874 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/jemalloc/jemalloc_typedefs.h.in @@ -0,0 +1,77 @@ +typedef struct extent_hooks_s extent_hooks_t; + +/* + * void * + * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size, + * size_t alignment, bool *zero, bool *commit, unsigned arena_ind); + */ +typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *, + bool *, unsigned); + +/* + * bool + * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size, + * bool committed, unsigned arena_ind); + */ +typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool, + unsigned); + +/* + * void + * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size, + * bool committed, unsigned arena_ind); + */ +typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool, + unsigned); + +/* + * bool + * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t offset, size_t length, unsigned arena_ind); + */ +typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t, + unsigned); + +/* + * bool + * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t offset, size_t length, unsigned arena_ind); + */ +typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t, + size_t, unsigned); + +/* + * bool + * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t offset, size_t length, unsigned arena_ind); + */ +typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t, + unsigned); + +/* + * bool + * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t size_a, size_t size_b, bool committed, unsigned arena_ind); + */ +typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t, + bool, unsigned); + +/* + * bool + * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, + * void *addr_b, size_t size_b, bool committed, unsigned arena_ind); + */ +typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t, + bool, unsigned); + +struct extent_hooks_s { + extent_alloc_t *alloc; + extent_dalloc_t *dalloc; + extent_destroy_t *destroy; + extent_commit_t *commit; + extent_decommit_t *decommit; + extent_purge_t *purge_lazy; + extent_purge_t *purge_forced; + extent_split_t *split; + extent_merge_t *merge; +}; diff --git 
a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/msvc_compat/C99/stdbool.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/msvc_compat/C99/stdbool.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/msvc_compat/C99/stdbool.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/msvc_compat/C99/stdbool.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/msvc_compat/C99/stdint.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/msvc_compat/C99/stdint.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/include/msvc_compat/C99/stdint.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/msvc_compat/C99/stdint.h diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/msvc_compat/strings.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/msvc_compat/strings.h new file mode 100644 index 0000000..996f256 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/msvc_compat/strings.h @@ -0,0 +1,58 @@ +#ifndef strings_h +#define strings_h + +/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided + * for both */ +#ifdef _MSC_VER +# include +# pragma intrinsic(_BitScanForward) +static __forceinline int ffsl(long x) { + unsigned long i; + + if (_BitScanForward(&i, x)) { + return i + 1; + } + return 0; +} + +static __forceinline int ffs(int x) { + return ffsl(x); +} + +# ifdef _M_X64 +# pragma intrinsic(_BitScanForward64) +# endif + +static __forceinline int ffsll(unsigned __int64 x) { + unsigned long i; +#ifdef _M_X64 + if (_BitScanForward64(&i, x)) { + return i + 1; + } + return 0; +#else +// Fallback for 32-bit build where 64-bit version not available +// assuming little endian + union { + unsigned __int64 ll; + unsigned long l[2]; + } s; + + s.ll = x; + + if (_BitScanForward(&i, s.l[0])) { + return i + 1; + } else if(_BitScanForward(&i, s.l[1])) { + return i + 33; + } + return 0; +#endif +} + +#else +# define ffsll(x) __builtin_ffsll(x) +# define ffsl(x) __builtin_ffsl(x) +# define ffs(x) __builtin_ffs(x) +#endif + +#endif /* strings_h */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/msvc_compat/windows_extra.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/msvc_compat/windows_extra.h new file mode 100644 index 0000000..a6ebb93 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/include/msvc_compat/windows_extra.h @@ -0,0 +1,6 @@ +#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H +#define MSVC_COMPAT_WINDOWS_EXTRA_H + +#include + +#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/install-sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/install-sh new file mode 100755 index 0000000..ebc6691 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/install-sh @@ -0,0 +1,250 @@ +#! /bin/sh +# +# install - install a program, script, or datafile +# This comes from X11R5 (mit/util/scripts/install.sh). +# +# Copyright 1991 by the Massachusetts Institute of Technology +# +# Permission to use, copy, modify, distribute, and sell this software and its +# documentation for any purpose is hereby granted without fee, provided that +# the above copyright notice appear in all copies and that both that +# copyright notice and this permission notice appear in supporting +# documentation, and that the name of M.I.T. 
not be used in advertising or +# publicity pertaining to distribution of the software without specific, +# written prior permission. M.I.T. makes no representations about the +# suitability of this software for any purpose. It is provided "as is" +# without express or implied warranty. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# `make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. It can only install one file at a time, a restriction +# shared with many OS's install programs. + + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit="${DOITPROG-}" + + +# put in absolute paths if you don't have them in your path; or use env. vars. + +mvprog="${MVPROG-mv}" +cpprog="${CPPROG-cp}" +chmodprog="${CHMODPROG-chmod}" +chownprog="${CHOWNPROG-chown}" +chgrpprog="${CHGRPPROG-chgrp}" +stripprog="${STRIPPROG-strip}" +rmprog="${RMPROG-rm}" +mkdirprog="${MKDIRPROG-mkdir}" + +transformbasename="" +transform_arg="" +instcmd="$mvprog" +chmodcmd="$chmodprog 0755" +chowncmd="" +chgrpcmd="" +stripcmd="" +rmcmd="$rmprog -f" +mvcmd="$mvprog" +src="" +dst="" +dir_arg="" + +while [ x"$1" != x ]; do + case $1 in + -c) instcmd="$cpprog" + shift + continue;; + + -d) dir_arg=true + shift + continue;; + + -m) chmodcmd="$chmodprog $2" + shift + shift + continue;; + + -o) chowncmd="$chownprog $2" + shift + shift + continue;; + + -g) chgrpcmd="$chgrpprog $2" + shift + shift + continue;; + + -s) stripcmd="$stripprog" + shift + continue;; + + -t=*) transformarg=`echo $1 | sed 's/-t=//'` + shift + continue;; + + -b=*) transformbasename=`echo $1 | sed 's/-b=//'` + shift + continue;; + + *) if [ x"$src" = x ] + then + src=$1 + else + # this colon is to work around a 386BSD /bin/sh bug + : + dst=$1 + fi + shift + continue;; + esac +done + +if [ x"$src" = x ] +then + echo "install: no input file specified" + exit 1 +else + true +fi + +if [ x"$dir_arg" != x ]; then + dst=$src + src="" + + if [ -d $dst ]; then + instcmd=: + else + instcmd=mkdir + fi +else + +# Waiting for this to be detected by the "$instcmd $src $dsttmp" command +# might cause directories to be created, which would be especially bad +# if $src (and thus $dsttmp) contains '*'. + + if [ -f $src -o -d $src ] + then + true + else + echo "install: $src does not exist" + exit 1 + fi + + if [ x"$dst" = x ] + then + echo "install: no destination specified" + exit 1 + else + true + fi + +# If destination is a directory, append the input filename; if your system +# does not like double slashes in filenames, you may need to add some logic + + if [ -d $dst ] + then + dst="$dst"/`basename $src` + else + true + fi +fi + +## this sed command emulates the dirname command +dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` + +# Make sure that the destination directory exists. +# this part is taken from Noah Friedman's mkinstalldirs script + +# Skip lots of stat calls in the usual case. +if [ ! -d "$dstdir" ]; then +defaultIFS=' +' +IFS="${IFS-${defaultIFS}}" + +oIFS="${IFS}" +# Some sh's can't handle IFS=/ for some reason. +IFS='%' +set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` +IFS="${oIFS}" + +pathcomp='' + +while [ $# -ne 0 ] ; do + pathcomp="${pathcomp}${1}" + shift + + if [ ! 
-d "${pathcomp}" ] ; + then + $mkdirprog "${pathcomp}" + else + true + fi + + pathcomp="${pathcomp}/" +done +fi + +if [ x"$dir_arg" != x ] +then + $doit $instcmd $dst && + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi +else + +# If we're going to rename the final executable, determine the name now. + + if [ x"$transformarg" = x ] + then + dstfile=`basename $dst` + else + dstfile=`basename $dst $transformbasename | + sed $transformarg`$transformbasename + fi + +# don't allow the sed command to completely eliminate the filename + + if [ x"$dstfile" = x ] + then + dstfile=`basename $dst` + else + true + fi + +# Make a temp file name in the proper directory. + + dsttmp=$dstdir/#inst.$$# + +# Move or copy the file name to the temp name + + $doit $instcmd $src $dsttmp && + + trap "rm -f ${dsttmp}" 0 && + +# and set any options; do chmod last to preserve setuid bits + +# If any of these fail, we abort the whole thing. If we want to +# ignore errors from any of these, just make sure not to ignore +# errors from the above "$doit $instcmd $src $dsttmp" command. + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && + +# Now rename the file to the real destination. + + $doit $rmcmd -f $dstdir/$dstfile && + $doit $mvcmd $dsttmp $dstdir/$dstfile + +fi && + + +exit 0 diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/jemalloc.pc.in b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/jemalloc.pc.in similarity index 70% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/jemalloc.pc.in rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/jemalloc.pc.in index 1a3ad9b..c428a86 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/jemalloc.pc.in +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/jemalloc.pc.in @@ -6,7 +6,7 @@ install_suffix=@install_suffix@ Name: jemalloc Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support. -URL: http://www.canonware.com/jemalloc -Version: @jemalloc_version@ +URL: http://jemalloc.net/ +Version: @jemalloc_version_major@.@jemalloc_version_minor@.@jemalloc_version_bugfix@_@jemalloc_version_nrev@ Cflags: -I${includedir} Libs: -L${libdir} -ljemalloc${install_suffix} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/m4/ax_cxx_compile_stdcxx.m4 b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/m4/ax_cxx_compile_stdcxx.m4 new file mode 100644 index 0000000..2c18e49 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/m4/ax_cxx_compile_stdcxx.m4 @@ -0,0 +1,562 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional]) +# +# DESCRIPTION +# +# Check for baseline language coverage in the compiler for the specified +# version of the C++ standard. 
If necessary, add switches to CXX and +# CXXCPP to enable support. VERSION may be '11' (for the C++11 standard) +# or '14' (for the C++14 standard). +# +# The second argument, if specified, indicates whether you insist on an +# extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g. +# -std=c++11). If neither is specified, you get whatever works, with +# preference for an extended mode. +# +# The third argument, if specified 'mandatory' or if left unspecified, +# indicates that baseline support for the specified C++ standard is +# required and that the macro should error out if no mode with that +# support is found. If specified 'optional', then configuration proceeds +# regardless, after defining HAVE_CXX${VERSION} if and only if a +# supporting mode is found. +# +# LICENSE +# +# Copyright (c) 2008 Benjamin Kosnik +# Copyright (c) 2012 Zack Weinberg +# Copyright (c) 2013 Roy Stogner +# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov +# Copyright (c) 2015 Paul Norman +# Copyright (c) 2015 Moritz Klammler +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 4 + +dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro +dnl (serial version number 13). + +AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl + m4_if([$1], [11], [], + [$1], [14], [], + [$1], [17], [m4_fatal([support for C++17 not yet implemented in AX_CXX_COMPILE_STDCXX])], + [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl + m4_if([$2], [], [], + [$2], [ext], [], + [$2], [noext], [], + [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl + m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true], + [$3], [mandatory], [ax_cxx_compile_cxx$1_required=true], + [$3], [optional], [ax_cxx_compile_cxx$1_required=false], + [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])]) + AC_LANG_PUSH([C++])dnl + ac_success=no + AC_CACHE_CHECK(whether $CXX supports C++$1 features by default, + ax_cv_cxx_compile_cxx$1, + [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], + [ax_cv_cxx_compile_cxx$1=yes], + [ax_cv_cxx_compile_cxx$1=no])]) + if test x$ax_cv_cxx_compile_cxx$1 = xyes; then + ac_success=yes + fi + + m4_if([$2], [noext], [], [dnl + if test x$ac_success = xno; then + for switch in -std=gnu++$1 -std=gnu++0x; do + cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch]) + AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch, + $cachevar, + [ac_save_CXX="$CXX" + CXX="$CXX $switch" + AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], + [eval $cachevar=yes], + [eval $cachevar=no]) + CXX="$ac_save_CXX"]) + if eval test x\$$cachevar = xyes; then + CXX="$CXX $switch" + if test -n "$CXXCPP" ; then + CXXCPP="$CXXCPP $switch" + fi + ac_success=yes + break + fi + done + fi]) + + m4_if([$2], [ext], [], [dnl + if test x$ac_success = xno; then + dnl HP's aCC needs +std=c++11 according to: + dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf + dnl Cray's crayCC needs "-h std=c++11" + for switch in -std=c++$1 -std=c++0x +std=c++$1 "-h std=c++$1"; do + cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch]) + AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch, + $cachevar, + [ac_save_CXX="$CXX" + CXX="$CXX $switch" + 
AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], + [eval $cachevar=yes], + [eval $cachevar=no]) + CXX="$ac_save_CXX"]) + if eval test x\$$cachevar = xyes; then + CXX="$CXX $switch" + if test -n "$CXXCPP" ; then + CXXCPP="$CXXCPP $switch" + fi + ac_success=yes + break + fi + done + fi]) + AC_LANG_POP([C++]) + if test x$ax_cxx_compile_cxx$1_required = xtrue; then + if test x$ac_success = xno; then + AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.]) + fi + fi + if test x$ac_success = xno; then + HAVE_CXX$1=0 + AC_MSG_NOTICE([No compiler with C++$1 support was found]) + else + HAVE_CXX$1=1 + AC_DEFINE(HAVE_CXX$1,1, + [define if the compiler supports basic C++$1 syntax]) + fi + AC_SUBST(HAVE_CXX$1) +]) + + +dnl Test body for checking C++11 support + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11], + _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 +) + + +dnl Test body for checking C++14 support + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14], + _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 + _AX_CXX_COMPILE_STDCXX_testbody_new_in_14 +) + + +dnl Tests for new features in C++11 + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[ + +// If the compiler admits that it is not ready for C++11, why torture it? +// Hopefully, this will speed up the test. + +#ifndef __cplusplus + +#error "This is not a C++ compiler" + +#elif __cplusplus < 201103L + +#error "This is not a C++11 compiler" + +#else + +namespace cxx11 +{ + + namespace test_static_assert + { + + template + struct check + { + static_assert(sizeof(int) <= sizeof(T), "not big enough"); + }; + + } + + namespace test_final_override + { + + struct Base + { + virtual void f() {} + }; + + struct Derived : public Base + { + virtual void f() override {} + }; + + } + + namespace test_double_right_angle_brackets + { + + template < typename T > + struct check {}; + + typedef check single_type; + typedef check> double_type; + typedef check>> triple_type; + typedef check>>> quadruple_type; + + } + + namespace test_decltype + { + + int + f() + { + int a = 1; + decltype(a) b = 2; + return a + b; + } + + } + + namespace test_type_deduction + { + + template < typename T1, typename T2 > + struct is_same + { + static const bool value = false; + }; + + template < typename T > + struct is_same + { + static const bool value = true; + }; + + template < typename T1, typename T2 > + auto + add(T1 a1, T2 a2) -> decltype(a1 + a2) + { + return a1 + a2; + } + + int + test(const int c, volatile int v) + { + static_assert(is_same::value == true, ""); + static_assert(is_same::value == false, ""); + static_assert(is_same::value == false, ""); + auto ac = c; + auto av = v; + auto sumi = ac + av + 'x'; + auto sumf = ac + av + 1.0; + static_assert(is_same::value == true, ""); + static_assert(is_same::value == true, ""); + static_assert(is_same::value == true, ""); + static_assert(is_same::value == false, ""); + static_assert(is_same::value == true, ""); + return (sumf > 0.0) ? sumi : add(c, v); + } + + } + + namespace test_noexcept + { + + int f() { return 0; } + int g() noexcept { return 0; } + + static_assert(noexcept(f()) == false, ""); + static_assert(noexcept(g()) == true, ""); + + } + + namespace test_constexpr + { + + template < typename CharT > + unsigned long constexpr + strlen_c_r(const CharT *const s, const unsigned long acc) noexcept + { + return *s ? 
strlen_c_r(s + 1, acc + 1) : acc; + } + + template < typename CharT > + unsigned long constexpr + strlen_c(const CharT *const s) noexcept + { + return strlen_c_r(s, 0UL); + } + + static_assert(strlen_c("") == 0UL, ""); + static_assert(strlen_c("1") == 1UL, ""); + static_assert(strlen_c("example") == 7UL, ""); + static_assert(strlen_c("another\0example") == 7UL, ""); + + } + + namespace test_rvalue_references + { + + template < int N > + struct answer + { + static constexpr int value = N; + }; + + answer<1> f(int&) { return answer<1>(); } + answer<2> f(const int&) { return answer<2>(); } + answer<3> f(int&&) { return answer<3>(); } + + void + test() + { + int i = 0; + const int c = 0; + static_assert(decltype(f(i))::value == 1, ""); + static_assert(decltype(f(c))::value == 2, ""); + static_assert(decltype(f(0))::value == 3, ""); + } + + } + + namespace test_uniform_initialization + { + + struct test + { + static const int zero {}; + static const int one {1}; + }; + + static_assert(test::zero == 0, ""); + static_assert(test::one == 1, ""); + + } + + namespace test_lambdas + { + + void + test1() + { + auto lambda1 = [](){}; + auto lambda2 = lambda1; + lambda1(); + lambda2(); + } + + int + test2() + { + auto a = [](int i, int j){ return i + j; }(1, 2); + auto b = []() -> int { return '0'; }(); + auto c = [=](){ return a + b; }(); + auto d = [&](){ return c; }(); + auto e = [a, &b](int x) mutable { + const auto identity = [](int y){ return y; }; + for (auto i = 0; i < a; ++i) + a += b--; + return x + identity(a + b); + }(0); + return a + b + c + d + e; + } + + int + test3() + { + const auto nullary = [](){ return 0; }; + const auto unary = [](int x){ return x; }; + using nullary_t = decltype(nullary); + using unary_t = decltype(unary); + const auto higher1st = [](nullary_t f){ return f(); }; + const auto higher2nd = [unary](nullary_t f1){ + return [unary, f1](unary_t f2){ return f2(unary(f1())); }; + }; + return higher1st(nullary) + higher2nd(nullary)(unary); + } + + } + + namespace test_variadic_templates + { + + template + struct sum; + + template + struct sum + { + static constexpr auto value = N0 + sum::value; + }; + + template <> + struct sum<> + { + static constexpr auto value = 0; + }; + + static_assert(sum<>::value == 0, ""); + static_assert(sum<1>::value == 1, ""); + static_assert(sum<23>::value == 23, ""); + static_assert(sum<1, 2>::value == 3, ""); + static_assert(sum<5, 5, 11>::value == 21, ""); + static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, ""); + + } + + // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae + // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function + // because of this. + namespace test_template_alias_sfinae + { + + struct foo {}; + + template + using member = typename T::member_type; + + template + void func(...) {} + + template + void func(member*) {} + + void test(); + + void test() { func(0); } + + } + +} // namespace cxx11 + +#endif // __cplusplus >= 201103L + +]]) + + +dnl Tests for new features in C++14 + +m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[ + +// If the compiler admits that it is not ready for C++14, why torture it? +// Hopefully, this will speed up the test. + +#ifndef __cplusplus + +#error "This is not a C++ compiler" + +#elif __cplusplus < 201402L + +#error "This is not a C++14 compiler" + +#else + +namespace cxx14 +{ + + namespace test_polymorphic_lambdas + { + + int + test() + { + const auto lambda = [](auto&&... args){ + const auto istiny = [](auto x){ + return (sizeof(x) == 1UL) ? 
1 : 0; + }; + const int aretiny[] = { istiny(args)... }; + return aretiny[0]; + }; + return lambda(1, 1L, 1.0f, '1'); + } + + } + + namespace test_binary_literals + { + + constexpr auto ivii = 0b0000000000101010; + static_assert(ivii == 42, "wrong value"); + + } + + namespace test_generalized_constexpr + { + + template < typename CharT > + constexpr unsigned long + strlen_c(const CharT *const s) noexcept + { + auto length = 0UL; + for (auto p = s; *p; ++p) + ++length; + return length; + } + + static_assert(strlen_c("") == 0UL, ""); + static_assert(strlen_c("x") == 1UL, ""); + static_assert(strlen_c("test") == 4UL, ""); + static_assert(strlen_c("another\0test") == 7UL, ""); + + } + + namespace test_lambda_init_capture + { + + int + test() + { + auto x = 0; + const auto lambda1 = [a = x](int b){ return a + b; }; + const auto lambda2 = [a = lambda1(x)](){ return a; }; + return lambda2(); + } + + } + + namespace test_digit_seperators + { + + constexpr auto ten_million = 100'000'000; + static_assert(ten_million == 100000000, ""); + + } + + namespace test_return_type_deduction + { + + auto f(int& x) { return x; } + decltype(auto) g(int& x) { return x; } + + template < typename T1, typename T2 > + struct is_same + { + static constexpr auto value = false; + }; + + template < typename T > + struct is_same + { + static constexpr auto value = true; + }; + + int + test() + { + auto x = 0; + static_assert(is_same::value, ""); + static_assert(is_same::value, ""); + return x; + } + + } + +} // namespace cxx14 + +#endif // __cplusplus >= 201402L + +]]) diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/ReadMe.txt b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/ReadMe.txt new file mode 100644 index 0000000..633a7d4 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/ReadMe.txt @@ -0,0 +1,23 @@ + +How to build jemalloc for Windows +================================= + +1. Install Cygwin with at least the following packages: + * autoconf + * autogen + * gawk + * grep + * sed + +2. Install Visual Studio 2015 or 2017 with Visual C++ + +3. Add Cygwin\bin to the PATH environment variable + +4. Open "x64 Native Tools Command Prompt for VS 2017" + (note: x86/x64 doesn't matter at this point) + +5. Generate header files: + sh -c "CC=cl ./autogen.sh" + +6. 
Now the project can be opened and built in Visual Studio: + msvc\jemalloc_vc2017.sln diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/jemalloc_vc2015.sln b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/jemalloc_vc2015.sln new file mode 100644 index 0000000..aedd5e5 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/jemalloc_vc2015.sln @@ -0,0 +1,63 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.24720.0 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}" + ProjectSection(SolutionItems) = preProject + ReadMe.txt = ReadMe.txt + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2015\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2015\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Debug-static|x64 = Debug-static|x64 + Debug-static|x86 = Debug-static|x86 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + Release-static|x64 = Release-static|x64 + Release-static|x86 = Release-static|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32 + 
{09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/jemalloc_vc2017.sln b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/jemalloc_vc2017.sln new file mode 100644 index 0000000..c22fcb4 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/jemalloc_vc2017.sln @@ -0,0 +1,63 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.24720.0 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{70A99006-6DE9-472B-8F83-4CEE6C616DF3}" + ProjectSection(SolutionItems) = preProject + ReadMe.txt = ReadMe.txt + EndProjectSection +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jemalloc", "projects\vc2017\jemalloc\jemalloc.vcxproj", "{8D6BB292-9E1C-413D-9F98-4864BDC1514A}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test_threads", "projects\vc2017\test_threads\test_threads.vcxproj", "{09028CFD-4EB7-491D-869C-0708DB97ED44}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Debug-static|x64 = Debug-static|x64 + Debug-static|x86 = Debug-static|x86 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + Release-static|x64 = Release-static|x64 + Release-static|x86 = Release-static|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.ActiveCfg = Debug|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x64.Build.0 = Debug|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.ActiveCfg = Debug|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug|x86.Build.0 = Debug|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.ActiveCfg = Debug-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x64.Build.0 = Debug-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.ActiveCfg = Debug-static|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Debug-static|x86.Build.0 = Debug-static|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.ActiveCfg = Release|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x64.Build.0 = Release|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.ActiveCfg = Release|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release|x86.Build.0 = Release|Win32 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.ActiveCfg = Release-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x64.Build.0 = Release-static|x64 + {8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.ActiveCfg = Release-static|Win32 + 
{8D6BB292-9E1C-413D-9F98-4864BDC1514A}.Release-static|x86.Build.0 = Release-static|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.ActiveCfg = Debug|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x64.Build.0 = Debug|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.ActiveCfg = Debug|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug|x86.Build.0 = Debug|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.ActiveCfg = Debug-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x64.Build.0 = Debug-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.ActiveCfg = Debug-static|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Debug-static|x86.Build.0 = Debug-static|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.ActiveCfg = Release|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x64.Build.0 = Release|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.ActiveCfg = Release|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release|x86.Build.0 = Release|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.ActiveCfg = Release-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x64.Build.0 = Release-static|x64 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.ActiveCfg = Release-static|Win32 + {09028CFD-4EB7-491D-869C-0708DB97ED44}.Release-static|x86.Build.0 = Release-static|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj new file mode 100644 index 0000000..f7b175b --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj @@ -0,0 +1,348 @@ + + + + + Debug-static + Win32 + + + Debug-static + x64 + + + Debug + Win32 + + + Release-static + Win32 + + + Release-static + x64 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + {8D6BB292-9E1C-413D-9F98-4864BDC1514A} + Win32Proj + jemalloc + 8.1 + + + + DynamicLibrary + true + v140 + MultiByte + + + StaticLibrary + true + v140 + MultiByte + + + DynamicLibrary + false + v140 + true + MultiByte + + + StaticLibrary + false + v140 + true + MultiByte + + + DynamicLibrary + true + v140 + MultiByte + + + StaticLibrary + true + v140 + MultiByte + + + DynamicLibrary + false + v140 + true + MultiByte + + + StaticLibrary + false + v140 + true + MultiByte + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)d + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)-$(PlatformToolset)-$(Configuration) + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)-$(PlatformToolset)-$(Configuration) + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)d + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration) + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + + + $(SolutionDir)$(Platform)\$(Configuration)\ + 
$(Platform)\$(Configuration)\ + $(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration) + + + + + + Level3 + Disabled + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + + + + + + + Level3 + Disabled + JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + + + + + + + Level3 + Disabled + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + + + + + + + Level3 + Disabled + JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + 4090;4146;4267;4334 + OldStyle + false + + + Windows + true + + + + + Level3 + + + MaxSpeed + true + true + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + 4090;4146;4267;4334 + OldStyle + + + Windows + true + true + true + + + + + + \ No newline at end of file diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters new file mode 100644 index 0000000..11cfcd0 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters @@ -0,0 +1,101 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + 
Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + \ No newline at end of file diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj new file mode 100644 index 0000000..325876d --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj @@ -0,0 +1,327 @@ + + + + + Debug-static + Win32 + + + Debug-static + x64 + + + Debug + Win32 + + + Release-static + Win32 + + + Release-static + x64 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + {09028CFD-4EB7-491D-869C-0708DB97ED44} + Win32Proj + test_threads + 8.1 + + + + Application + true + v140 + MultiByte + + + Application + true + v140 + MultiByte + + + Application + false + v140 + true + MultiByte + + + Application + false + v140 + true + MultiByte + + + Application + true + v140 + MultiByte + + + Application + true + v140 + MultiByte + + + Application + false + v140 + true + MultiByte + + + Application + false + v140 + true + MultiByte + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + true + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + true + + + true + $(SolutionDir)$(Platform)\$(Configuration)\ + + + true + $(SolutionDir)$(Platform)\$(Configuration)\ + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + + + + Level3 + Disabled + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + $(SolutionDir)$(Platform)\$(Configuration) + jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + Level3 + Disabled + JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + + + Console + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + Level3 + Disabled + _DEBUG;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + $(SolutionDir)$(Platform)\$(Configuration) + + + + + + + Level3 + Disabled + JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions) + 
..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + + + Console + true + jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + $(SolutionDir)$(Platform)\$(Configuration) + + + + + Level3 + + + MaxSpeed + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + true + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + Level3 + + + MaxSpeed + true + true + JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + + + Console + true + true + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + Level3 + + + MaxSpeed + true + true + NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + true + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + Level3 + + + MaxSpeed + true + true + JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + + + Console + true + true + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + {8d6bb292-9e1c-413d-9f98-4864bdc1514a} + + + + + + + + + \ No newline at end of file diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters new file mode 100644 index 0000000..fa4588f --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2015/test_threads/test_threads.vcxproj.filters @@ -0,0 +1,26 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;hm;inl;inc;xsd + + + + + Source Files + + + Source Files + + + + + Header Files + + + \ No newline at end of file diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj new file mode 100644 index 0000000..ed71de8 --- /dev/null +++ 
b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj @@ -0,0 +1,347 @@ + + + + + Debug-static + Win32 + + + Debug-static + x64 + + + Debug + Win32 + + + Release-static + Win32 + + + Release-static + x64 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + {8D6BB292-9E1C-413D-9F98-4864BDC1514A} + Win32Proj + jemalloc + + + + DynamicLibrary + true + v141 + MultiByte + + + StaticLibrary + true + v141 + MultiByte + + + DynamicLibrary + false + v141 + true + MultiByte + + + StaticLibrary + false + v141 + true + MultiByte + + + DynamicLibrary + true + v141 + MultiByte + + + StaticLibrary + true + v141 + MultiByte + + + DynamicLibrary + false + v141 + true + MultiByte + + + StaticLibrary + false + v141 + true + MultiByte + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)d + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)-$(PlatformToolset)-$(Configuration) + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)-$(PlatformToolset)-$(Configuration) + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)d + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration) + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + $(ProjectName)-vc$(PlatformToolsetVersion)-$(Configuration) + + + + + + Level3 + Disabled + _REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + + + + + + + Level3 + Disabled + JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + + + + + + + Level3 + Disabled + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;JEMALLOC_DEBUG;_DEBUG;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + + + + + + + Level3 + Disabled + JEMALLOC_NO_PRIVATE_NAMESPACE;JEMALLOC_DEBUG;_REENTRANT;JEMALLOC_EXPORT=;_DEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + 4090;4146;4267;4334 + OldStyle + false + + + Windows + true + + + + + Level3 + + + MaxSpeed + true + true + _REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + _REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + 4090;4146;4267;4334 + 
$(OutputPath)$(TargetName).pdb + + + Windows + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;_WINDLL;DLLEXPORT;NDEBUG;%(PreprocessorDefinitions) + 4090;4146;4267;4334 + $(OutputPath)$(TargetName).pdb + + + Windows + true + true + true + + + + + Level3 + + + MaxSpeed + true + true + JEMALLOC_NO_PRIVATE_NAMESPACE;_REENTRANT;JEMALLOC_EXPORT=;NDEBUG;_LIB;%(PreprocessorDefinitions) + ..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + 4090;4146;4267;4334 + OldStyle + + + Windows + true + true + true + + + + + + \ No newline at end of file diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters new file mode 100644 index 0000000..11cfcd0 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters @@ -0,0 +1,101 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + \ No newline at end of file diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj new file mode 100644 index 0000000..c35b0f5 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj @@ -0,0 +1,326 @@ + + + + + Debug-static + Win32 + + + Debug-static + x64 + + + Debug + Win32 + + + Release-static + Win32 + + + Release-static + x64 + + + Release + Win32 + + + Debug + x64 + + + Release + x64 + + + + {09028CFD-4EB7-491D-869C-0708DB97ED44} + Win32Proj + test_threads + + + + Application + true + v141 + MultiByte + + + Application + true + v141 + MultiByte + + + Application + false + v141 + true + MultiByte + + + Application + false + v141 + true + MultiByte + + + Application + true + v141 + MultiByte + + + Application + true + v141 + MultiByte + + + Application + false + v141 + true + MultiByte + + + Application + false + v141 + true + MultiByte + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + true + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + true + + + true + $(SolutionDir)$(Platform)\$(Configuration)\ + + + true + $(SolutionDir)$(Platform)\$(Configuration)\ + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false 
+ + + $(SolutionDir)$(Platform)\$(Configuration)\ + $(Platform)\$(Configuration)\ + false + + + + + + Level3 + Disabled + WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + $(SolutionDir)$(Platform)\$(Configuration) + jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + Level3 + Disabled + JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + + + Console + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + Level3 + Disabled + _DEBUG;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + jemallocd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + $(SolutionDir)$(Platform)\$(Configuration) + + + + + + + Level3 + Disabled + JEMALLOC_EXPORT=;JEMALLOC_STATIC;_DEBUG;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreadedDebug + + + Console + true + jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + $(SolutionDir)$(Platform)\$(Configuration) + + + + + Level3 + + + MaxSpeed + true + true + WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + true + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + Level3 + + + MaxSpeed + true + true + JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + + + Console + true + true + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc-$(PlatformToolset)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + Level3 + + + MaxSpeed + true + true + NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + + + Console + true + true + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + Level3 + + + MaxSpeed + true + true + 
JEMALLOC_EXPORT=;JEMALLOC_STATIC;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ..\..\..\..\test\include;..\..\..\..\include;..\..\..\..\include\msvc_compat;%(AdditionalIncludeDirectories) + MultiThreaded + + + Console + true + true + true + $(SolutionDir)$(Platform)\$(Configuration) + jemalloc-vc$(PlatformToolsetVersion)-$(Configuration).lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies) + + + + + + + + + {8d6bb292-9e1c-413d-9f98-4864bdc1514a} + + + + + + + + + \ No newline at end of file diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj.filters b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj.filters new file mode 100644 index 0000000..fa4588f --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/projects/vc2017/test_threads/test_threads.vcxproj.filters @@ -0,0 +1,26 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;hm;inl;inc;xsd + + + + + Source Files + + + Source Files + + + + + Header Files + + + \ No newline at end of file diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/test_threads/test_threads.cpp b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/test_threads/test_threads.cpp new file mode 100644 index 0000000..92e3162 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/test_threads/test_threads.cpp @@ -0,0 +1,88 @@ +// jemalloc C++ threaded test +// Author: Rustam Abdullaev +// Public Domain + +#include +#include +#include +#include +#include +#include +#include +#include + +using std::vector; +using std::thread; +using std::uniform_int_distribution; +using std::minstd_rand; + +int test_threads() { + je_malloc_conf = "narenas:3"; + int narenas = 0; + size_t sz = sizeof(narenas); + je_mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0); + if (narenas != 3) { + printf("Error: unexpected number of arenas: %d\n", narenas); + return 1; + } + static const int sizes[] = { 7, 16, 32, 60, 91, 100, 120, 144, 169, 199, 255, 400, 670, 900, 917, 1025, 3333, 5190, 13131, 49192, 99999, 123123, 255265, 2333111 }; + static const int numSizes = (int)(sizeof(sizes) / sizeof(sizes[0])); + vector workers; + static const int numThreads = narenas + 1, numAllocsMax = 25, numIter1 = 50, numIter2 = 50; + je_malloc_stats_print(NULL, NULL, NULL); + size_t allocated1; + size_t sz1 = sizeof(allocated1); + je_mallctl("stats.active", (void *)&allocated1, &sz1, NULL, 0); + printf("\nPress Enter to start threads...\n"); + getchar(); + printf("Starting %d threads x %d x %d iterations...\n", numThreads, numIter1, numIter2); + for (int i = 0; i < numThreads; i++) { + workers.emplace_back([tid=i]() { + uniform_int_distribution sizeDist(0, numSizes - 1); + minstd_rand rnd(tid * 17); + uint8_t* ptrs[numAllocsMax]; + int ptrsz[numAllocsMax]; + for (int i = 0; i < numIter1; ++i) { + thread t([&]() { + for (int i = 0; i < numIter2; ++i) { + const int numAllocs = numAllocsMax - sizeDist(rnd); + for (int j = 0; j < numAllocs; j += 64) { + const int x = sizeDist(rnd); + const int sz = sizes[x]; + ptrsz[j] = sz; + ptrs[j] = (uint8_t*)je_malloc(sz); + if (!ptrs[j]) { + printf("Unable to allocate %d bytes in thread %d, iter %d, alloc %d. 
%d\n", sz, tid, i, j, x); + exit(1); + } + for (int k = 0; k < sz; k++) + ptrs[j][k] = tid + k; + } + for (int j = 0; j < numAllocs; j += 64) { + for (int k = 0, sz = ptrsz[j]; k < sz; k++) + if (ptrs[j][k] != (uint8_t)(tid + k)) { + printf("Memory error in thread %d, iter %d, alloc %d @ %d : %02X!=%02X\n", tid, i, j, k, ptrs[j][k], (uint8_t)(tid + k)); + exit(1); + } + je_free(ptrs[j]); + } + } + }); + t.join(); + } + }); + } + for (thread& t : workers) { + t.join(); + } + je_malloc_stats_print(NULL, NULL, NULL); + size_t allocated2; + je_mallctl("stats.active", (void *)&allocated2, &sz1, NULL, 0); + size_t leaked = allocated2 - allocated1; + printf("\nDone. Leaked: %zd bytes\n", leaked); + bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet) + printf("\nTest %s!\n", (failed ? "FAILED" : "successful")); + printf("\nPress Enter to continue...\n"); + getchar(); + return failed ? 1 : 0; +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/test_threads/test_threads.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/test_threads/test_threads.h new file mode 100644 index 0000000..64d0cdb --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/test_threads/test_threads.h @@ -0,0 +1,3 @@ +#pragma once + +int test_threads(); diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/test_threads/test_threads_main.cpp b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/test_threads/test_threads_main.cpp new file mode 100644 index 0000000..0a022fb --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/msvc/test_threads/test_threads_main.cpp @@ -0,0 +1,11 @@ +#include "test_threads.h" +#include +#include +#include + +using namespace std::chrono_literals; + +int main(int argc, char** argv) { + int rc = test_threads(); + return rc; +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/release.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/release.c new file mode 100644 index 0000000..e69de29 diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/run_tests.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/run_tests.sh new file mode 100755 index 0000000..b434f15 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/run_tests.sh @@ -0,0 +1 @@ +$(dirname "$)")/scripts/gen_run_tests.py | bash diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/scripts/gen_run_tests.py b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/scripts/gen_run_tests.py new file mode 100755 index 0000000..a87ecff --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/scripts/gen_run_tests.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python + +import sys +from itertools import combinations +from os import uname +from multiprocessing import cpu_count + +# Later, we want to test extended vaddr support. Apparently, the "real" way of +# checking this is flaky on OS X. 
+bits_64 = sys.maxsize > 2**32
+
+nparallel = cpu_count() * 2
+
+uname = uname()[0]
+
+def powerset(items):
+    result = []
+    for i in xrange(len(items) + 1):
+        result += combinations(items, i)
+    return result
+
+possible_compilers = [('gcc', 'g++'), ('clang', 'clang++')]
+possible_compiler_opts = [
+    '-m32',
+]
+possible_config_opts = [
+    '--enable-debug',
+    '--enable-prof',
+    '--disable-stats',
+]
+if bits_64:
+    possible_config_opts.append('--with-lg-vaddr=56')
+
+possible_malloc_conf_opts = [
+    'tcache:false',
+    'dss:primary',
+    'percpu_arena:percpu',
+    'background_thread:true',
+]
+
+print 'set -e'
+print 'if [ -f Makefile ] ; then make relclean ; fi'
+print 'autoconf'
+print 'rm -rf run_tests.out'
+print 'mkdir run_tests.out'
+print 'cd run_tests.out'
+
+ind = 0
+for cc, cxx in possible_compilers:
+    for compiler_opts in powerset(possible_compiler_opts):
+        for config_opts in powerset(possible_config_opts):
+            for malloc_conf_opts in powerset(possible_malloc_conf_opts):
+                if cc is 'clang' \
+                  and '-m32' in possible_compiler_opts \
+                  and '--enable-prof' in config_opts:
+                    continue
+                config_line = (
+                    'EXTRA_CFLAGS=-Werror EXTRA_CXXFLAGS=-Werror '
+                    + 'CC="{} {}" '.format(cc, " ".join(compiler_opts))
+                    + 'CXX="{} {}" '.format(cxx, " ".join(compiler_opts))
+                    + '../../configure '
+                    + " ".join(config_opts) + (' --with-malloc-conf='
+                    + ",".join(malloc_conf_opts) if len(malloc_conf_opts) > 0
+                    else '')
+                )
+
+                # We don't want to test large vaddr spaces in 32-bit mode.
+                if ('-m32' in compiler_opts and '--with-lg-vaddr=56' in
+                  config_opts):
+                    continue
+
+                # Per CPU arenas are only supported on Linux.
+                linux_supported = ('percpu_arena:percpu' in malloc_conf_opts \
+                  or 'background_thread:true' in malloc_conf_opts)
+                # Heap profiling and dss are not supported on OS X.
+                darwin_unsupported = ('--enable-prof' in config_opts or \
+                  'dss:primary' in malloc_conf_opts)
+                if (uname == 'Linux' and linux_supported) \
+                  or (not linux_supported and (uname != 'Darwin' or \
+                  not darwin_unsupported)):
+                    print """cat <<EOF > run_test_%(ind)d.sh
+#!/bin/sh
+
+set -e
+
+abort() {
+    echo "==> Error" >> run_test.log
+    echo "Error; see run_tests.out/run_test_%(ind)d.out/run_test.log"
+    exit 255 # Special exit code tells xargs to terminate.
+}
+
+# Environment variables are not supported.
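+# run_cmd below echoes each command into run_test.log, appends the command's
+# combined stdout/stderr to the same log, and invokes abort on failure; the
+# exit code 255 from abort is what tells the parallel xargs driver to stop.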
+run_cmd() { + echo "==> \$@" >> run_test.log + \$@ >> run_test.log 2>&1 || abort +} + +echo "=> run_test_%(ind)d: %(config_line)s" +mkdir run_test_%(ind)d.out +cd run_test_%(ind)d.out + +echo "==> %(config_line)s" >> run_test.log +%(config_line)s >> run_test.log 2>&1 || abort + +run_cmd make all tests +run_cmd make check +run_cmd make distclean +EOF +chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line} + ind += 1 + +print 'for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs -P %(nparallel)d -n 1 sh' % {'last_ind': ind-1, 'nparallel': nparallel} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/scripts/gen_travis.py b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/scripts/gen_travis.py new file mode 100755 index 0000000..6dd3929 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/scripts/gen_travis.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python + +from itertools import combinations + +travis_template = """\ +language: generic + +matrix: + include: +%s + +before_script: + - autoconf + - ./configure ${COMPILER_FLAGS:+ \ + CC="$CC $COMPILER_FLAGS" \ + CXX="$CXX $COMPILER_FLAGS" } \ + $CONFIGURE_FLAGS + - make -j3 + - make -j3 tests + +script: + - make check +""" + +# The 'default' configuration is gcc, on linux, with no compiler or configure +# flags. We also test with clang, -m32, --enable-debug, --enable-prof, +# --disable-stats, and --with-malloc-conf=tcache:false. To avoid abusing +# travis though, we don't test all 2**7 = 128 possible combinations of these; +# instead, we only test combinations of up to 2 'unusual' settings, under the +# hope that bugs involving interactions of such settings are rare. +# Things at once, for C(7, 0) + C(7, 1) + C(7, 2) = 29 +MAX_UNUSUAL_OPTIONS = 2 + +os_default = 'linux' +os_unusual = 'osx' + +compilers_default = 'CC=gcc CXX=g++' +compilers_unusual = 'CC=clang CXX=clang++' + +compiler_flag_unusuals = ['-m32'] + +configure_flag_unusuals = [ + '--enable-debug', + '--enable-prof', + '--disable-stats', +] + +malloc_conf_unusuals = [ + 'tcache:false', + 'dss:primary', + 'percpu_arena:percpu', + 'background_thread:true', +] + +all_unusuals = ( + [os_unusual] + [compilers_unusual] + compiler_flag_unusuals + + configure_flag_unusuals + malloc_conf_unusuals +) + +unusual_combinations_to_test = [] +for i in xrange(MAX_UNUSUAL_OPTIONS + 1): + unusual_combinations_to_test += combinations(all_unusuals, i) + +include_rows = "" +for unusual_combination in unusual_combinations_to_test: + os = os_default + if os_unusual in unusual_combination: + os = os_unusual + + compilers = compilers_default + if compilers_unusual in unusual_combination: + compilers = compilers_unusual + + compiler_flags = [ + x for x in unusual_combination if x in compiler_flag_unusuals] + + configure_flags = [ + x for x in unusual_combination if x in configure_flag_unusuals] + + malloc_conf = [ + x for x in unusual_combination if x in malloc_conf_unusuals] + # Filter out unsupported configurations on OS X. + if os == 'osx' and ('dss:primary' in malloc_conf or \ + 'percpu_arena:percpu' in malloc_conf or 'background_thread:true' \ + in malloc_conf): + continue + if len(malloc_conf) > 0: + configure_flags.append('--with-malloc-conf=' + ",".join(malloc_conf)) + + # Filter out an unsupported configuration - heap profiling on OS X. + if os == 'osx' and '--enable-prof' in configure_flags: + continue + + # We get some spurious errors when -Warray-bounds is enabled. 
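+    # As an illustration (not emitted verbatim), the default-compiler row for
+    # the lone '--enable-debug' combination renders as:
+    #   - os: linux
+    #     env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"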
+ env_string = ('{} COMPILER_FLAGS="{}" CONFIGURE_FLAGS="{}" ' + 'EXTRA_CFLAGS="-Werror -Wno-array-bounds"').format( + compilers, " ".join(compiler_flags), " ".join(configure_flags)) + + include_rows += ' - os: %s\n' % os + include_rows += ' env: %s\n' % env_string + if '-m32' in unusual_combination and os == 'linux': + include_rows += ' addons:\n' + include_rows += ' apt:\n' + include_rows += ' packages:\n' + include_rows += ' - gcc-multilib\n' + +print travis_template % include_rows diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/arena.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/arena.c new file mode 100644 index 0000000..5d55bf1 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/arena.c @@ -0,0 +1,2043 @@ +#define JEMALLOC_ARENA_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/div.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/util.h" + +/******************************************************************************/ +/* Data. */ + +/* + * Define names for both unininitialized and initialized phases, so that + * options and mallctl processing are straightforward. + */ +const char *percpu_arena_mode_names[] = { + "percpu", + "phycpu", + "disabled", + "percpu", + "phycpu" +}; +percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT; + +ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT; +ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT; + +static atomic_zd_t dirty_decay_ms_default; +static atomic_zd_t muzzy_decay_ms_default; + +const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = { +#define STEP(step, h, x, y) \ + h, + SMOOTHSTEP +#undef STEP +}; + +static div_info_t arena_binind_div_info[NBINS]; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. 
+ */ + +static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit, + size_t npages_decay_max, bool is_background_thread); +static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, + bool is_background_thread, bool all); +static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + bin_t *bin); +static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + bin_t *bin); + +/******************************************************************************/ + +void +arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, + size_t *nactive, size_t *ndirty, size_t *nmuzzy) { + *nthreads += arena_nthreads_get(arena, false); + *dss = dss_prec_names[arena_dss_prec_get(arena)]; + *dirty_decay_ms = arena_dirty_decay_ms_get(arena); + *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); + *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED); + *ndirty += extents_npages_get(&arena->extents_dirty); + *nmuzzy += extents_npages_get(&arena->extents_muzzy); +} + +void +arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, + size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, + bin_stats_t *bstats, arena_stats_large_t *lstats) { + cassert(config_stats); + + arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms, + muzzy_decay_ms, nactive, ndirty, nmuzzy); + + size_t base_allocated, base_resident, base_mapped, metadata_thp; + base_stats_get(tsdn, arena->base, &base_allocated, &base_resident, + &base_mapped, &metadata_thp); + + arena_stats_lock(tsdn, &arena->stats); + + arena_stats_accum_zu(&astats->mapped, base_mapped + + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped)); + arena_stats_accum_zu(&astats->retained, + extents_npages_get(&arena->extents_retained) << LG_PAGE); + + arena_stats_accum_u64(&astats->decay_dirty.npurge, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_dirty.npurge)); + arena_stats_accum_u64(&astats->decay_dirty.nmadvise, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_dirty.nmadvise)); + arena_stats_accum_u64(&astats->decay_dirty.purged, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_dirty.purged)); + + arena_stats_accum_u64(&astats->decay_muzzy.npurge, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_muzzy.npurge)); + arena_stats_accum_u64(&astats->decay_muzzy.nmadvise, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_muzzy.nmadvise)); + arena_stats_accum_u64(&astats->decay_muzzy.purged, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_muzzy.purged)); + + arena_stats_accum_zu(&astats->base, base_allocated); + arena_stats_accum_zu(&astats->internal, arena_internal_get(arena)); + arena_stats_accum_zu(&astats->metadata_thp, metadata_thp); + arena_stats_accum_zu(&astats->resident, base_resident + + (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) + + extents_npages_get(&arena->extents_dirty) + + extents_npages_get(&arena->extents_muzzy)) << LG_PAGE))); + + for (szind_t i = 0; i < NSIZES - NBINS; i++) { + uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].nmalloc); + arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc); + arena_stats_accum_u64(&astats->nmalloc_large, nmalloc); + + uint64_t ndalloc = 
arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].ndalloc); + arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc); + arena_stats_accum_u64(&astats->ndalloc_large, ndalloc); + + uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].nrequests); + arena_stats_accum_u64(&lstats[i].nrequests, + nmalloc + nrequests); + arena_stats_accum_u64(&astats->nrequests_large, + nmalloc + nrequests); + + assert(nmalloc >= ndalloc); + assert(nmalloc - ndalloc <= SIZE_T_MAX); + size_t curlextents = (size_t)(nmalloc - ndalloc); + lstats[i].curlextents += curlextents; + arena_stats_accum_zu(&astats->allocated_large, + curlextents * sz_index2size(NBINS + i)); + } + + arena_stats_unlock(tsdn, &arena->stats); + + /* tcache_bytes counts currently cached bytes. */ + atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED); + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); + cache_bin_array_descriptor_t *descriptor; + ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) { + szind_t i = 0; + for (; i < NBINS; i++) { + cache_bin_t *tbin = &descriptor->bins_small[i]; + arena_stats_accum_zu(&astats->tcache_bytes, + tbin->ncached * sz_index2size(i)); + } + for (; i < nhbins; i++) { + cache_bin_t *tbin = &descriptor->bins_large[i]; + arena_stats_accum_zu(&astats->tcache_bytes, + tbin->ncached * sz_index2size(i)); + } + } + malloc_mutex_prof_read(tsdn, + &astats->mutex_prof_data[arena_prof_mutex_tcache_list], + &arena->tcache_ql_mtx); + malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); + +#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \ + malloc_mutex_lock(tsdn, &arena->mtx); \ + malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \ + &arena->mtx); \ + malloc_mutex_unlock(tsdn, &arena->mtx); + + /* Gather per arena mutex profiling data. 
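+ * Each READ_ARENA_MUTEX_PROF_DATA(mtx, ind) use below expands to a
+ * lock/read/unlock sequence on &arena->mtx that snapshots the mutex's
+ * contention counters into astats->mutex_prof_data[ind]; the macro body
+ * already ends in a semicolon, so the invocations need none of their own.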
*/ + READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large); + READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx, + arena_prof_mutex_extent_avail) + READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx, + arena_prof_mutex_extents_dirty) + READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx, + arena_prof_mutex_extents_muzzy) + READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx, + arena_prof_mutex_extents_retained) + READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx, + arena_prof_mutex_decay_dirty) + READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx, + arena_prof_mutex_decay_muzzy) + READ_ARENA_MUTEX_PROF_DATA(base->mtx, + arena_prof_mutex_base) +#undef READ_ARENA_MUTEX_PROF_DATA + + nstime_copy(&astats->uptime, &arena->create_time); + nstime_update(&astats->uptime); + nstime_subtract(&astats->uptime, &arena->create_time); + + for (szind_t i = 0; i < NBINS; i++) { + bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]); + } +} + +void +arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty, + extent); + if (arena_dirty_decay_ms_get(arena) == 0) { + arena_decay_dirty(tsdn, arena, false, true); + } else { + arena_background_thread_inactivity_check(tsdn, arena, false); + } +} + +static void * +arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) { + void *ret; + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + size_t regind; + + assert(extent_nfree_get(slab) > 0); + assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); + + regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info); + ret = (void *)((uintptr_t)extent_addr_get(slab) + + (uintptr_t)(bin_info->reg_size * regind)); + extent_nfree_dec(slab); + return ret; +} + +#ifndef JEMALLOC_JET +static +#endif +size_t +arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) { + size_t diff, regind; + + /* Freeing a pointer outside the slab can cause assertion failure. */ + assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab)); + assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab)); + /* Freeing an interior pointer can cause assertion failure. */ + assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) % + (uintptr_t)bin_infos[binind].reg_size == 0); + + diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)); + + /* Avoid doing division with a variable divisor. */ + regind = div_compute(&arena_binind_div_info[binind], diff); + + assert(regind < bin_infos[binind].nregs); + + return regind; +} + +static void +arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) { + szind_t binind = extent_szind_get(slab); + const bin_info_t *bin_info = &bin_infos[binind]; + size_t regind = arena_slab_regind(slab, binind, ptr); + + assert(extent_nfree_get(slab) < bin_info->nregs); + /* Freeing an unallocated pointer can cause assertion failure. 
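+ * (regind was derived from ptr via div_compute() above; bitmap_get() then
+ * confirms the region is still marked allocated, so a double free trips the
+ * assert below before bitmap_unset() can corrupt the slab bitmap.)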
*/ + assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind)); + + bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind); + extent_nfree_inc(slab); +} + +static void +arena_nactive_add(arena_t *arena, size_t add_pages) { + atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED); +} + +static void +arena_nactive_sub(arena_t *arena, size_t sub_pages) { + assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages); + atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED); +} + +static void +arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { + szind_t index, hindex; + + cassert(config_stats); + + if (usize < LARGE_MINCLASS) { + usize = LARGE_MINCLASS; + } + index = sz_size2index(usize); + hindex = (index >= NBINS) ? index - NBINS : 0; + + arena_stats_add_u64(tsdn, &arena->stats, + &arena->stats.lstats[hindex].nmalloc, 1); +} + +static void +arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { + szind_t index, hindex; + + cassert(config_stats); + + if (usize < LARGE_MINCLASS) { + usize = LARGE_MINCLASS; + } + index = sz_size2index(usize); + hindex = (index >= NBINS) ? index - NBINS : 0; + + arena_stats_add_u64(tsdn, &arena->stats, + &arena->stats.lstats[hindex].ndalloc, 1); +} + +static void +arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize, + size_t usize) { + arena_large_dalloc_stats_update(tsdn, arena, oldusize); + arena_large_malloc_stats_update(tsdn, arena, usize); +} + +extent_t * +arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool *zero) { + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + szind_t szind = sz_size2index(usize); + size_t mapped_add; + bool commit = true; + extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false, + szind, zero, &commit); + if (extent == NULL) { + extent = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment, + false, szind, zero, &commit); + } + size_t size = usize + sz_large_pad; + if (extent == NULL) { + extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL, + usize, sz_large_pad, alignment, false, szind, zero, + &commit); + if (config_stats) { + /* + * extent may be NULL on OOM, but in that case + * mapped_add isn't used below, so there's no need to + * conditionlly set it to 0 here. 
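+ * (In other words: only when both extents_alloc() calls miss and
+ * extent_alloc_wrapper() maps fresh memory is the full padded size counted
+ * as newly mapped; on the reuse paths mapped_add stays 0.)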
+ */ + mapped_add = size; + } + } else if (config_stats) { + mapped_add = 0; + } + + if (extent != NULL) { + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_malloc_stats_update(tsdn, arena, usize); + if (mapped_add != 0) { + arena_stats_add_zu(tsdn, &arena->stats, + &arena->stats.mapped, mapped_add); + } + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_add(arena, size >> LG_PAGE); + } + + return extent; +} + +void +arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_dalloc_stats_update(tsdn, arena, + extent_usize_get(extent)); + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE); +} + +void +arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + size_t oldusize) { + size_t usize = extent_usize_get(extent); + size_t udiff = oldusize - usize; + + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_sub(arena, udiff >> LG_PAGE); +} + +void +arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + size_t oldusize) { + size_t usize = extent_usize_get(extent); + size_t udiff = usize - oldusize; + + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_add(arena, udiff >> LG_PAGE); +} + +static ssize_t +arena_decay_ms_read(arena_decay_t *decay) { + return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); +} + +static void +arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) { + atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED); +} + +static void +arena_decay_deadline_init(arena_decay_t *decay) { + /* + * Generate a new deadline that is uniformly random within the next + * epoch after the current one. + */ + nstime_copy(&decay->deadline, &decay->epoch); + nstime_add(&decay->deadline, &decay->interval); + if (arena_decay_ms_read(decay) > 0) { + nstime_t jitter; + + nstime_init(&jitter, prng_range_u64(&decay->jitter_state, + nstime_ns(&decay->interval))); + nstime_add(&decay->deadline, &jitter); + } +} + +static bool +arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) { + return (nstime_compare(&decay->deadline, time) <= 0); +} + +static size_t +arena_decay_backlog_npages_limit(const arena_decay_t *decay) { + uint64_t sum; + size_t npages_limit_backlog; + unsigned i; + + /* + * For each element of decay_backlog, multiply by the corresponding + * fixed-point smoothstep decay factor. Sum the products, then divide + * to round down to the nearest whole number of pages. + */ + sum = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + sum += decay->backlog[i] * h_steps[i]; + } + npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); + + return npages_limit_backlog; +} + +static void +arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) { + size_t npages_delta = (current_npages > decay->nunpurged) ? 
+ current_npages - decay->nunpurged : 0; + decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta; + + if (config_debug) { + if (current_npages > decay->ceil_npages) { + decay->ceil_npages = current_npages; + } + size_t npages_limit = arena_decay_backlog_npages_limit(decay); + assert(decay->ceil_npages >= npages_limit); + if (decay->ceil_npages > npages_limit) { + decay->ceil_npages = npages_limit; + } + } +} + +static void +arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64, + size_t current_npages) { + if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { + memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) * + sizeof(size_t)); + } else { + size_t nadvance_z = (size_t)nadvance_u64; + + assert((uint64_t)nadvance_z == nadvance_u64); + + memmove(decay->backlog, &decay->backlog[nadvance_z], + (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); + if (nadvance_z > 1) { + memset(&decay->backlog[SMOOTHSTEP_NSTEPS - + nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); + } + } + + arena_decay_backlog_update_last(decay, current_npages); +} + +static void +arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, size_t current_npages, size_t npages_limit, + bool is_background_thread) { + if (current_npages > npages_limit) { + arena_decay_to_limit(tsdn, arena, decay, extents, false, + npages_limit, current_npages - npages_limit, + is_background_thread); + } +} + +static void +arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time, + size_t current_npages) { + assert(arena_decay_deadline_reached(decay, time)); + + nstime_t delta; + nstime_copy(&delta, time); + nstime_subtract(&delta, &decay->epoch); + + uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval); + assert(nadvance_u64 > 0); + + /* Add nadvance_u64 decay intervals to epoch. */ + nstime_copy(&delta, &decay->interval); + nstime_imultiply(&delta, nadvance_u64); + nstime_add(&decay->epoch, &delta); + + /* Set a new deadline. */ + arena_decay_deadline_init(decay); + + /* Update the backlog. */ + arena_decay_backlog_update(decay, nadvance_u64, current_npages); +} + +static void +arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, const nstime_t *time, bool is_background_thread) { + size_t current_npages = extents_npages_get(extents); + arena_decay_epoch_advance_helper(decay, time, current_npages); + + size_t npages_limit = arena_decay_backlog_npages_limit(decay); + /* We may unlock decay->mtx when try_purge(). Finish logging first. */ + decay->nunpurged = (npages_limit > current_npages) ? 
npages_limit : + current_npages; + + if (!background_thread_enabled() || is_background_thread) { + arena_decay_try_purge(tsdn, arena, decay, extents, + current_npages, npages_limit, is_background_thread); + } +} + +static void +arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) { + arena_decay_ms_write(decay, decay_ms); + if (decay_ms > 0) { + nstime_init(&decay->interval, (uint64_t)decay_ms * + KQU(1000000)); + nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS); + } + + nstime_init(&decay->epoch, 0); + nstime_update(&decay->epoch); + decay->jitter_state = (uint64_t)(uintptr_t)decay; + arena_decay_deadline_init(decay); + decay->nunpurged = 0; + memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); +} + +static bool +arena_decay_init(arena_decay_t *decay, ssize_t decay_ms, + arena_stats_decay_t *stats) { + if (config_debug) { + for (size_t i = 0; i < sizeof(arena_decay_t); i++) { + assert(((char *)decay)[i] == 0); + } + decay->ceil_npages = 0; + } + if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY, + malloc_mutex_rank_exclusive)) { + return true; + } + decay->purging = false; + arena_decay_reinit(decay, decay_ms); + /* Memory is zeroed, so there is no need to clear stats. */ + if (config_stats) { + decay->stats = stats; + } + return false; +} + +static bool +arena_decay_ms_valid(ssize_t decay_ms) { + if (decay_ms < -1) { + return false; + } + if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX * + KQU(1000)) { + return true; + } + return false; +} + +static bool +arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, bool is_background_thread) { + malloc_mutex_assert_owner(tsdn, &decay->mtx); + + /* Purge all or nothing if the option is disabled. */ + ssize_t decay_ms = arena_decay_ms_read(decay); + if (decay_ms <= 0) { + if (decay_ms == 0) { + arena_decay_to_limit(tsdn, arena, decay, extents, false, + 0, extents_npages_get(extents), + is_background_thread); + } + return false; + } + + nstime_t time; + nstime_init(&time, 0); + nstime_update(&time); + if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time) + > 0)) { + /* + * Time went backwards. Move the epoch back in time and + * generate a new deadline, with the expectation that time + * typically flows forward for long enough periods of time that + * epochs complete. Unfortunately, this strategy is susceptible + * to clock jitter triggering premature epoch advances, but + * clock jitter estimation and compensation isn't feasible here + * because calls into this code are event-driven. + */ + nstime_copy(&decay->epoch, &time); + arena_decay_deadline_init(decay); + } else { + /* Verify that time does not go backwards. */ + assert(nstime_compare(&decay->epoch, &time) <= 0); + } + + /* + * If the deadline has been reached, advance to the current epoch and + * purge to the new limit if necessary. Note that dirty pages created + * during the current epoch are not subject to purge until a future + * epoch, so as a result purging only happens during epoch advances, or + * being triggered by background threads (scheduled event). 
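+ * As a rough sketch of the timing: with dirty_decay_ms set to 10000, the
+ * interval is one SMOOTHSTEP_NSTEPS-th of that, and pages dirtied now enter
+ * the backlog at the next epoch advance, their purgeable share shrinking
+ * along the smoothstep curve over the following ~10 seconds.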
+ */ + bool advance_epoch = arena_decay_deadline_reached(decay, &time); + if (advance_epoch) { + arena_decay_epoch_advance(tsdn, arena, decay, extents, &time, + is_background_thread); + } else if (is_background_thread) { + arena_decay_try_purge(tsdn, arena, decay, extents, + extents_npages_get(extents), + arena_decay_backlog_npages_limit(decay), + is_background_thread); + } + + return advance_epoch; +} + +static ssize_t +arena_decay_ms_get(arena_decay_t *decay) { + return arena_decay_ms_read(decay); +} + +ssize_t +arena_dirty_decay_ms_get(arena_t *arena) { + return arena_decay_ms_get(&arena->decay_dirty); +} + +ssize_t +arena_muzzy_decay_ms_get(arena_t *arena) { + return arena_decay_ms_get(&arena->decay_muzzy); +} + +static bool +arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, ssize_t decay_ms) { + if (!arena_decay_ms_valid(decay_ms)) { + return true; + } + + malloc_mutex_lock(tsdn, &decay->mtx); + /* + * Restart decay backlog from scratch, which may cause many dirty pages + * to be immediately purged. It would conceptually be possible to map + * the old backlog onto the new backlog, but there is no justification + * for such complexity since decay_ms changes are intended to be + * infrequent, either between the {-1, 0, >0} states, or a one-time + * arbitrary change during initial arena configuration. + */ + arena_decay_reinit(decay, decay_ms); + arena_maybe_decay(tsdn, arena, decay, extents, false); + malloc_mutex_unlock(tsdn, &decay->mtx); + + return false; +} + +bool +arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, + ssize_t decay_ms) { + return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty, + &arena->extents_dirty, decay_ms); +} + +bool +arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, + ssize_t decay_ms) { + return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy, + &arena->extents_muzzy, decay_ms); +} + +static size_t +arena_stash_decayed(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit, + size_t npages_decay_max, extent_list_t *decay_extents) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + /* Stash extents according to npages_limit. 
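+ * Concretely, the loop below keeps calling extents_evict() until either the
+ * extents container is down to npages_limit pages or npages_decay_max pages
+ * have been stashed, appending each evicted extent to decay_extents.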
*/ + size_t nstashed = 0; + extent_t *extent; + while (nstashed < npages_decay_max && + (extent = extents_evict(tsdn, arena, r_extent_hooks, extents, + npages_limit)) != NULL) { + extent_list_append(decay_extents, extent); + nstashed += extent_size_get(extent) >> LG_PAGE; + } + return nstashed; +} + +static size_t +arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents, + bool all, extent_list_t *decay_extents, bool is_background_thread) { + UNUSED size_t nmadvise, nunmapped; + size_t npurged; + + if (config_stats) { + nmadvise = 0; + nunmapped = 0; + } + npurged = 0; + + ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); + for (extent_t *extent = extent_list_first(decay_extents); extent != + NULL; extent = extent_list_first(decay_extents)) { + if (config_stats) { + nmadvise++; + } + size_t npages = extent_size_get(extent) >> LG_PAGE; + npurged += npages; + extent_list_remove(decay_extents, extent); + switch (extents_state_get(extents)) { + case extent_state_active: + not_reached(); + case extent_state_dirty: + if (!all && muzzy_decay_ms != 0 && + !extent_purge_lazy_wrapper(tsdn, arena, + r_extent_hooks, extent, 0, + extent_size_get(extent))) { + extents_dalloc(tsdn, arena, r_extent_hooks, + &arena->extents_muzzy, extent); + arena_background_thread_inactivity_check(tsdn, + arena, is_background_thread); + break; + } + /* Fall through. */ + case extent_state_muzzy: + extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, + extent); + if (config_stats) { + nunmapped += npages; + } + break; + case extent_state_retained: + default: + not_reached(); + } + } + + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge, + 1); + arena_stats_add_u64(tsdn, &arena->stats, + &decay->stats->nmadvise, nmadvise); + arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged, + npurged); + arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped, + nunmapped << LG_PAGE); + arena_stats_unlock(tsdn, &arena->stats); + } + + return npurged; +} + +/* + * npages_limit: Decay at most npages_decay_max pages without violating the + * invariant: (extents_npages_get(extents) >= npages_limit). We need an upper + * bound on number of pages in order to prevent unbounded growth (namely in + * stashed), otherwise unbounded new pages could be added to extents during the + * current decay run, so that the purging thread never finishes. 
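The switch in arena_decay_stashed() above walks each stashed extent one step down a two-stage lifecycle. A sketch of that state machine, with illustrative names (the lazy purge is extent_purge_lazy_wrapper() in the real code, typically madvise(MADV_FREE) where available):

typedef enum { PAGES_DIRTY, PAGES_MUZZY, PAGES_UNMAPPED } pages_state_t;

static pages_state_t
pages_decay_step(pages_state_t s, int muzzy_decay_enabled) {
	switch (s) {
	case PAGES_DIRTY:
		/* When muzzy decay is disabled (decay_ms == 0), dirty
		 * extents skip the intermediate state, matching the
		 * muzzy_decay_ms != 0 test above. */
		return muzzy_decay_enabled ? PAGES_MUZZY : PAGES_UNMAPPED;
	case PAGES_MUZZY:
		return PAGES_UNMAPPED;
	default:
		return s;
	}
}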
+ */ +static void +arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max, + bool is_background_thread) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 1); + malloc_mutex_assert_owner(tsdn, &decay->mtx); + + if (decay->purging) { + return; + } + decay->purging = true; + malloc_mutex_unlock(tsdn, &decay->mtx); + + extent_hooks_t *extent_hooks = extent_hooks_get(arena); + + extent_list_t decay_extents; + extent_list_init(&decay_extents); + + size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents, + npages_limit, npages_decay_max, &decay_extents); + if (npurge != 0) { + UNUSED size_t npurged = arena_decay_stashed(tsdn, arena, + &extent_hooks, decay, extents, all, &decay_extents, + is_background_thread); + assert(npurged == npurge); + } + + malloc_mutex_lock(tsdn, &decay->mtx); + decay->purging = false; +} + +static bool +arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, bool is_background_thread, bool all) { + if (all) { + malloc_mutex_lock(tsdn, &decay->mtx); + arena_decay_to_limit(tsdn, arena, decay, extents, all, 0, + extents_npages_get(extents), is_background_thread); + malloc_mutex_unlock(tsdn, &decay->mtx); + + return false; + } + + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { + /* No need to wait if another thread is in progress. */ + return true; + } + + bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents, + is_background_thread); + UNUSED size_t npages_new; + if (epoch_advanced) { + /* Backlog is updated on epoch advance. */ + npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1]; + } + malloc_mutex_unlock(tsdn, &decay->mtx); + + if (have_background_thread && background_thread_enabled() && + epoch_advanced && !is_background_thread) { + background_thread_interval_check(tsdn, arena, decay, + npages_new); + } + + return false; +} + +static bool +arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, + bool all) { + return arena_decay_impl(tsdn, arena, &arena->decay_dirty, + &arena->extents_dirty, is_background_thread, all); +} + +static bool +arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, + bool all) { + return arena_decay_impl(tsdn, arena, &arena->decay_muzzy, + &arena->extents_muzzy, is_background_thread, all); +} + +void +arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { + if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) { + return; + } + arena_decay_muzzy(tsdn, arena, is_background_thread, all); +} + +static void +arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) { + arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE); + + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab); +} + +static void +arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) { + assert(extent_nfree_get(slab) > 0); + extent_heap_insert(&bin->slabs_nonfull, slab); +} + +static void +arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) { + extent_heap_remove(&bin->slabs_nonfull, slab); +} + +static extent_t * +arena_bin_slabs_nonfull_tryget(bin_t *bin) { + extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull); + if (slab == NULL) { + return NULL; + } + if (config_stats) { + bin->stats.reslabs++; + } + return slab; +} + +static void +arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) { + 
assert(extent_nfree_get(slab) == 0); + /* + * Tracking extents is required by arena_reset, which is not allowed + * for auto arenas. Bypass this step to avoid touching the extent + * linkage (often results in cache misses) for auto arenas. + */ + if (arena_is_auto(arena)) { + return; + } + extent_list_append(&bin->slabs_full, slab); +} + +static void +arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) { + if (arena_is_auto(arena)) { + return; + } + extent_list_remove(&bin->slabs_full, slab); +} + +void +arena_reset(tsd_t *tsd, arena_t *arena) { + /* + * Locking in this function is unintuitive. The caller guarantees that + * no concurrent operations are happening in this arena, but there are + * still reasons that some locking is necessary: + * + * - Some of the functions in the transitive closure of calls assume + * appropriate locks are held, and in some cases these locks are + * temporarily dropped to avoid lock order reversal or deadlock due to + * reentry. + * - mallctl("epoch", ...) may concurrently refresh stats. While + * strictly speaking this is a "concurrent operation", disallowing + * stats refreshes would impose an inconvenient burden. + */ + + /* Large allocations. */ + malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); + + for (extent_t *extent = extent_list_first(&arena->large); extent != + NULL; extent = extent_list_first(&arena->large)) { + void *ptr = extent_base_get(extent); + size_t usize; + + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + + if (config_stats || (config_prof && opt_prof)) { + usize = sz_index2size(alloc_ctx.szind); + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + } + /* Remove large allocation from prof sample set. */ + if (config_prof && opt_prof) { + prof_free(tsd, ptr, usize, &alloc_ctx); + } + large_dalloc(tsd_tsdn(tsd), extent); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); + + /* Bins. */ + for (unsigned i = 0; i < NBINS; i++) { + extent_t *slab; + bin_t *bin = &arena->bins[i]; + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + if (bin->slabcur != NULL) { + slab = bin->slabcur; + bin->slabcur = NULL; + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + } + while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != + NULL) { + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + } + for (slab = extent_list_first(&bin->slabs_full); slab != NULL; + slab = extent_list_first(&bin->slabs_full)) { + arena_bin_slabs_full_remove(arena, bin, slab); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + } + if (config_stats) { + bin->stats.curregs = 0; + bin->stats.curslabs = 0; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + } + + atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); +} + +static void +arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) { + /* + * Iterate over the retained extents and destroy them. 
This gives the + * extent allocator underlying the extent hooks an opportunity to unmap + * all retained memory without having to keep its own metadata + * structures. In practice, virtual memory for dss-allocated extents is + * leaked here, so best practice is to avoid dss for arenas to be + * destroyed, or provide custom extent hooks that track retained + * dss-based extents for later reuse. + */ + extent_hooks_t *extent_hooks = extent_hooks_get(arena); + extent_t *extent; + while ((extent = extents_evict(tsdn, arena, &extent_hooks, + &arena->extents_retained, 0)) != NULL) { + extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent); + } +} + +void +arena_destroy(tsd_t *tsd, arena_t *arena) { + assert(base_ind_get(arena->base) >= narenas_auto); + assert(arena_nthreads_get(arena, false) == 0); + assert(arena_nthreads_get(arena, true) == 0); + + /* + * No allocations have occurred since arena_reset() was called. + * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached + * extents, so only retained extents may remain. + */ + assert(extents_npages_get(&arena->extents_dirty) == 0); + assert(extents_npages_get(&arena->extents_muzzy) == 0); + + /* Deallocate retained memory. */ + arena_destroy_retained(tsd_tsdn(tsd), arena); + + /* + * Remove the arena pointer from the arenas array. We rely on the fact + * that there is no way for the application to get a dirty read from the + * arenas array unless there is an inherent race in the application + * involving access of an arena being concurrently destroyed. The + * application must synchronize knowledge of the arena's validity, so as + * long as we use an atomic write to update the arenas array, the + * application will get a clean read any time after it synchronizes + * knowledge that the arena is no longer valid. + */ + arena_set(base_ind_get(arena->base), NULL); + + /* + * Destroy the base allocator, which manages all metadata ever mapped by + * this arena. + */ + base_delete(tsd_tsdn(tsd), arena->base); +} + +static extent_t * +arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info, + szind_t szind) { + extent_t *slab; + bool zero, commit; + + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + zero = false; + commit = true; + slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL, + bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit); + + if (config_stats && slab != NULL) { + arena_stats_mapped_add(tsdn, &arena->stats, + bin_info->slab_size); + } + + return slab; +} + +static extent_t * +arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, + const bin_info_t *bin_info) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + szind_t szind = sz_size2index(bin_info->reg_size); + bool zero = false; + bool commit = true; + extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true, + binind, &zero, &commit); + if (slab == NULL) { + slab = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE, + true, binind, &zero, &commit); + } + if (slab == NULL) { + slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks, + bin_info, szind); + if (slab == NULL) { + return NULL; + } + } + assert(extent_slab_get(slab)); + + /* Initialize slab internals. 
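Each new slab's bookkeeping, set up just below via extent_nfree_set() and bitmap_init(), reduces to a free-region bitmap plus a counter. A self-contained sketch of the idea (names and the fixed 256-region capacity are illustrative, not jemalloc's bitmap module):

#include <stdint.h>
#include <string.h>

typedef struct {
	uint64_t words[4]; /* 1 bit per region, 1 = in use */
	unsigned nfree;
} slab_map_t;

static void
slab_map_init(slab_map_t *m, unsigned nregs) {
	memset(m->words, 0, sizeof(m->words));
	m->nfree = nregs;
}

/* Claim the lowest free region, or return (unsigned)-1 when full. */
static unsigned
slab_map_alloc(slab_map_t *m, unsigned nregs) {
	for (unsigned i = 0; i < nregs; i++) {
		uint64_t bit = UINT64_C(1) << (i % 64);
		if (!(m->words[i / 64] & bit)) {
			m->words[i / 64] |= bit;
			m->nfree--;
			return i;
		}
	}
	return (unsigned)-1;
}

Preferring the lowest free region keeps live allocations packed toward the front of the slab, the same low-regions-first behavior noted in arena_tcache_fill_small()'s fill loop.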
*/ + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + extent_nfree_set(slab, bin_info->nregs); + bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false); + + arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE); + + return slab; +} + +static extent_t * +arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin, + szind_t binind) { + extent_t *slab; + const bin_info_t *bin_info; + + /* Look for a usable slab. */ + slab = arena_bin_slabs_nonfull_tryget(bin); + if (slab != NULL) { + return slab; + } + /* No existing slabs have any space available. */ + + bin_info = &bin_infos[binind]; + + /* Allocate a new slab. */ + malloc_mutex_unlock(tsdn, &bin->lock); + /******************************/ + slab = arena_slab_alloc(tsdn, arena, binind, bin_info); + /********************************/ + malloc_mutex_lock(tsdn, &bin->lock); + if (slab != NULL) { + if (config_stats) { + bin->stats.nslabs++; + bin->stats.curslabs++; + } + return slab; + } + + /* + * arena_slab_alloc() failed, but another thread may have made + * sufficient memory available while this one dropped bin->lock above, + * so search one more time. + */ + slab = arena_bin_slabs_nonfull_tryget(bin); + if (slab != NULL) { + return slab; + } + + return NULL; +} + +/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */ +static void * +arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin, + szind_t binind) { + const bin_info_t *bin_info; + extent_t *slab; + + bin_info = &bin_infos[binind]; + if (!arena_is_auto(arena) && bin->slabcur != NULL) { + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); + bin->slabcur = NULL; + } + slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind); + if (bin->slabcur != NULL) { + /* + * Another thread updated slabcur while this one ran without the + * bin lock in arena_bin_nonfull_slab_get(). + */ + if (extent_nfree_get(bin->slabcur) > 0) { + void *ret = arena_slab_reg_alloc(bin->slabcur, + bin_info); + if (slab != NULL) { + /* + * arena_slab_alloc() may have allocated slab, + * or it may have been pulled from + * slabs_nonfull. Therefore it is unsafe to + * make any assumptions about how slab has + * previously been used, and + * arena_bin_lower_slab() must be called, as if + * a region were just deallocated from the slab. + */ + if (extent_nfree_get(slab) == bin_info->nregs) { + arena_dalloc_bin_slab(tsdn, arena, slab, + bin); + } else { + arena_bin_lower_slab(tsdn, arena, slab, + bin); + } + } + return ret; + } + + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); + bin->slabcur = NULL; + } + + if (slab == NULL) { + return NULL; + } + bin->slabcur = slab; + + assert(extent_nfree_get(bin->slabcur) > 0); + + return arena_slab_reg_alloc(slab, bin_info); +} + +void +arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { + unsigned i, nfill; + bin_t *bin; + + assert(tbin->ncached == 0); + + if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) { + prof_idump(tsdn); + } + bin = &arena->bins[binind]; + malloc_mutex_lock(tsdn, &bin->lock); + for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> + tcache->lg_fill_div[binind]); i < nfill; i++) { + extent_t *slab; + void *ptr; + if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > + 0) { + ptr = arena_slab_reg_alloc(slab, &bin_infos[binind]); + } else { + ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind); + } + if (ptr == NULL) { + /* + * OOM. 
tbin->avail isn't yet filled down to its first + * element, so the successful allocations (if any) must + * be moved just before tbin->avail before bailing out. + */ + if (i > 0) { + memmove(tbin->avail - i, tbin->avail - nfill, + i * sizeof(void *)); + } + break; + } + if (config_fill && unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ptr, &bin_infos[binind], true); + } + /* Insert such that low regions get used first. */ + *(tbin->avail - nfill + i) = ptr; + } + if (config_stats) { + bin->stats.nmalloc += i; + bin->stats.nrequests += tbin->tstats.nrequests; + bin->stats.curregs += i; + bin->stats.nfills++; + tbin->tstats.nrequests = 0; + } + malloc_mutex_unlock(tsdn, &bin->lock); + tbin->ncached = i; + arena_decay_tick(tsdn, arena); +} + +void +arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) { + if (!zero) { + memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size); + } +} + +static void +arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) { + memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size); +} +arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small = + arena_dalloc_junk_small_impl; + +static void * +arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { + void *ret; + bin_t *bin; + size_t usize; + extent_t *slab; + + assert(binind < NBINS); + bin = &arena->bins[binind]; + usize = sz_index2size(binind); + + malloc_mutex_lock(tsdn, &bin->lock); + if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) { + ret = arena_slab_reg_alloc(slab, &bin_infos[binind]); + } else { + ret = arena_bin_malloc_hard(tsdn, arena, bin, binind); + } + + if (ret == NULL) { + malloc_mutex_unlock(tsdn, &bin->lock); + return NULL; + } + + if (config_stats) { + bin->stats.nmalloc++; + bin->stats.nrequests++; + bin->stats.curregs++; + } + malloc_mutex_unlock(tsdn, &bin->lock); + if (config_prof && arena_prof_accum(tsdn, arena, usize)) { + prof_idump(tsdn); + } + + if (!zero) { + if (config_fill) { + if (unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, + &bin_infos[binind], false); + } else if (unlikely(opt_zero)) { + memset(ret, 0, usize); + } + } + } else { + if (config_fill && unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, &bin_infos[binind], + true); + } + memset(ret, 0, usize); + } + + arena_decay_tick(tsdn, arena); + return ret; +} + +void * +arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, + bool zero) { + assert(!tsdn_null(tsdn) || arena != NULL); + + if (likely(!tsdn_null(tsdn))) { + arena = arena_choose(tsdn_tsd(tsdn), arena); + } + if (unlikely(arena == NULL)) { + return NULL; + } + + if (likely(size <= SMALL_MAXCLASS)) { + return arena_malloc_small(tsdn, arena, ind, zero); + } + return large_malloc(tsdn, arena, sz_index2size(ind), zero); +} + +void * +arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero, tcache_t *tcache) { + void *ret; + + if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE + && (usize & PAGE_MASK) == 0))) { + /* Small; alignment doesn't require special slab placement. 
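The small-versus-large routing test in arena_palloc() above restates cleanly as a predicate. The constants are assumptions for a 4 KiB page configuration (SMALL_MAXCLASS in particular varies by build):

#include <stdbool.h>
#include <stddef.h>

#define PAGE ((size_t)4096)
#define PAGE_MASK (PAGE - 1)
#define SMALL_MAXCLASS ((size_t)14336) /* illustrative */

/* The small path suffices when the class is small and the alignment is
 * sub-page, or exactly one page while usize is a page multiple. */
static bool
small_path_ok(size_t usize, size_t alignment) {
	return usize <= SMALL_MAXCLASS && (alignment < PAGE ||
	    (alignment == PAGE && (usize & PAGE_MASK) == 0));
}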
*/ + ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize), + zero, tcache, true); + } else { + if (likely(alignment <= CACHELINE)) { + ret = large_malloc(tsdn, arena, usize, zero); + } else { + ret = large_palloc(tsdn, arena, usize, alignment, zero); + } + } + return ret; +} + +void +arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) { + cassert(config_prof); + assert(ptr != NULL); + assert(isalloc(tsdn, ptr) == LARGE_MINCLASS); + assert(usize <= SMALL_MAXCLASS); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true); + arena_t *arena = extent_arena_get(extent); + + szind_t szind = sz_size2index(usize); + extent_szind_set(extent, szind); + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, + szind, false); + + prof_accum_cancel(tsdn, &arena->prof_accum, usize); + + assert(isalloc(tsdn, ptr) == usize); +} + +static size_t +arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { + cassert(config_prof); + assert(ptr != NULL); + + extent_szind_set(extent, NBINS); + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, + NBINS, false); + + assert(isalloc(tsdn, ptr) == LARGE_MINCLASS); + + return LARGE_MINCLASS; +} + +void +arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, + bool slow_path) { + cassert(config_prof); + assert(opt_prof); + + extent_t *extent = iealloc(tsdn, ptr); + size_t usize = arena_prof_demote(tsdn, extent, ptr); + if (usize <= tcache_maxclass) { + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, + sz_size2index(usize), slow_path); + } else { + large_dalloc(tsdn, extent); + } +} + +static void +arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) { + /* Dissociate slab from bin. */ + if (slab == bin->slabcur) { + bin->slabcur = NULL; + } else { + szind_t binind = extent_szind_get(slab); + const bin_info_t *bin_info = &bin_infos[binind]; + + /* + * The following block's conditional is necessary because if the + * slab only contains one region, then it never gets inserted + * into the non-full slabs heap. + */ + if (bin_info->nregs == 1) { + arena_bin_slabs_full_remove(arena, bin, slab); + } else { + arena_bin_slabs_nonfull_remove(bin, slab); + } + } +} + +static void +arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + bin_t *bin) { + assert(slab != bin->slabcur); + + malloc_mutex_unlock(tsdn, &bin->lock); + /******************************/ + arena_slab_dalloc(tsdn, arena, slab); + /****************************/ + malloc_mutex_lock(tsdn, &bin->lock); + if (config_stats) { + bin->stats.curslabs--; + } +} + +static void +arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab, + bin_t *bin) { + assert(extent_nfree_get(slab) > 0); + + /* + * Make sure that if bin->slabcur is non-NULL, it refers to the + * oldest/lowest non-full slab. It is okay to NULL slabcur out rather + * than proactively keeping it pointing at the oldest/lowest non-full + * slab. + */ + if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) { + /* Switch slabcur. 
*/ + if (extent_nfree_get(bin->slabcur) > 0) { + arena_bin_slabs_nonfull_insert(bin, bin->slabcur); + } else { + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); + } + bin->slabcur = slab; + if (config_stats) { + bin->stats.reslabs++; + } + } else { + arena_bin_slabs_nonfull_insert(bin, slab); + } +} + +static void +arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + void *ptr, bool junked) { + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + szind_t binind = extent_szind_get(slab); + bin_t *bin = &arena->bins[binind]; + const bin_info_t *bin_info = &bin_infos[binind]; + + if (!junked && config_fill && unlikely(opt_junk_free)) { + arena_dalloc_junk_small(ptr, bin_info); + } + + arena_slab_reg_dalloc(slab, slab_data, ptr); + unsigned nfree = extent_nfree_get(slab); + if (nfree == bin_info->nregs) { + arena_dissociate_bin_slab(arena, slab, bin); + arena_dalloc_bin_slab(tsdn, arena, slab, bin); + } else if (nfree == 1 && slab != bin->slabcur) { + arena_bin_slabs_full_remove(arena, bin, slab); + arena_bin_lower_slab(tsdn, arena, slab, bin); + } + + if (config_stats) { + bin->stats.ndalloc++; + bin->stats.curregs--; + } +} + +void +arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + void *ptr) { + arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true); +} + +static void +arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) { + szind_t binind = extent_szind_get(extent); + bin_t *bin = &arena->bins[binind]; + + malloc_mutex_lock(tsdn, &bin->lock); + arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false); + malloc_mutex_unlock(tsdn, &bin->lock); +} + +void +arena_dalloc_small(tsdn_t *tsdn, void *ptr) { + extent_t *extent = iealloc(tsdn, ptr); + arena_t *arena = extent_arena_get(extent); + + arena_dalloc_bin(tsdn, arena, extent, ptr); + arena_decay_tick(tsdn, arena); +} + +bool +arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t extra, bool zero) { + /* Calls with non-zero extra had to clamp extra. */ + assert(extra == 0 || size + extra <= LARGE_MAXCLASS); + + if (unlikely(size > LARGE_MAXCLASS)) { + return true; + } + + extent_t *extent = iealloc(tsdn, ptr); + size_t usize_min = sz_s2u(size); + size_t usize_max = sz_s2u(size + extra); + if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) { + /* + * Avoid moving the allocation if the size class can be left the + * same. 
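A simplified restatement of the no-move test that follows: a small realloc can stay in place when the old and proposed size classes coincide (the real check also tolerates the usize_min/usize_max window that a nonzero extra produces). size2index is a hypothetical stand-in for sz_size2index():

#include <stdbool.h>
#include <stddef.h>

static bool
small_ralloc_in_place(size_t oldsize, size_t newsize,
    unsigned (*size2index)(size_t)) {
	/* Same class => same backing region size => nothing to move. */
	return size2index(newsize) == size2index(oldsize);
}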
+ */ + assert(bin_infos[sz_size2index(oldsize)].reg_size == + oldsize); + if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) != + sz_size2index(oldsize)) && (size > oldsize || usize_max < + oldsize)) { + return true; + } + + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) { + return large_ralloc_no_move(tsdn, extent, usize_min, usize_max, + zero); + } + + return true; +} + +static void * +arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero, tcache_t *tcache) { + if (alignment == 0) { + return arena_malloc(tsdn, arena, usize, sz_size2index(usize), + zero, tcache, true); + } + usize = sz_sa2u(usize, alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + return NULL; + } + return ipalloct(tsdn, usize, alignment, zero, tcache, arena); +} + +void * +arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, + size_t size, size_t alignment, bool zero, tcache_t *tcache) { + size_t usize = sz_s2u(size); + if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) { + return NULL; + } + + if (likely(usize <= SMALL_MAXCLASS)) { + /* Try to avoid moving the allocation. */ + if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) { + return ptr; + } + } + + if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) { + return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize, + alignment, zero, tcache); + } + + /* + * size and oldsize are different enough that we need to move the + * object. In that case, fall back to allocating new space and copying. + */ + void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, + zero, tcache); + if (ret == NULL) { + return NULL; + } + + /* + * Junk/zero-filling were already done by + * ipalloc()/arena_malloc(). + */ + + size_t copysize = (usize < oldsize) ? usize : oldsize; + memcpy(ret, ptr, copysize); + isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); + return ret; +} + +dss_prec_t +arena_dss_prec_get(arena_t *arena) { + return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE); +} + +bool +arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) { + if (!have_dss) { + return (dss_prec != dss_prec_disabled); + } + atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE); + return false; +} + +ssize_t +arena_dirty_decay_ms_default_get(void) { + return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED); +} + +bool +arena_dirty_decay_ms_default_set(ssize_t decay_ms) { + if (!arena_decay_ms_valid(decay_ms)) { + return true; + } + atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED); + return false; +} + +ssize_t +arena_muzzy_decay_ms_default_get(void) { + return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED); +} + +bool +arena_muzzy_decay_ms_default_set(ssize_t decay_ms) { + if (!arena_decay_ms_valid(decay_ms)) { + return true; + } + atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED); + return false; +} + +bool +arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit, + size_t *new_limit) { + assert(opt_retain); + + pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0); + if (new_limit != NULL) { + size_t limit = *new_limit; + /* Grow no more than the new limit. 
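The check below, (new_ind = sz_psz2ind(limit + 1) - 1), rounds an arbitrary byte limit down to the largest page-size class that does not exceed it. With idealized power-of-two classes the same rounding looks like this (illustrative only; jemalloc's psz classes are not plain powers of two):

#include <stddef.h>

/* Largest class index with ((size_t)1 << ind) <= limit; assumes
 * 1 <= limit < SIZE_MAX / 2. */
static unsigned
limit_to_class(size_t limit) {
	unsigned ind = 0;
	while (((size_t)2 << ind) <= limit) {
		ind++;
	}
	return ind;
}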
*/ + if ((new_ind = sz_psz2ind(limit + 1) - 1) > + EXTENT_GROW_MAX_PIND) { + return true; + } + } + + malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx); + if (old_limit != NULL) { + *old_limit = sz_pind2sz(arena->retain_grow_limit); + } + if (new_limit != NULL) { + arena->retain_grow_limit = new_ind; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx); + + return false; +} + +unsigned +arena_nthreads_get(arena_t *arena, bool internal) { + return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED); +} + +void +arena_nthreads_inc(arena_t *arena, bool internal) { + atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); +} + +void +arena_nthreads_dec(arena_t *arena, bool internal) { + atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); +} + +size_t +arena_extent_sn_next(arena_t *arena) { + return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED); +} + +arena_t * +arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + arena_t *arena; + base_t *base; + unsigned i; + + if (ind == 0) { + base = b0get(); + } else { + base = base_new(tsdn, ind, extent_hooks); + if (base == NULL) { + return NULL; + } + } + + arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE); + if (arena == NULL) { + goto label_error; + } + + atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); + atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); + arena->last_thd = NULL; + + if (config_stats) { + if (arena_stats_init(tsdn, &arena->stats)) { + goto label_error; + } + + ql_new(&arena->tcache_ql); + ql_new(&arena->cache_bin_array_descriptor_ql); + if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql", + WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) { + goto label_error; + } + } + + if (config_prof) { + if (prof_accum_init(tsdn, &arena->prof_accum)) { + goto label_error; + } + } + + if (config_cache_oblivious) { + /* + * A nondeterministic seed based on the address of arena reduces + * the likelihood of lockstep non-uniform cache index + * utilization among identical concurrent processes, but at the + * cost of test repeatability. For debug builds, instead use a + * deterministic seed. + */ + atomic_store_zu(&arena->offset_state, config_debug ? ind : + (size_t)(uintptr_t)arena, ATOMIC_RELAXED); + } + + atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED); + + atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(), + ATOMIC_RELAXED); + + atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); + + extent_list_init(&arena->large); + if (malloc_mutex_init(&arena->large_mtx, "arena_large", + WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) { + goto label_error; + } + + /* + * Delay coalescing for dirty extents despite the disruptive effect on + * memory layout for best-fit extent allocation, since cached extents + * are likely to be reused soon after deallocation, and the cost of + * merging/splitting extents is non-trivial. + */ + if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty, + true)) { + goto label_error; + } + /* + * Coalesce muzzy extents immediately, because operations on them are in + * the critical path much less often than for dirty extents. 
+ */ + if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy, + false)) { + goto label_error; + } + /* + * Coalesce retained extents immediately, in part because they will + * never be evicted (and therefore there's no opportunity for delayed + * coalescing), but also because operations on retained extents are not + * in the critical path. + */ + if (extents_init(tsdn, &arena->extents_retained, extent_state_retained, + false)) { + goto label_error; + } + + if (arena_decay_init(&arena->decay_dirty, + arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) { + goto label_error; + } + if (arena_decay_init(&arena->decay_muzzy, + arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) { + goto label_error; + } + + arena->extent_grow_next = sz_psz2ind(HUGEPAGE); + arena->retain_grow_limit = EXTENT_GROW_MAX_PIND; + if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow", + WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { + goto label_error; + } + + extent_avail_new(&arena->extent_avail); + if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail", + WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) { + goto label_error; + } + + /* Initialize bins. */ + for (i = 0; i < NBINS; i++) { + bool err = bin_init(&arena->bins[i]); + if (err) { + goto label_error; + } + } + + arena->base = base; + /* Set arena before creating background threads. */ + arena_set(ind, arena); + + nstime_init(&arena->create_time, 0); + nstime_update(&arena->create_time); + + /* We don't support reentrancy for arena 0 bootstrapping. */ + if (ind != 0) { + /* + * If we're here, then arena 0 already exists, so bootstrapping + * is done enough that we should have tsd. + */ + assert(!tsdn_null(tsdn)); + pre_reentrancy(tsdn_tsd(tsdn), arena); + if (hooks_arena_new_hook) { + hooks_arena_new_hook(); + } + post_reentrancy(tsdn_tsd(tsdn)); + } + + return arena; +label_error: + if (ind != 0) { + base_delete(tsdn, base); + } + return NULL; +} + +void +arena_boot(void) { + arena_dirty_decay_ms_default_set(opt_dirty_decay_ms); + arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms); +#define REGIND_bin_yes(index, reg_size) \ + div_init(&arena_binind_div_info[(index)], (reg_size)); +#define REGIND_bin_no(index, reg_size) +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \ + lg_delta_lookup) \ + REGIND_bin_##bin(index, (1U<decay_dirty.mtx); + malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx); +} + +void +arena_prefork1(tsdn_t *tsdn, arena_t *arena) { + if (config_stats) { + malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx); + } +} + +void +arena_prefork2(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx); +} + +void +arena_prefork3(tsdn_t *tsdn, arena_t *arena) { + extents_prefork(tsdn, &arena->extents_dirty); + extents_prefork(tsdn, &arena->extents_muzzy); + extents_prefork(tsdn, &arena->extents_retained); +} + +void +arena_prefork4(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx); +} + +void +arena_prefork5(tsdn_t *tsdn, arena_t *arena) { + base_prefork(tsdn, arena->base); +} + +void +arena_prefork6(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->large_mtx); +} + +void +arena_prefork7(tsdn_t *tsdn, arena_t *arena) { + for (unsigned i = 0; i < NBINS; i++) { + bin_prefork(tsdn, &arena->bins[i]); + } +} + +void +arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { + unsigned i; + + for (i = 0; i < NBINS; i++) { + bin_postfork_parent(tsdn, &arena->bins[i]); + } + 
malloc_mutex_postfork_parent(tsdn, &arena->large_mtx); + base_postfork_parent(tsdn, arena->base); + malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx); + extents_postfork_parent(tsdn, &arena->extents_dirty); + extents_postfork_parent(tsdn, &arena->extents_muzzy); + extents_postfork_parent(tsdn, &arena->extents_retained); + malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx); + malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx); + if (config_stats) { + malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx); + } +} + +void +arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { + unsigned i; + + atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); + atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); + if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) { + arena_nthreads_inc(arena, false); + } + if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) { + arena_nthreads_inc(arena, true); + } + if (config_stats) { + ql_new(&arena->tcache_ql); + ql_new(&arena->cache_bin_array_descriptor_ql); + tcache_t *tcache = tcache_get(tsdn_tsd(tsdn)); + if (tcache != NULL && tcache->arena == arena) { + ql_elm_new(tcache, link); + ql_tail_insert(&arena->tcache_ql, tcache, link); + cache_bin_array_descriptor_init( + &tcache->cache_bin_array_descriptor, + tcache->bins_small, tcache->bins_large); + ql_tail_insert(&arena->cache_bin_array_descriptor_ql, + &tcache->cache_bin_array_descriptor, link); + } + } + + for (i = 0; i < NBINS; i++) { + bin_postfork_child(tsdn, &arena->bins[i]); + } + malloc_mutex_postfork_child(tsdn, &arena->large_mtx); + base_postfork_child(tsdn, arena->base); + malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx); + extents_postfork_child(tsdn, &arena->extents_dirty); + extents_postfork_child(tsdn, &arena->extents_muzzy); + extents_postfork_child(tsdn, &arena->extents_retained); + malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx); + malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx); + if (config_stats) { + malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx); + } +} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/atomic.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/atomic.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/atomic.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/atomic.c diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/background_thread.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/background_thread.c new file mode 100644 index 0000000..3517a3b --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/background_thread.c @@ -0,0 +1,909 @@ +#define JEMALLOC_BACKGROUND_THREAD_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" + +/******************************************************************************/ +/* Data. */ + +/* This option should be opt-in only. */ +#define BACKGROUND_THREAD_DEFAULT false +/* Read-only after initialization. */ +bool opt_background_thread = BACKGROUND_THREAD_DEFAULT; +size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT; + +/* Used for thread creation, termination and stats. */ +malloc_mutex_t background_thread_lock; +/* Indicates global state. Atomic because decay reads this w/o locking. 
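A sketch of the pattern behind the flag declared just below: mutations happen under background_thread_lock, but hot decay paths get away with a relaxed atomic load, which is why the state must be atomic at all:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool bg_enabled_sketch;

static inline bool
bg_enabled(void) { /* lock-free readers, e.g. decay ticks */
	return atomic_load_explicit(&bg_enabled_sketch,
	    memory_order_relaxed);
}

static inline void
bg_enabled_set(bool enabled) { /* writers hold the global lock */
	atomic_store_explicit(&bg_enabled_sketch, enabled,
	    memory_order_relaxed);
}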
*/ +atomic_b_t background_thread_enabled_state; +size_t n_background_threads; +size_t max_background_threads; +/* Thread info per-index. */ +background_thread_info_t *background_thread_info; + +/* False if no necessary runtime support. */ +bool can_enable_background_thread; + +/******************************************************************************/ + +#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER +#include <dlfcn.h> + +static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, + void *(*)(void *), void *__restrict); + +static void +pthread_create_wrapper_init(void) { +#ifdef JEMALLOC_LAZY_LOCK + if (!isthreaded) { + isthreaded = true; + } +#endif +} + +int +pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr, + void *(*start_routine)(void *), void *__restrict arg) { + pthread_create_wrapper_init(); + + return pthread_create_fptr(thread, attr, start_routine, arg); +} +#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */ + +#ifndef JEMALLOC_BACKGROUND_THREAD +#define NOT_REACHED { not_reached(); } +bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED +bool background_threads_enable(tsd_t *tsd) NOT_REACHED +bool background_threads_disable(tsd_t *tsd) NOT_REACHED +void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, size_t npages_new) NOT_REACHED +void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED +void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED +void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED +void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED +bool background_thread_stats_read(tsdn_t *tsdn, + background_thread_stats_t *stats) NOT_REACHED +void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED +#undef NOT_REACHED +#else + +static bool background_thread_enabled_at_fork; + +static void +background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) { + background_thread_wakeup_time_set(tsdn, info, 0); + info->npages_to_purge_new = 0; + if (config_stats) { + info->tot_n_runs = 0; + nstime_init(&info->tot_sleep_time, 0); + } +} + +static inline bool +set_current_thread_affinity(UNUSED int cpu) { +#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset); + + return (ret != 0); +#else + return false; +#endif +} + +/* Threshold for determining when to wake up the background thread. */ +#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024) +#define BILLION UINT64_C(1000000000) +/* Minimal sleep interval 100 ms. */ +#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10) + +static inline size_t +decay_npurge_after_interval(arena_decay_t *decay, size_t interval) { + size_t i; + uint64_t sum = 0; + for (i = 0; i < interval; i++) { + sum += decay->backlog[i] * h_steps[i]; + } + for (; i < SMOOTHSTEP_NSTEPS; i++) { + sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]); + } + + return (size_t)(sum >> SMOOTHSTEP_BFP); +} + +static uint64_t +arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay, + extents_t *extents) { + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { + /* Use minimal interval if decay is contended. */ + return BACKGROUND_THREAD_MIN_INTERVAL_NS; + } + + uint64_t interval; + ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); + if (decay_time <= 0) { + /* Purging is eagerly done or disabled currently.
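decay_npurge_after_interval() above weighs each backlog slot by how much further along the decay curve it will be after `interval` more epochs. The h_steps[] table it indexes is precomputed at build time; assuming jemalloc's default "smoother" variant, the generating function is roughly:

#include <stdint.h>

#define SMOOTHSTEP_NSTEPS 200 /* assumed */
#define SMOOTHSTEP_BFP 24     /* assumed fixed-point precision */

/* Runtime restatement of the assumed table entries: the quintic
 * smootherstep polynomial at (i + 1) / NSTEPS, scaled to 2^BFP fixed
 * point. Illustration only; the real table is generated ahead of time. */
static uint64_t
h_step(unsigned i) {
	double x = (double)(i + 1) / SMOOTHSTEP_NSTEPS;
	double s = x * x * x * (x * (x * 6.0 - 15.0) + 10.0);
	return (uint64_t)(s * (double)(UINT64_C(1) << SMOOTHSTEP_BFP));
}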
*/ + interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; + goto label_done; + } + + uint64_t decay_interval_ns = nstime_ns(&decay->interval); + assert(decay_interval_ns > 0); + size_t npages = extents_npages_get(extents); + if (npages == 0) { + unsigned i; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + if (decay->backlog[i] > 0) { + break; + } + } + if (i == SMOOTHSTEP_NSTEPS) { + /* No dirty pages recorded. Sleep indefinitely. */ + interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; + goto label_done; + } + } + if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) { + /* Use max interval. */ + interval = decay_interval_ns * SMOOTHSTEP_NSTEPS; + goto label_done; + } + + size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns; + size_t ub = SMOOTHSTEP_NSTEPS; + /* Minimal 2 intervals to ensure reaching next epoch deadline. */ + lb = (lb < 2) ? 2 : lb; + if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) || + (lb + 2 > ub)) { + interval = BACKGROUND_THREAD_MIN_INTERVAL_NS; + goto label_done; + } + + assert(lb + 2 <= ub); + size_t npurge_lb, npurge_ub; + npurge_lb = decay_npurge_after_interval(decay, lb); + if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) { + interval = decay_interval_ns * lb; + goto label_done; + } + npurge_ub = decay_npurge_after_interval(decay, ub); + if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) { + interval = decay_interval_ns * ub; + goto label_done; + } + + unsigned n_search = 0; + size_t target, npurge; + while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub) + && (lb + 2 < ub)) { + target = (lb + ub) / 2; + npurge = decay_npurge_after_interval(decay, target); + if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) { + ub = target; + npurge_ub = npurge; + } else { + lb = target; + npurge_lb = npurge; + } + assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1); + } + interval = decay_interval_ns * (ub + lb) / 2; +label_done: + interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ? + BACKGROUND_THREAD_MIN_INTERVAL_NS : interval; + malloc_mutex_unlock(tsdn, &decay->mtx); + + return interval; +} + +/* Compute purge interval for background threads. */ +static uint64_t +arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) { + uint64_t i1, i2; + i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty, + &arena->extents_dirty); + if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) { + return i1; + } + i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy, + &arena->extents_muzzy); + + return i1 < i2 ? i1 : i2; +} + +static void +background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info, + uint64_t interval) { + if (config_stats) { + info->tot_n_runs++; + } + info->npages_to_purge_new = 0; + + struct timeval tv; + /* Specific clock required by timedwait. */ + gettimeofday(&tv, NULL); + nstime_t before_sleep; + nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000); + + int ret; + if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) { + assert(background_thread_indefinite_sleep(info)); + ret = pthread_cond_wait(&info->cond, &info->mtx.lock); + assert(ret == 0); + } else { + assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS && + interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP); + /* We need malloc clock (can be different from tv). 
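background_thread_sleep() above mixes two clocks: the allocator's internal monotonic time for bookkeeping, and gettimeofday() because pthread_cond_timedwait() needs an absolute timespec on the clock the condvar actually waits on. The deadline computation, extracted here with the carry handled explicitly:

#include <stdint.h>
#include <sys/time.h>
#include <time.h>

/* Absolute CLOCK_REALTIME deadline interval_ns from now, suitable for
 * pthread_cond_timedwait(). */
static struct timespec
deadline_after_ns(uint64_t interval_ns) {
	struct timeval tv;
	gettimeofday(&tv, NULL);
	uint64_t ns = (uint64_t)tv.tv_usec * 1000 + interval_ns;
	struct timespec ts;
	ts.tv_sec = tv.tv_sec + (time_t)(ns / 1000000000);
	ts.tv_nsec = (long)(ns % 1000000000);
	return ts;
}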
*/ + nstime_t next_wakeup; + nstime_init(&next_wakeup, 0); + nstime_update(&next_wakeup); + nstime_iadd(&next_wakeup, interval); + assert(nstime_ns(&next_wakeup) < + BACKGROUND_THREAD_INDEFINITE_SLEEP); + background_thread_wakeup_time_set(tsdn, info, + nstime_ns(&next_wakeup)); + + nstime_t ts_wakeup; + nstime_copy(&ts_wakeup, &before_sleep); + nstime_iadd(&ts_wakeup, interval); + struct timespec ts; + ts.tv_sec = (size_t)nstime_sec(&ts_wakeup); + ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup); + + assert(!background_thread_indefinite_sleep(info)); + ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts); + assert(ret == ETIMEDOUT || ret == 0); + background_thread_wakeup_time_set(tsdn, info, + BACKGROUND_THREAD_INDEFINITE_SLEEP); + } + if (config_stats) { + gettimeofday(&tv, NULL); + nstime_t after_sleep; + nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000); + if (nstime_compare(&after_sleep, &before_sleep) > 0) { + nstime_subtract(&after_sleep, &before_sleep); + nstime_add(&info->tot_sleep_time, &after_sleep); + } + } +} + +static bool +background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) { + if (unlikely(info->state == background_thread_paused)) { + malloc_mutex_unlock(tsdn, &info->mtx); + /* Wait on global lock to update status. */ + malloc_mutex_lock(tsdn, &background_thread_lock); + malloc_mutex_unlock(tsdn, &background_thread_lock); + malloc_mutex_lock(tsdn, &info->mtx); + return true; + } + + return false; +} + +static inline void +background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) { + uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; + unsigned narenas = narenas_total_get(); + + for (unsigned i = ind; i < narenas; i += max_background_threads) { + arena_t *arena = arena_get(tsdn, i, false); + if (!arena) { + continue; + } + arena_decay(tsdn, arena, true, false); + if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) { + /* Min interval will be used. */ + continue; + } + uint64_t interval = arena_decay_compute_purge_interval(tsdn, + arena); + assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS); + if (min_interval > interval) { + min_interval = interval; + } + } + background_thread_sleep(tsdn, info, min_interval); +} + +static bool +background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) { + if (info == &background_thread_info[0]) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), + &background_thread_lock); + } else { + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), + &background_thread_lock); + } + + pre_reentrancy(tsd, NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + bool has_thread; + assert(info->state != background_thread_paused); + if (info->state == background_thread_started) { + has_thread = true; + info->state = background_thread_stopped; + pthread_cond_signal(&info->cond); + } else { + has_thread = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + + if (!has_thread) { + post_reentrancy(tsd); + return false; + } + void *ret; + if (pthread_join(info->thread, &ret)) { + post_reentrancy(tsd); + return true; + } + assert(ret == NULL); + n_background_threads--; + post_reentrancy(tsd); + + return false; +} + +static void *background_thread_entry(void *ind_arg); + +static int +background_thread_create_signals_masked(pthread_t *thread, + const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) { + /* + * Mask signals during thread creation so that the thread inherits + * an empty signal set. 
+ */ + sigset_t set; + sigfillset(&set); + sigset_t oldset; + int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset); + if (mask_err != 0) { + return mask_err; + } + int create_err = pthread_create_wrapper(thread, attr, start_routine, + arg); + /* + * Restore the signal mask. Failure to restore the signal mask here + * changes program behavior. + */ + int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL); + if (restore_err != 0) { + malloc_printf("<jemalloc>: background thread creation " + "failed (%d), and signal mask restoration failed " + "(%d)\n", create_err, restore_err); + if (opt_abort) { + abort(); + } + } + return create_err; +} + +static bool +check_background_thread_creation(tsd_t *tsd, unsigned *n_created, + bool *created_threads) { + bool ret = false; + if (likely(*n_created == n_background_threads)) { + return ret; + } + + tsdn_t *tsdn = tsd_tsdn(tsd); + malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx); + for (unsigned i = 1; i < max_background_threads; i++) { + if (created_threads[i]) { + continue; + } + background_thread_info_t *info = &background_thread_info[i]; + malloc_mutex_lock(tsdn, &info->mtx); + /* + * In case of the background_thread_paused state because of + * arena reset, delay the creation. + */ + bool create = (info->state == background_thread_started); + malloc_mutex_unlock(tsdn, &info->mtx); + if (!create) { + continue; + } + + pre_reentrancy(tsd, NULL); + int err = background_thread_create_signals_masked(&info->thread, + NULL, background_thread_entry, (void *)(uintptr_t)i); + post_reentrancy(tsd); + + if (err == 0) { + (*n_created)++; + created_threads[i] = true; + } else { + malloc_printf("<jemalloc>: background thread " + "creation failed (%d)\n", err); + if (opt_abort) { + abort(); + } + } + /* Return to restart the loop since we unlocked. */ + ret = true; + break; + } + malloc_mutex_lock(tsdn, &background_thread_info[0].mtx); + + return ret; +} + +static void +background_thread0_work(tsd_t *tsd) { + /* Thread0 is also responsible for launching / terminating threads. */ + VARIABLE_ARRAY(bool, created_threads, max_background_threads); + unsigned i; + for (i = 1; i < max_background_threads; i++) { + created_threads[i] = false; + } + /* Start working, and create more threads when asked. */ + unsigned n_created = 1; + while (background_thread_info[0].state != background_thread_stopped) { + if (background_thread_pause_check(tsd_tsdn(tsd), + &background_thread_info[0])) { + continue; + } + if (check_background_thread_creation(tsd, &n_created, + (bool *)&created_threads)) { + continue; + } + background_work_sleep_once(tsd_tsdn(tsd), + &background_thread_info[0], 0); + } + + /* + * Shut down other threads at exit. Note that the ctl thread is holding + * the global background_thread mutex (and is waiting) for us. + */ + assert(!background_thread_enabled()); + for (i = 1; i < max_background_threads; i++) { + background_thread_info_t *info = &background_thread_info[i]; + assert(info->state != background_thread_paused); + if (created_threads[i]) { + background_threads_disable_single(tsd, info); + } else { + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + if (info->state != background_thread_stopped) { + /* The thread was not created.
*/ + assert(info->state == + background_thread_started); + n_background_threads--; + info->state = background_thread_stopped; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + } + background_thread_info[0].state = background_thread_stopped; + assert(n_background_threads == 1); +} + +static void +background_work(tsd_t *tsd, unsigned ind) { + background_thread_info_t *info = &background_thread_info[ind]; + + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + background_thread_wakeup_time_set(tsd_tsdn(tsd), info, + BACKGROUND_THREAD_INDEFINITE_SLEEP); + if (ind == 0) { + background_thread0_work(tsd); + } else { + while (info->state != background_thread_stopped) { + if (background_thread_pause_check(tsd_tsdn(tsd), + info)) { + continue; + } + background_work_sleep_once(tsd_tsdn(tsd), info, ind); + } + } + assert(info->state == background_thread_stopped); + background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0); + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); +} + +static void * +background_thread_entry(void *ind_arg) { + unsigned thread_ind = (unsigned)(uintptr_t)ind_arg; + assert(thread_ind < max_background_threads); +#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP + pthread_setname_np(pthread_self(), "jemalloc_bg_thd"); +#endif + if (opt_percpu_arena != percpu_arena_disabled) { + set_current_thread_affinity((int)thread_ind); + } + /* + * Start periodic background work. We use internal tsd which avoids + * side effects, for example triggering new arena creation (which in + * turn triggers another background thread creation). + */ + background_work(tsd_internal_fetch(), thread_ind); + assert(pthread_equal(pthread_self(), + background_thread_info[thread_ind].thread)); + + return NULL; +} + +static void +background_thread_init(tsd_t *tsd, background_thread_info_t *info) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + info->state = background_thread_started; + background_thread_info_init(tsd_tsdn(tsd), info); + n_background_threads++; +} + +/* Create a new background thread if needed. */ +bool +background_thread_create(tsd_t *tsd, unsigned arena_ind) { + assert(have_background_thread); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + + /* We create at most NCPUs threads. */ + size_t thread_ind = arena_ind % max_background_threads; + background_thread_info_t *info = &background_thread_info[thread_ind]; + + bool need_new_thread; + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + need_new_thread = background_thread_enabled() && + (info->state == background_thread_stopped); + if (need_new_thread) { + background_thread_init(tsd, info); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + if (!need_new_thread) { + return false; + } + if (arena_ind != 0) { + /* Threads are created asynchronously by Thread 0. */ + background_thread_info_t *t0 = &background_thread_info[0]; + malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx); + assert(t0->state == background_thread_started); + pthread_cond_signal(&t0->cond); + malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx); + + return false; + } + + pre_reentrancy(tsd, NULL); + /* + * To avoid complications (besides reentrancy), create internal + * background threads with the underlying pthread_create. 
+ */ + int err = background_thread_create_signals_masked(&info->thread, NULL, + background_thread_entry, (void *)thread_ind); + post_reentrancy(tsd); + + if (err != 0) { + malloc_printf("<jemalloc>: arena 0 background thread creation " + "failed (%d)\n", err); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + info->state = background_thread_stopped; + n_background_threads--; + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + + return true; + } + + return false; +} + +bool +background_threads_enable(tsd_t *tsd) { + assert(n_background_threads == 0); + assert(background_thread_enabled()); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + + VARIABLE_ARRAY(bool, marked, max_background_threads); + unsigned i, nmarked; + for (i = 0; i < max_background_threads; i++) { + marked[i] = false; + } + nmarked = 0; + /* Thread 0 is required and created at the end. */ + marked[0] = true; + /* Mark the threads we need to create for thread 0. */ + unsigned n = narenas_total_get(); + for (i = 1; i < n; i++) { + if (marked[i % max_background_threads] || + arena_get(tsd_tsdn(tsd), i, false) == NULL) { + continue; + } + background_thread_info_t *info = &background_thread_info[ + i % max_background_threads]; + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + assert(info->state == background_thread_stopped); + background_thread_init(tsd, info); + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + marked[i % max_background_threads] = true; + if (++nmarked == max_background_threads) { + break; + } + } + + return background_thread_create(tsd, 0); +} + +bool +background_threads_disable(tsd_t *tsd) { + assert(!background_thread_enabled()); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + + /* Thread 0 will be responsible for terminating other threads. */ + if (background_threads_disable_single(tsd, + &background_thread_info[0])) { + return true; + } + assert(n_background_threads == 0); + + return false; +} + +/* Check if we need to signal the background thread early. */ +void +background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, size_t npages_new) { + background_thread_info_t *info = arena_background_thread_info_get( + arena); + if (malloc_mutex_trylock(tsdn, &info->mtx)) { + /* + * Background thread may hold the mutex for a long period of + * time. We'd like to avoid the variance on application + * threads. So keep this non-blocking, and leave the work to a + * future epoch. + */ + return; + } + + if (info->state != background_thread_started) { + goto label_done; + } + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { + goto label_done; + } + + ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); + if (decay_time <= 0) { + /* Purging is eagerly done or disabled currently. */ + goto label_done_unlock2; + } + uint64_t decay_interval_ns = nstime_ns(&decay->interval); + assert(decay_interval_ns > 0); + + nstime_t diff; + nstime_init(&diff, background_thread_wakeup_time_get(info)); + if (nstime_compare(&diff, &decay->epoch) <= 0) { + goto label_done_unlock2; + } + nstime_subtract(&diff, &decay->epoch); + if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) { + goto label_done_unlock2; + } + + if (npages_new > 0) { + size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns); + /* + * Compute how many new pages we would need to purge by the next + * wakeup, which is used to determine if we should signal the + * background thread.
+ */ + uint64_t npurge_new; + if (n_epoch >= SMOOTHSTEP_NSTEPS) { + npurge_new = npages_new; + } else { + uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1]; + assert(h_steps_max >= + h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); + npurge_new = npages_new * (h_steps_max - + h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); + npurge_new >>= SMOOTHSTEP_BFP; + } + info->npages_to_purge_new += npurge_new; + } + + bool should_signal; + if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) { + should_signal = true; + } else if (unlikely(background_thread_indefinite_sleep(info)) && + (extents_npages_get(&arena->extents_dirty) > 0 || + extents_npages_get(&arena->extents_muzzy) > 0 || + info->npages_to_purge_new > 0)) { + should_signal = true; + } else { + should_signal = false; + } + + if (should_signal) { + info->npages_to_purge_new = 0; + pthread_cond_signal(&info->cond); + } +label_done_unlock2: + malloc_mutex_unlock(tsdn, &decay->mtx); +label_done: + malloc_mutex_unlock(tsdn, &info->mtx); +} + +void +background_thread_prefork0(tsdn_t *tsdn) { + malloc_mutex_prefork(tsdn, &background_thread_lock); + background_thread_enabled_at_fork = background_thread_enabled(); +} + +void +background_thread_prefork1(tsdn_t *tsdn) { + for (unsigned i = 0; i < max_background_threads; i++) { + malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx); + } +} + +void +background_thread_postfork_parent(tsdn_t *tsdn) { + for (unsigned i = 0; i < max_background_threads; i++) { + malloc_mutex_postfork_parent(tsdn, + &background_thread_info[i].mtx); + } + malloc_mutex_postfork_parent(tsdn, &background_thread_lock); +} + +void +background_thread_postfork_child(tsdn_t *tsdn) { + for (unsigned i = 0; i < max_background_threads; i++) { + malloc_mutex_postfork_child(tsdn, + &background_thread_info[i].mtx); + } + malloc_mutex_postfork_child(tsdn, &background_thread_lock); + if (!background_thread_enabled_at_fork) { + return; + } + + /* Clear background_thread state (reset to disabled for child). 
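/*
 * The prefork/postfork trio above exists so that fork() never duplicates a
 * held mutex into the child.  The generic shape of that protocol, wired up
 * with pthread_atfork(); lock_a/lock_b are hypothetical stand-ins for
 * background_thread_lock and the per-thread info mutexes.
 */
#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Acquire every lock, always in one global order (a before b). */
static void
prefork(void) {
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
}

/* Both processes release in reverse order; a child would additionally
 * reset any state that referred to threads that no longer exist. */
static void
postfork_parent(void) {
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

static void
postfork_child(void) {
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

void
install_fork_handlers(void) {
	pthread_atfork(prefork, postfork_parent, postfork_child);
}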
*/ + malloc_mutex_lock(tsdn, &background_thread_lock); + n_background_threads = 0; + background_thread_enabled_set(tsdn, false); + for (unsigned i = 0; i < max_background_threads; i++) { + background_thread_info_t *info = &background_thread_info[i]; + malloc_mutex_lock(tsdn, &info->mtx); + info->state = background_thread_stopped; + int ret = pthread_cond_init(&info->cond, NULL); + assert(ret == 0); + background_thread_info_init(tsdn, info); + malloc_mutex_unlock(tsdn, &info->mtx); + } + malloc_mutex_unlock(tsdn, &background_thread_lock); +} + +bool +background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) { + assert(config_stats); + malloc_mutex_lock(tsdn, &background_thread_lock); + if (!background_thread_enabled()) { + malloc_mutex_unlock(tsdn, &background_thread_lock); + return true; + } + + stats->num_threads = n_background_threads; + uint64_t num_runs = 0; + nstime_init(&stats->run_interval, 0); + for (unsigned i = 0; i < max_background_threads; i++) { + background_thread_info_t *info = &background_thread_info[i]; + malloc_mutex_lock(tsdn, &info->mtx); + if (info->state != background_thread_stopped) { + num_runs += info->tot_n_runs; + nstime_add(&stats->run_interval, &info->tot_sleep_time); + } + malloc_mutex_unlock(tsdn, &info->mtx); + } + stats->num_runs = num_runs; + if (num_runs > 0) { + nstime_idivide(&stats->run_interval, num_runs); + } + malloc_mutex_unlock(tsdn, &background_thread_lock); + + return false; +} + +#undef BACKGROUND_THREAD_NPAGES_THRESHOLD +#undef BILLION +#undef BACKGROUND_THREAD_MIN_INTERVAL_NS + +static bool +pthread_create_fptr_init(void) { + if (pthread_create_fptr != NULL) { + return false; + } + pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); + if (pthread_create_fptr == NULL) { + can_enable_background_thread = false; + if (config_lazy_lock || opt_background_thread) { + malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, " + "\"pthread_create\")\n"); + abort(); + } + } else { + can_enable_background_thread = true; + } + + return false; +} + +/* + * When lazy lock is enabled, we need to make sure isthreaded is set before + * taking any background_thread locks. This is called early in ctl (instead of + * waiting for the pthread_create calls to trigger) because the mutex is + * required before creating background threads.
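/*
 * pthread_create_fptr_init() above resolves the next definition of
 * pthread_create so that a wrapper can forward to the real function.  A
 * stripped-down sketch of that dlsym(RTLD_NEXT) interposition idiom;
 * my_pthread_create is a hypothetical wrapper name.  _GNU_SOURCE is needed
 * for RTLD_NEXT, and older glibc additionally needs -ldl at link time.
 */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <pthread.h>
#include <stdio.h>

typedef int (*pthread_create_t)(pthread_t *, const pthread_attr_t *,
    void *(*)(void *), void *);
static pthread_create_t real_pthread_create;

int
my_pthread_create(pthread_t *thd, const pthread_attr_t *attr,
    void *(*fn)(void *), void *arg) {
	if (real_pthread_create == NULL) {
		real_pthread_create =
		    (pthread_create_t)dlsym(RTLD_NEXT, "pthread_create");
		if (real_pthread_create == NULL) {
			fputs("dlsym(RTLD_NEXT, \"pthread_create\") failed\n",
			    stderr);
			return -1;
		}
	}
	/* Bookkeeping (e.g. flipping an isthreaded flag) would go here. */
	return real_pthread_create(thd, attr, fn, arg);
}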
+ */ +void +background_thread_ctl_init(tsdn_t *tsdn) { + malloc_mutex_assert_not_owner(tsdn, &background_thread_lock); +#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER + pthread_create_fptr_init(); + pthread_create_wrapper_init(); +#endif +} + +#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */ + +bool +background_thread_boot0(void) { + if (!have_background_thread && opt_background_thread) { + malloc_printf("<jemalloc>: option background_thread currently " + "supports pthread only\n"); + return true; + } +#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER + if ((config_lazy_lock || opt_background_thread) && + pthread_create_fptr_init()) { + return true; + } +#endif + return false; +} + +bool +background_thread_boot1(tsdn_t *tsdn) { +#ifdef JEMALLOC_BACKGROUND_THREAD + assert(have_background_thread); + assert(narenas_total_get() > 0); + + if (opt_max_background_threads == MAX_BACKGROUND_THREAD_LIMIT && + ncpus < MAX_BACKGROUND_THREAD_LIMIT) { + opt_max_background_threads = ncpus; + } + max_background_threads = opt_max_background_threads; + + background_thread_enabled_set(tsdn, opt_background_thread); + if (malloc_mutex_init(&background_thread_lock, + "background_thread_global", + WITNESS_RANK_BACKGROUND_THREAD_GLOBAL, + malloc_mutex_rank_exclusive)) { + return true; + } + + background_thread_info = (background_thread_info_t *)base_alloc(tsdn, + b0get(), opt_max_background_threads * + sizeof(background_thread_info_t), CACHELINE); + if (background_thread_info == NULL) { + return true; + } + + for (unsigned i = 0; i < max_background_threads; i++) { + background_thread_info_t *info = &background_thread_info[i]; + /* Thread mutex is rank_inclusive because of thread0. */ + if (malloc_mutex_init(&info->mtx, "background_thread", + WITNESS_RANK_BACKGROUND_THREAD, + malloc_mutex_address_ordered)) { + return true; + } + if (pthread_cond_init(&info->cond, NULL)) { + return true; + } + malloc_mutex_lock(tsdn, &info->mtx); + info->state = background_thread_stopped; + background_thread_info_init(tsdn, info); + malloc_mutex_unlock(tsdn, &info->mtx); + } +#endif + + return false; +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/base.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/base.c new file mode 100644 index 0000000..b0324b5 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/base.c @@ -0,0 +1,514 @@ +#define JEMALLOC_BASE_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/sz.h" + +/******************************************************************************/ +/* Data. */ + +static base_t *b0; + +metadata_thp_mode_t opt_metadata_thp = METADATA_THP_DEFAULT; + +const char *metadata_thp_mode_names[] = { + "disabled", + "auto", + "always" +}; + +/******************************************************************************/ + +static inline bool +metadata_thp_madvise(void) { + return (metadata_thp_enabled() && + (init_system_thp_mode == thp_mode_default)); +} + +static void * +base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) { + void *addr; + bool zero = true; + bool commit = true; + + /* Use huge page sizes and alignment regardless of opt_metadata_thp.
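/*
 * HUGEPAGE_CEILING() rounds a size up to a multiple of the huge page size,
 * which must be a power of two for the mask trick to work.  A
 * self-contained check of that rounding, assuming a hypothetical 2 MiB
 * huge page (the real value is configure-detected):
 */
#include <assert.h>
#include <stddef.h>

#define HP ((size_t)2 << 20)			/* assumed 2 MiB */
#define HP_MASK (HP - 1)
/* Round up: add the mask, then clear the low bits. */
#define HP_CEILING(s) (((s) + HP_MASK) & ~HP_MASK)

int
main(void) {
	assert(HP_CEILING(1) == HP);
	assert(HP_CEILING(HP) == HP);		/* already aligned: unchanged */
	assert(HP_CEILING(HP + 1) == 2 * HP);
	return 0;
}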
*/ + assert(size == HUGEPAGE_CEILING(size)); + size_t alignment = HUGEPAGE; + if (extent_hooks == &extent_hooks_default) { + addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit); + } else { + /* No arena context as we are creating new arenas. */ + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + pre_reentrancy(tsd, NULL); + addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment, + &zero, &commit, ind); + post_reentrancy(tsd); + } + + return addr; +} + +static void +base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr, + size_t size) { + /* + * Cascade through dalloc, decommit, purge_forced, and purge_lazy, + * stopping at first success. This cascade is performed for consistency + * with the cascade in extent_dalloc_wrapper() because an application's + * custom hooks may not support e.g. dalloc. This function is only ever + * called as a side effect of arena destruction, so although it might + * seem pointless to do anything besides dalloc here, the application + * may in fact want the end state of all associated virtual memory to be + * in some consistent-but-allocated state. + */ + if (extent_hooks == &extent_hooks_default) { + if (!extent_dalloc_mmap(addr, size)) { + goto label_done; + } + if (!pages_decommit(addr, size)) { + goto label_done; + } + if (!pages_purge_forced(addr, size)) { + goto label_done; + } + if (!pages_purge_lazy(addr, size)) { + goto label_done; + } + /* Nothing worked. This should never happen. */ + not_reached(); + } else { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + pre_reentrancy(tsd, NULL); + if (extent_hooks->dalloc != NULL && + !extent_hooks->dalloc(extent_hooks, addr, size, true, + ind)) { + goto label_post_reentrancy; + } + if (extent_hooks->decommit != NULL && + !extent_hooks->decommit(extent_hooks, addr, size, 0, size, + ind)) { + goto label_post_reentrancy; + } + if (extent_hooks->purge_forced != NULL && + !extent_hooks->purge_forced(extent_hooks, addr, size, 0, + size, ind)) { + goto label_post_reentrancy; + } + if (extent_hooks->purge_lazy != NULL && + !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size, + ind)) { + goto label_post_reentrancy; + } + /* Nothing worked. That's the application's problem. */ + label_post_reentrancy: + post_reentrancy(tsd); + } +label_done: + if (metadata_thp_madvise()) { + /* Set NOHUGEPAGE after unmap to avoid kernel defrag. */ + assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 && + (size & HUGEPAGE_MASK) == 0); + pages_nohuge(addr, size); + } +} + +static void +base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr, + size_t size) { + size_t sn; + + sn = *extent_sn_next; + (*extent_sn_next)++; + + extent_binit(extent, addr, size, sn); +} + +static size_t +base_get_num_blocks(base_t *base, bool with_new_block) { + base_block_t *b = base->blocks; + assert(b != NULL); + + size_t n_blocks = with_new_block ? 2 : 1; + while (b->next != NULL) { + n_blocks++; + b = b->next; + } + + return n_blocks; +} + +static void +base_auto_thp_switch(tsdn_t *tsdn, base_t *base) { + assert(opt_metadata_thp == metadata_thp_auto); + malloc_mutex_assert_owner(tsdn, &base->mtx); + if (base->auto_thp_switched) { + return; + } + /* Called when adding a new block. 
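/*
 * base_unmap() above walks progressively weaker ways of giving memory back
 * (dalloc, decommit, forced purge, lazy purge) and stops at the first
 * success.  Reduced to its control flow it is a first-success cascade, as
 * in this sketch; the try_* helpers are hypothetical stand-ins for the
 * extent hooks and return false on success, matching the hook convention.
 */
#include <stdbool.h>
#include <stddef.h>

static bool try_dalloc(void *a, size_t s) { (void)a; (void)s; return true; }
static bool try_decommit(void *a, size_t s) { (void)a; (void)s; return true; }
static bool try_purge_forced(void *a, size_t s) { (void)a; (void)s; return true; }
static bool try_purge_lazy(void *a, size_t s) { (void)a; (void)s; return false; }

static void
release_cascade(void *addr, size_t size) {
	if (!try_dalloc(addr, size)) {
		return;
	}
	if (!try_decommit(addr, size)) {
		return;
	}
	if (!try_purge_forced(addr, size)) {
		return;
	}
	if (!try_purge_lazy(addr, size)) {
		return;
	}
	/* Nothing worked; with the default hooks this is unreachable. */
}

int
main(void) {
	release_cascade((void *)0, 4096);	/* stubbed demo */
	return 0;
}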
*/ + bool should_switch; + if (base_ind_get(base) != 0) { + should_switch = (base_get_num_blocks(base, true) == + BASE_AUTO_THP_THRESHOLD); + } else { + should_switch = (base_get_num_blocks(base, true) == + BASE_AUTO_THP_THRESHOLD_A0); + } + if (!should_switch) { + return; + } + + base->auto_thp_switched = true; + assert(!config_stats || base->n_thp == 0); + /* Make the initial blocks THP lazily. */ + base_block_t *block = base->blocks; + while (block != NULL) { + assert((block->size & HUGEPAGE_MASK) == 0); + pages_huge(block, block->size); + if (config_stats) { + base->n_thp += HUGEPAGE_CEILING(block->size - + extent_bsize_get(&block->extent)) >> LG_HUGEPAGE; + } + block = block->next; + assert(block == NULL || (base_ind_get(base) == 0)); + } +} + +static void * +base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size, + size_t alignment) { + void *ret; + + assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM)); + assert(size == ALIGNMENT_CEILING(size, alignment)); + + *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent), + alignment) - (uintptr_t)extent_addr_get(extent); + ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size); + assert(extent_bsize_get(extent) >= *gap_size + size); + extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) + + *gap_size + size), extent_bsize_get(extent) - *gap_size - size, + extent_sn_get(extent)); + return ret; +} + +static void +base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size, + void *addr, size_t size) { + if (extent_bsize_get(extent) > 0) { + /* + * Compute the index for the largest size class that does not + * exceed extent's size. + */ + szind_t index_floor = + sz_size2index(extent_bsize_get(extent) + 1) - 1; + extent_heap_insert(&base->avail[index_floor], extent); + } + + if (config_stats) { + base->allocated += size; + /* + * Add one PAGE to base_resident for every page boundary that is + * crossed by the new allocation. Adjust n_thp similarly when + * metadata_thp is enabled. + */ + base->resident += PAGE_CEILING((uintptr_t)addr + size) - + PAGE_CEILING((uintptr_t)addr - gap_size); + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + if (metadata_thp_madvise() && (opt_metadata_thp == + metadata_thp_always || base->auto_thp_switched)) { + base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size) + - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >> + LG_HUGEPAGE; + assert(base->mapped >= base->n_thp << LG_HUGEPAGE); + } + } +} + +static void * +base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size, + size_t alignment) { + void *ret; + size_t gap_size; + + ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment); + base_extent_bump_alloc_post(base, extent, gap_size, ret, size); + return ret; +} + +/* + * Allocate a block of virtual memory that is large enough to start with a + * base_block_t header, followed by an object of specified size and alignment. + * On success a pointer to the initialized base_block_t header is returned. 
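/*
 * base_extent_bump_alloc_helper() above is a bump allocator with an
 * alignment gap: round the cursor up, hand out the object, shrink what
 * remains.  The same arithmetic in a self-contained sketch (bump_t and
 * bump_alloc are hypothetical):
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
	uintptr_t cur;	/* first free byte */
	uintptr_t end;	/* one past the last usable byte */
} bump_t;

/* alignment must be a power of two. */
static void *
bump_alloc(bump_t *b, size_t size, size_t alignment) {
	uintptr_t mask = (uintptr_t)alignment - 1;
	uintptr_t aligned = (b->cur + mask) & ~mask;	/* round cursor up */
	if (aligned + size > b->end) {
		return NULL;
	}
	/* The gap (aligned - b->cur) is what jemalloc accounts separately. */
	b->cur = aligned + size;
	return (void *)aligned;
}

int
main(void) {
	unsigned char buf[256];
	bump_t b = { (uintptr_t)buf, (uintptr_t)buf + sizeof(buf) };
	void *p = bump_alloc(&b, 24, 16);
	assert(p != NULL && ((uintptr_t)p & 15) == 0);
	return 0;
}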
+ */ +static base_block_t * +base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks, + unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size, + size_t alignment) { + alignment = ALIGNMENT_CEILING(alignment, QUANTUM); + size_t usize = ALIGNMENT_CEILING(size, alignment); + size_t header_size = sizeof(base_block_t); + size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) - + header_size; + /* + * Create increasingly larger blocks in order to limit the total number + * of disjoint virtual memory ranges. Choose the next size in the page + * size class series (skipping size classes that are not a multiple of + * HUGEPAGE), or a size large enough to satisfy the requested size and + * alignment, whichever is larger. + */ + size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size + + usize)); + pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 : + *pind_last; + size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next)); + size_t block_size = (min_block_size > next_block_size) ? min_block_size + : next_block_size; + base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind, + block_size); + if (block == NULL) { + return NULL; + } + + if (metadata_thp_madvise()) { + void *addr = (void *)block; + assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 && + (block_size & HUGEPAGE_MASK) == 0); + if (opt_metadata_thp == metadata_thp_always) { + pages_huge(addr, block_size); + } else if (opt_metadata_thp == metadata_thp_auto && + base != NULL) { + /* base != NULL indicates this is not a new base. */ + malloc_mutex_lock(tsdn, &base->mtx); + base_auto_thp_switch(tsdn, base); + if (base->auto_thp_switched) { + pages_huge(addr, block_size); + } + malloc_mutex_unlock(tsdn, &base->mtx); + } + } + + *pind_last = sz_psz2ind(block_size); + block->size = block_size; + block->next = NULL; + assert(block_size >= header_size); + base_extent_init(extent_sn_next, &block->extent, + (void *)((uintptr_t)block + header_size), block_size - header_size); + return block; +} + +/* + * Allocate an extent that is at least as large as specified size, with + * specified alignment. + */ +static extent_t * +base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { + malloc_mutex_assert_owner(tsdn, &base->mtx); + + extent_hooks_t *extent_hooks = base_extent_hooks_get(base); + /* + * Drop mutex during base_block_alloc(), because an extent hook will be + * called. 
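/*
 * The sizing rule above is block = max(smallest huge-page multiple that
 * fits the request, next page-size class), so block sizes grow roughly
 * geometrically while oversized requests are still satisfied exactly.
 * Worked numbers, assuming hypothetical 2 MiB huge pages:
 */
#include <assert.h>
#include <stddef.h>

#define HP ((size_t)2 << 20)
#define HP_CEIL(s) (((s) + HP - 1) & ~(HP - 1))

static size_t
pick_block_size(size_t need, size_t next_class) {
	size_t min_block = HP_CEIL(need);
	size_t next_block = HP_CEIL(next_class);
	return (min_block > next_block) ? min_block : next_block;
}

int
main(void) {
	/* A 40 KiB request with a 4 MiB next class: growth wins. */
	assert(pick_block_size((size_t)40 << 10, 2 * HP) == 2 * HP);
	/* A 6 MiB request beats the 4 MiB class: fit wins. */
	assert(pick_block_size(3 * HP, 2 * HP) == 3 * HP);
	return 0;
}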
+ */ + malloc_mutex_unlock(tsdn, &base->mtx); + base_block_t *block = base_block_alloc(tsdn, base, extent_hooks, + base_ind_get(base), &base->pind_last, &base->extent_sn_next, size, + alignment); + malloc_mutex_lock(tsdn, &base->mtx); + if (block == NULL) { + return NULL; + } + block->next = base->blocks; + base->blocks = block; + if (config_stats) { + base->allocated += sizeof(base_block_t); + base->resident += PAGE_CEILING(sizeof(base_block_t)); + base->mapped += block->size; + if (metadata_thp_madvise() && + !(opt_metadata_thp == metadata_thp_auto + && !base->auto_thp_switched)) { + assert(base->n_thp > 0); + base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >> + LG_HUGEPAGE; + } + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + assert(base->n_thp << LG_HUGEPAGE <= base->mapped); + } + return &block->extent; +} + +base_t * +b0get(void) { + return b0; +} + +base_t * +base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + pszind_t pind_last = 0; + size_t extent_sn_next = 0; + base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind, + &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM); + if (block == NULL) { + return NULL; + } + + size_t gap_size; + size_t base_alignment = CACHELINE; + size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment); + base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent, + &gap_size, base_size, base_alignment); + base->ind = ind; + atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED); + if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE, + malloc_mutex_rank_exclusive)) { + base_unmap(tsdn, extent_hooks, ind, block, block->size); + return NULL; + } + base->pind_last = pind_last; + base->extent_sn_next = extent_sn_next; + base->blocks = block; + base->auto_thp_switched = false; + for (szind_t i = 0; i < NSIZES; i++) { + extent_heap_new(&base->avail[i]); + } + if (config_stats) { + base->allocated = sizeof(base_block_t); + base->resident = PAGE_CEILING(sizeof(base_block_t)); + base->mapped = block->size; + base->n_thp = (opt_metadata_thp == metadata_thp_always) && + metadata_thp_madvise() ? 
HUGEPAGE_CEILING(sizeof(base_block_t)) + >> LG_HUGEPAGE : 0; + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + assert(base->n_thp << LG_HUGEPAGE <= base->mapped); + } + base_extent_bump_alloc_post(base, &block->extent, gap_size, base, + base_size); + + return base; +} + +void +base_delete(tsdn_t *tsdn, base_t *base) { + extent_hooks_t *extent_hooks = base_extent_hooks_get(base); + base_block_t *next = base->blocks; + do { + base_block_t *block = next; + next = block->next; + base_unmap(tsdn, extent_hooks, base_ind_get(base), block, + block->size); + } while (next != NULL); +} + +extent_hooks_t * +base_extent_hooks_get(base_t *base) { + return (extent_hooks_t *)atomic_load_p(&base->extent_hooks, + ATOMIC_ACQUIRE); +} + +extent_hooks_t * +base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) { + extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base); + atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE); + return old_extent_hooks; +} + +static void * +base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment, + size_t *esn) { + alignment = QUANTUM_CEILING(alignment); + size_t usize = ALIGNMENT_CEILING(size, alignment); + size_t asize = usize + alignment - QUANTUM; + + extent_t *extent = NULL; + malloc_mutex_lock(tsdn, &base->mtx); + for (szind_t i = sz_size2index(asize); i < NSIZES; i++) { + extent = extent_heap_remove_first(&base->avail[i]); + if (extent != NULL) { + /* Use existing space. */ + break; + } + } + if (extent == NULL) { + /* Try to allocate more space. */ + extent = base_extent_alloc(tsdn, base, usize, alignment); + } + void *ret; + if (extent == NULL) { + ret = NULL; + goto label_return; + } + + ret = base_extent_bump_alloc(base, extent, usize, alignment); + if (esn != NULL) { + *esn = extent_sn_get(extent); + } +label_return: + malloc_mutex_unlock(tsdn, &base->mtx); + return ret; +} + +/* + * base_alloc() returns zeroed memory, which is always demand-zeroed for the + * auto arenas, in order to make multi-page sparse data structures such as radix + * tree nodes efficient with respect to physical memory usage. Upon success a + * pointer to at least size bytes with specified alignment is returned. Note + * that size is rounded up to the nearest multiple of alignment to avoid false + * sharing. 
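/*
 * base_alloc_impl() above scans avail[] from the smallest size class that
 * can hold the request upward, taking the first non-empty heap and only
 * mapping a new block when every class is empty.  With the heaps replaced
 * by flags, the search collapses to this sketch (NCLASSES and
 * class_nonempty are hypothetical):
 */
#include <stddef.h>

#define NCLASSES 8
static int class_nonempty[NCLASSES];	/* stand-in for the extent heaps */

/* First class index >= start with a free extent, or -1 for "map more". */
static int
first_fit_class(int start) {
	for (int i = start; i < NCLASSES; i++) {
		if (class_nonempty[i]) {
			return i;
		}
	}
	return -1;
}

int
main(void) {
	class_nonempty[5] = 1;
	return (first_fit_class(3) == 5) ? 0 : 1;
}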
+ */ +void * +base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { + return base_alloc_impl(tsdn, base, size, alignment, NULL); +} + +extent_t * +base_alloc_extent(tsdn_t *tsdn, base_t *base) { + size_t esn; + extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t), + CACHELINE, &esn); + if (extent == NULL) { + return NULL; + } + extent_esn_set(extent, esn); + return extent; +} + +void +base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident, + size_t *mapped, size_t *n_thp) { + cassert(config_stats); + + malloc_mutex_lock(tsdn, &base->mtx); + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + *allocated = base->allocated; + *resident = base->resident; + *mapped = base->mapped; + *n_thp = base->n_thp; + malloc_mutex_unlock(tsdn, &base->mtx); +} + +void +base_prefork(tsdn_t *tsdn, base_t *base) { + malloc_mutex_prefork(tsdn, &base->mtx); +} + +void +base_postfork_parent(tsdn_t *tsdn, base_t *base) { + malloc_mutex_postfork_parent(tsdn, &base->mtx); +} + +void +base_postfork_child(tsdn_t *tsdn, base_t *base) { + malloc_mutex_postfork_child(tsdn, &base->mtx); +} + +bool +base_boot(tsdn_t *tsdn) { + b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default); + return (b0 == NULL); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/bin.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/bin.c new file mode 100644 index 0000000..0886bc4 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/bin.c @@ -0,0 +1,50 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/bin.h" +#include "jemalloc/internal/witness.h" + +const bin_info_t bin_infos[NBINS] = { +#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \ + {reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)}, +#define BIN_INFO_bin_no(reg_size, slab_size, nregs) +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \ + lg_delta_lookup) \ + BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \ + (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \ + (ndelta<<lg_delta))) + SIZE_CLASSES +#undef BIN_INFO_bin_yes +#undef BIN_INFO_bin_no +#undef SC +}; + +bool +bin_init(bin_t *bin) { + if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN, + malloc_mutex_rank_exclusive)) { + return true; + } + bin->slabcur = NULL; + extent_heap_new(&bin->slabs_nonfull); + extent_list_init(&bin->slabs_full); + if (config_stats) { + memset(&bin->stats, 0, sizeof(bin_stats_t)); + } + return false; +} + +void +bin_prefork(tsdn_t *tsdn, bin_t *bin) { + malloc_mutex_prefork(tsdn, &bin->lock); +} + +void +bin_postfork_parent(tsdn_t *tsdn, bin_t *bin) { + malloc_mutex_postfork_parent(tsdn, &bin->lock); +} + +void +bin_postfork_child(tsdn_t *tsdn, bin_t *bin) { + malloc_mutex_postfork_child(tsdn, &bin->lock); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/bitmap.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/bitmap.c new file mode 100644 index 0000000..468b317 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/bitmap.c @@ -0,0 +1,121 @@ +#define JEMALLOC_BITMAP_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" + +/******************************************************************************/ + +#ifdef BITMAP_USE_TREE + +void +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { + unsigned i; + size_t group_count; + + assert(nbits > 0); + assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); + + /* + * Compute the number of groups necessary to store nbits bits, and + * progressively work upward through the levels
until reaching a level + * that requires only one group. + */ + binfo->levels[0].group_offset = 0; + group_count = BITMAP_BITS2GROUPS(nbits); + for (i = 1; group_count > 1; i++) { + assert(i < BITMAP_MAX_LEVELS); + binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + + group_count; + group_count = BITMAP_BITS2GROUPS(group_count); + } + binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + + group_count; + assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX); + binfo->nlevels = i; + binfo->nbits = nbits; +} + +static size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) { + return binfo->levels[binfo->nlevels].group_offset; +} + +void +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { + size_t extra; + unsigned i; + + /* + * Bits are actually inverted with regard to the external bitmap + * interface. + */ + + if (fill) { + /* The "filled" bitmap starts out with all 0 bits. */ + memset(bitmap, 0, bitmap_size(binfo)); + return; + } + + /* + * The "empty" bitmap starts out with all 1 bits, except for trailing + * unused bits (if any). Note that each group uses bit 0 to correspond + * to the first logical bit in the group, so extra bits are the most + * significant bits of the last group. + */ + memset(bitmap, 0xffU, bitmap_size(binfo)); + extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) { + bitmap[binfo->levels[1].group_offset - 1] >>= extra; + } + for (i = 1; i < binfo->nlevels; i++) { + size_t group_count = binfo->levels[i].group_offset - + binfo->levels[i-1].group_offset; + extra = (BITMAP_GROUP_NBITS - (group_count & + BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) { + bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; + } + } +} + +#else /* BITMAP_USE_TREE */ + +void +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { + assert(nbits > 0); + assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); + + binfo->ngroups = BITMAP_BITS2GROUPS(nbits); + binfo->nbits = nbits; +} + +static size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) { + return binfo->ngroups; +} + +void +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { + size_t extra; + + if (fill) { + memset(bitmap, 0, bitmap_size(binfo)); + return; + } + + memset(bitmap, 0xffU, bitmap_size(binfo)); + extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) { + bitmap[binfo->ngroups - 1] >>= extra; + } +} + +#endif /* BITMAP_USE_TREE */ + +size_t +bitmap_size(const bitmap_info_t *binfo) { + return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP); +} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/chunk.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/chunk.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/chunk.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/chunk.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/chunk_dss.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/chunk_dss.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/chunk_dss.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/chunk_dss.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/chunk_mmap.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/chunk_mmap.c similarity index 100% rename from 
redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/chunk_mmap.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/chunk_mmap.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/ckh.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/ckh.c similarity index 76% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/ckh.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/ckh.c index 53a1c1e..e95e0a3 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/ckh.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/ckh.c @@ -34,8 +34,18 @@ * respectively. * ******************************************************************************/ -#define JEMALLOC_CKH_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_CKH_C_ +#include "jemalloc/internal/jemalloc_preamble.h" + +#include "jemalloc/internal/ckh.h" + +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/util.h" /******************************************************************************/ /* Function prototypes for non-inline static functions. */ @@ -49,27 +59,26 @@ static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); * Search bucket for key and return the cell number if found; SIZE_T_MAX * otherwise. */ -JEMALLOC_INLINE_C size_t -ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) -{ +static size_t +ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) { ckhc_t *cell; unsigned i; for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; - if (cell->key != NULL && ckh->keycomp(key, cell->key)) - return ((bucket << LG_CKH_BUCKET_CELLS) + i); + if (cell->key != NULL && ckh->keycomp(key, cell->key)) { + return (bucket << LG_CKH_BUCKET_CELLS) + i; + } } - return (SIZE_T_MAX); + return SIZE_T_MAX; } /* * Search table for key and return cell number if found; SIZE_T_MAX otherwise. */ -JEMALLOC_INLINE_C size_t -ckh_isearch(ckh_t *ckh, const void *key) -{ +static size_t +ckh_isearch(ckh_t *ckh, const void *key) { size_t hashes[2], bucket, cell; assert(ckh != NULL); @@ -79,19 +88,19 @@ ckh_isearch(ckh_t *ckh, const void *key) /* Search primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); - if (cell != SIZE_T_MAX) - return (cell); + if (cell != SIZE_T_MAX) { + return cell; + } /* Search secondary bucket. */ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); - return (cell); + return cell; } -JEMALLOC_INLINE_C bool +static bool ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, - const void *data) -{ + const void *data) { ckhc_t *cell; unsigned offset, i; @@ -99,7 +108,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, * Cycle through the cells in the bucket, starting at a random position. * The randomness avoids worst-case search overhead as buckets fill up. 
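/*
 * ckh_try_bucket_insert() above starts its scan at a pseudo-random cell so
 * that repeated inserts do not always probe a bucket in the same order; the
 * scan itself is modular arithmetic over a power-of-two cell count.  A
 * sketch with hypothetical names (CELLS, find_cell) and rand() in place of
 * jemalloc's prng:
 */
#include <stddef.h>
#include <stdlib.h>

#define LG_CELLS 2
#define CELLS (1u << LG_CELLS)

/* First empty cell starting from a random offset, or -1 if bucket full. */
static int
find_cell(const void *cells[CELLS]) {
	unsigned offset = (unsigned)rand() & (CELLS - 1);
	for (unsigned i = 0; i < CELLS; i++) {
		unsigned c = (i + offset) & (CELLS - 1);	/* wrap */
		if (cells[c] == NULL) {
			return (int)c;
		}
	}
	return -1;	/* full: the caller evicts or grows the table */
}

int
main(void) {
	const void *cells[CELLS] = { "a", NULL, "c", NULL };
	return (find_cell(cells) >= 0) ? 0 : 1;
}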
*/ - prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); + offset = (unsigned)prng_lg_range_u64(&ckh->prng_state, + LG_CKH_BUCKET_CELLS); for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; @@ -107,11 +117,11 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, cell->key = key; cell->data = data; ckh->count++; - return (false); + return false; } } - return (true); + return true; } /* @@ -120,10 +130,9 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, * eviction/relocation procedure until either success or detection of an * eviction/relocation bucket cycle. */ -JEMALLOC_INLINE_C bool +static bool ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, - void const **argdata) -{ + void const **argdata) { const void *key, *data, *tkey, *tdata; ckhc_t *cell; size_t hashes[2], bucket, tbucket; @@ -141,7 +150,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, * were an item for which both hashes indicated the same * bucket. */ - prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); + i = (unsigned)prng_lg_range_u64(&ckh->prng_state, + LG_CKH_BUCKET_CELLS); cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; assert(cell->key != NULL); @@ -181,18 +191,18 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, if (tbucket == argbucket) { *argkey = key; *argdata = data; - return (true); + return true; } bucket = tbucket; - if (!ckh_try_bucket_insert(ckh, bucket, key, data)) - return (false); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { + return false; + } } } -JEMALLOC_INLINE_C bool -ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) -{ +static bool +ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) { size_t hashes[2], bucket; const void *key = *argkey; const void *data = *argdata; @@ -201,27 +211,28 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) /* Try to insert in primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (!ckh_try_bucket_insert(ckh, bucket, key, data)) - return (false); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { + return false; + } /* Try to insert in secondary bucket. */ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (!ckh_try_bucket_insert(ckh, bucket, key, data)) - return (false); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { + return false; + } /* * Try to find a place for this item via iterative eviction/relocation. */ - return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata)); + return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata); } /* * Try to rebuild the hash table from scratch by inserting all items from the * old table into the new. 
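/*
 * The heart of the eviction logic above: every key has two candidate
 * buckets (one per hash), and an insert that finds both full evicts a
 * resident key into that key's other bucket.  A toy single-cell-per-bucket
 * version with a kick bound instead of jemalloc's cycle detection; all
 * names and the hash functions here are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define NBUCKETS 16
#define MAX_KICKS 32

typedef struct { uint64_t key; bool used; } cell_t;
static cell_t tab[NBUCKETS];

static size_t h1(uint64_t k) { return (size_t)(k % NBUCKETS); }
static size_t h2(uint64_t k) { return (size_t)((k / NBUCKETS) % NBUCKETS); }

static bool
cuckoo_insert(uint64_t key) {
	size_t b = h1(key);
	for (int kick = 0; kick < MAX_KICKS; kick++) {
		if (!tab[b].used) {
			tab[b].key = key;
			tab[b].used = true;
			return true;
		}
		/* Evict the resident key and push it to its other bucket. */
		uint64_t evicted = tab[b].key;
		tab[b].key = key;
		key = evicted;
		b = (b == h1(key)) ? h2(key) : h1(key);
	}
	return false;	/* too many kicks: a real table grows and rehashes */
}

int
main(void) {
	return (cuckoo_insert(5) && cuckoo_insert(21)) ? 0 : 1;
}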
*/ -JEMALLOC_INLINE_C bool -ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) -{ +static bool +ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) { size_t count, i, nins; const void *key, *data; @@ -233,22 +244,20 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) data = aTab[i].data; if (ckh_try_insert(ckh, &key, &data)) { ckh->count = count; - return (true); + return true; } nins++; } } - return (false); + return false; } static bool -ckh_grow(tsd_t *tsd, ckh_t *ckh) -{ +ckh_grow(tsd_t *tsd, ckh_t *ckh) { bool ret; ckhc_t *tab, *ttab; - size_t lg_curcells; - unsigned lg_prevbuckets; + unsigned lg_prevbuckets, lg_curcells; #ifdef CKH_COUNT ckh->ngrows++; @@ -265,13 +274,13 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) size_t usize; lg_curcells++; - usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (usize == 0) { + usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { ret = true; goto label_return; } - tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, - true, NULL); + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, + true, NULL, true, arena_ichoose(tsd, NULL)); if (tab == NULL) { ret = true; goto label_return; @@ -283,27 +292,26 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { - idalloctm(tsd, tab, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); break; } /* Rebuilding failed, so back out partially rebuilt table. */ - idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; } ret = false; label_return: - return (ret); + return ret; } static void -ckh_shrink(tsd_t *tsd, ckh_t *ckh) -{ +ckh_shrink(tsd_t *tsd, ckh_t *ckh) { ckhc_t *tab, *ttab; - size_t lg_curcells, usize; - unsigned lg_prevbuckets; + size_t usize; + unsigned lg_prevbuckets, lg_curcells; /* * It is possible (though unlikely, given well behaved hashes) that the @@ -311,11 +319,12 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) */ lg_prevbuckets = ckh->lg_curbuckets; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; - usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (usize == 0) + usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { return; - tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, - NULL); + } + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, + true, arena_ichoose(tsd, NULL)); if (tab == NULL) { /* * An OOM error isn't worth propagating, since it doesn't @@ -330,7 +339,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; if (!ckh_rebuild(ckh, tab)) { - idalloctm(tsd, tab, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); #ifdef CKH_COUNT ckh->nshrinks++; #endif @@ -338,7 +347,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) } /* Rebuilding failed, so back out partially rebuilt table. 
*/ - idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; #ifdef CKH_COUNT @@ -348,8 +357,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, - ckh_keycomp_t *keycomp) -{ + ckh_keycomp_t *keycomp) { bool ret; size_t mincells, usize; unsigned lg_mincells; @@ -379,20 +387,21 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; for (lg_mincells = LG_CKH_BUCKET_CELLS; (ZU(1) << lg_mincells) < mincells; - lg_mincells++) - ; /* Do nothing. */ + lg_mincells++) { + /* Do nothing. */ + } ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->hash = hash; ckh->keycomp = keycomp; - usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); - if (usize == 0) { + usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { ret = true; goto label_return; } - ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true, - NULL); + ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, + NULL, true, arena_ichoose(tsd, NULL)); if (ckh->tab == NULL) { ret = true; goto label_return; @@ -400,13 +409,11 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ret = false; label_return: - return (ret); + return ret; } void -ckh_delete(tsd_t *tsd, ckh_t *ckh) -{ - +ckh_delete(tsd_t *tsd, ckh_t *ckh) { assert(ckh != NULL); #ifdef CKH_VERBOSE @@ -421,43 +428,42 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh) (unsigned long long)ckh->nrelocs); #endif - idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true); - if (config_debug) - memset(ckh, 0x5a, sizeof(ckh_t)); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); + if (config_debug) { + memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t)); + } } size_t -ckh_count(ckh_t *ckh) -{ - +ckh_count(ckh_t *ckh) { assert(ckh != NULL); - return (ckh->count); + return ckh->count; } bool -ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) -{ +ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) { size_t i, ncells; for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS)); i < ncells; i++) { if (ckh->tab[i].key != NULL) { - if (key != NULL) + if (key != NULL) { *key = (void *)ckh->tab[i].key; - if (data != NULL) + } + if (data != NULL) { *data = (void *)ckh->tab[i].data; + } *tabind = i + 1; - return (false); + return false; } } - return (true); + return true; } bool -ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) -{ +ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) { bool ret; assert(ckh != NULL); @@ -476,23 +482,24 @@ ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) ret = false; label_return: - return (ret); + return ret; } bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, - void **data) -{ + void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { - if (key != NULL) + if (key != NULL) { *key = (void *)ckh->tab[cell].key; - if (data != NULL) + } + if (data != NULL) { *data = (void *)ckh->tab[cell].data; + } ckh->tab[cell].key = NULL; ckh->tab[cell].data = NULL; /* Not necessary. 
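/*
 * The mincells formula in ckh_new() above reserves at least 4 cells for
 * every 3 requested items, capping the initial load factor at 75% before
 * the bucket count is rounded up to a power of two.  For example,
 * minitems = 10 gives ((10 + (3 - 10 % 3)) / 3) << 2 = (12 / 3) * 4 = 16
 * cells, i.e. 62.5% occupancy.  A tiny check of that arithmetic:
 */
#include <assert.h>
#include <stddef.h>

static size_t
mincells(size_t minitems) {
	return ((minitems + (3 - (minitems % 3))) / 3) << 2;
}

int
main(void) {
	assert(mincells(10) == 16);	/* 10/16 = 62.5% load */
	assert(mincells(12) == 20);	/* 12/20 = 60% load */
	assert(mincells(3) == 8);	/* never above 75% */
	return 0;
}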
*/ @@ -505,51 +512,47 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, ckh_shrink(tsd, ckh); } - return (false); + return false; } - return (true); + return true; } bool -ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) -{ +ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { - if (key != NULL) + if (key != NULL) { *key = (void *)ckh->tab[cell].key; - if (data != NULL) + } + if (data != NULL) { *data = (void *)ckh->tab[cell].data; - return (false); + } + return false; } - return (true); + return true; } void -ckh_string_hash(const void *key, size_t r_hash[2]) -{ - +ckh_string_hash(const void *key, size_t r_hash[2]) { hash(key, strlen((const char *)key), 0x94122f33U, r_hash); } bool -ckh_string_keycomp(const void *k1, const void *k2) -{ - - assert(k1 != NULL); - assert(k2 != NULL); +ckh_string_keycomp(const void *k1, const void *k2) { + assert(k1 != NULL); + assert(k2 != NULL); - return (strcmp((char *)k1, (char *)k2) ? false : true); + return !strcmp((char *)k1, (char *)k2); } void -ckh_pointer_hash(const void *key, size_t r_hash[2]) -{ +ckh_pointer_hash(const void *key, size_t r_hash[2]) { union { const void *v; size_t i; @@ -561,8 +564,6 @@ ckh_pointer_hash(const void *key, size_t r_hash[2]) } bool -ckh_pointer_keycomp(const void *k1, const void *k2) -{ - - return ((k1 == k2) ? true : false); +ckh_pointer_keycomp(const void *k1, const void *k2) { + return (k1 == k2); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/ctl.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/ctl.c new file mode 100644 index 0000000..1e713a3 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/ctl.c @@ -0,0 +1,2883 @@ +#define JEMALLOC_CTL_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/util.h" + +/******************************************************************************/ +/* Data. */ + +/* + * ctl_mtx protects the following: + * - ctl_stats->* + */ +static malloc_mutex_t ctl_mtx; +static bool ctl_initialized; +static ctl_stats_t *ctl_stats; +static ctl_arenas_t *ctl_arenas; + +/******************************************************************************/ +/* Helpers for named and indexed nodes. */ + +static const ctl_named_node_t * +ctl_named_node(const ctl_node_t *node) { + return ((node->named) ? (const ctl_named_node_t *)node : NULL); +} + +static const ctl_named_node_t * +ctl_named_children(const ctl_named_node_t *node, size_t index) { + const ctl_named_node_t *children = ctl_named_node(node->children); + + return (children ? &children[index] : NULL); +} + +static const ctl_indexed_node_t * +ctl_indexed_node(const ctl_node_t *node) { + return (!node->named ? (const ctl_indexed_node_t *)node : NULL); +} + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
*/ + +#define CTL_PROTO(n) \ +static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen); + +#define INDEX_PROTO(n) \ +static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \ + const size_t *mib, size_t miblen, size_t i); + +CTL_PROTO(version) +CTL_PROTO(epoch) +CTL_PROTO(background_thread) +CTL_PROTO(max_background_threads) +CTL_PROTO(thread_tcache_enabled) +CTL_PROTO(thread_tcache_flush) +CTL_PROTO(thread_prof_name) +CTL_PROTO(thread_prof_active) +CTL_PROTO(thread_arena) +CTL_PROTO(thread_allocated) +CTL_PROTO(thread_allocatedp) +CTL_PROTO(thread_deallocated) +CTL_PROTO(thread_deallocatedp) +CTL_PROTO(config_cache_oblivious) +CTL_PROTO(config_debug) +CTL_PROTO(config_fill) +CTL_PROTO(config_lazy_lock) +CTL_PROTO(config_malloc_conf) +CTL_PROTO(config_prof) +CTL_PROTO(config_prof_libgcc) +CTL_PROTO(config_prof_libunwind) +CTL_PROTO(config_stats) +CTL_PROTO(config_utrace) +CTL_PROTO(config_xmalloc) +CTL_PROTO(opt_abort) +CTL_PROTO(opt_abort_conf) +CTL_PROTO(opt_metadata_thp) +CTL_PROTO(opt_retain) +CTL_PROTO(opt_dss) +CTL_PROTO(opt_narenas) +CTL_PROTO(opt_percpu_arena) +CTL_PROTO(opt_background_thread) +CTL_PROTO(opt_max_background_threads) +CTL_PROTO(opt_dirty_decay_ms) +CTL_PROTO(opt_muzzy_decay_ms) +CTL_PROTO(opt_stats_print) +CTL_PROTO(opt_stats_print_opts) +CTL_PROTO(opt_junk) +CTL_PROTO(opt_zero) +CTL_PROTO(opt_utrace) +CTL_PROTO(opt_xmalloc) +CTL_PROTO(opt_tcache) +CTL_PROTO(opt_thp) +CTL_PROTO(opt_lg_extent_max_active_fit) +CTL_PROTO(opt_lg_tcache_max) +CTL_PROTO(opt_prof) +CTL_PROTO(opt_prof_prefix) +CTL_PROTO(opt_prof_active) +CTL_PROTO(opt_prof_thread_active_init) +CTL_PROTO(opt_lg_prof_sample) +CTL_PROTO(opt_lg_prof_interval) +CTL_PROTO(opt_prof_gdump) +CTL_PROTO(opt_prof_final) +CTL_PROTO(opt_prof_leak) +CTL_PROTO(opt_prof_accum) +CTL_PROTO(tcache_create) +CTL_PROTO(tcache_flush) +CTL_PROTO(tcache_destroy) +CTL_PROTO(arena_i_initialized) +CTL_PROTO(arena_i_decay) +CTL_PROTO(arena_i_purge) +CTL_PROTO(arena_i_reset) +CTL_PROTO(arena_i_destroy) +CTL_PROTO(arena_i_dss) +CTL_PROTO(arena_i_dirty_decay_ms) +CTL_PROTO(arena_i_muzzy_decay_ms) +CTL_PROTO(arena_i_extent_hooks) +CTL_PROTO(arena_i_retain_grow_limit) +INDEX_PROTO(arena_i) +CTL_PROTO(arenas_bin_i_size) +CTL_PROTO(arenas_bin_i_nregs) +CTL_PROTO(arenas_bin_i_slab_size) +INDEX_PROTO(arenas_bin_i) +CTL_PROTO(arenas_lextent_i_size) +INDEX_PROTO(arenas_lextent_i) +CTL_PROTO(arenas_narenas) +CTL_PROTO(arenas_dirty_decay_ms) +CTL_PROTO(arenas_muzzy_decay_ms) +CTL_PROTO(arenas_quantum) +CTL_PROTO(arenas_page) +CTL_PROTO(arenas_tcache_max) +CTL_PROTO(arenas_nbins) +CTL_PROTO(arenas_nhbins) +CTL_PROTO(arenas_nlextents) +CTL_PROTO(arenas_create) +CTL_PROTO(arenas_lookup) +CTL_PROTO(prof_thread_active_init) +CTL_PROTO(prof_active) +CTL_PROTO(prof_dump) +CTL_PROTO(prof_gdump) +CTL_PROTO(prof_reset) +CTL_PROTO(prof_interval) +CTL_PROTO(lg_prof_sample) +CTL_PROTO(stats_arenas_i_small_allocated) +CTL_PROTO(stats_arenas_i_small_nmalloc) +CTL_PROTO(stats_arenas_i_small_ndalloc) +CTL_PROTO(stats_arenas_i_small_nrequests) +CTL_PROTO(stats_arenas_i_large_allocated) +CTL_PROTO(stats_arenas_i_large_nmalloc) +CTL_PROTO(stats_arenas_i_large_ndalloc) +CTL_PROTO(stats_arenas_i_large_nrequests) +CTL_PROTO(stats_arenas_i_bins_j_nmalloc) +CTL_PROTO(stats_arenas_i_bins_j_ndalloc) +CTL_PROTO(stats_arenas_i_bins_j_nrequests) +CTL_PROTO(stats_arenas_i_bins_j_curregs) +CTL_PROTO(stats_arenas_i_bins_j_nfills) +CTL_PROTO(stats_arenas_i_bins_j_nflushes) +CTL_PROTO(stats_arenas_i_bins_j_nslabs) 
+CTL_PROTO(stats_arenas_i_bins_j_nreslabs) +CTL_PROTO(stats_arenas_i_bins_j_curslabs) +INDEX_PROTO(stats_arenas_i_bins_j) +CTL_PROTO(stats_arenas_i_lextents_j_nmalloc) +CTL_PROTO(stats_arenas_i_lextents_j_ndalloc) +CTL_PROTO(stats_arenas_i_lextents_j_nrequests) +CTL_PROTO(stats_arenas_i_lextents_j_curlextents) +INDEX_PROTO(stats_arenas_i_lextents_j) +CTL_PROTO(stats_arenas_i_nthreads) +CTL_PROTO(stats_arenas_i_uptime) +CTL_PROTO(stats_arenas_i_dss) +CTL_PROTO(stats_arenas_i_dirty_decay_ms) +CTL_PROTO(stats_arenas_i_muzzy_decay_ms) +CTL_PROTO(stats_arenas_i_pactive) +CTL_PROTO(stats_arenas_i_pdirty) +CTL_PROTO(stats_arenas_i_pmuzzy) +CTL_PROTO(stats_arenas_i_mapped) +CTL_PROTO(stats_arenas_i_retained) +CTL_PROTO(stats_arenas_i_dirty_npurge) +CTL_PROTO(stats_arenas_i_dirty_nmadvise) +CTL_PROTO(stats_arenas_i_dirty_purged) +CTL_PROTO(stats_arenas_i_muzzy_npurge) +CTL_PROTO(stats_arenas_i_muzzy_nmadvise) +CTL_PROTO(stats_arenas_i_muzzy_purged) +CTL_PROTO(stats_arenas_i_base) +CTL_PROTO(stats_arenas_i_internal) +CTL_PROTO(stats_arenas_i_metadata_thp) +CTL_PROTO(stats_arenas_i_tcache_bytes) +CTL_PROTO(stats_arenas_i_resident) +INDEX_PROTO(stats_arenas_i) +CTL_PROTO(stats_allocated) +CTL_PROTO(stats_active) +CTL_PROTO(stats_background_thread_num_threads) +CTL_PROTO(stats_background_thread_num_runs) +CTL_PROTO(stats_background_thread_run_interval) +CTL_PROTO(stats_metadata) +CTL_PROTO(stats_metadata_thp) +CTL_PROTO(stats_resident) +CTL_PROTO(stats_mapped) +CTL_PROTO(stats_retained) + +#define MUTEX_STATS_CTL_PROTO_GEN(n) \ +CTL_PROTO(stats_##n##_num_ops) \ +CTL_PROTO(stats_##n##_num_wait) \ +CTL_PROTO(stats_##n##_num_spin_acq) \ +CTL_PROTO(stats_##n##_num_owner_switch) \ +CTL_PROTO(stats_##n##_total_wait_time) \ +CTL_PROTO(stats_##n##_max_wait_time) \ +CTL_PROTO(stats_##n##_max_num_thds) + +/* Global mutexes. */ +#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx) +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + +/* Per arena mutexes. */ +#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx) +MUTEX_PROF_ARENA_MUTEXES +#undef OP + +/* Arena bin mutexes. */ +MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex) +#undef MUTEX_STATS_CTL_PROTO_GEN + +CTL_PROTO(stats_mutexes_reset) + +/******************************************************************************/ +/* mallctl tree. */ + +#define NAME(n) {true}, n +#define CHILD(t, c) \ + sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ + (ctl_node_t *)c##_node, \ + NULL +#define CTL(c) 0, NULL, c##_ctl + +/* + * Only handles internal indexed nodes, since there are currently no external + * ones. 
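/*
 * The NAME/CHILD/CTL macros below build a static tree in which each node
 * either maps a path component to a child array or terminates in a handler
 * function, so resolving a dotted mallctl name is plain recursive descent.
 * A stripped-down sketch with hypothetical node_t/find_child names and a
 * toy two-level tree:
 */
#include <stddef.h>
#include <string.h>

typedef struct node_s node_t;
struct node_s {
	const char *name;
	const node_t *children;	/* NULL at leaves */
	size_t nchildren;
	int (*handler)(void);	/* non-NULL only at leaves */
};

/* Resolve one path component per step. */
static const node_t *
find_child(const node_t *n, const char *comp) {
	for (size_t i = 0; i < n->nchildren; i++) {
		if (strcmp(n->children[i].name, comp) == 0) {
			return &n->children[i];
		}
	}
	return NULL;
}

static int abort_handler(void) { return 0; }
static const node_t opt_children[] = {
	{ "abort", NULL, 0, abort_handler },
};
static const node_t root_children[] = {
	{ "opt", opt_children, 1, NULL },
};
static const node_t root = { "", root_children, 1, NULL };

int
main(void) {
	/* "opt.abort" resolves as opt -> abort -> handler. */
	const node_t *n = find_child(&root, "opt");
	n = (n != NULL) ? find_child(n, "abort") : NULL;
	return (n != NULL && n->handler != NULL) ? n->handler() : 1;
}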
+ */ +#define INDEX(i) {false}, i##_index + +static const ctl_named_node_t thread_tcache_node[] = { + {NAME("enabled"), CTL(thread_tcache_enabled)}, + {NAME("flush"), CTL(thread_tcache_flush)} +}; + +static const ctl_named_node_t thread_prof_node[] = { + {NAME("name"), CTL(thread_prof_name)}, + {NAME("active"), CTL(thread_prof_active)} +}; + +static const ctl_named_node_t thread_node[] = { + {NAME("arena"), CTL(thread_arena)}, + {NAME("allocated"), CTL(thread_allocated)}, + {NAME("allocatedp"), CTL(thread_allocatedp)}, + {NAME("deallocated"), CTL(thread_deallocated)}, + {NAME("deallocatedp"), CTL(thread_deallocatedp)}, + {NAME("tcache"), CHILD(named, thread_tcache)}, + {NAME("prof"), CHILD(named, thread_prof)} +}; + +static const ctl_named_node_t config_node[] = { + {NAME("cache_oblivious"), CTL(config_cache_oblivious)}, + {NAME("debug"), CTL(config_debug)}, + {NAME("fill"), CTL(config_fill)}, + {NAME("lazy_lock"), CTL(config_lazy_lock)}, + {NAME("malloc_conf"), CTL(config_malloc_conf)}, + {NAME("prof"), CTL(config_prof)}, + {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, + {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, + {NAME("stats"), CTL(config_stats)}, + {NAME("utrace"), CTL(config_utrace)}, + {NAME("xmalloc"), CTL(config_xmalloc)} +}; + +static const ctl_named_node_t opt_node[] = { + {NAME("abort"), CTL(opt_abort)}, + {NAME("abort_conf"), CTL(opt_abort_conf)}, + {NAME("metadata_thp"), CTL(opt_metadata_thp)}, + {NAME("retain"), CTL(opt_retain)}, + {NAME("dss"), CTL(opt_dss)}, + {NAME("narenas"), CTL(opt_narenas)}, + {NAME("percpu_arena"), CTL(opt_percpu_arena)}, + {NAME("background_thread"), CTL(opt_background_thread)}, + {NAME("max_background_threads"), CTL(opt_max_background_threads)}, + {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)}, + {NAME("stats_print"), CTL(opt_stats_print)}, + {NAME("stats_print_opts"), CTL(opt_stats_print_opts)}, + {NAME("junk"), CTL(opt_junk)}, + {NAME("zero"), CTL(opt_zero)}, + {NAME("utrace"), CTL(opt_utrace)}, + {NAME("xmalloc"), CTL(opt_xmalloc)}, + {NAME("tcache"), CTL(opt_tcache)}, + {NAME("thp"), CTL(opt_thp)}, + {NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)}, + {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, + {NAME("prof"), CTL(opt_prof)}, + {NAME("prof_prefix"), CTL(opt_prof_prefix)}, + {NAME("prof_active"), CTL(opt_prof_active)}, + {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)}, + {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, + {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, + {NAME("prof_gdump"), CTL(opt_prof_gdump)}, + {NAME("prof_final"), CTL(opt_prof_final)}, + {NAME("prof_leak"), CTL(opt_prof_leak)}, + {NAME("prof_accum"), CTL(opt_prof_accum)} +}; + +static const ctl_named_node_t tcache_node[] = { + {NAME("create"), CTL(tcache_create)}, + {NAME("flush"), CTL(tcache_flush)}, + {NAME("destroy"), CTL(tcache_destroy)} +}; + +static const ctl_named_node_t arena_i_node[] = { + {NAME("initialized"), CTL(arena_i_initialized)}, + {NAME("decay"), CTL(arena_i_decay)}, + {NAME("purge"), CTL(arena_i_purge)}, + {NAME("reset"), CTL(arena_i_reset)}, + {NAME("destroy"), CTL(arena_i_destroy)}, + {NAME("dss"), CTL(arena_i_dss)}, + {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)}, + {NAME("extent_hooks"), CTL(arena_i_extent_hooks)}, + {NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)} +}; +static const ctl_named_node_t super_arena_i_node[] = { + {NAME(""), CHILD(named, 
arena_i)} +}; + +static const ctl_indexed_node_t arena_node[] = { + {INDEX(arena_i)} +}; + +static const ctl_named_node_t arenas_bin_i_node[] = { + {NAME("size"), CTL(arenas_bin_i_size)}, + {NAME("nregs"), CTL(arenas_bin_i_nregs)}, + {NAME("slab_size"), CTL(arenas_bin_i_slab_size)} +}; +static const ctl_named_node_t super_arenas_bin_i_node[] = { + {NAME(""), CHILD(named, arenas_bin_i)} +}; + +static const ctl_indexed_node_t arenas_bin_node[] = { + {INDEX(arenas_bin_i)} +}; + +static const ctl_named_node_t arenas_lextent_i_node[] = { + {NAME("size"), CTL(arenas_lextent_i_size)} +}; +static const ctl_named_node_t super_arenas_lextent_i_node[] = { + {NAME(""), CHILD(named, arenas_lextent_i)} +}; + +static const ctl_indexed_node_t arenas_lextent_node[] = { + {INDEX(arenas_lextent_i)} +}; + +static const ctl_named_node_t arenas_node[] = { + {NAME("narenas"), CTL(arenas_narenas)}, + {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)}, + {NAME("quantum"), CTL(arenas_quantum)}, + {NAME("page"), CTL(arenas_page)}, + {NAME("tcache_max"), CTL(arenas_tcache_max)}, + {NAME("nbins"), CTL(arenas_nbins)}, + {NAME("nhbins"), CTL(arenas_nhbins)}, + {NAME("bin"), CHILD(indexed, arenas_bin)}, + {NAME("nlextents"), CTL(arenas_nlextents)}, + {NAME("lextent"), CHILD(indexed, arenas_lextent)}, + {NAME("create"), CTL(arenas_create)}, + {NAME("lookup"), CTL(arenas_lookup)} +}; + +static const ctl_named_node_t prof_node[] = { + {NAME("thread_active_init"), CTL(prof_thread_active_init)}, + {NAME("active"), CTL(prof_active)}, + {NAME("dump"), CTL(prof_dump)}, + {NAME("gdump"), CTL(prof_gdump)}, + {NAME("reset"), CTL(prof_reset)}, + {NAME("interval"), CTL(prof_interval)}, + {NAME("lg_sample"), CTL(lg_prof_sample)} +}; + +static const ctl_named_node_t stats_arenas_i_small_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} +}; + +static const ctl_named_node_t stats_arenas_i_large_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} +}; + +#define MUTEX_PROF_DATA_NODE(prefix) \ +static const ctl_named_node_t stats_##prefix##_node[] = { \ + {NAME("num_ops"), \ + CTL(stats_##prefix##_num_ops)}, \ + {NAME("num_wait"), \ + CTL(stats_##prefix##_num_wait)}, \ + {NAME("num_spin_acq"), \ + CTL(stats_##prefix##_num_spin_acq)}, \ + {NAME("num_owner_switch"), \ + CTL(stats_##prefix##_num_owner_switch)}, \ + {NAME("total_wait_time"), \ + CTL(stats_##prefix##_total_wait_time)}, \ + {NAME("max_wait_time"), \ + CTL(stats_##prefix##_max_wait_time)}, \ + {NAME("max_num_thds"), \ + CTL(stats_##prefix##_max_num_thds)} \ + /* Note that # of current waiting thread not provided. 
*/ \ +}; + +MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex) + +static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, + {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)}, + {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, + {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)}, + {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)}, + {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)}, + {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)} +}; + +static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_bins_j)} +}; + +static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { + {INDEX(stats_arenas_i_bins_j)} +}; + +static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)}, + {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)} +}; +static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_lextents_j)} +}; + +static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = { + {INDEX(stats_arenas_i_lextents_j)} +}; + +#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx) +MUTEX_PROF_ARENA_MUTEXES +#undef OP + +static const ctl_named_node_t stats_arenas_i_mutexes_node[] = { +#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)}, +MUTEX_PROF_ARENA_MUTEXES +#undef OP +}; + +static const ctl_named_node_t stats_arenas_i_node[] = { + {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, + {NAME("uptime"), CTL(stats_arenas_i_uptime)}, + {NAME("dss"), CTL(stats_arenas_i_dss)}, + {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)}, + {NAME("pactive"), CTL(stats_arenas_i_pactive)}, + {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, + {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)}, + {NAME("mapped"), CTL(stats_arenas_i_mapped)}, + {NAME("retained"), CTL(stats_arenas_i_retained)}, + {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)}, + {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)}, + {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)}, + {NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)}, + {NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)}, + {NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)}, + {NAME("base"), CTL(stats_arenas_i_base)}, + {NAME("internal"), CTL(stats_arenas_i_internal)}, + {NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)}, + {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)}, + {NAME("resident"), CTL(stats_arenas_i_resident)}, + {NAME("small"), CHILD(named, stats_arenas_i_small)}, + {NAME("large"), CHILD(named, stats_arenas_i_large)}, + {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, + {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)}, + {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)} +}; +static const ctl_named_node_t super_stats_arenas_i_node[] = { + {NAME(""), CHILD(named, stats_arenas_i)} +}; + +static const ctl_indexed_node_t stats_arenas_node[] = { + {INDEX(stats_arenas_i)} +}; + +static const ctl_named_node_t stats_background_thread_node[] 
= { + {NAME("num_threads"), CTL(stats_background_thread_num_threads)}, + {NAME("num_runs"), CTL(stats_background_thread_num_runs)}, + {NAME("run_interval"), CTL(stats_background_thread_run_interval)} +}; + +#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx) +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + +static const ctl_named_node_t stats_mutexes_node[] = { +#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)}, +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + {NAME("reset"), CTL(stats_mutexes_reset)} +}; +#undef MUTEX_PROF_DATA_NODE + +static const ctl_named_node_t stats_node[] = { + {NAME("allocated"), CTL(stats_allocated)}, + {NAME("active"), CTL(stats_active)}, + {NAME("metadata"), CTL(stats_metadata)}, + {NAME("metadata_thp"), CTL(stats_metadata_thp)}, + {NAME("resident"), CTL(stats_resident)}, + {NAME("mapped"), CTL(stats_mapped)}, + {NAME("retained"), CTL(stats_retained)}, + {NAME("background_thread"), + CHILD(named, stats_background_thread)}, + {NAME("mutexes"), CHILD(named, stats_mutexes)}, + {NAME("arenas"), CHILD(indexed, stats_arenas)} +}; + +static const ctl_named_node_t root_node[] = { + {NAME("version"), CTL(version)}, + {NAME("epoch"), CTL(epoch)}, + {NAME("background_thread"), CTL(background_thread)}, + {NAME("max_background_threads"), CTL(max_background_threads)}, + {NAME("thread"), CHILD(named, thread)}, + {NAME("config"), CHILD(named, config)}, + {NAME("opt"), CHILD(named, opt)}, + {NAME("tcache"), CHILD(named, tcache)}, + {NAME("arena"), CHILD(indexed, arena)}, + {NAME("arenas"), CHILD(named, arenas)}, + {NAME("prof"), CHILD(named, prof)}, + {NAME("stats"), CHILD(named, stats)} +}; +static const ctl_named_node_t super_root_node[] = { + {NAME(""), CHILD(named, root)} +}; + +#undef NAME +#undef CHILD +#undef CTL +#undef INDEX + +/******************************************************************************/ + +/* + * Sets *dst + *src non-atomically. This is safe, since everything is + * synchronized by the ctl mutex. + */ +static void +ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) { +#ifdef JEMALLOC_ATOMIC_U64 + uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED); + uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED); + atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED); +#else + *dst += *src; +#endif +} + +/* Likewise: with ctl mutex synchronization, reading is simple. */ +static uint64_t +ctl_arena_stats_read_u64(arena_stats_u64_t *p) { +#ifdef JEMALLOC_ATOMIC_U64 + return atomic_load_u64(p, ATOMIC_RELAXED); +#else + return *p; +#endif +} + +static void +accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) { + size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); + size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED); + atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED); +} + +/******************************************************************************/ + +static unsigned +arenas_i2a_impl(size_t i, bool compat, bool validate) { + unsigned a; + + switch (i) { + case MALLCTL_ARENAS_ALL: + a = 0; + break; + case MALLCTL_ARENAS_DESTROYED: + a = 1; + break; + default: + if (compat && i == ctl_arenas->narenas) { + /* + * Provide deprecated backward compatibility for + * accessing the merged stats at index narenas rather + * than via MALLCTL_ARENAS_ALL. This is scheduled for + * removal in 6.0.0. + */ + a = 0; + } else if (validate && i >= ctl_arenas->narenas) { + a = UINT_MAX; + } else { + /* + * This function should never be called for an index + * more than one past the range of indices that have + * initialized ctl data. 
+ */ + assert(i < ctl_arenas->narenas || (!validate && i == + ctl_arenas->narenas)); + a = (unsigned)i + 2; + } + break; + } + + return a; +} + +static unsigned +arenas_i2a(size_t i) { + return arenas_i2a_impl(i, true, false); +} + +static ctl_arena_t * +arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) { + ctl_arena_t *ret; + + assert(!compat || !init); + + ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)]; + if (init && ret == NULL) { + if (config_stats) { + struct container_s { + ctl_arena_t ctl_arena; + ctl_arena_stats_t astats; + }; + struct container_s *cont = + (struct container_s *)base_alloc(tsd_tsdn(tsd), + b0get(), sizeof(struct container_s), QUANTUM); + if (cont == NULL) { + return NULL; + } + ret = &cont->ctl_arena; + ret->astats = &cont->astats; + } else { + ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(), + sizeof(ctl_arena_t), QUANTUM); + if (ret == NULL) { + return NULL; + } + } + ret->arena_ind = (unsigned)i; + ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret; + } + + assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i)); + return ret; +} + +static ctl_arena_t * +arenas_i(size_t i) { + ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false); + assert(ret != NULL); + return ret; +} + +static void +ctl_arena_clear(ctl_arena_t *ctl_arena) { + ctl_arena->nthreads = 0; + ctl_arena->dss = dss_prec_names[dss_prec_limit]; + ctl_arena->dirty_decay_ms = -1; + ctl_arena->muzzy_decay_ms = -1; + ctl_arena->pactive = 0; + ctl_arena->pdirty = 0; + ctl_arena->pmuzzy = 0; + if (config_stats) { + memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t)); + ctl_arena->astats->allocated_small = 0; + ctl_arena->astats->nmalloc_small = 0; + ctl_arena->astats->ndalloc_small = 0; + ctl_arena->astats->nrequests_small = 0; + memset(ctl_arena->astats->bstats, 0, NBINS * + sizeof(bin_stats_t)); + memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) * + sizeof(arena_stats_large_t)); + } +} + +static void +ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { + unsigned i; + + if (config_stats) { + arena_stats_merge(tsdn, arena, &ctl_arena->nthreads, + &ctl_arena->dss, &ctl_arena->dirty_decay_ms, + &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, + &ctl_arena->pdirty, &ctl_arena->pmuzzy, + &ctl_arena->astats->astats, ctl_arena->astats->bstats, + ctl_arena->astats->lstats); + + for (i = 0; i < NBINS; i++) { + ctl_arena->astats->allocated_small += + ctl_arena->astats->bstats[i].curregs * + sz_index2size(i); + ctl_arena->astats->nmalloc_small += + ctl_arena->astats->bstats[i].nmalloc; + ctl_arena->astats->ndalloc_small += + ctl_arena->astats->bstats[i].ndalloc; + ctl_arena->astats->nrequests_small += + ctl_arena->astats->bstats[i].nrequests; + } + } else { + arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads, + &ctl_arena->dss, &ctl_arena->dirty_decay_ms, + &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, + &ctl_arena->pdirty, &ctl_arena->pmuzzy); + } +} + +static void +ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, + bool destroyed) { + unsigned i; + + if (!destroyed) { + ctl_sdarena->nthreads += ctl_arena->nthreads; + ctl_sdarena->pactive += ctl_arena->pactive; + ctl_sdarena->pdirty += ctl_arena->pdirty; + ctl_sdarena->pmuzzy += ctl_arena->pmuzzy; + } else { + assert(ctl_arena->nthreads == 0); + assert(ctl_arena->pactive == 0); + assert(ctl_arena->pdirty == 0); + assert(ctl_arena->pmuzzy == 0); + } + + if (config_stats) { + ctl_arena_stats_t *sdstats = ctl_sdarena->astats; + 
ctl_arena_stats_t *astats = ctl_arena->astats; + + if (!destroyed) { + accum_atomic_zu(&sdstats->astats.mapped, + &astats->astats.mapped); + accum_atomic_zu(&sdstats->astats.retained, + &astats->astats.retained); + } + + ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge, + &astats->astats.decay_dirty.npurge); + ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise, + &astats->astats.decay_dirty.nmadvise); + ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged, + &astats->astats.decay_dirty.purged); + + ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge, + &astats->astats.decay_muzzy.npurge); + ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise, + &astats->astats.decay_muzzy.nmadvise); + ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged, + &astats->astats.decay_muzzy.purged); + +#define OP(mtx) malloc_mutex_prof_merge( \ + &(sdstats->astats.mutex_prof_data[ \ + arena_prof_mutex_##mtx]), \ + &(astats->astats.mutex_prof_data[ \ + arena_prof_mutex_##mtx])); +MUTEX_PROF_ARENA_MUTEXES +#undef OP + if (!destroyed) { + accum_atomic_zu(&sdstats->astats.base, + &astats->astats.base); + accum_atomic_zu(&sdstats->astats.internal, + &astats->astats.internal); + accum_atomic_zu(&sdstats->astats.resident, + &astats->astats.resident); + accum_atomic_zu(&sdstats->astats.metadata_thp, + &astats->astats.metadata_thp); + } else { + assert(atomic_load_zu( + &astats->astats.internal, ATOMIC_RELAXED) == 0); + } + + if (!destroyed) { + sdstats->allocated_small += astats->allocated_small; + } else { + assert(astats->allocated_small == 0); + } + sdstats->nmalloc_small += astats->nmalloc_small; + sdstats->ndalloc_small += astats->ndalloc_small; + sdstats->nrequests_small += astats->nrequests_small; + + if (!destroyed) { + accum_atomic_zu(&sdstats->astats.allocated_large, + &astats->astats.allocated_large); + } else { + assert(atomic_load_zu(&astats->astats.allocated_large, + ATOMIC_RELAXED) == 0); + } + ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large, + &astats->astats.nmalloc_large); + ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large, + &astats->astats.ndalloc_large); + ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large, + &astats->astats.nrequests_large); + + accum_atomic_zu(&sdstats->astats.tcache_bytes, + &astats->astats.tcache_bytes); + + if (ctl_arena->arena_ind == 0) { + sdstats->astats.uptime = astats->astats.uptime; + } + + for (i = 0; i < NBINS; i++) { + sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; + sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; + sdstats->bstats[i].nrequests += + astats->bstats[i].nrequests; + if (!destroyed) { + sdstats->bstats[i].curregs += + astats->bstats[i].curregs; + } else { + assert(astats->bstats[i].curregs == 0); + } + sdstats->bstats[i].nfills += astats->bstats[i].nfills; + sdstats->bstats[i].nflushes += + astats->bstats[i].nflushes; + sdstats->bstats[i].nslabs += astats->bstats[i].nslabs; + sdstats->bstats[i].reslabs += astats->bstats[i].reslabs; + if (!destroyed) { + sdstats->bstats[i].curslabs += + astats->bstats[i].curslabs; + } else { + assert(astats->bstats[i].curslabs == 0); + } + malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data, + &astats->bstats[i].mutex_data); + } + + for (i = 0; i < NSIZES - NBINS; i++) { + ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc, + &astats->lstats[i].nmalloc); + ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc, + &astats->lstats[i].ndalloc); + ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests, + 
&astats->lstats[i].nrequests); + if (!destroyed) { + sdstats->lstats[i].curlextents += + astats->lstats[i].curlextents; + } else { + assert(astats->lstats[i].curlextents == 0); + } + } + } +} + +static void +ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena, + unsigned i, bool destroyed) { + ctl_arena_t *ctl_arena = arenas_i(i); + + ctl_arena_clear(ctl_arena); + ctl_arena_stats_amerge(tsdn, ctl_arena, arena); + /* Merge into sum stats as well. */ + ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed); +} + +static unsigned +ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) { + unsigned arena_ind; + ctl_arena_t *ctl_arena; + + if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) != + NULL) { + ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link); + arena_ind = ctl_arena->arena_ind; + } else { + arena_ind = ctl_arenas->narenas; + } + + /* Trigger stats allocation. */ + if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) { + return UINT_MAX; + } + + /* Initialize new arena. */ + if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) { + return UINT_MAX; + } + + if (arena_ind == ctl_arenas->narenas) { + ctl_arenas->narenas++; + } + + return arena_ind; +} + +static void +ctl_background_thread_stats_read(tsdn_t *tsdn) { + background_thread_stats_t *stats = &ctl_stats->background_thread; + if (!have_background_thread || + background_thread_stats_read(tsdn, stats)) { + memset(stats, 0, sizeof(background_thread_stats_t)); + nstime_init(&stats->run_interval, 0); + } +} + +static void +ctl_refresh(tsdn_t *tsdn) { + unsigned i; + ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL); + VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas); + + /* + * Clear sum stats, since they will be merged into by + * ctl_arena_refresh(). 
+ */ + ctl_arena_clear(ctl_sarena); + + for (i = 0; i < ctl_arenas->narenas; i++) { + tarenas[i] = arena_get(tsdn, i, false); + } + + for (i = 0; i < ctl_arenas->narenas; i++) { + ctl_arena_t *ctl_arena = arenas_i(i); + bool initialized = (tarenas[i] != NULL); + + ctl_arena->initialized = initialized; + if (initialized) { + ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i, + false); + } + } + + if (config_stats) { + ctl_stats->allocated = ctl_sarena->astats->allocated_small + + atomic_load_zu(&ctl_sarena->astats->astats.allocated_large, + ATOMIC_RELAXED); + ctl_stats->active = (ctl_sarena->pactive << LG_PAGE); + ctl_stats->metadata = atomic_load_zu( + &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) + + atomic_load_zu(&ctl_sarena->astats->astats.internal, + ATOMIC_RELAXED); + ctl_stats->metadata_thp = atomic_load_zu( + &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED); + ctl_stats->resident = atomic_load_zu( + &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED); + ctl_stats->mapped = atomic_load_zu( + &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED); + ctl_stats->retained = atomic_load_zu( + &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED); + + ctl_background_thread_stats_read(tsdn); + +#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \ + malloc_mutex_lock(tsdn, &mtx); \ + malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \ + malloc_mutex_unlock(tsdn, &mtx); + + if (config_prof && opt_prof) { + READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof, + bt2gctx_mtx); + } + if (have_background_thread) { + READ_GLOBAL_MUTEX_PROF_DATA( + global_prof_mutex_background_thread, + background_thread_lock); + } else { + memset(&ctl_stats->mutex_prof_data[ + global_prof_mutex_background_thread], 0, + sizeof(mutex_prof_data_t)); + } + /* We own ctl mutex already. */ + malloc_mutex_prof_read(tsdn, + &ctl_stats->mutex_prof_data[global_prof_mutex_ctl], + &ctl_mtx); +#undef READ_GLOBAL_MUTEX_PROF_DATA + } + ctl_arenas->epoch++; +} + +static bool +ctl_init(tsd_t *tsd) { + bool ret; + tsdn_t *tsdn = tsd_tsdn(tsd); + + malloc_mutex_lock(tsdn, &ctl_mtx); + if (!ctl_initialized) { + ctl_arena_t *ctl_sarena, *ctl_darena; + unsigned i; + + /* + * Allocate demand-zeroed space for pointers to the full + * range of supported arena indices. + */ + if (ctl_arenas == NULL) { + ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn, + b0get(), sizeof(ctl_arenas_t), QUANTUM); + if (ctl_arenas == NULL) { + ret = true; + goto label_return; + } + } + + if (config_stats && ctl_stats == NULL) { + ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(), + sizeof(ctl_stats_t), QUANTUM); + if (ctl_stats == NULL) { + ret = true; + goto label_return; + } + } + + /* + * Allocate space for the current full range of arenas + * here rather than doing it lazily elsewhere, in order + * to limit when OOM-caused errors can occur. + */ + if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false, + true)) == NULL) { + ret = true; + goto label_return; + } + ctl_sarena->initialized = true; + + if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED, + false, true)) == NULL) { + ret = true; + goto label_return; + } + ctl_arena_clear(ctl_darena); + /* + * Don't toggle ctl_darena to initialized until an arena is + * actually destroyed, so that arena.<i>.initialized can be used + * to query whether the stats are relevant.
+ */ + + ctl_arenas->narenas = narenas_total_get(); + for (i = 0; i < ctl_arenas->narenas; i++) { + if (arenas_i_impl(tsd, i, false, true) == NULL) { + ret = true; + goto label_return; + } + } + + ql_new(&ctl_arenas->destroyed); + ctl_refresh(tsdn); + + ctl_initialized = true; + } + + ret = false; +label_return: + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; +} + +static int +ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, + size_t *mibp, size_t *depthp) { + int ret; + const char *elm, *tdot, *dot; + size_t elen, i, j; + const ctl_named_node_t *node; + + elm = name; + /* Equivalent to strchrnul(). */ + dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); + elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); + if (elen == 0) { + ret = ENOENT; + goto label_return; + } + node = super_root_node; + for (i = 0; i < *depthp; i++) { + assert(node); + assert(node->nchildren > 0); + if (ctl_named_node(node->children) != NULL) { + const ctl_named_node_t *pnode = node; + + /* Children are named. */ + for (j = 0; j < node->nchildren; j++) { + const ctl_named_node_t *child = + ctl_named_children(node, j); + if (strlen(child->name) == elen && + strncmp(elm, child->name, elen) == 0) { + node = child; + if (nodesp != NULL) { + nodesp[i] = + (const ctl_node_t *)node; + } + mibp[i] = j; + break; + } + } + if (node == pnode) { + ret = ENOENT; + goto label_return; + } + } else { + uintmax_t index; + const ctl_indexed_node_t *inode; + + /* Children are indexed. */ + index = malloc_strtoumax(elm, NULL, 10); + if (index == UINTMAX_MAX || index > SIZE_T_MAX) { + ret = ENOENT; + goto label_return; + } + + inode = ctl_indexed_node(node->children); + node = inode->index(tsdn, mibp, *depthp, (size_t)index); + if (node == NULL) { + ret = ENOENT; + goto label_return; + } + + if (nodesp != NULL) { + nodesp[i] = (const ctl_node_t *)node; + } + mibp[i] = (size_t)index; + } + + if (node->ctl != NULL) { + /* Terminal node. */ + if (*dot != '\0') { + /* + * The name contains more elements than are + * in this path through the tree. + */ + ret = ENOENT; + goto label_return; + } + /* Complete lookup successful. */ + *depthp = i + 1; + break; + } + + /* Update elm. */ + if (*dot == '\0') { + /* No more elements. */ + ret = ENOENT; + goto label_return; + } + elm = &dot[1]; + dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : + strchr(elm, '\0'); + elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); + } + + ret = 0; +label_return: + return ret; +} + +int +ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) { + int ret; + size_t depth; + ctl_node_t const *nodes[CTL_MAX_DEPTH]; + size_t mib[CTL_MAX_DEPTH]; + const ctl_named_node_t *node; + + if (!ctl_initialized && ctl_init(tsd)) { + ret = EAGAIN; + goto label_return; + } + + depth = CTL_MAX_DEPTH; + ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); + if (ret != 0) { + goto label_return; + } + + node = ctl_named_node(nodes[depth-1]); + if (node != NULL && node->ctl) { + ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen); + } else { + /* The name refers to a partial path through the ctl tree. 
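+ * For example (an editorial illustration, not from the original + * source): "stats" alone names an interior node and fails with ENOENT, + * while "stats.allocated" reaches a terminal node whose handler is + * invoked; in "stats.arenas.7.pactive" the indexed stats.arenas node + * consumes the numeric component "7" via its index() callback.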
*/ + ret = ENOENT; + } + +label_return: + return ret; +} + +int +ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) { + int ret; + + if (!ctl_initialized && ctl_init(tsd)) { + ret = EAGAIN; + goto label_return; + } + + ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp); +label_return: + return ret; +} + +int +ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + const ctl_named_node_t *node; + size_t i; + + if (!ctl_initialized && ctl_init(tsd)) { + ret = EAGAIN; + goto label_return; + } + + /* Iterate down the tree. */ + node = super_root_node; + for (i = 0; i < miblen; i++) { + assert(node); + assert(node->nchildren > 0); + if (ctl_named_node(node->children) != NULL) { + /* Children are named. */ + if (node->nchildren <= mib[i]) { + ret = ENOENT; + goto label_return; + } + node = ctl_named_children(node, mib[i]); + } else { + const ctl_indexed_node_t *inode; + + /* Indexed element. */ + inode = ctl_indexed_node(node->children); + node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]); + if (node == NULL) { + ret = ENOENT; + goto label_return; + } + } + } + + /* Call the ctl function. */ + if (node && node->ctl) { + ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen); + } else { + /* Partial MIB. */ + ret = ENOENT; + } + +label_return: + return ret; +} + +bool +ctl_boot(void) { + if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL, + malloc_mutex_rank_exclusive)) { + return true; + } + + ctl_initialized = false; + + return false; +} + +void +ctl_prefork(tsdn_t *tsdn) { + malloc_mutex_prefork(tsdn, &ctl_mtx); +} + +void +ctl_postfork_parent(tsdn_t *tsdn) { + malloc_mutex_postfork_parent(tsdn, &ctl_mtx); +} + +void +ctl_postfork_child(tsdn_t *tsdn) { + malloc_mutex_postfork_child(tsdn, &ctl_mtx); +} + +/******************************************************************************/ +/* *_ctl() functions. */ + +#define READONLY() do { \ + if (newp != NULL || newlen != 0) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define WRITEONLY() do { \ + if (oldp != NULL || oldlenp != NULL) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define READ_XOR_WRITE() do { \ + if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \ + newlen != 0)) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define READ(v, t) do { \ + if (oldp != NULL && oldlenp != NULL) { \ + if (*oldlenp != sizeof(t)) { \ + size_t copylen = (sizeof(t) <= *oldlenp) \ + ? sizeof(t) : *oldlenp; \ + memcpy(oldp, (void *)&(v), copylen); \ + ret = EINVAL; \ + goto label_return; \ + } \ + *(t *)oldp = (v); \ + } \ +} while (0) + +#define WRITE(v, t) do { \ + if (newp != NULL) { \ + if (newlen != sizeof(t)) { \ + ret = EINVAL; \ + goto label_return; \ + } \ + (v) = *(t *)newp; \ + } \ +} while (0) + +#define MIB_UNSIGNED(v, i) do { \ + if (mib[i] > UINT_MAX) { \ + ret = EFAULT; \ + goto label_return; \ + } \ + v = (unsigned)mib[i]; \ +} while (0) + +/* + * There's a lot of code duplication in the following macros due to limitations + * in how nested cpp macros are expanded.
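+ * (Editorial sketch of the pattern: CTL_RO_NL_GEN(n, v, t) below + * expands to a handler n##_ctl() that calls READONLY() to reject + * writes, evaluates v into a local of type t, and copies it out via + * READ(); the CLGEN/CGEN/GEN variants differ only in whether they are + * gated on a config flag and whether ctl_mtx is held around the read.)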
+ */ +#define CTL_RO_CLGEN(c, l, n, v, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + if (!(c)) { \ + return ENOENT; \ + } \ + if (l) { \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ + } \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + if (l) { \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + } \ + return ret; \ +} + +#define CTL_RO_CGEN(c, n, v, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + if (!(c)) { \ + return ENOENT; \ + } \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + return ret; \ +} + +#define CTL_RO_GEN(n, v, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + return ret; \ +} + +/* + * ctl_mtx is not acquired, under the assumption that no pertinent data will + * mutate during the call. + */ +#define CTL_RO_NL_CGEN(c, n, v, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + if (!(c)) { \ + return ENOENT; \ + } \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return ret; \ +} + +#define CTL_RO_NL_GEN(n, v, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + READONLY(); \ + oldval = (v); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return ret; \ +} + +#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + if (!(c)) { \ + return ENOENT; \ + } \ + READONLY(); \ + oldval = (m(tsd)); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return ret; \ +} + +#define CTL_RO_CONFIG_GEN(n, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ + \ + READONLY(); \ + oldval = n; \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return ret; \ +} + +/******************************************************************************/ + +CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) + +static int +epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + UNUSED uint64_t newval; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + WRITE(newval, uint64_t); + if (newp != NULL) { + ctl_refresh(tsd_tsdn(tsd)); + } + READ(ctl_arenas->epoch, uint64_t); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static int +background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + bool oldval; + + if 
(!have_background_thread) { + return ENOENT; + } + background_thread_ctl_init(tsd_tsdn(tsd)); + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + if (newp == NULL) { + oldval = background_thread_enabled(); + READ(oldval, bool); + } else { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = background_thread_enabled(); + READ(oldval, bool); + + bool newval = *(bool *)newp; + if (newval == oldval) { + ret = 0; + goto label_return; + } + + background_thread_enabled_set(tsd_tsdn(tsd), newval); + if (newval) { + if (!can_enable_background_thread) { + malloc_printf("<jemalloc>: Error in dlsym(" + "RTLD_NEXT, \"pthread_create\"). Cannot " + "enable background_thread\n"); + ret = EFAULT; + goto label_return; + } + if (background_threads_enable(tsd)) { + ret = EFAULT; + goto label_return; + } + } else { + if (background_threads_disable(tsd)) { + ret = EFAULT; + goto label_return; + } + } + } + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + + return ret; +} + +static int +max_background_threads_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + size_t oldval; + + if (!have_background_thread) { + return ENOENT; + } + background_thread_ctl_init(tsd_tsdn(tsd)); + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + if (newp == NULL) { + oldval = max_background_threads; + READ(oldval, size_t); + } else { + if (newlen != sizeof(size_t)) { + ret = EINVAL; + goto label_return; + } + oldval = max_background_threads; + READ(oldval, size_t); + + size_t newval = *(size_t *)newp; + if (newval == oldval) { + ret = 0; + goto label_return; + } + if (newval > opt_max_background_threads) { + ret = EINVAL; + goto label_return; + } + + if (background_thread_enabled()) { + if (!can_enable_background_thread) { + malloc_printf("<jemalloc>: Error in dlsym(" + "RTLD_NEXT, \"pthread_create\").
Cannot " + "enable background_thread\n"); + ret = EFAULT; + goto label_return; + } + background_thread_enabled_set(tsd_tsdn(tsd), false); + if (background_threads_disable(tsd)) { + ret = EFAULT; + goto label_return; + } + max_background_threads = newval; + background_thread_enabled_set(tsd_tsdn(tsd), true); + if (background_threads_enable(tsd)) { + ret = EFAULT; + goto label_return; + } + } else { + max_background_threads = newval; + } + } + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + + return ret; +} + +/******************************************************************************/ + +CTL_RO_CONFIG_GEN(config_cache_oblivious, bool) +CTL_RO_CONFIG_GEN(config_debug, bool) +CTL_RO_CONFIG_GEN(config_fill, bool) +CTL_RO_CONFIG_GEN(config_lazy_lock, bool) +CTL_RO_CONFIG_GEN(config_malloc_conf, const char *) +CTL_RO_CONFIG_GEN(config_prof, bool) +CTL_RO_CONFIG_GEN(config_prof_libgcc, bool) +CTL_RO_CONFIG_GEN(config_prof_libunwind, bool) +CTL_RO_CONFIG_GEN(config_stats, bool) +CTL_RO_CONFIG_GEN(config_utrace, bool) +CTL_RO_CONFIG_GEN(config_xmalloc, bool) + +/******************************************************************************/ + +CTL_RO_NL_GEN(opt_abort, opt_abort, bool) +CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool) +CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp], + const char *) +CTL_RO_NL_GEN(opt_retain, opt_retain, bool) +CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) +CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned) +CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena], + const char *) +CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool) +CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t) +CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t) +CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t) +CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) +CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *) +CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) +CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) +CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) +CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) +CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool) +CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *) +CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit, + size_t) +CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) +CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) +CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init, + opt_prof_thread_active_init, bool) +CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) +CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) +CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) +CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) + +/******************************************************************************/ + +static int +thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + arena_t *oldarena; + unsigned 
newind, oldind; + + oldarena = arena_choose(tsd, NULL); + if (oldarena == NULL) { + return EAGAIN; + } + newind = oldind = arena_ind_get(oldarena); + WRITE(newind, unsigned); + READ(oldind, unsigned); + + if (newind != oldind) { + arena_t *newarena; + + if (newind >= narenas_total_get()) { + /* New arena index is out of range. */ + ret = EFAULT; + goto label_return; + } + + if (have_percpu_arena && + PERCPU_ARENA_ENABLED(opt_percpu_arena)) { + if (newind < percpu_arena_ind_limit(opt_percpu_arena)) { + /* + * If perCPU arena is enabled, thread_arena + * control is not allowed for the auto arena + * range. + */ + ret = EPERM; + goto label_return; + } + } + + /* Initialize arena if necessary. */ + newarena = arena_get(tsd_tsdn(tsd), newind, true); + if (newarena == NULL) { + ret = EAGAIN; + goto label_return; + } + /* Set new arena/tcache associations. */ + arena_migrate(tsd, oldind, newind); + if (tcache_available(tsd)) { + tcache_arena_reassociate(tsd_tsdn(tsd), + tsd_tcachep_get(tsd), newarena); + } + } + + ret = 0; +label_return: + return ret; +} + +CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, + uint64_t) +CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get, + uint64_t *) +CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get, + uint64_t) +CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, + tsd_thread_deallocatedp_get, uint64_t *) + +static int +thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + bool oldval; + + oldval = tcache_enabled_get(tsd); + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + tcache_enabled_set(tsd, *(bool *)newp); + } + READ(oldval, bool); + + ret = 0; +label_return: + return ret; +} + +static int +thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + if (!tcache_available(tsd)) { + ret = EFAULT; + goto label_return; + } + + READONLY(); + WRITEONLY(); + + tcache_flush(tsd); + + ret = 0; +label_return: + return ret; +} + +static int +thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + if (!config_prof) { + return ENOENT; + } + + READ_XOR_WRITE(); + + if (newp != NULL) { + if (newlen != sizeof(const char *)) { + ret = EINVAL; + goto label_return; + } + + if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != + 0) { + goto label_return; + } + } else { + const char *oldname = prof_thread_name_get(tsd); + READ(oldname, const char *); + } + + ret = 0; +label_return: + return ret; +} + +static int +thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + bool oldval; + + if (!config_prof) { + return ENOENT; + } + + oldval = prof_thread_active_get(tsd); + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + if (prof_thread_active_set(tsd, *(bool *)newp)) { + ret = EAGAIN; + goto label_return; + } + } + READ(oldval, bool); + + ret = 0; +label_return: + return ret; +} + +/******************************************************************************/ + +static int +tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned tcache_ind; + + READONLY(); + if 
(tcaches_create(tsd, &tcache_ind)) { + ret = EFAULT; + goto label_return; + } + READ(tcache_ind, unsigned); + + ret = 0; +label_return: + return ret; +} + +static int +tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned tcache_ind; + + WRITEONLY(); + tcache_ind = UINT_MAX; + WRITE(tcache_ind, unsigned); + if (tcache_ind == UINT_MAX) { + ret = EFAULT; + goto label_return; + } + tcaches_flush(tsd, tcache_ind); + + ret = 0; +label_return: + return ret; +} + +static int +tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned tcache_ind; + + WRITEONLY(); + tcache_ind = UINT_MAX; + WRITE(tcache_ind, unsigned); + if (tcache_ind == UINT_MAX) { + ret = EFAULT; + goto label_return; + } + tcaches_destroy(tsd, tcache_ind); + + ret = 0; +label_return: + return ret; +} + +/******************************************************************************/ + +static int +arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + tsdn_t *tsdn = tsd_tsdn(tsd); + unsigned arena_ind; + bool initialized; + + READONLY(); + MIB_UNSIGNED(arena_ind, 1); + + malloc_mutex_lock(tsdn, &ctl_mtx); + initialized = arenas_i(arena_ind)->initialized; + malloc_mutex_unlock(tsdn, &ctl_mtx); + + READ(initialized, bool); + + ret = 0; +label_return: + return ret; +} + +static void +arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) { + malloc_mutex_lock(tsdn, &ctl_mtx); + { + unsigned narenas = ctl_arenas->narenas; + + /* + * Access via index narenas is deprecated, and scheduled for + * removal in 6.0.0. + */ + if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) { + unsigned i; + VARIABLE_ARRAY(arena_t *, tarenas, narenas); + + for (i = 0; i < narenas; i++) { + tarenas[i] = arena_get(tsdn, i, false); + } + + /* + * No further need to hold ctl_mtx, since narenas and + * tarenas contain everything needed below. + */ + malloc_mutex_unlock(tsdn, &ctl_mtx); + + for (i = 0; i < narenas; i++) { + if (tarenas[i] != NULL) { + arena_decay(tsdn, tarenas[i], false, + all); + } + } + } else { + arena_t *tarena; + + assert(arena_ind < narenas); + + tarena = arena_get(tsdn, arena_ind, false); + + /* No further need to hold ctl_mtx. 
*/ + malloc_mutex_unlock(tsdn, &ctl_mtx); + + if (tarena != NULL) { + arena_decay(tsdn, tarena, false, all); + } + } + } +} + +static int +arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + + READONLY(); + WRITEONLY(); + MIB_UNSIGNED(arena_ind, 1); + arena_i_decay(tsd_tsdn(tsd), arena_ind, false); + + ret = 0; +label_return: + return ret; +} + +static int +arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + + READONLY(); + WRITEONLY(); + MIB_UNSIGNED(arena_ind, 1); + arena_i_decay(tsd_tsdn(tsd), arena_ind, true); + + ret = 0; +label_return: + return ret; +} + +static int +arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind, + arena_t **arena) { + int ret; + + READONLY(); + WRITEONLY(); + MIB_UNSIGNED(*arena_ind, 1); + + *arena = arena_get(tsd_tsdn(tsd), *arena_ind, false); + if (*arena == NULL || arena_is_auto(*arena)) { + ret = EFAULT; + goto label_return; + } + + ret = 0; +label_return: + return ret; +} + +static void +arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) { + /* Temporarily disable the background thread during arena reset. */ + if (have_background_thread) { + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + if (background_thread_enabled()) { + unsigned ind = arena_ind % ncpus; + background_thread_info_t *info = + &background_thread_info[ind]; + assert(info->state == background_thread_started); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + info->state = background_thread_paused; + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + } +} + +static void +arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) { + if (have_background_thread) { + if (background_thread_enabled()) { + unsigned ind = arena_ind % ncpus; + background_thread_info_t *info = + &background_thread_info[ind]; + assert(info->state == background_thread_paused); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + info->state = background_thread_started; + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + } +} + +static int +arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + + ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, + newp, newlen, &arena_ind, &arena); + if (ret != 0) { + return ret; + } + + arena_reset_prepare_background_thread(tsd, arena_ind); + arena_reset(tsd, arena); + arena_reset_finish_background_thread(tsd, arena_ind); + + return ret; +} + +static int +arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + ctl_arena_t *ctl_darena, *ctl_arena; + + ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, + newp, newlen, &arena_ind, &arena); + if (ret != 0) { + goto label_return; + } + + if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena, + true) != 0) { + ret = EFAULT; + goto label_return; + } + + arena_reset_prepare_background_thread(tsd, arena_ind); + /* Merge stats after resetting and purging arena. 
*/ + arena_reset(tsd, arena); + arena_decay(tsd_tsdn(tsd), arena, false, true); + ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED); + ctl_darena->initialized = true; + ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true); + /* Destroy arena. */ + arena_destroy(tsd, arena); + ctl_arena = arenas_i(arena_ind); + ctl_arena->initialized = false; + /* Record arena index for later recycling via arenas.create. */ + ql_elm_new(ctl_arena, destroyed_link); + ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link); + arena_reset_finish_background_thread(tsd, arena_ind); + + assert(ret == 0); +label_return: + return ret; +} + +static int +arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + const char *dss = NULL; + unsigned arena_ind; + dss_prec_t dss_prec_old = dss_prec_limit; + dss_prec_t dss_prec = dss_prec_limit; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + WRITE(dss, const char *); + MIB_UNSIGNED(arena_ind, 1); + if (dss != NULL) { + int i; + bool match = false; + + for (i = 0; i < dss_prec_limit; i++) { + if (strcmp(dss_prec_names[i], dss) == 0) { + dss_prec = i; + match = true; + break; + } + } + + if (!match) { + ret = EINVAL; + goto label_return; + } + } + + /* + * Access via index narenas is deprecated, and scheduled for removal in + * 6.0.0. + */ + if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == + ctl_arenas->narenas) { + if (dss_prec != dss_prec_limit && + extent_dss_prec_set(dss_prec)) { + ret = EFAULT; + goto label_return; + } + dss_prec_old = extent_dss_prec_get(); + } else { + arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL || (dss_prec != dss_prec_limit && + arena_dss_prec_set(arena, dss_prec))) { + ret = EFAULT; + goto label_return; + } + dss_prec_old = arena_dss_prec_get(arena); + } + + dss = dss_prec_names[dss_prec_old]; + READ(dss, const char *); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static int +arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { + int ret; + unsigned arena_ind; + arena_t *arena; + + MIB_UNSIGNED(arena_ind, 1); + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL) { + ret = EFAULT; + goto label_return; + } + + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) : + arena_muzzy_decay_ms_get(arena); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (dirty ? 
arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena, + *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd), + arena, *(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } + } + + ret = 0; +label_return: + return ret; +} + +static int +arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, true); +} + +static int +arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, false); +} + +static int +arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + MIB_UNSIGNED(arena_ind, 1); + if (arena_ind < narenas_total_get()) { + extent_hooks_t *old_extent_hooks; + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL) { + if (arena_ind >= narenas_auto) { + ret = EFAULT; + goto label_return; + } + old_extent_hooks = + (extent_hooks_t *)&extent_hooks_default; + READ(old_extent_hooks, extent_hooks_t *); + if (newp != NULL) { + /* Initialize a new arena as a side effect. */ + extent_hooks_t *new_extent_hooks + JEMALLOC_CC_SILENCE_INIT(NULL); + WRITE(new_extent_hooks, extent_hooks_t *); + arena = arena_init(tsd_tsdn(tsd), arena_ind, + new_extent_hooks); + if (arena == NULL) { + ret = EFAULT; + goto label_return; + } + } + } else { + if (newp != NULL) { + extent_hooks_t *new_extent_hooks + JEMALLOC_CC_SILENCE_INIT(NULL); + WRITE(new_extent_hooks, extent_hooks_t *); + old_extent_hooks = extent_hooks_set(tsd, arena, + new_extent_hooks); + READ(old_extent_hooks, extent_hooks_t *); + } else { + old_extent_hooks = extent_hooks_get(arena); + READ(old_extent_hooks, extent_hooks_t *); + } + } + } else { + ret = EFAULT; + goto label_return; + } + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static int +arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + + if (!opt_retain) { + /* Only relevant when retain is enabled. */ + return ENOENT; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + MIB_UNSIGNED(arena_ind, 1); + if (arena_ind < narenas_total_get() && (arena = + arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { + size_t old_limit, new_limit; + if (newp != NULL) { + WRITE(new_limit, size_t); + } + bool err = arena_retain_grow_limit_get_set(tsd, arena, + &old_limit, newp != NULL ? 
&new_limit : NULL); + if (!err) { + READ(old_limit, size_t); + ret = 0; + } else { + ret = EFAULT; + } + } else { + ret = EFAULT; + } +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static const ctl_named_node_t * +arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { + const ctl_named_node_t *ret; + + malloc_mutex_lock(tsdn, &ctl_mtx); + switch (i) { + case MALLCTL_ARENAS_ALL: + case MALLCTL_ARENAS_DESTROYED: + break; + default: + if (i > ctl_arenas->narenas) { + ret = NULL; + goto label_return; + } + break; + } + + ret = super_arena_i_node; +label_return: + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; +} + +/******************************************************************************/ + +static int +arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned narenas; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + READONLY(); + if (*oldlenp != sizeof(unsigned)) { + ret = EINVAL; + goto label_return; + } + narenas = ctl_arenas->narenas; + READ(narenas, unsigned); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static int +arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { + int ret; + + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() : + arena_muzzy_decay_ms_default_get()); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp) + : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } + } + + ret = 0; +label_return: + return ret; +} + +static int +arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, true); +} + +static int +arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, false); +} + +CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) +CTL_RO_NL_GEN(arenas_page, PAGE, size_t) +CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t) +CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) +CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned) +CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t) +CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t) +CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t) +static const ctl_named_node_t * +arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { + if (i > NBINS) { + return NULL; + } + return super_arenas_bin_i_node; +} + +CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned) +CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]), + size_t) +static const ctl_named_node_t * +arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t i) { + if (i > NSIZES - NBINS) { + return NULL; + } + return super_arenas_lextent_i_node; +} + +static int +arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + extent_hooks_t *extent_hooks; + 
unsigned arena_ind; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + + extent_hooks = (extent_hooks_t *)&extent_hooks_default; + WRITE(extent_hooks, extent_hooks_t *); + if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) { + ret = EAGAIN; + goto label_return; + } + READ(arena_ind, unsigned); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static int +arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + void *ptr; + extent_t *extent; + arena_t *arena; + + ptr = NULL; + ret = EINVAL; + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + WRITE(ptr, void *); + extent = iealloc(tsd_tsdn(tsd), ptr); + if (extent == NULL) + goto label_return; + + arena = extent_arena_get(extent); + if (arena == NULL) + goto label_return; + + arena_ind = arena_ind_get(arena); + READ(arena_ind, unsigned); + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +/******************************************************************************/ + +static int +prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + bool oldval; + + if (!config_prof) { + return ENOENT; + } + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_thread_active_init_set(tsd_tsdn(tsd), + *(bool *)newp); + } else { + oldval = prof_thread_active_init_get(tsd_tsdn(tsd)); + } + READ(oldval, bool); + + ret = 0; +label_return: + return ret; +} + +static int +prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + bool oldval; + + if (!config_prof) { + return ENOENT; + } + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp); + } else { + oldval = prof_active_get(tsd_tsdn(tsd)); + } + READ(oldval, bool); + + ret = 0; +label_return: + return ret; +} + +static int +prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + const char *filename = NULL; + + if (!config_prof) { + return ENOENT; + } + + WRITEONLY(); + WRITE(filename, const char *); + + if (prof_mdump(tsd, filename)) { + ret = EFAULT; + goto label_return; + } + + ret = 0; +label_return: + return ret; +} + +static int +prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + bool oldval; + + if (!config_prof) { + return ENOENT; + } + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp); + } else { + oldval = prof_gdump_get(tsd_tsdn(tsd)); + } + READ(oldval, bool); + + ret = 0; +label_return: + return ret; +} + +static int +prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + size_t lg_sample = lg_prof_sample; + + if (!config_prof) { + return ENOENT; + } + + WRITEONLY(); + WRITE(lg_sample, size_t); + if (lg_sample >= (sizeof(uint64_t) << 3)) { + lg_sample = (sizeof(uint64_t) << 3) - 1; + } + + prof_reset(tsd, lg_sample); + + ret = 0; +label_return: + return ret; +} + +CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) 
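As a consumer-side aside (a minimal sketch, not part of the original patch; it assumes only jemalloc's public mallctl() entry point, which dispatches into ctl_byname() above): readers of the stats.* nodes defined next are expected to write to "epoch" first, so that ctl_refresh() captures a fresh snapshot, and then pull individual counters.

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
	/* Bump the epoch so ctl_refresh() re-merges per-arena stats. */
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	/* Read two of the read-only counters generated by CTL_RO_CGEN. */
	size_t allocated, active;
	sz = sizeof(size_t);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0 &&
	    mallctl("stats.active", &active, &sz, NULL, 0) == 0) {
		printf("allocated=%zu active=%zu\n", allocated, active);
	}
	return 0;
}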
+CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t) + +/******************************************************************************/ + +CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t) +CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t) +CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t) +CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t) +CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t) +CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t) +CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t) + +CTL_RO_CGEN(config_stats, stats_background_thread_num_threads, + ctl_stats->background_thread.num_threads, size_t) +CTL_RO_CGEN(config_stats, stats_background_thread_num_runs, + ctl_stats->background_thread.num_runs, uint64_t) +CTL_RO_CGEN(config_stats, stats_background_thread_run_interval, + nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t) + +CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *) +CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms, + ssize_t) +CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms, + ssize_t) +CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned) +CTL_RO_GEN(stats_arenas_i_uptime, + nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t) +CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t) +CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t) +CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_retained, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED), + size_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_base, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_internal, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp, + ATOMIC_RELAXED), size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes, + ATOMIC_RELAXED), size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_resident, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED), + 
size_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, + arenas_i(mib[2])->astats->allocated_small, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, + arenas_i(mib[2])->astats->nmalloc_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, + arenas_i(mib[2])->astats->ndalloc_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, + arenas_i(mib[2])->astats->nrequests_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large, + ATOMIC_RELAXED), size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t) +/* + * Note: "nmalloc" is intentionally read here instead of "nrequests". + */ +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, + ctl_arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t) + +/* Lock profiling related APIs below. */ +#define RO_MUTEX_CTL_GEN(n, l) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \ + l.n_lock_ops, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \ + l.n_wait_times, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \ + l.n_spin_acquired, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \ + l.n_owner_switches, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \ + nstime_ns(&l.tot_wait_time), uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \ + nstime_ns(&l.max_wait_time), uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \ + l.max_n_thds, uint32_t) + +/* Global mutexes. */ +#define OP(mtx) \ + RO_MUTEX_CTL_GEN(mutexes_##mtx, \ + ctl_stats->mutex_prof_data[global_prof_mutex_##mtx]) +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + +/* Per arena mutexes */ +#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \ + arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx]) +MUTEX_PROF_ARENA_MUTEXES +#undef OP + +/* tcache bin mutex */ +RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex, + arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data) +#undef RO_MUTEX_CTL_GEN + +/* Resets all mutex stats, including global, arena and bin mutexes. */ +static int +stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + if (!config_stats) { + return ENOENT; + } + + tsdn_t *tsdn = tsd_tsdn(tsd); + +#define MUTEX_PROF_RESET(mtx) \ + malloc_mutex_lock(tsdn, &mtx); \ + malloc_mutex_prof_data_reset(tsdn, &mtx); \ + malloc_mutex_unlock(tsdn, &mtx); + + /* Global mutexes: ctl and prof. */ + MUTEX_PROF_RESET(ctl_mtx); + if (have_background_thread) { + MUTEX_PROF_RESET(background_thread_lock); + } + if (config_prof && opt_prof) { + MUTEX_PROF_RESET(bt2gctx_mtx); + } + + /* Per arena mutexes.
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
+    arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
+    arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
+    arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
+    arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
+    arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
+    arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
+    arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
+    arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
+    arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
+
+static const ctl_named_node_t *
+stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+    size_t j) {
+    if (j > NBINS) {
+        return NULL;
+    }
+    return super_stats_arenas_i_bins_j_node;
+}
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
+    ctl_arena_stats_read_u64(
+    &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
+    ctl_arena_stats_read_u64(
+    &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
+    ctl_arena_stats_read_u64(
+    &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
+    arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
+
+static const ctl_named_node_t *
+stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+    size_t j) {
+    if (j > NSIZES - NBINS) {
+        return NULL;
+    }
+    return super_stats_arenas_i_lextents_j_node;
+}
+
+static const ctl_named_node_t *
+stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
+    const ctl_named_node_t *ret;
+    size_t a;
+
+    malloc_mutex_lock(tsdn, &ctl_mtx);
+    a = arenas_i2a_impl(i, true, true);
+    if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
+        ret = NULL;
+        goto label_return;
+    }
+
+    ret = super_stats_arenas_i_node;
+label_return:
+    malloc_mutex_unlock(tsdn, &ctl_mtx);
+    return ret;
+}
diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/div.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/div.c
new file mode 100644
index 0000000..808892a
--- /dev/null
+++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/div.c
@@ -0,0 +1,55 @@
+#include "jemalloc/internal/jemalloc_preamble.h" + +#include "jemalloc/internal/div.h" + +#include "jemalloc/internal/assert.h" + +/* + * Suppose we have n = q * d, all integers. We know n and d, and want q = n / d. + * + * For any k, we have (here, all division is exact; not C-style rounding): + * floor(ceil(2^k / d) * n / 2^k) = floor((2^k + r) / d * n / 2^k), where + * r = (-2^k) mod d. + * + * Expanding this out: + * ... = floor(2^k / d * n / 2^k + r / d * n / 2^k) + * = floor(n / d + (r / d) * (n / 2^k)). + * + * The fractional part of n / d is 0 (because of the assumption that d divides n + * exactly), so we have: + * ... = n / d + floor((r / d) * (n / 2^k)) + * + * So that our initial expression is equal to the quantity we seek, so long as + * (r / d) * (n / 2^k) < 1. + * + * r is a remainder mod d, so r < d and r / d < 1 always. We can make + * n / 2 ^ k < 1 by setting k = 32. This gets us a value of magic that works. + */ + +void +div_init(div_info_t *div_info, size_t d) { + /* Nonsensical. */ + assert(d != 0); + /* + * This would make the value of magic too high to fit into a uint32_t + * (we would want magic = 2^32 exactly). This would mess with code gen + * on 32-bit machines. + */ + assert(d != 1); + + uint64_t two_to_k = ((uint64_t)1 << 32); + uint32_t magic = (uint32_t)(two_to_k / d); + + /* + * We want magic = ceil(2^k / d), but C gives us floor. We have to + * increment it unless the result was exact (i.e. unless d is a power of + * two). + */ + if (two_to_k % d != 0) { + magic++; + } + div_info->magic = magic; +#ifdef JEMALLOC_DEBUG + div_info->d = d; +#endif +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/extent.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/extent.c new file mode 100644 index 0000000..09d6d77 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/extent.c @@ -0,0 +1,2177 @@ +#define JEMALLOC_EXTENT_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/ph.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_pool.h" + +/******************************************************************************/ +/* Data. */ + +rtree_t extents_rtree; +/* Keyed by the address of the extent_t being protected. 
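
The derivation comment at the top of div.c above is the whole correctness argument for the magic-number division. A standalone model of the same computation, checkable in isolation (plain C, no jemalloc internals), is:

    /* Standalone model of div.c's trick: q = (n * ceil(2^32 / d)) >> 32,
     * valid for exact multiples n = q * d with n < 2^32 and d >= 2. */
    #include <assert.h>
    #include <stdint.h>

    static uint32_t magic_init(uint32_t d) {
        uint64_t two_to_k = (uint64_t)1 << 32;
        uint32_t magic = (uint32_t)(two_to_k / d);
        if (two_to_k % d != 0) {
            magic++;    /* magic = ceil(2^32 / d), as div_init computes */
        }
        return magic;
    }

    static uint32_t magic_div(uint32_t n, uint32_t magic) {
        return (uint32_t)(((uint64_t)n * magic) >> 32);
    }

    int main(void) {
        for (uint32_t d = 2; d < 1000; d++) {
            uint32_t magic = magic_init(d);
            for (uint32_t q = 0; q < 1000; q++) {
                assert(magic_div(q * d, magic) == q);
            }
        }
        return 0;
    }

The assertion only holds for exact multiples, which is precisely the precondition the comment states (n = q * d, all integers).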
*/ +mutex_pool_t extent_mutex_pool; + +size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT; + +static const bitmap_info_t extents_bitmap_info = + BITMAP_INFO_INITIALIZER(NPSIZES+1); + +static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit, + unsigned arena_ind); +static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained); +static bool extent_decommit_default(extent_hooks_t *extent_hooks, + void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); +#ifdef PAGES_CAN_PURGE_LAZY +static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +#endif +static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained); +#ifdef PAGES_CAN_PURGE_FORCED +static bool extent_purge_forced_default(extent_hooks_t *extent_hooks, + void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); +#endif +static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained); +#ifdef JEMALLOC_MAPS_COALESCE +static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t size_a, size_t size_b, bool committed, + unsigned arena_ind); +#endif +static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, + bool growing_retained); +#ifdef JEMALLOC_MAPS_COALESCE +static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, + size_t size_a, void *addr_b, size_t size_b, bool committed, + unsigned arena_ind); +#endif +static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, + bool growing_retained); + +const extent_hooks_t extent_hooks_default = { + extent_alloc_default, + extent_dalloc_default, + extent_destroy_default, + extent_commit_default, + extent_decommit_default +#ifdef PAGES_CAN_PURGE_LAZY + , + extent_purge_lazy_default +#else + , + NULL +#endif +#ifdef PAGES_CAN_PURGE_FORCED + , + extent_purge_forced_default +#else + , + NULL +#endif +#ifdef JEMALLOC_MAPS_COALESCE + , + extent_split_default, + extent_merge_default +#endif +}; + +/* Used exclusively for gdump triggering. */ +static atomic_zu_t curpages; +static atomic_zu_t highpages; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. 
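
extent_hooks_default above is also the template for user-supplied hooks: an application can read the current table, copy it, override entries, and install the copy through the "arena.<i>.extent_hooks" mallctl. A hedged sketch using only the public jemalloc API (error handling mostly omitted):

    /* Sketch: a logging extent-allocation hook that delegates to the
     * defaults. Hook signatures mirror extent_alloc_default above. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    static extent_hooks_t *default_hooks;  /* captured before install */
    static extent_hooks_t logging_hooks;

    static void *
    log_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
        size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
        fprintf(stderr, "extent alloc: %zu bytes (arena %u)\n", size,
            arena_ind);
        return default_hooks->alloc(default_hooks, new_addr, size,
            alignment, zero, commit, arena_ind);
    }

    int main(void) {
        size_t sz = sizeof(default_hooks);
        if (mallctl("arena.0.extent_hooks", &default_hooks, &sz, NULL,
            0) != 0) {
            return 1;
        }
        logging_hooks = *default_hooks;
        logging_hooks.alloc = log_alloc;
        extent_hooks_t *hooks = &logging_hooks;
        mallctl("arena.0.extent_hooks", NULL, NULL, &hooks, sizeof(hooks));

        free(malloc(4 * 1024 * 1024)); /* large enough to need an extent */
        return 0;
    }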
+ */ + +static void extent_deregister(tsdn_t *tsdn, extent_t *extent); +static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, + size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind, + bool *zero, bool *commit, bool growing_retained); +static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent, bool *coalesced, bool growing_retained); +static void extent_record(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent, + bool growing_retained); + +/******************************************************************************/ + +ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link, + extent_esnead_comp) + +typedef enum { + lock_result_success, + lock_result_failure, + lock_result_no_extent +} lock_result_t; + +static lock_result_t +extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm, + extent_t **result) { + extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree, + elm, true); + + if (extent1 == NULL) { + return lock_result_no_extent; + } + /* + * It's possible that the extent changed out from under us, and with it + * the leaf->extent mapping. We have to recheck while holding the lock. + */ + extent_lock(tsdn, extent1); + extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn, + &extents_rtree, elm, true); + + if (extent1 == extent2) { + *result = extent1; + return lock_result_success; + } else { + extent_unlock(tsdn, extent1); + return lock_result_failure; + } +} + +/* + * Returns a pool-locked extent_t * if there's one associated with the given + * address, and NULL otherwise. 
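
extent_rtree_leaf_elm_try_lock() above is an instance of a general optimistic pattern: read a published pointer, lock the object it names, then re-read to confirm the mapping did not change while the lock was being acquired. Reduced to its essentials with hypothetical types and a pthreads mutex:

    /* The recheck-under-lock pattern, outside jemalloc. The comments map
     * each outcome onto the lock_result_t values above. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct node_s {
        pthread_mutex_t mtx;
    } node_t;

    static node_t *try_lock_slot(_Atomic(node_t *) *slot) {
        for (;;) {
            node_t *n1 = atomic_load(slot);
            if (n1 == NULL) {
                return NULL;               /* lock_result_no_extent */
            }
            pthread_mutex_lock(&n1->mtx);
            node_t *n2 = atomic_load(slot);
            if (n1 == n2) {
                return n1;                 /* lock_result_success */
            }
            /* Slot changed between read and lock; undo and retry. */
            pthread_mutex_unlock(&n1->mtx); /* lock_result_failure */
        }
    }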
+ */ +static extent_t * +extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) { + extent_t *ret = NULL; + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree, + rtree_ctx, (uintptr_t)addr, false, false); + if (elm == NULL) { + return NULL; + } + lock_result_t lock_result; + do { + lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret); + } while (lock_result == lock_result_failure); + return ret; +} + +extent_t * +extent_alloc(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); + extent_t *extent = extent_avail_first(&arena->extent_avail); + if (extent == NULL) { + malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); + return base_alloc_extent(tsdn, arena->base); + } + extent_avail_remove(&arena->extent_avail, extent); + malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); + return extent; +} + +void +extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); + extent_avail_insert(&arena->extent_avail, extent); + malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); +} + +extent_hooks_t * +extent_hooks_get(arena_t *arena) { + return base_extent_hooks_get(arena->base); +} + +extent_hooks_t * +extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) { + background_thread_info_t *info; + if (have_background_thread) { + info = arena_background_thread_info_get(arena); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + } + extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks); + if (have_background_thread) { + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + + return ret; +} + +static void +extent_hooks_assure_initialized(arena_t *arena, + extent_hooks_t **r_extent_hooks) { + if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) { + *r_extent_hooks = extent_hooks_get(arena); + } +} + +#ifndef JEMALLOC_JET +static +#endif +size_t +extent_size_quantize_floor(size_t size) { + size_t ret; + pszind_t pind; + + assert(size > 0); + assert((size & PAGE_MASK) == 0); + + pind = sz_psz2ind(size - sz_large_pad + 1); + if (pind == 0) { + /* + * Avoid underflow. This short-circuit would also do the right + * thing for all sizes in the range for which there are + * PAGE-spaced size classes, but it's simplest to just handle + * the one case that would cause erroneous results. + */ + return size; + } + ret = sz_pind2sz(pind - 1) + sz_large_pad; + assert(ret <= size); + return ret; +} + +#ifndef JEMALLOC_JET +static +#endif +size_t +extent_size_quantize_ceil(size_t size) { + size_t ret; + + assert(size > 0); + assert(size - sz_large_pad <= LARGE_MAXCLASS); + assert((size & PAGE_MASK) == 0); + + ret = extent_size_quantize_floor(size); + if (ret < size) { + /* + * Skip a quantization that may have an adequately large extent, + * because under-sized extents may be mixed in. This only + * happens when an unusual size is requested, i.e. for aligned + * allocation, and is just one of several places where linear + * search would potentially find sufficiently aligned available + * memory somewhere lower. + */ + ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) + + sz_large_pad; + } + return ret; +} + +/* Generate pairing heap functions. 
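
The two quantize functions above round an extent size down or up to the nearest page-size class. A toy model over an explicit class table behaves the same way; the real code derives the classes with sz_psz2ind()/sz_pind2sz() and applies the sz_large_pad correction:

    /* Toy model of extent_size_quantize_{floor,ceil}: hypothetical
     * size-class table, not jemalloc's real one. */
    #include <assert.h>
    #include <stddef.h>

    static const size_t classes[] = {4096, 8192, 12288, 16384, 24576, 32768};
    #define NCLASSES (sizeof(classes) / sizeof(classes[0]))

    static size_t quantize_floor(size_t size) {
        size_t ret = classes[0];
        for (size_t i = 0; i < NCLASSES && classes[i] <= size; i++) {
            ret = classes[i];    /* largest class <= size */
        }
        return ret;
    }

    static size_t quantize_ceil(size_t size) {
        for (size_t i = 0; i < NCLASSES; i++) {
            if (classes[i] >= size) {
                return classes[i];    /* smallest class >= size */
            }
        }
        return 0;    /* out of range */
    }

    int main(void) {
        assert(quantize_floor(20480) == 16384);
        assert(quantize_ceil(20480) == 24576);
        return 0;
    }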
*/ +ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp) + +bool +extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, + bool delay_coalesce) { + if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS, + malloc_mutex_rank_exclusive)) { + return true; + } + for (unsigned i = 0; i < NPSIZES+1; i++) { + extent_heap_new(&extents->heaps[i]); + } + bitmap_init(extents->bitmap, &extents_bitmap_info, true); + extent_list_init(&extents->lru); + atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED); + extents->state = state; + extents->delay_coalesce = delay_coalesce; + return false; +} + +extent_state_t +extents_state_get(const extents_t *extents) { + return extents->state; +} + +size_t +extents_npages_get(extents_t *extents) { + return atomic_load_zu(&extents->npages, ATOMIC_RELAXED); +} + +static void +extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) { + malloc_mutex_assert_owner(tsdn, &extents->mtx); + assert(extent_state_get(extent) == extents->state); + + size_t size = extent_size_get(extent); + size_t psz = extent_size_quantize_floor(size); + pszind_t pind = sz_psz2ind(psz); + if (extent_heap_empty(&extents->heaps[pind])) { + bitmap_unset(extents->bitmap, &extents_bitmap_info, + (size_t)pind); + } + extent_heap_insert(&extents->heaps[pind], extent); + extent_list_append(&extents->lru, extent); + size_t npages = size >> LG_PAGE; + /* + * All modifications to npages hold the mutex (as asserted above), so we + * don't need an atomic fetch-add; we can get by with a load followed by + * a store. + */ + size_t cur_extents_npages = + atomic_load_zu(&extents->npages, ATOMIC_RELAXED); + atomic_store_zu(&extents->npages, cur_extents_npages + npages, + ATOMIC_RELAXED); +} + +static void +extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) { + malloc_mutex_assert_owner(tsdn, &extents->mtx); + assert(extent_state_get(extent) == extents->state); + + size_t size = extent_size_get(extent); + size_t psz = extent_size_quantize_floor(size); + pszind_t pind = sz_psz2ind(psz); + extent_heap_remove(&extents->heaps[pind], extent); + if (extent_heap_empty(&extents->heaps[pind])) { + bitmap_set(extents->bitmap, &extents_bitmap_info, + (size_t)pind); + } + extent_list_remove(&extents->lru, extent); + size_t npages = size >> LG_PAGE; + /* + * As in extents_insert_locked, we hold extents->mtx and so don't need + * atomic operations for updating extents->npages. + */ + size_t cur_extents_npages = + atomic_load_zu(&extents->npages, ATOMIC_RELAXED); + assert(cur_extents_npages >= npages); + atomic_store_zu(&extents->npages, + cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED); +} + +/* + * Find an extent with size [min_size, max_size) to satisfy the alignment + * requirement. For each size, try only the first extent in the heap. 
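
The bitmap maintained by the insert/remove pair above caches which heaps are non-empty, so the fit searches that follow can jump between occupied size classes instead of scanning every heap. One caveat when reading the code: jemalloc's polarity is inverted relative to this sketch (a set bit there marks an empty heap, and bitmap_ffu() finds the first unset bit); the mechanism is the same. A one-word stand-in:

    /* Sketch: one bit per size-class heap, kept in sync on insert/remove,
     * searched with a find-first-set. A uint64_t stands in for bitmap_t. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t nonempty;    /* bit i set => heaps[i] is non-empty */

    static void heap_became_nonempty(unsigned i) {
        nonempty |= (uint64_t)1 << i;
    }
    static void heap_became_empty(unsigned i) {
        nonempty &= ~((uint64_t)1 << i);
    }

    /* First non-empty heap at index >= start, or 64 if none. */
    static unsigned first_fit_from(unsigned start) {
        uint64_t masked = nonempty & ~(((uint64_t)1 << start) - 1);
        return masked == 0 ? 64 : (unsigned)__builtin_ctzll(masked);
    }

    int main(void) {
        heap_became_nonempty(3);
        heap_became_nonempty(9);
        printf("%u %u %u\n", first_fit_from(0), first_fit_from(4),
            first_fit_from(10));    /* prints: 3 9 64 */
        return 0;
    }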
+ */ +static extent_t * +extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size, + size_t alignment) { + pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size)); + pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size)); + + for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, + &extents_bitmap_info, (size_t)pind); i < pind_max; i = + (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, + (size_t)i+1)) { + assert(i < NPSIZES); + assert(!extent_heap_empty(&extents->heaps[i])); + extent_t *extent = extent_heap_first(&extents->heaps[i]); + uintptr_t base = (uintptr_t)extent_base_get(extent); + size_t candidate_size = extent_size_get(extent); + assert(candidate_size >= min_size); + + uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base, + PAGE_CEILING(alignment)); + if (base > next_align || base + candidate_size <= next_align) { + /* Overflow or not crossing the next alignment. */ + continue; + } + + size_t leadsize = next_align - base; + if (candidate_size - leadsize >= min_size) { + return extent; + } + } + + return NULL; +} + +/* Do any-best-fit extent selection, i.e. select any extent that best fits. */ +static extent_t * +extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + size_t size) { + pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); + pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, + (size_t)pind); + if (i < NPSIZES+1) { + /* + * In order to reduce fragmentation, avoid reusing and splitting + * large extents for much smaller sizes. + */ + if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) { + return NULL; + } + assert(!extent_heap_empty(&extents->heaps[i])); + extent_t *extent = extent_heap_first(&extents->heaps[i]); + assert(extent_size_get(extent) >= size); + return extent; + } + + return NULL; +} + +/* + * Do first-fit extent selection, i.e. select the oldest/lowest extent that is + * large enough. + */ +static extent_t * +extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + size_t size) { + extent_t *ret = NULL; + + pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); + for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, + &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i = + (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, + (size_t)i+1)) { + assert(!extent_heap_empty(&extents->heaps[i])); + extent_t *extent = extent_heap_first(&extents->heaps[i]); + assert(extent_size_get(extent) >= size); + if (ret == NULL || extent_snad_comp(extent, ret) < 0) { + ret = extent; + } + if (i == NPSIZES) { + break; + } + assert(i < NPSIZES); + } + + return ret; +} + +/* + * Do {best,first}-fit extent selection, where the selection policy choice is + * based on extents->delay_coalesce. Best-fit selection requires less + * searching, but its layout policy is less stable and may cause higher virtual + * memory fragmentation as a side effect. + */ +static extent_t * +extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + size_t esize, size_t alignment) { + malloc_mutex_assert_owner(tsdn, &extents->mtx); + + size_t max_size = esize + PAGE_CEILING(alignment) - PAGE; + /* Beware size_t wrap-around. */ + if (max_size < esize) { + return NULL; + } + + extent_t *extent = extents->delay_coalesce ? 
+ extents_best_fit_locked(tsdn, arena, extents, max_size) : + extents_first_fit_locked(tsdn, arena, extents, max_size); + + if (alignment > PAGE && extent == NULL) { + /* + * max_size guarantees the alignment requirement but is rather + * pessimistic. Next we try to satisfy the aligned allocation + * with sizes in [esize, max_size). + */ + extent = extents_fit_alignment(extents, esize, max_size, + alignment); + } + + return extent; +} + +static bool +extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent) { + extent_state_set(extent, extent_state_active); + bool coalesced; + extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx, + extents, extent, &coalesced, false); + extent_state_set(extent, extents_state_get(extents)); + + if (!coalesced) { + return true; + } + extents_insert_locked(tsdn, extents, extent); + return false; +} + +extent_t * +extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + assert(size + pad != 0); + assert(alignment != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents, + new_addr, size, pad, alignment, slab, szind, zero, commit, false); + assert(extent == NULL || extent_dumpable_get(extent)); + return extent; +} + +void +extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *extent) { + assert(extent_base_get(extent) != NULL); + assert(extent_size_get(extent) != 0); + assert(extent_dumpable_get(extent)); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_addr_set(extent, extent_base_get(extent)); + extent_zeroed_set(extent, false); + + extent_record(tsdn, arena, r_extent_hooks, extents, extent, false); +} + +extent_t * +extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, size_t npages_min) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + malloc_mutex_lock(tsdn, &extents->mtx); + + /* + * Get the LRU coalesced extent, if any. If coalescing was delayed, + * the loop will iterate until the LRU extent is fully coalesced. + */ + extent_t *extent; + while (true) { + /* Get the LRU extent, if any. */ + extent = extent_list_first(&extents->lru); + if (extent == NULL) { + goto label_return; + } + /* Check the eviction limit. */ + size_t extents_npages = atomic_load_zu(&extents->npages, + ATOMIC_RELAXED); + if (extents_npages <= npages_min) { + extent = NULL; + goto label_return; + } + extents_remove_locked(tsdn, extents, extent); + if (!extents->delay_coalesce) { + break; + } + /* Try to coalesce. */ + if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks, + rtree_ctx, extents, extent)) { + break; + } + /* + * The LRU extent was just coalesced and the result placed in + * the LRU at its neighbor's position. Start over. + */ + } + + /* + * Either mark the extent active or deregister it to protect against + * concurrent operations. 
+ */ + switch (extents_state_get(extents)) { + case extent_state_active: + not_reached(); + case extent_state_dirty: + case extent_state_muzzy: + extent_state_set(extent, extent_state_active); + break; + case extent_state_retained: + extent_deregister(tsdn, extent); + break; + default: + not_reached(); + } + +label_return: + malloc_mutex_unlock(tsdn, &extents->mtx); + return extent; +} + +static void +extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *extent, bool growing_retained) { + /* + * Leak extent after making sure its pages have already been purged, so + * that this is only a virtual memory leak. + */ + if (extents_state_get(extents) == extent_state_dirty) { + if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, + extent, 0, extent_size_get(extent), growing_retained)) { + extent_purge_forced_impl(tsdn, arena, r_extent_hooks, + extent, 0, extent_size_get(extent), + growing_retained); + } + } + extent_dalloc(tsdn, arena, extent); +} + +void +extents_prefork(tsdn_t *tsdn, extents_t *extents) { + malloc_mutex_prefork(tsdn, &extents->mtx); +} + +void +extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) { + malloc_mutex_postfork_parent(tsdn, &extents->mtx); +} + +void +extents_postfork_child(tsdn_t *tsdn, extents_t *extents) { + malloc_mutex_postfork_child(tsdn, &extents->mtx); +} + +static void +extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + extent_t *extent) { + assert(extent_arena_get(extent) == arena); + assert(extent_state_get(extent) == extent_state_active); + + extent_state_set(extent, extents_state_get(extents)); + extents_insert_locked(tsdn, extents, extent); +} + +static void +extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + extent_t *extent) { + malloc_mutex_lock(tsdn, &extents->mtx); + extent_deactivate_locked(tsdn, arena, extents, extent); + malloc_mutex_unlock(tsdn, &extents->mtx); +} + +static void +extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + extent_t *extent) { + assert(extent_arena_get(extent) == arena); + assert(extent_state_get(extent) == extents_state_get(extents)); + + extents_remove_locked(tsdn, extents, extent); + extent_state_set(extent, extent_state_active); +} + +static bool +extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, + const extent_t *extent, bool dependent, bool init_missing, + rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) { + *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent), dependent, init_missing); + if (!dependent && *r_elm_a == NULL) { + return true; + } + assert(*r_elm_a != NULL); + + *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_last_get(extent), dependent, init_missing); + if (!dependent && *r_elm_b == NULL) { + return true; + } + assert(*r_elm_b != NULL); + + return false; +} + +static void +extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a, + rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) { + rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab); + if (elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind, + slab); + } +} + +static void +extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent, + szind_t szind) { + assert(extent_slab_get(extent)); + + /* Register interior. 
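
The prefork/postfork trio above is one piece of jemalloc's fork-safety protocol: every mutex is acquired before fork() so the child never inherits a lock frozen mid-operation. The shape of the mechanism, shown with pthread_atfork() on a single mutex; note the real postfork_child re-initializes mutexes rather than unlocking them:

    /* Sketch of the atfork discipline behind extents_prefork and friends. */
    #include <pthread.h>

    static pthread_mutex_t state_mtx = PTHREAD_MUTEX_INITIALIZER;

    static void prefork(void)         { pthread_mutex_lock(&state_mtx); }
    static void postfork_parent(void) { pthread_mutex_unlock(&state_mtx); }
    static void postfork_child(void)  { pthread_mutex_unlock(&state_mtx); }

    static void install_fork_handlers(void) {
        pthread_atfork(prefork, postfork_parent, postfork_child);
    }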
*/
+    for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+        rtree_write(tsdn, &extents_rtree, rtree_ctx,
+            (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
+            LG_PAGE), extent, szind, true);
+    }
+}
+
+static void
+extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
+    cassert(config_prof);
+    /* prof_gdump() requirement. */
+    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+        WITNESS_RANK_CORE, 0);
+
+    if (opt_prof && extent_state_get(extent) == extent_state_active) {
+        size_t nadd = extent_size_get(extent) >> LG_PAGE;
+        size_t cur = atomic_fetch_add_zu(&curpages, nadd,
+            ATOMIC_RELAXED) + nadd;
+        size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
+        while (cur > high && !atomic_compare_exchange_weak_zu(
+            &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
+            /*
+             * Don't refresh cur, because it may have decreased
+             * since this thread lost the highpages update race.
+             * Note that high is updated in case of CAS failure.
+             */
+        }
+        if (cur > high && prof_gdump_get_unlocked()) {
+            prof_gdump(tsdn);
+        }
+    }
+}
+
+static void
+extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
+    cassert(config_prof);
+
+    if (opt_prof && extent_state_get(extent) == extent_state_active) {
+        size_t nsub = extent_size_get(extent) >> LG_PAGE;
+        assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
+        atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
+    }
+}
+
+static bool
+extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
+    rtree_ctx_t rtree_ctx_fallback;
+    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+    rtree_leaf_elm_t *elm_a, *elm_b;
+
+    /*
+     * We need to hold the lock to protect against a concurrent coalesce
+     * operation that sees us in a partial state.
+     */
+    extent_lock(tsdn, extent);
+
+    if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
+        &elm_a, &elm_b)) {
+        /* Don't leak the extent lock on the failure path. */
+        extent_unlock(tsdn, extent);
+        return true;
+    }
+
+    szind_t szind = extent_szind_get_maybe_invalid(extent);
+    bool slab = extent_slab_get(extent);
+    extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
+    if (slab) {
+        extent_interior_register(tsdn, rtree_ctx, extent, szind);
+    }
+
+    extent_unlock(tsdn, extent);
+
+    if (config_prof && gdump_add) {
+        extent_gdump_add(tsdn, extent);
+    }
+
+    return false;
+}
+
+static bool
+extent_register(tsdn_t *tsdn, extent_t *extent) {
+    return extent_register_impl(tsdn, extent, true);
+}
+
+static bool
+extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
+    return extent_register_impl(tsdn, extent, false);
+}
+
+static void
+extent_reregister(tsdn_t *tsdn, extent_t *extent) {
+    bool err = extent_register(tsdn, extent);
+    assert(!err);
+}
+
+/*
+ * Removes all pointers to the given extent from the global rtree indices for
+ * its interior. This is relevant for slab extents, for which we need to do
+ * metadata lookups at places other than the head of the extent. We deregister
+ * on the interior, then, when an extent moves from being an active slab to an
+ * inactive state.
+ */
+static void
+extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
+    extent_t *extent) {
+    size_t i;
+
+    assert(extent_slab_get(extent));
+
+    for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+        rtree_clear(tsdn, &extents_rtree, rtree_ctx,
+            (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
+            LG_PAGE));
+    }
+}
+
+/*
+ * Removes all pointers to the given extent from the global rtree.
+ */ +static void +extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *elm_a, *elm_b; + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false, + &elm_a, &elm_b); + + extent_lock(tsdn, extent); + + extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false); + if (extent_slab_get(extent)) { + extent_interior_deregister(tsdn, rtree_ctx, extent); + extent_slab_set(extent, false); + } + + extent_unlock(tsdn, extent); + + if (config_prof && gdump) { + extent_gdump_sub(tsdn, extent); + } +} + +static void +extent_deregister(tsdn_t *tsdn, extent_t *extent) { + extent_deregister_impl(tsdn, extent, true); +} + +static void +extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) { + extent_deregister_impl(tsdn, extent, false); +} + +/* + * Tries to find and remove an extent from extents that can be used for the + * given allocation request. + */ +static extent_t * +extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, + bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + assert(alignment > 0); + if (config_debug && new_addr != NULL) { + /* + * Non-NULL new_addr has two use cases: + * + * 1) Recycle a known-extant extent, e.g. during purging. + * 2) Perform in-place expanding reallocation. + * + * Regardless of use case, new_addr must either refer to a + * non-existing extent, or to the base of an extant extent, + * since only active slabs support interior lookups (which of + * course cannot be recycled). + */ + assert(PAGE_ADDR2BASE(new_addr) == new_addr); + assert(pad == 0); + assert(alignment <= PAGE); + } + + size_t esize = size + pad; + malloc_mutex_lock(tsdn, &extents->mtx); + extent_hooks_assure_initialized(arena, r_extent_hooks); + extent_t *extent; + if (new_addr != NULL) { + extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr); + if (extent != NULL) { + /* + * We might null-out extent to report an error, but we + * still need to unlock the associated mutex after. + */ + extent_t *unlock_extent = extent; + assert(extent_base_get(extent) == new_addr); + if (extent_arena_get(extent) != arena || + extent_size_get(extent) < esize || + extent_state_get(extent) != + extents_state_get(extents)) { + extent = NULL; + } + extent_unlock(tsdn, unlock_extent); + } + } else { + extent = extents_fit_locked(tsdn, arena, extents, esize, + alignment); + } + if (extent == NULL) { + malloc_mutex_unlock(tsdn, &extents->mtx); + return NULL; + } + + extent_activate_locked(tsdn, arena, extents, extent); + malloc_mutex_unlock(tsdn, &extents->mtx); + + return extent; +} + +/* + * Given an allocation request and an extent guaranteed to be able to satisfy + * it, this splits off lead and trail extents, leaving extent pointing to an + * extent satisfying the allocation. + * This function doesn't put lead or trail into any extents_t; it's the caller's + * job to ensure that they can be reused. + */ +typedef enum { + /* + * Split successfully. lead, extent, and trail, are modified to extents + * describing the ranges before, in, and after the given allocation. + */ + extent_split_interior_ok, + /* + * The extent can't satisfy the given allocation request. None of the + * input extent_t *s are touched. 
+ */ + extent_split_interior_cant_alloc, + /* + * In a potentially invalid state. Must leak (if *to_leak is non-NULL), + * and salvage what's still salvageable (if *to_salvage is non-NULL). + * None of lead, extent, or trail are valid. + */ + extent_split_interior_error +} extent_split_interior_result_t; + +static extent_split_interior_result_t +extent_split_interior(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, + /* The result of splitting, in case of success. */ + extent_t **extent, extent_t **lead, extent_t **trail, + /* The mess to clean up, in case of error. */ + extent_t **to_leak, extent_t **to_salvage, + void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, + szind_t szind, bool growing_retained) { + size_t esize = size + pad; + size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent), + PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent); + assert(new_addr == NULL || leadsize == 0); + if (extent_size_get(*extent) < leadsize + esize) { + return extent_split_interior_cant_alloc; + } + size_t trailsize = extent_size_get(*extent) - leadsize - esize; + + *lead = NULL; + *trail = NULL; + *to_leak = NULL; + *to_salvage = NULL; + + /* Split the lead. */ + if (leadsize != 0) { + *lead = *extent; + *extent = extent_split_impl(tsdn, arena, r_extent_hooks, + *lead, leadsize, NSIZES, false, esize + trailsize, szind, + slab, growing_retained); + if (*extent == NULL) { + *to_leak = *lead; + *lead = NULL; + return extent_split_interior_error; + } + } + + /* Split the trail. */ + if (trailsize != 0) { + *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent, + esize, szind, slab, trailsize, NSIZES, false, + growing_retained); + if (*trail == NULL) { + *to_leak = *extent; + *to_salvage = *lead; + *lead = NULL; + *extent = NULL; + return extent_split_interior_error; + } + } + + if (leadsize == 0 && trailsize == 0) { + /* + * Splitting causes szind to be set as a side effect, but no + * splitting occurred. + */ + extent_szind_set(*extent, szind); + if (szind != NSIZES) { + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_addr_get(*extent), szind, slab); + if (slab && extent_size_get(*extent) > PAGE) { + rtree_szind_slab_update(tsdn, &extents_rtree, + rtree_ctx, + (uintptr_t)extent_past_get(*extent) - + (uintptr_t)PAGE, szind, slab); + } + } + } + + return extent_split_interior_ok; +} + +/* + * This fulfills the indicated allocation request out of the given extent (which + * the caller should have ensured was big enough). If there's any unused space + * before or after the resulting allocation, that space is given its own extent + * and put back into extents. 
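
The split arithmetic that extent_split_interior() performs is easy to check by hand. A self-contained example with hypothetical numbers, mirroring the leadsize/trailsize computation:

    /* Carve an aligned allocation of esize bytes out of [base, base+total),
     * the way extent_split_interior computes lead and trail. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

    int main(void) {
        uintptr_t base = 0x7000;    /* hypothetical extent base */
        size_t total = 0x9000;      /* its size */
        size_t esize = 0x3000;      /* requested size (incl. pad) */
        size_t alignment = 0x4000;

        size_t leadsize = ALIGN_UP(base, alignment) - base;    /* 0x1000 */
        assert(total >= leadsize + esize);    /* else: cant_alloc */
        size_t trailsize = total - leadsize - esize;           /* 0x5000 */

        /* lead  = [base, base + leadsize)
         * alloc = [base + leadsize, base + leadsize + esize)
         * trail = [base + leadsize + esize, base + total) */
        assert(leadsize == 0x1000 && trailsize == 0x5000);
        return 0;
    }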
+ */ +static extent_t * +extent_recycle_split(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, + szind_t szind, extent_t *extent, bool growing_retained) { + extent_t *lead; + extent_t *trail; + extent_t *to_leak; + extent_t *to_salvage; + + extent_split_interior_result_t result = extent_split_interior( + tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, + &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind, + growing_retained); + + if (result == extent_split_interior_ok) { + if (lead != NULL) { + extent_deactivate(tsdn, arena, extents, lead); + } + if (trail != NULL) { + extent_deactivate(tsdn, arena, extents, trail); + } + return extent; + } else { + /* + * We should have picked an extent that was large enough to + * fulfill our allocation request. + */ + assert(result == extent_split_interior_error); + if (to_salvage != NULL) { + extent_deregister(tsdn, to_salvage); + } + if (to_leak != NULL) { + void *leak = extent_base_get(to_leak); + extent_deregister_no_gdump_sub(tsdn, to_leak); + extents_leak(tsdn, arena, r_extent_hooks, extents, + to_leak, growing_retained); + assert(extent_lock_from_addr(tsdn, rtree_ctx, leak) + == NULL); + } + return NULL; + } + unreachable(); +} + +/* + * Tries to satisfy the given allocation request by reusing one of the extents + * in the given extents_t. + */ +static extent_t * +extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit, + bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 
1 : 0); + assert(new_addr == NULL || !slab); + assert(pad == 0 || !slab); + assert(!*zero || !slab); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks, + rtree_ctx, extents, new_addr, size, pad, alignment, slab, + growing_retained); + if (extent == NULL) { + return NULL; + } + + extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx, + extents, new_addr, size, pad, alignment, slab, szind, extent, + growing_retained); + if (extent == NULL) { + return NULL; + } + + if (*commit && !extent_committed_get(extent)) { + if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, + 0, extent_size_get(extent), growing_retained)) { + extent_record(tsdn, arena, r_extent_hooks, extents, + extent, growing_retained); + return NULL; + } + extent_zeroed_set(extent, true); + } + + if (extent_committed_get(extent)) { + *commit = true; + } + if (extent_zeroed_get(extent)) { + *zero = true; + } + + if (pad != 0) { + extent_addr_randomize(tsdn, extent, alignment); + } + assert(extent_state_get(extent) == extent_state_active); + if (slab) { + extent_slab_set(extent, slab); + extent_interior_register(tsdn, rtree_ctx, extent, szind); + } + + if (*zero) { + void *addr = extent_base_get(extent); + size_t size = extent_size_get(extent); + if (!extent_zeroed_get(extent)) { + if (pages_purge_forced(addr, size)) { + memset(addr, 0, size); + } + } else if (config_debug) { + size_t *p = (size_t *)(uintptr_t)addr; + for (size_t i = 0; i < size / sizeof(size_t); i++) { + assert(p[i] == 0); + } + } + } + return extent; +} + +/* + * If the caller specifies (!*zero), it is still possible to receive zeroed + * memory, in which case *zero is toggled to true. arena_extent_alloc() takes + * advantage of this to avoid demanding zeroed extents, but taking advantage of + * them if they are returned. + */ +static void * +extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { + void *ret; + + assert(size != 0); + assert(alignment != 0); + + /* "primary" dss. */ + if (have_dss && dss_prec == dss_prec_primary && (ret = + extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) { + return ret; + } + /* mmap. */ + if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit)) + != NULL) { + return ret; + } + /* "secondary" dss. */ + if (have_dss && dss_prec == dss_prec_secondary && (ret = + extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) { + return ret; + } + + /* All strategies for allocation failed. */ + return NULL; +} + +static void * +extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit) { + void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero, + commit, (dss_prec_t)atomic_load_u(&arena->dss_prec, + ATOMIC_RELAXED)); + if (have_madvise_huge && ret) { + pages_set_thp_state(ret, size); + } + return ret; +} + +static void * +extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { + tsdn_t *tsdn; + arena_t *arena; + + tsdn = tsdn_fetch(); + arena = arena_get(tsdn, arena_ind, false); + /* + * The arena we're allocating on behalf of must have been initialized + * already. 
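
The *zero contract described above (ask for nothing, but exploit zeroed memory when it arrives) looks like this from the caller's side. The sketch fakes the allocator with an anonymous mmap(), which always hands back zero-filled pages; the names are illustrative, not jemalloc API:

    /* Sketch: request non-zeroed pages, then skip the memset whenever the
     * allocator reports the memory was zeroed anyway. POSIX mmap assumed. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>
    #include <sys/mman.h>

    static void *alloc_pages(size_t size, bool *zero, bool *commit) {
        void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return NULL;
        }
        *zero = true;    /* fresh anonymous mappings are zero-filled */
        *commit = true;
        return p;
    }

    static void *alloc_zeroed(size_t size) {
        bool zero = false;    /* we do not require zeroed memory... */
        bool commit = true;
        void *p = alloc_pages(size, &zero, &commit);
        if (p != NULL && !zero) {
            memset(p, 0, size);    /* ...but skip this work when it is */
        }
        return p;
    }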
+ */ + assert(arena != NULL); + + return extent_alloc_default_impl(tsdn, arena, new_addr, size, + alignment, zero, commit); +} + +static void +extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + if (arena == arena_get(tsd_tsdn(tsd), 0, false)) { + /* + * The only legitimate case of customized extent hooks for a0 is + * hooks with no allocation activities. One such example is to + * place metadata on pre-allocated resources such as huge pages. + * In that case, rely on reentrancy_level checks to catch + * infinite recursions. + */ + pre_reentrancy(tsd, NULL); + } else { + pre_reentrancy(tsd, arena); + } +} + +static void +extent_hook_post_reentrancy(tsdn_t *tsdn) { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + post_reentrancy(tsd); +} + +/* + * If virtual memory is retained, create increasingly larger extents from which + * to split requested extents in order to limit the total number of disjoint + * virtual memory ranges retained by each arena. + */ +static extent_t * +extent_grow_retained(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment, + bool slab, szind_t szind, bool *zero, bool *commit) { + malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx); + assert(pad == 0 || !slab); + assert(!*zero || !slab); + + size_t esize = size + pad; + size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE; + /* Beware size_t wrap-around. */ + if (alloc_size_min < esize) { + goto label_err; + } + /* + * Find the next extent size in the series that would be large enough to + * satisfy this request. + */ + pszind_t egn_skip = 0; + size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); + while (alloc_size < alloc_size_min) { + egn_skip++; + if (arena->extent_grow_next + egn_skip == NPSIZES) { + /* Outside legal range. 
*/ + goto label_err; + } + assert(arena->extent_grow_next + egn_skip < NPSIZES); + alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); + } + + extent_t *extent = extent_alloc(tsdn, arena); + if (extent == NULL) { + goto label_err; + } + bool zeroed = false; + bool committed = false; + + void *ptr; + if (*r_extent_hooks == &extent_hooks_default) { + ptr = extent_alloc_default_impl(tsdn, arena, NULL, + alloc_size, PAGE, &zeroed, &committed); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL, + alloc_size, PAGE, &zeroed, &committed, + arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + + extent_init(extent, arena, ptr, alloc_size, false, NSIZES, + arena_extent_sn_next(arena), extent_state_active, zeroed, + committed, true); + if (ptr == NULL) { + extent_dalloc(tsdn, arena, extent); + goto label_err; + } + + if (extent_register_no_gdump_add(tsdn, extent)) { + extents_leak(tsdn, arena, r_extent_hooks, + &arena->extents_retained, extent, true); + goto label_err; + } + + if (extent_zeroed_get(extent) && extent_committed_get(extent)) { + *zero = true; + } + if (extent_committed_get(extent)) { + *commit = true; + } + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + extent_t *lead; + extent_t *trail; + extent_t *to_leak; + extent_t *to_salvage; + extent_split_interior_result_t result = extent_split_interior( + tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, + &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind, + true); + + if (result == extent_split_interior_ok) { + if (lead != NULL) { + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, lead, true); + } + if (trail != NULL) { + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, trail, true); + } + } else { + /* + * We should have allocated a sufficiently large extent; the + * cant_alloc case should not occur. + */ + assert(result == extent_split_interior_error); + if (to_salvage != NULL) { + if (config_prof) { + extent_gdump_add(tsdn, to_salvage); + } + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, to_salvage, true); + } + if (to_leak != NULL) { + extent_deregister_no_gdump_sub(tsdn, to_leak); + extents_leak(tsdn, arena, r_extent_hooks, + &arena->extents_retained, to_leak, true); + } + goto label_err; + } + + if (*commit && !extent_committed_get(extent)) { + if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0, + extent_size_get(extent), true)) { + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, extent, true); + goto label_err; + } + extent_zeroed_set(extent, true); + } + + /* + * Increment extent_grow_next if doing so wouldn't exceed the allowed + * range. + */ + if (arena->extent_grow_next + egn_skip + 1 <= + arena->retain_grow_limit) { + arena->extent_grow_next += egn_skip + 1; + } else { + arena->extent_grow_next = arena->retain_grow_limit; + } + /* All opportunities for failure are past. */ + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + + if (config_prof) { + /* Adjust gdump stats now that extent is final size. 
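
extent_grow_retained() above walks the page-size-class series from extent_grow_next upward, then bumps extent_grow_next past the class it used, clamped to retain_grow_limit, so successive growth requests map geometrically larger ranges and the number of disjoint mappings stays small. A toy version that substitutes plain doubling for the sz_pind2sz() class table:

    /* Toy model of the extent_grow_next policy; constants are made up. */
    #include <stddef.h>
    #include <stdio.h>

    static size_t grow_next = 4096;            /* cf. arena->extent_grow_next */
    static const size_t grow_limit = 1 << 30;  /* cf. arena->retain_grow_limit */

    static size_t next_grow_size(size_t need) {
        size_t alloc_size = grow_next;
        while (alloc_size < need) {
            alloc_size *= 2;    /* skip "classes" that are too small */
        }
        size_t next = alloc_size * 2;    /* start even larger next time */
        grow_next = next < grow_limit ? next : grow_limit;
        return alloc_size;
    }

    int main(void) {
        printf("%zu\n", next_grow_size(10000));    /* 16384 */
        printf("%zu\n", next_grow_size(4096));     /* 32768 */
        return 0;
    }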
*/ + extent_gdump_add(tsdn, extent); + } + if (pad != 0) { + extent_addr_randomize(tsdn, extent, alignment); + } + if (slab) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, + &rtree_ctx_fallback); + + extent_slab_set(extent, true); + extent_interior_register(tsdn, rtree_ctx, extent, szind); + } + if (*zero && !extent_zeroed_get(extent)) { + void *addr = extent_base_get(extent); + size_t size = extent_size_get(extent); + if (pages_purge_forced(addr, size)) { + memset(addr, 0, size); + } + } + + return extent; +label_err: + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + return NULL; +} + +static extent_t * +extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + assert(size != 0); + assert(alignment != 0); + + malloc_mutex_lock(tsdn, &arena->extent_grow_mtx); + + extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, + &arena->extents_retained, new_addr, size, pad, alignment, slab, + szind, zero, commit, true); + if (extent != NULL) { + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + if (config_prof) { + extent_gdump_add(tsdn, extent); + } + } else if (opt_retain && new_addr == NULL) { + extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size, + pad, alignment, slab, szind, zero, commit); + /* extent_grow_retained() always releases extent_grow_mtx. */ + } else { + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + } + malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx); + + return extent; +} + +static extent_t * +extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + size_t esize = size + pad; + extent_t *extent = extent_alloc(tsdn, arena); + if (extent == NULL) { + return NULL; + } + void *addr; + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. 
*/ + addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize, + alignment, zero, commit); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, + esize, alignment, zero, commit, arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + if (addr == NULL) { + extent_dalloc(tsdn, arena, extent); + return NULL; + } + extent_init(extent, arena, addr, esize, slab, szind, + arena_extent_sn_next(arena), extent_state_active, *zero, *commit, + true); + if (pad != 0) { + extent_addr_randomize(tsdn, extent, alignment); + } + if (extent_register(tsdn, extent)) { + extents_leak(tsdn, arena, r_extent_hooks, + &arena->extents_retained, extent, false); + return NULL; + } + + return extent; +} + +extent_t * +extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks, + new_addr, size, pad, alignment, slab, szind, zero, commit); + if (extent == NULL) { + if (opt_retain && new_addr != NULL) { + /* + * When retain is enabled and new_addr is set, we do not + * attempt extent_alloc_wrapper_hard which does mmap + * that is very unlikely to succeed (unless it happens + * to be at the end). + */ + return NULL; + } + extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks, + new_addr, size, pad, alignment, slab, szind, zero, commit); + } + + assert(extent == NULL || extent_dumpable_get(extent)); + return extent; +} + +static bool +extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner, + const extent_t *outer) { + assert(extent_arena_get(inner) == arena); + if (extent_arena_get(outer) != arena) { + return false; + } + + assert(extent_state_get(inner) == extent_state_active); + if (extent_state_get(outer) != extents->state) { + return false; + } + + if (extent_committed_get(inner) != extent_committed_get(outer)) { + return false; + } + + return true; +} + +static bool +extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *inner, extent_t *outer, bool forward, + bool growing_retained) { + assert(extent_can_coalesce(arena, extents, inner, outer)); + + extent_activate_locked(tsdn, arena, extents, outer); + + malloc_mutex_unlock(tsdn, &extents->mtx); + bool err = extent_merge_impl(tsdn, arena, r_extent_hooks, + forward ? inner : outer, forward ? outer : inner, growing_retained); + malloc_mutex_lock(tsdn, &extents->mtx); + + if (err) { + extent_deactivate_locked(tsdn, arena, extents, outer); + } + + return err; +} + +static extent_t * +extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent, bool *coalesced, bool growing_retained) { + /* + * Continue attempting to coalesce until failure, to protect against + * races with other threads that are thwarted by this one. + */ + bool again; + do { + again = false; + + /* Try to coalesce forward. */ + extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx, + extent_past_get(extent)); + if (next != NULL) { + /* + * extents->mtx only protects against races for + * like-state extents, so call extent_can_coalesce() + * before releasing next's pool lock. 
+ */ + bool can_coalesce = extent_can_coalesce(arena, extents, + extent, next); + + extent_unlock(tsdn, next); + + if (can_coalesce && !extent_coalesce(tsdn, arena, + r_extent_hooks, extents, extent, next, true, + growing_retained)) { + if (extents->delay_coalesce) { + /* Do minimal coalescing. */ + *coalesced = true; + return extent; + } + again = true; + } + } + + /* Try to coalesce backward. */ + extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx, + extent_before_get(extent)); + if (prev != NULL) { + bool can_coalesce = extent_can_coalesce(arena, extents, + extent, prev); + extent_unlock(tsdn, prev); + + if (can_coalesce && !extent_coalesce(tsdn, arena, + r_extent_hooks, extents, extent, prev, false, + growing_retained)) { + extent = prev; + if (extents->delay_coalesce) { + /* Do minimal coalescing. */ + *coalesced = true; + return extent; + } + again = true; + } + } + } while (again); + + if (extents->delay_coalesce) { + *coalesced = false; + } + return extent; +} + +/* + * Does the metadata management portions of putting an unused extent into the + * given extents_t (coalesces, deregisters slab interiors, the heap operations). + */ +static void +extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *extent, bool growing_retained) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + assert((extents_state_get(extents) != extent_state_dirty && + extents_state_get(extents) != extent_state_muzzy) || + !extent_zeroed_get(extent)); + + malloc_mutex_lock(tsdn, &extents->mtx); + extent_hooks_assure_initialized(arena, r_extent_hooks); + + extent_szind_set(extent, NSIZES); + if (extent_slab_get(extent)) { + extent_interior_deregister(tsdn, rtree_ctx, extent); + extent_slab_set(extent, false); + } + + assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent), true) == extent); + + if (!extents->delay_coalesce) { + extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, + rtree_ctx, extents, extent, NULL, growing_retained); + } else if (extent_size_get(extent) >= LARGE_MINCLASS) { + /* Always coalesce large extents eagerly. 
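
The forward/backward probes above come down to address arithmetic: the only possible neighbors of an extent occupying [base, base + size) start at base + size (forward) or end at base, probed via the last page before it (backward). For concreteness:

    /* Neighbor addresses used by the coalesce probes (page size assumed). */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PAGE ((uintptr_t)4096)

    int main(void) {
        uintptr_t base = 0x10000;
        size_t size = 0x4000;

        uintptr_t past = base + size;      /* cf. extent_past_get(): forward */
        uintptr_t before = base - PAGE;    /* cf. extent_before_get(): backward */

        assert(past == 0x14000 && before == 0xF000);
        return 0;
    }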
*/ + bool coalesced; + size_t prev_size; + do { + prev_size = extent_size_get(extent); + assert(extent_state_get(extent) == extent_state_active); + extent = extent_try_coalesce(tsdn, arena, + r_extent_hooks, rtree_ctx, extents, extent, + &coalesced, growing_retained); + } while (coalesced && + extent_size_get(extent) >= prev_size + LARGE_MINCLASS); + } + extent_deactivate_locked(tsdn, arena, extents, extent); + + malloc_mutex_unlock(tsdn, &extents->mtx); +} + +void +extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + if (extent_register(tsdn, extent)) { + extents_leak(tsdn, arena, &extent_hooks, + &arena->extents_retained, extent, false); + return; + } + extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent); +} + +static bool +extent_dalloc_default_impl(void *addr, size_t size) { + if (!have_dss || !extent_in_dss(addr)) { + return extent_dalloc_mmap(addr, size); + } + return true; +} + +static bool +extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + return extent_dalloc_default_impl(addr, size); +} + +static bool +extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + bool err; + + assert(extent_base_get(extent) != NULL); + assert(extent_size_get(extent) != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_addr_set(extent, extent_base_get(extent)); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + /* Try to deallocate. */ + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. */ + err = extent_dalloc_default_impl(extent_base_get(extent), + extent_size_get(extent)); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + err = ((*r_extent_hooks)->dalloc == NULL || + (*r_extent_hooks)->dalloc(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), + extent_committed_get(extent), arena_ind_get(arena))); + extent_hook_post_reentrancy(tsdn); + } + + if (!err) { + extent_dalloc(tsdn, arena, extent); + } + + return err; +} + +void +extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + assert(extent_dumpable_get(extent)); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + /* + * Deregister first to avoid a race with other allocating threads, and + * reregister if deallocation fails. + */ + extent_deregister(tsdn, extent); + if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) { + return; + } + + extent_reregister(tsdn, extent); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + /* Try to decommit; purge if that fails. 
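
The chain that follows tries progressively weaker ways of giving pages back to the OS and records whether the memory is known to be zeroed afterwards. Flattened into a standalone sketch with stubbed hooks; the real code also special-cases muzzy extents and NULL hook entries, and as with the hooks above, false means success:

    /* Sketch of the decommit -> purge_forced -> purge_lazy cascade. */
    #include <assert.h>
    #include <stdbool.h>

    static bool decommit(void)     { return true; }   /* stub: unsupported */
    static bool purge_forced(void) { return false; }  /* stub: succeeds */
    static bool purge_lazy(void)   { return false; }

    static bool release_pages(bool committed) {
        bool zeroed;
        if (!committed) {
            zeroed = true;     /* never backed: trivially zero */
        } else if (!decommit()) {
            zeroed = true;     /* next touch faults in zero pages */
        } else if (!purge_forced()) {
            zeroed = true;     /* pages now read back as zero */
        } else if (!purge_lazy()) {
            zeroed = false;    /* reclaimed, contents undefined */
        } else {
            zeroed = false;    /* nothing worked; still resident */
        }
        return zeroed;
    }

    int main(void) {
        assert(release_pages(true));    /* purge_forced path -> zeroed */
        return 0;
    }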
*/ + bool zeroed; + if (!extent_committed_get(extent)) { + zeroed = true; + } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent, + 0, extent_size_get(extent))) { + zeroed = true; + } else if ((*r_extent_hooks)->purge_forced != NULL && + !(*r_extent_hooks)->purge_forced(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), 0, + extent_size_get(extent), arena_ind_get(arena))) { + zeroed = true; + } else if (extent_state_get(extent) == extent_state_muzzy || + ((*r_extent_hooks)->purge_lazy != NULL && + !(*r_extent_hooks)->purge_lazy(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), 0, + extent_size_get(extent), arena_ind_get(arena)))) { + zeroed = false; + } else { + zeroed = false; + } + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + extent_zeroed_set(extent, zeroed); + + if (config_prof) { + extent_gdump_sub(tsdn, extent); + } + + extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, + extent, false); +} + +static void +extent_destroy_default_impl(void *addr, size_t size) { + if (!have_dss || !extent_in_dss(addr)) { + pages_unmap(addr, size); + } +} + +static void +extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + extent_destroy_default_impl(addr, size); +} + +void +extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + assert(extent_base_get(extent) != NULL); + assert(extent_size_get(extent) != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + /* Deregister first to avoid a race with other allocating threads. */ + extent_deregister(tsdn, extent); + + extent_addr_set(extent, extent_base_get(extent)); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + /* Try to destroy; silently fail otherwise. */ + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. */ + extent_destroy_default_impl(extent_base_get(extent), + extent_size_get(extent)); + } else if ((*r_extent_hooks)->destroy != NULL) { + extent_hook_pre_reentrancy(tsdn, arena); + (*r_extent_hooks)->destroy(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), + extent_committed_get(extent), arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + + extent_dalloc(tsdn, arena, extent); +} + +static bool +extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} + +static bool +extent_commit_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 
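extent_commit_impl() above treats a NULL commit hook as success and folds the hook's return into the extent's committed bit. One way a custom pair can honor that contract, assuming an mprotect()-based scheme; jemalloc's own pages_commit()/pages_decommit() may remap instead of mprotect, depending on how it was configured:

    #include <jemalloc/jemalloc.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <sys/mman.h>

    /* Hypothetical hooks: toggle access on the [offset, offset+length) slice. */
    static bool
    my_extent_commit(extent_hooks_t *hooks, void *addr, size_t size,
        size_t offset, size_t length, unsigned arena_ind) {
        void *slice = (void *)((uintptr_t)addr + offset);
        return mprotect(slice, length, PROT_READ | PROT_WRITE) != 0;
    }

    static bool
    my_extent_decommit(extent_hooks_t *hooks, void *addr, size_t size,
        size_t offset, size_t length, unsigned arena_ind) {
        void *slice = (void *)((uintptr_t)addr + offset);
        return mprotect(slice, length, PROT_NONE) != 0; /* false == done */
    }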
1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = ((*r_extent_hooks)->commit == NULL || + (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent), + extent_size_get(extent), offset, length, arena_ind_get(arena))); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + extent_committed_set(extent, extent_committed_get(extent) || !err); + return err; +} + +bool +extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset, + length, false); +} + +static bool +extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} + +bool +extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = ((*r_extent_hooks)->decommit == NULL || + (*r_extent_hooks)->decommit(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), offset, length, + arena_ind_get(arena))); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + extent_committed_set(extent, extent_committed_get(extent) && err); + return err; +} + +#ifdef PAGES_CAN_PURGE_LAZY +static bool +extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + assert(addr != NULL); + assert((offset & PAGE_MASK) == 0); + assert(length != 0); + assert((length & PAGE_MASK) == 0); + + return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} +#endif + +static bool +extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 
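A lazy purge tells the kernel the page contents are disposable while the mapping stays intact, which is why the wrapper above does not touch the committed bit. A sketch matching that meaning on Linux, where MADV_FREE is the lazy flavor and MADV_DONTNEED the immediate one (the latter is closer to what the forced purge below does):

    #include <jemalloc/jemalloc.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <sys/mman.h>

    /* Hypothetical purge_lazy hook; false reports a successful purge. */
    static bool
    my_extent_purge_lazy(extent_hooks_t *hooks, void *addr, size_t size,
        size_t offset, size_t length, unsigned arena_ind) {
        void *slice = (void *)((uintptr_t)addr + offset);
    #ifdef MADV_FREE
        return madvise(slice, length, MADV_FREE) != 0;
    #else
        return madvise(slice, length, MADV_DONTNEED) != 0;
    #endif
    }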
1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->purge_lazy == NULL) { + return true; + } + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), offset, length, + arena_ind_get(arena)); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + + return err; +} + +bool +extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent, + offset, length, false); +} + +#ifdef PAGES_CAN_PURGE_FORCED +static bool +extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind) { + assert(addr != NULL); + assert((offset & PAGE_MASK) == 0); + assert(length != 0); + assert((length & PAGE_MASK) == 0); + + return pages_purge_forced((void *)((uintptr_t)addr + + (uintptr_t)offset), length); +} +#endif + +static bool +extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->purge_forced == NULL) { + return true; + } + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), offset, length, + arena_ind_get(arena)); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + return err; +} + +bool +extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent, + offset, length, false); +} + +#ifdef JEMALLOC_MAPS_COALESCE +static bool +extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { + return !maps_coalesce; +} +#endif + +/* + * Accepts the extent to split, and the characteristics of each side of the + * split. The 'a' parameters go with the 'lead' of the resulting pair of + * extents (the lower addressed portion of the split), and the 'b' parameters go + * with the trail (the higher addressed portion). This makes 'extent' the lead, + * and returns the trail (except in case of error). + */ +static extent_t * +extent_split_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, + bool growing_retained) { + assert(extent_size_get(extent) == size_a + size_b); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 
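Note that extent_split_impl() here performs all of the metadata surgery itself (the trail extent_t, the rtree writes); the split hook is consulted only for a veto, and the default above vetoes exactly when the platform cannot later re-coalesce mappings. A user hook that always consents, under the documented 5.x signature:

    #include <jemalloc/jemalloc.h>
    #include <stdbool.h>

    /* Hypothetical split hook. */
    static bool
    my_extent_split(extent_hooks_t *hooks, void *addr, size_t size,
        size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
        return false; /* false == permit the split */
    }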
1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->split == NULL) { + return NULL; + } + + extent_t *trail = extent_alloc(tsdn, arena); + if (trail == NULL) { + goto label_error_a; + } + + extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) + + size_a), size_b, slab_b, szind_b, extent_sn_get(extent), + extent_state_get(extent), extent_zeroed_get(extent), + extent_committed_get(extent), extent_dumpable_get(extent)); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *lead_elm_a, *lead_elm_b; + { + extent_t lead; + + extent_init(&lead, arena, extent_addr_get(extent), size_a, + slab_a, szind_a, extent_sn_get(extent), + extent_state_get(extent), extent_zeroed_get(extent), + extent_committed_get(extent), extent_dumpable_get(extent)); + + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false, + true, &lead_elm_a, &lead_elm_b); + } + rtree_leaf_elm_t *trail_elm_a, *trail_elm_b; + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true, + &trail_elm_a, &trail_elm_b); + + if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL + || trail_elm_b == NULL) { + goto label_error_b; + } + + extent_lock2(tsdn, extent, trail); + + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent), + size_a + size_b, size_a, size_b, extent_committed_get(extent), + arena_ind_get(arena)); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + if (err) { + goto label_error_c; + } + + extent_size_set(extent, size_a); + extent_szind_set(extent, szind_a); + + extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent, + szind_a, slab_a); + extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail, + szind_b, slab_b); + + extent_unlock2(tsdn, extent, trail); + + return trail; +label_error_c: + extent_unlock2(tsdn, extent, trail); +label_error_b: + extent_dalloc(tsdn, arena, trail); +label_error_a: + return NULL; +} + +extent_t * +extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) { + return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a, + szind_a, slab_a, size_b, szind_b, slab_b, false); +} + +static bool +extent_merge_default_impl(void *addr_a, void *addr_b) { + if (!maps_coalesce) { + return true; + } + if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) { + return true; + } + + return false; +} + +#ifdef JEMALLOC_MAPS_COALESCE +static bool +extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, + void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { + return extent_merge_default_impl(addr_a, addr_b); +} +#endif + +static bool +extent_merge_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, + bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->merge == NULL) { + return true; + } + + bool err; + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. 
*/ + err = extent_merge_default_impl(extent_base_get(a), + extent_base_get(b)); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + err = (*r_extent_hooks)->merge(*r_extent_hooks, + extent_base_get(a), extent_size_get(a), extent_base_get(b), + extent_size_get(b), extent_committed_get(a), + arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + + if (err) { + return true; + } + + /* + * The rtree writes must happen while all the relevant elements are + * owned, so the following code uses decomposed helper functions rather + * than extent_{,de}register() to do things in the right order. + */ + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b; + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a, + &a_elm_b); + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a, + &b_elm_b); + + extent_lock2(tsdn, a, b); + + if (a_elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL, + NSIZES, false); + } + if (b_elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL, + NSIZES, false); + } else { + b_elm_b = b_elm_a; + } + + extent_size_set(a, extent_size_get(a) + extent_size_get(b)); + extent_szind_set(a, NSIZES); + extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ? + extent_sn_get(a) : extent_sn_get(b)); + extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b)); + + extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false); + + extent_unlock2(tsdn, a, b); + + extent_dalloc(tsdn, extent_arena_get(b), b); + + return false; +} + +bool +extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) { + return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false); +} + +bool +extent_boot(void) { + if (rtree_new(&extents_rtree, true)) { + return true; + } + + if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool", + WITNESS_RANK_EXTENT_POOL)) { + return true; + } + + if (have_dss) { + extent_dss_boot(); + } + + return false; +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/extent_dss.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/extent_dss.c new file mode 100644 index 0000000..2b1ea9c --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/extent_dss.c @@ -0,0 +1,270 @@ +#define JEMALLOC_EXTENT_DSS_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/spin.h" + +/******************************************************************************/ +/* Data. */ + +const char *opt_dss = DSS_DEFAULT; + +const char *dss_prec_names[] = { + "disabled", + "primary", + "secondary", + "N/A" +}; + +/* + * Current dss precedence default, used when creating new arenas. NB: This is + * stored as unsigned rather than dss_prec_t because in principle there's no + * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use + * atomic operations to synchronize the setting. + */ +static atomic_u_t dss_prec_default = ATOMIC_INIT( + (unsigned)DSS_PREC_DEFAULT); + +/* Base address of the DSS. */ +static void *dss_base; +/* Atomic boolean indicating whether a thread is currently extending DSS. */ +static atomic_b_t dss_extending; +/* Atomic boolean indicating whether the DSS is exhausted. 
*/ +static atomic_b_t dss_exhausted; +/* Atomic current upper limit on DSS addresses. */ +static atomic_p_t dss_max; + +/******************************************************************************/ + +static void * +extent_dss_sbrk(intptr_t increment) { +#ifdef JEMALLOC_DSS + return sbrk(increment); +#else + not_implemented(); + return NULL; +#endif +} + +dss_prec_t +extent_dss_prec_get(void) { + dss_prec_t ret; + + if (!have_dss) { + return dss_prec_disabled; + } + ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE); + return ret; +} + +bool +extent_dss_prec_set(dss_prec_t dss_prec) { + if (!have_dss) { + return (dss_prec != dss_prec_disabled); + } + atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE); + return false; +} + +static void +extent_dss_extending_start(void) { + spin_t spinner = SPIN_INITIALIZER; + while (true) { + bool expected = false; + if (atomic_compare_exchange_weak_b(&dss_extending, &expected, + true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) { + break; + } + spin_adaptive(&spinner); + } +} + +static void +extent_dss_extending_finish(void) { + assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED)); + + atomic_store_b(&dss_extending, false, ATOMIC_RELEASE); +} + +static void * +extent_dss_max_update(void *new_addr) { + /* + * Get the current end of the DSS as max_cur and assure that dss_max is + * up to date. + */ + void *max_cur = extent_dss_sbrk(0); + if (max_cur == (void *)-1) { + return NULL; + } + atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE); + /* Fixed new_addr can only be supported if it is at the edge of DSS. */ + if (new_addr != NULL && max_cur != new_addr) { + return NULL; + } + return max_cur; +} + +void * +extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit) { + extent_t *gap; + + cassert(have_dss); + assert(size > 0); + assert(alignment > 0); + + /* + * sbrk() uses a signed increment argument, so take care not to + * interpret a large allocation request as a negative increment. + */ + if ((intptr_t)size < 0) { + return NULL; + } + + gap = extent_alloc(tsdn, arena); + if (gap == NULL) { + return NULL; + } + + extent_dss_extending_start(); + if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) { + /* + * The loop is necessary to recover from races with other + * threads that are using the DSS for something other than + * malloc. + */ + while (true) { + void *max_cur = extent_dss_max_update(new_addr); + if (max_cur == NULL) { + goto label_oom; + } + + /* + * Compute how much page-aligned gap space (if any) is + * necessary to satisfy alignment. This space can be + * recycled for later use. + */ + void *gap_addr_page = (void *)(PAGE_CEILING( + (uintptr_t)max_cur)); + void *ret = (void *)ALIGNMENT_CEILING( + (uintptr_t)gap_addr_page, alignment); + size_t gap_size_page = (uintptr_t)ret - + (uintptr_t)gap_addr_page; + if (gap_size_page != 0) { + extent_init(gap, arena, gap_addr_page, + gap_size_page, false, NSIZES, + arena_extent_sn_next(arena), + extent_state_active, false, true, true); + } + /* + * Compute the address just past the end of the desired + * allocation space. + */ + void *dss_next = (void *)((uintptr_t)ret + size); + if ((uintptr_t)ret < (uintptr_t)max_cur || + (uintptr_t)dss_next < (uintptr_t)max_cur) { + goto label_oom; /* Wrap-around. */ + } + /* Compute the increment, including subpage bytes. 
*/ + void *gap_addr_subpage = max_cur; + size_t gap_size_subpage = (uintptr_t)ret - + (uintptr_t)gap_addr_subpage; + intptr_t incr = gap_size_subpage + size; + + assert((uintptr_t)max_cur + incr == (uintptr_t)ret + + size); + + /* Try to allocate. */ + void *dss_prev = extent_dss_sbrk(incr); + if (dss_prev == max_cur) { + /* Success. */ + atomic_store_p(&dss_max, dss_next, + ATOMIC_RELEASE); + extent_dss_extending_finish(); + + if (gap_size_page != 0) { + extent_dalloc_gap(tsdn, arena, gap); + } else { + extent_dalloc(tsdn, arena, gap); + } + if (!*commit) { + *commit = pages_decommit(ret, size); + } + if (*zero && *commit) { + extent_hooks_t *extent_hooks = + EXTENT_HOOKS_INITIALIZER; + extent_t extent; + + extent_init(&extent, arena, ret, size, + size, false, NSIZES, + extent_state_active, false, true, + true); + if (extent_purge_forced_wrapper(tsdn, + arena, &extent_hooks, &extent, 0, + size)) { + memset(ret, 0, size); + } + } + return ret; + } + /* + * Failure, whether due to OOM or a race with a raw + * sbrk() call from outside the allocator. + */ + if (dss_prev == (void *)-1) { + /* OOM. */ + atomic_store_b(&dss_exhausted, true, + ATOMIC_RELEASE); + goto label_oom; + } + } + } +label_oom: + extent_dss_extending_finish(); + extent_dalloc(tsdn, arena, gap); + return NULL; +} + +static bool +extent_in_dss_helper(void *addr, void *max) { + return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr < + (uintptr_t)max); +} + +bool +extent_in_dss(void *addr) { + cassert(have_dss); + + return extent_in_dss_helper(addr, atomic_load_p(&dss_max, + ATOMIC_ACQUIRE)); +} + +bool +extent_dss_mergeable(void *addr_a, void *addr_b) { + void *max; + + cassert(have_dss); + + if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b < + (uintptr_t)dss_base) { + return true; + } + + max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE); + return (extent_in_dss_helper(addr_a, max) == + extent_in_dss_helper(addr_b, max)); +} + +void +extent_dss_boot(void) { + cassert(have_dss); + + dss_base = extent_dss_sbrk(0); + atomic_store_b(&dss_extending, false, ATOMIC_RELAXED); + atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED); + atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED); +} + +/******************************************************************************/ diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/extent_mmap.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/extent_mmap.c new file mode 100644 index 0000000..8d607dc --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/extent_mmap.c @@ -0,0 +1,42 @@ +#define JEMALLOC_EXTENT_MMAP_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_mmap.h" + +/******************************************************************************/ +/* Data. 
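The gap computation in extent_alloc_dss() above is the standard power-of-two round-up that PAGE_CEILING() and ALIGNMENT_CEILING() expand to. Standalone, with a worked case showing how the page-aligned slack between the old break and the aligned return address becomes a recyclable gap extent:

    #include <assert.h>
    #include <stdint.h>

    static uintptr_t
    align_up(uintptr_t x, uintptr_t align) { /* align: a power of two */
        return (x + (align - 1)) & ~(align - 1);
    }

    int
    main(void) {
        uintptr_t brk = 0x1000123;                /* current sbrk(0) */
        uintptr_t page = align_up(brk, 0x1000);   /* 0x1001000: gap start */
        uintptr_t ret = align_up(page, 0x10000);  /* 0x1010000: user addr */
        assert(ret - page == 0xf000);             /* recyclable gap pages */
        return 0;
    }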
*/ + +bool opt_retain = +#ifdef JEMALLOC_RETAIN + true +#else + false +#endif + ; + +/******************************************************************************/ + +void * +extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, + bool *commit) { + void *ret = pages_map(new_addr, size, ALIGNMENT_CEILING(alignment, + PAGE), commit); + if (ret == NULL) { + return NULL; + } + assert(ret != NULL); + if (*commit) { + *zero = true; + } + return ret; +} + +bool +extent_dalloc_mmap(void *addr, size_t size) { + if (!opt_retain) { + pages_unmap(addr, size); + } + return opt_retain; +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/hash.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/hash.c new file mode 100644 index 0000000..7b2bdc2 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/hash.c @@ -0,0 +1,3 @@ +#define JEMALLOC_HASH_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/hooks.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/hooks.c new file mode 100644 index 0000000..6266ecd --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/hooks.c @@ -0,0 +1,12 @@ +#include "jemalloc/internal/jemalloc_preamble.h" + +/* + * The hooks are a little bit screwy -- they're not genuinely exported in the + * sense that we want them available to end-users, but we do want them visible + * from outside the generated library, so that we can use them in test code. + */ +JEMALLOC_EXPORT +void (*hooks_arena_new_hook)() = NULL; + +JEMALLOC_EXPORT +void (*hooks_libc_hook)() = NULL; diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/huge.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/huge.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/huge.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/huge.c diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/jemalloc.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/jemalloc.c new file mode 100644 index 0000000..5b936cb --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/jemalloc.c @@ -0,0 +1,3337 @@ +#define JEMALLOC_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/log.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/spin.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/util.h" + +/******************************************************************************/ +/* Data. */ + +/* Runtime configuration options. 
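extent_alloc_mmap() above delegates alignment to pages_map(). For reference, the usual do-it-by-hand technique is to over-map by the alignment and trim both ends; a sketch without the retry logic and error-path care the real pages code needs (MAP_ANONYMOUS spelling varies by platform):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static void *
    map_aligned(size_t size, size_t alignment) { /* alignment: power of two */
        size_t over = size + alignment;
        uintptr_t raw = (uintptr_t)mmap(NULL, over, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if ((void *)raw == MAP_FAILED) {
            return NULL;
        }
        uintptr_t ret = (raw + (alignment - 1)) & ~(alignment - 1);
        if (ret != raw) {
            munmap((void *)raw, ret - raw);          /* leading slack */
        }
        size_t trail = (raw + over) - (ret + size);
        if (trail != 0) {
            munmap((void *)(ret + size), trail);     /* trailing slack */
        }
        return (void *)ret;
    }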
*/ +const char *je_malloc_conf +#ifndef _WIN32 + JEMALLOC_ATTR(weak) +#endif + ; +bool opt_abort = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +bool opt_abort_conf = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +const char *opt_junk = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + "true" +#else + "false" +#endif + ; +bool opt_junk_alloc = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + true +#else + false +#endif + ; +bool opt_junk_free = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + true +#else + false +#endif + ; + +bool opt_utrace = false; +bool opt_xmalloc = false; +bool opt_zero = false; +unsigned opt_narenas = 0; + +unsigned ncpus; + +/* Protects arenas initialization. */ +malloc_mutex_t arenas_lock; +/* + * Arenas that are used to service external requests. Not all elements of the + * arenas array are necessarily used; arenas are created lazily as needed. + * + * arenas[0..narenas_auto) are used for automatic multiplexing of threads and + * arenas. arenas[narenas_auto..narenas_total) are only used if the application + * takes some action to create them and allocate from them. + * + * Points to an arena_t. + */ +JEMALLOC_ALIGNED(CACHELINE) +atomic_p_t arenas[MALLOCX_ARENA_LIMIT]; +static atomic_u_t narenas_total; /* Use narenas_total_*(). */ +static arena_t *a0; /* arenas[0]; read-only after initialization. */ +unsigned narenas_auto; /* Read-only after initialization. */ + +typedef enum { + malloc_init_uninitialized = 3, + malloc_init_a0_initialized = 2, + malloc_init_recursible = 1, + malloc_init_initialized = 0 /* Common case --> jnz. */ +} malloc_init_t; +static malloc_init_t malloc_init_state = malloc_init_uninitialized; + +/* False should be the common case. Set to true to trigger initialization. */ +bool malloc_slow = true; + +/* When malloc_slow is true, set the corresponding bits for sanity check. */ +enum { + flag_opt_junk_alloc = (1U), + flag_opt_junk_free = (1U << 1), + flag_opt_zero = (1U << 2), + flag_opt_utrace = (1U << 3), + flag_opt_xmalloc = (1U << 4) +}; +static uint8_t malloc_slow_flags; + +#ifdef JEMALLOC_THREADED_INIT +/* Used to let the initializing thread recursively allocate. */ +# define NO_INITIALIZER ((unsigned long)0) +# define INITIALIZER pthread_self() +# define IS_INITIALIZER (malloc_initializer == pthread_self()) +static pthread_t malloc_initializer = NO_INITIALIZER; +#else +# define NO_INITIALIZER false +# define INITIALIZER true +# define IS_INITIALIZER malloc_initializer +static bool malloc_initializer = NO_INITIALIZER; +#endif + +/* Used to avoid initialization races. */ +#ifdef _WIN32 +#if _WIN32_WINNT >= 0x0600 +static malloc_mutex_t init_lock = SRWLOCK_INIT; +#else +static malloc_mutex_t init_lock; +static bool init_lock_initialized = false; + +JEMALLOC_ATTR(constructor) +static void WINAPI +_init_init_lock(void) { + /* + * If another constructor in the same binary is using mallctl to e.g. + * set up extent hooks, it may end up running before this one, and + * malloc_init_hard will crash trying to lock the uninitialized lock. So + * we force an initialization of the lock in malloc_init_hard as well. + * We don't try to care about atomicity of the accessed to the + * init_lock_initialized boolean, since it really only matters early in + * the process creation, before any separate thread normally starts + * doing anything. 
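The weak je_malloc_conf symbol above is what the man page documents as malloc_conf (the je_ prefix is stripped in default builds): an application-supplied options string that malloc_conf_init() below parses alongside the compile-time string, the /etc/malloc.conf symlink, and the MALLOC_CONF environment variable. For example, baking defaults into a program while leaving the environment able to override them:

    #include <jemalloc/jemalloc.h>

    /* Application-level defaults, parsed at allocator boot. */
    const char *malloc_conf = "abort_conf:true,narenas:4,junk:false";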
+ */ + if (!init_lock_initialized) { + malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT, + malloc_mutex_rank_exclusive); + } + init_lock_initialized = true; +} + +#ifdef _MSC_VER +# pragma section(".CRT$XCU", read) +JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) +static const void (WINAPI *init_init_lock)(void) = _init_init_lock; +#endif +#endif +#else +static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; +#endif + +typedef struct { + void *p; /* Input pointer (as in realloc(p, s)). */ + size_t s; /* Request size. */ + void *r; /* Result pointer. */ +} malloc_utrace_t; + +#ifdef JEMALLOC_UTRACE +# define UTRACE(a, b, c) do { \ + if (unlikely(opt_utrace)) { \ + int utrace_serrno = errno; \ + malloc_utrace_t ut; \ + ut.p = (a); \ + ut.s = (b); \ + ut.r = (c); \ + utrace(&ut, sizeof(ut)); \ + errno = utrace_serrno; \ + } \ +} while (0) +#else +# define UTRACE(a, b, c) +#endif + +/* Whether encountered any invalid config options. */ +static bool had_conf_error = false; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +static bool malloc_init_hard_a0(void); +static bool malloc_init_hard(void); + +/******************************************************************************/ +/* + * Begin miscellaneous support functions. + */ + +bool +malloc_initialized(void) { + return (malloc_init_state == malloc_init_initialized); +} + +JEMALLOC_ALWAYS_INLINE bool +malloc_init_a0(void) { + if (unlikely(malloc_init_state == malloc_init_uninitialized)) { + return malloc_init_hard_a0(); + } + return false; +} + +JEMALLOC_ALWAYS_INLINE bool +malloc_init(void) { + if (unlikely(!malloc_initialized()) && malloc_init_hard()) { + return true; + } + return false; +} + +/* + * The a0*() functions are used instead of i{d,}alloc() in situations that + * cannot tolerate TLS variable access. + */ + +static void * +a0ialloc(size_t size, bool zero, bool is_internal) { + if (unlikely(malloc_init_a0())) { + return NULL; + } + + return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL, + is_internal, arena_get(TSDN_NULL, 0, true), true); +} + +static void +a0idalloc(void *ptr, bool is_internal) { + idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true); +} + +void * +a0malloc(size_t size) { + return a0ialloc(size, false, true); +} + +void +a0dalloc(void *ptr) { + a0idalloc(ptr, true); +} + +/* + * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive + * situations that cannot tolerate TLS variable access (TLS allocation and very + * early internal data structure initialization). 
+ */ + +void * +bootstrap_malloc(size_t size) { + if (unlikely(size == 0)) { + size = 1; + } + + return a0ialloc(size, false, false); +} + +void * +bootstrap_calloc(size_t num, size_t size) { + size_t num_size; + + num_size = num * size; + if (unlikely(num_size == 0)) { + assert(num == 0 || size == 0); + num_size = 1; + } + + return a0ialloc(num_size, true, false); +} + +void +bootstrap_free(void *ptr) { + if (unlikely(ptr == NULL)) { + return; + } + + a0idalloc(ptr, false); +} + +void +arena_set(unsigned ind, arena_t *arena) { + atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE); +} + +static void +narenas_total_set(unsigned narenas) { + atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE); +} + +static void +narenas_total_inc(void) { + atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE); +} + +unsigned +narenas_total_get(void) { + return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE); +} + +/* Create a new arena and insert it into the arenas array at index ind. */ +static arena_t * +arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + arena_t *arena; + + assert(ind <= narenas_total_get()); + if (ind >= MALLOCX_ARENA_LIMIT) { + return NULL; + } + if (ind == narenas_total_get()) { + narenas_total_inc(); + } + + /* + * Another thread may have already initialized arenas[ind] if it's an + * auto arena. + */ + arena = arena_get(tsdn, ind, false); + if (arena != NULL) { + assert(ind < narenas_auto); + return arena; + } + + /* Actually initialize the arena. */ + arena = arena_new(tsdn, ind, extent_hooks); + + return arena; +} + +static void +arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { + if (ind == 0) { + return; + } + if (have_background_thread) { + bool err; + malloc_mutex_lock(tsdn, &background_thread_lock); + err = background_thread_create(tsdn_tsd(tsdn), ind); + malloc_mutex_unlock(tsdn, &background_thread_lock); + if (err) { + malloc_printf(": error in background thread " + "creation for arena %u. 
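bootstrap_calloc() above multiplies without an overflow guard, presumably tolerable for its trusted FreeBSD bootstrap callers; the assert only covers the num_size == 0 case. A general-purpose calloc path needs the usual check:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Returns true if num * size would overflow size_t. */
    static bool
    checked_mul(size_t num, size_t size, size_t *out) {
        if (num != 0 && size > SIZE_MAX / num) {
            return true;
        }
        *out = num * size;
        return false;
    }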
Abort.\n", ind); + abort(); + } + } +} + +arena_t * +arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + arena_t *arena; + + malloc_mutex_lock(tsdn, &arenas_lock); + arena = arena_init_locked(tsdn, ind, extent_hooks); + malloc_mutex_unlock(tsdn, &arenas_lock); + + arena_new_create_background_thread(tsdn, ind); + + return arena; +} + +static void +arena_bind(tsd_t *tsd, unsigned ind, bool internal) { + arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_inc(arena, internal); + + if (internal) { + tsd_iarena_set(tsd, arena); + } else { + tsd_arena_set(tsd, arena); + } +} + +void +arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { + arena_t *oldarena, *newarena; + + oldarena = arena_get(tsd_tsdn(tsd), oldind, false); + newarena = arena_get(tsd_tsdn(tsd), newind, false); + arena_nthreads_dec(oldarena, false); + arena_nthreads_inc(newarena, false); + tsd_arena_set(tsd, newarena); +} + +static void +arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { + arena_t *arena; + + arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_dec(arena, internal); + + if (internal) { + tsd_iarena_set(tsd, NULL); + } else { + tsd_arena_set(tsd, NULL); + } +} + +arena_tdata_t * +arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { + arena_tdata_t *tdata, *arenas_tdata_old; + arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); + unsigned narenas_tdata_old, i; + unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); + unsigned narenas_actual = narenas_total_get(); + + /* + * Dissociate old tdata array (and set up for deallocation upon return) + * if it's too small. + */ + if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { + arenas_tdata_old = arenas_tdata; + narenas_tdata_old = narenas_tdata; + arenas_tdata = NULL; + narenas_tdata = 0; + tsd_arenas_tdata_set(tsd, arenas_tdata); + tsd_narenas_tdata_set(tsd, narenas_tdata); + } else { + arenas_tdata_old = NULL; + narenas_tdata_old = 0; + } + + /* Allocate tdata array if it's missing. */ + if (arenas_tdata == NULL) { + bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); + narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; + + if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { + *arenas_tdata_bypassp = true; + arenas_tdata = (arena_tdata_t *)a0malloc( + sizeof(arena_tdata_t) * narenas_tdata); + *arenas_tdata_bypassp = false; + } + if (arenas_tdata == NULL) { + tdata = NULL; + goto label_return; + } + assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); + tsd_arenas_tdata_set(tsd, arenas_tdata); + tsd_narenas_tdata_set(tsd, narenas_tdata); + } + + /* + * Copy to tdata array. It's possible that the actual number of arenas + * has increased since narenas_total_get() was called above, but that + * causes no correctness issues unless two threads concurrently execute + * the arenas.create mallctl, which we trust mallctl synchronization to + * prevent. + */ + + /* Copy/initialize tickers. */ + for (i = 0; i < narenas_actual; i++) { + if (i < narenas_tdata_old) { + ticker_copy(&arenas_tdata[i].decay_ticker, + &arenas_tdata_old[i].decay_ticker); + } else { + ticker_init(&arenas_tdata[i].decay_ticker, + DECAY_NTICKS_PER_UPDATE); + } + } + if (narenas_tdata > narenas_actual) { + memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) + * (narenas_tdata - narenas_actual)); + } + + /* Read the refreshed tdata array. 
*/ + tdata = &arenas_tdata[ind]; +label_return: + if (arenas_tdata_old != NULL) { + a0dalloc(arenas_tdata_old); + } + return tdata; +} + +/* Slow path, called only by arena_choose(). */ +arena_t * +arena_choose_hard(tsd_t *tsd, bool internal) { + arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); + + if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) { + unsigned choose = percpu_arena_choose(); + ret = arena_get(tsd_tsdn(tsd), choose, true); + assert(ret != NULL); + arena_bind(tsd, arena_ind_get(ret), false); + arena_bind(tsd, arena_ind_get(ret), true); + + return ret; + } + + if (narenas_auto > 1) { + unsigned i, j, choose[2], first_null; + bool is_new_arena[2]; + + /* + * Determine binding for both non-internal and internal + * allocation. + * + * choose[0]: For application allocation. + * choose[1]: For internal metadata allocation. + */ + + for (j = 0; j < 2; j++) { + choose[j] = 0; + is_new_arena[j] = false; + } + + first_null = narenas_auto; + malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); + assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); + for (i = 1; i < narenas_auto; i++) { + if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { + /* + * Choose the first arena that has the lowest + * number of threads assigned to it. + */ + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get( + tsd_tsdn(tsd), i, false), !!j) < + arena_nthreads_get(arena_get( + tsd_tsdn(tsd), choose[j], false), + !!j)) { + choose[j] = i; + } + } + } else if (first_null == narenas_auto) { + /* + * Record the index of the first uninitialized + * arena, in case all extant arenas are in use. + * + * NB: It is possible for there to be + * discontinuities in terms of initialized + * versus uninitialized arenas, due to the + * "thread.arena" mallctl. + */ + first_null = i; + } + } + + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), + choose[j], false), !!j) == 0 || first_null == + narenas_auto) { + /* + * Use an unloaded arena, or the least loaded + * arena if all arenas are already initialized. + */ + if (!!j == internal) { + ret = arena_get(tsd_tsdn(tsd), + choose[j], false); + } + } else { + arena_t *arena; + + /* Initialize a new arena. */ + choose[j] = first_null; + arena = arena_init_locked(tsd_tsdn(tsd), + choose[j], + (extent_hooks_t *)&extent_hooks_default); + if (arena == NULL) { + malloc_mutex_unlock(tsd_tsdn(tsd), + &arenas_lock); + return NULL; + } + is_new_arena[j] = true; + if (!!j == internal) { + ret = arena; + } + } + arena_bind(tsd, choose[j], !!j); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); + + for (j = 0; j < 2; j++) { + if (is_new_arena[j]) { + assert(choose[j] > 0); + arena_new_create_background_thread( + tsd_tsdn(tsd), choose[j]); + } + } + + } else { + ret = arena_get(tsd_tsdn(tsd), 0, false); + arena_bind(tsd, 0, false); + arena_bind(tsd, 0, true); + } + + return ret; +} + +void +iarena_cleanup(tsd_t *tsd) { + arena_t *iarena; + + iarena = tsd_iarena_get(tsd); + if (iarena != NULL) { + arena_unbind(tsd, arena_ind_get(iarena), true); + } +} + +void +arena_cleanup(tsd_t *tsd) { + arena_t *arena; + + arena = tsd_arena_get(tsd); + if (arena != NULL) { + arena_unbind(tsd, arena_ind_get(arena), false); + } +} + +void +arenas_tdata_cleanup(tsd_t *tsd) { + arena_tdata_t *arenas_tdata; + + /* Prevent tsd->arenas_tdata from being (re)created. 
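The selection policy buried in arena_choose_hard() above, restated without the locking and the dual application/internal bookkeeping: take the least-loaded initialized arena if it is idle or if no free slot remains, otherwise hand back the first uninitialized slot for creation. Toy types, single-threaded, slot 0 assumed initialized as in jemalloc:

    #include <stddef.h>

    typedef struct {
        unsigned nthreads;
        int initialized;
    } toy_arena_t;

    static size_t
    choose_arena(const toy_arena_t *arenas, size_t n) {
        size_t best = 0, first_null = n;
        for (size_t i = 1; i < n; i++) {
            if (!arenas[i].initialized) {
                if (first_null == n) {
                    first_null = i;
                }
                continue;
            }
            if (arenas[i].nthreads < arenas[best].nthreads) {
                best = i;
            }
        }
        /* An idle existing arena beats creating a new one. */
        if (arenas[best].nthreads == 0 || first_null == n) {
            return best;
        }
        return first_null; /* caller would initialize this slot */
    }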
*/ + *tsd_arenas_tdata_bypassp_get(tsd) = true; + + arenas_tdata = tsd_arenas_tdata_get(tsd); + if (arenas_tdata != NULL) { + tsd_arenas_tdata_set(tsd, NULL); + a0dalloc(arenas_tdata); + } +} + +static void +stats_print_atexit(void) { + if (config_stats) { + tsdn_t *tsdn; + unsigned narenas, i; + + tsdn = tsdn_fetch(); + + /* + * Merge stats from extant threads. This is racy, since + * individual threads do not lock when recording tcache stats + * events. As a consequence, the final stats may be slightly + * out of date by the time they are reported, if other threads + * continue to allocate. + */ + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + arena_t *arena = arena_get(tsdn, i, false); + if (arena != NULL) { + tcache_t *tcache; + + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); + ql_foreach(tcache, &arena->tcache_ql, link) { + tcache_stats_merge(tsdn, tcache, arena); + } + malloc_mutex_unlock(tsdn, + &arena->tcache_ql_mtx); + } + } + } + je_malloc_stats_print(NULL, NULL, opt_stats_print_opts); +} + +/* + * Ensure that we don't hold any locks upon entry to or exit from allocator + * code (in a "broad" sense that doesn't count a reentrant allocation as an + * entrance or exit). + */ +JEMALLOC_ALWAYS_INLINE void +check_entry_exit_locking(tsdn_t *tsdn) { + if (!config_debug) { + return; + } + if (tsdn_null(tsdn)) { + return; + } + tsd_t *tsd = tsdn_tsd(tsdn); + /* + * It's possible we hold locks at entry/exit if we're in a nested + * allocation. + */ + int8_t reentrancy_level = tsd_reentrancy_level_get(tsd); + if (reentrancy_level != 0) { + return; + } + witness_assert_lockless(tsdn_witness_tsdp_get(tsdn)); +} + +/* + * End miscellaneous support functions. + */ +/******************************************************************************/ +/* + * Begin initialization functions. + */ + +static char * +jemalloc_secure_getenv(const char *name) { +#ifdef JEMALLOC_HAVE_SECURE_GETENV + return secure_getenv(name); +#else +# ifdef JEMALLOC_HAVE_ISSETUGID + if (issetugid() != 0) { + return NULL; + } +# endif + return getenv(name); +#endif +} + +static unsigned +malloc_ncpus(void) { + long result; + +#ifdef _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + result = si.dwNumberOfProcessors; +#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) + /* + * glibc >= 2.6 has the CPU_COUNT macro. + * + * glibc's sysconf() uses isspace(). glibc allocates for the first time + * *before* setting up the isspace tables. Therefore we need a + * different method to get the number of CPUs. + */ + { + cpu_set_t set; + + pthread_getaffinity_np(pthread_self(), sizeof(set), &set); + result = CPU_COUNT(&set); + } +#else + result = sysconf(_SC_NPROCESSORS_ONLN); +#endif + return ((result == -1) ? 1 : (unsigned)result); +} + +static void +init_opt_stats_print_opts(const char *v, size_t vlen) { + size_t opts_len = strlen(opt_stats_print_opts); + assert(opts_len <= stats_print_tot_num_options); + + for (size_t i = 0; i < vlen; i++) { + switch (v[i]) { +#define OPTION(o, v, d, s) case o: break; + STATS_PRINT_OPTIONS +#undef OPTION + default: continue; + } + + if (strchr(opt_stats_print_opts, v[i]) != NULL) { + /* Ignore repeated. 
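stats_print_atexit() above lands in the public malloc_stats_print(), and the option characters filtered by init_opt_stats_print_opts() are the same ones callers pass directly. For instance, JSON output through the default write callback (a NULL write_cb goes to malloc_message, i.e. stderr):

    #include <jemalloc/jemalloc.h>

    static void
    dump_stats_json(void) {
        malloc_stats_print(NULL, NULL, "J");
    }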
*/ + continue; + } + + opt_stats_print_opts[opts_len++] = v[i]; + opt_stats_print_opts[opts_len] = '\0'; + assert(opts_len <= stats_print_tot_num_options); + } + assert(opts_len == strlen(opt_stats_print_opts)); +} + +static bool +malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, + char const **v_p, size_t *vlen_p) { + bool accept; + const char *opts = *opts_p; + + *k_p = opts; + + for (accept = false; !accept;) { + switch (*opts) { + case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': + case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': + case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': + case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': + case 'Y': case 'Z': + case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': + case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': + case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': + case 's': case 't': case 'u': case 'v': case 'w': case 'x': + case 'y': case 'z': + case '0': case '1': case '2': case '3': case '4': case '5': + case '6': case '7': case '8': case '9': + case '_': + opts++; + break; + case ':': + opts++; + *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; + *v_p = opts; + accept = true; + break; + case '\0': + if (opts != *opts_p) { + malloc_write(": Conf string ends " + "with key\n"); + } + return true; + default: + malloc_write(": Malformed conf string\n"); + return true; + } + } + + for (accept = false; !accept;) { + switch (*opts) { + case ',': + opts++; + /* + * Look ahead one character here, because the next time + * this function is called, it will assume that end of + * input has been cleanly reached if no input remains, + * but we have optimistically already consumed the + * comma if one exists. + */ + if (*opts == '\0') { + malloc_write(": Conf string ends " + "with comma\n"); + } + *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; + accept = true; + break; + case '\0': + *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; + accept = true; + break; + default: + opts++; + break; + } + } + + *opts_p = opts; + return false; +} + +static void +malloc_abort_invalid_conf(void) { + assert(opt_abort_conf); + malloc_printf(": Abort (abort_conf:true) on invalid conf " + "value (see above).\n"); + abort(); +} + +static void +malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, + size_t vlen) { + malloc_printf(": %s: %.*s:%.*s\n", msg, (int)klen, k, + (int)vlen, v); + /* If abort_conf is set, error out after processing all options. */ + had_conf_error = true; +} + +static void +malloc_slow_flag_init(void) { + /* + * Combine the runtime options into malloc_slow for fast path. Called + * after processing all the options. + */ + malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) + | (opt_junk_free ? flag_opt_junk_free : 0) + | (opt_zero ? flag_opt_zero : 0) + | (opt_utrace ? flag_opt_utrace : 0) + | (opt_xmalloc ? flag_opt_xmalloc : 0); + + malloc_slow = (malloc_slow_flags != 0); +} + +static void +malloc_conf_init(void) { + unsigned i; + char buf[PATH_MAX + 1]; + const char *opts, *k, *v; + size_t klen, vlen; + + for (i = 0; i < 4; i++) { + /* Get runtime configuration. */ + switch (i) { + case 0: + opts = config_malloc_conf; + break; + case 1: + if (je_malloc_conf != NULL) { + /* + * Use options that were compiled into the + * program. + */ + opts = je_malloc_conf; + } else { + /* No configuration specified. 
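malloc_conf_next() above tokenizes the documented grammar: comma-separated key:value pairs with keys drawn from [A-Za-z0-9_]. A compact standalone re-implementation of just the happy path, for illustration; the real parser also reports dangling keys and trailing commas:

    #include <stdio.h>
    #include <string.h>

    static const char *
    next_pair(const char *p, char *k, size_t ksz, char *v, size_t vsz) {
        const char *colon = strchr(p, ':');
        if (colon == NULL) {
            return NULL;
        }
        const char *comma = strchr(colon + 1, ',');
        size_t klen = (size_t)(colon - p);
        size_t vlen = comma != NULL ? (size_t)(comma - colon - 1) :
            strlen(colon + 1);
        snprintf(k, ksz, "%.*s", (int)(klen < ksz ? klen : ksz - 1), p);
        snprintf(v, vsz, "%.*s", (int)(vlen < vsz ? vlen : vsz - 1),
            colon + 1);
        return comma != NULL ? comma + 1 : colon + 1 + vlen;
    }

    int
    main(void) {
        const char *conf = "narenas:4,dss:primary,junk:false";
        char k[32], v[32];
        for (const char *p = conf; p != NULL && *p != '\0';
            p = next_pair(p, k, sizeof(k), v, sizeof(v))) {
            printf("%s = %s\n", k, v);
        }
        return 0;
    }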
*/ + buf[0] = '\0'; + opts = buf; + } + break; + case 2: { + ssize_t linklen = 0; +#ifndef _WIN32 + int saved_errno = errno; + const char *linkname = +# ifdef JEMALLOC_PREFIX + "/etc/"JEMALLOC_PREFIX"malloc.conf" +# else + "/etc/malloc.conf" +# endif + ; + + /* + * Try to use the contents of the "/etc/malloc.conf" + * symbolic link's name. + */ + linklen = readlink(linkname, buf, sizeof(buf) - 1); + if (linklen == -1) { + /* No configuration specified. */ + linklen = 0; + /* Restore errno. */ + set_errno(saved_errno); + } +#endif + buf[linklen] = '\0'; + opts = buf; + break; + } case 3: { + const char *envname = +#ifdef JEMALLOC_PREFIX + JEMALLOC_CPREFIX"MALLOC_CONF" +#else + "MALLOC_CONF" +#endif + ; + + if ((opts = jemalloc_secure_getenv(envname)) != NULL) { + /* + * Do nothing; opts is already initialized to + * the value of the MALLOC_CONF environment + * variable. + */ + } else { + /* No configuration specified. */ + buf[0] = '\0'; + opts = buf; + } + break; + } default: + not_reached(); + buf[0] = '\0'; + opts = buf; + } + + while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, + &vlen)) { +#define CONF_MATCH(n) \ + (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) +#define CONF_MATCH_VALUE(n) \ + (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) +#define CONF_HANDLE_BOOL(o, n) \ + if (CONF_MATCH(n)) { \ + if (CONF_MATCH_VALUE("true")) { \ + o = true; \ + } else if (CONF_MATCH_VALUE("false")) { \ + o = false; \ + } else { \ + malloc_conf_error( \ + "Invalid conf value", \ + k, klen, v, vlen); \ + } \ + continue; \ + } +#define CONF_MIN_no(um, min) false +#define CONF_MIN_yes(um, min) ((um) < (min)) +#define CONF_MAX_no(um, max) false +#define CONF_MAX_yes(um, max) ((um) > (max)) +#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ + if (CONF_MATCH(n)) { \ + uintmax_t um; \ + char *end; \ + \ + set_errno(0); \ + um = malloc_strtoumax(v, &end, 0); \ + if (get_errno() != 0 || (uintptr_t)end -\ + (uintptr_t)v != vlen) { \ + malloc_conf_error( \ + "Invalid conf value", \ + k, klen, v, vlen); \ + } else if (clip) { \ + if (CONF_MIN_##check_min(um, \ + (t)(min))) { \ + o = (t)(min); \ + } else if ( \ + CONF_MAX_##check_max(um, \ + (t)(max))) { \ + o = (t)(max); \ + } else { \ + o = (t)um; \ + } \ + } else { \ + if (CONF_MIN_##check_min(um, \ + (t)(min)) || \ + CONF_MAX_##check_max(um, \ + (t)(max))) { \ + malloc_conf_error( \ + "Out-of-range " \ + "conf value", \ + k, klen, v, vlen); \ + } else { \ + o = (t)um; \ + } \ + } \ + continue; \ + } +#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ + clip) \ + CONF_HANDLE_T_U(unsigned, o, n, min, max, \ + check_min, check_max, clip) +#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T_U(size_t, o, n, min, max, \ + check_min, check_max, clip) +#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ + if (CONF_MATCH(n)) { \ + long l; \ + char *end; \ + \ + set_errno(0); \ + l = strtol(v, &end, 0); \ + if (get_errno() != 0 || (uintptr_t)end -\ + (uintptr_t)v != vlen) { \ + malloc_conf_error( \ + "Invalid conf value", \ + k, klen, v, vlen); \ + } else if (l < (ssize_t)(min) || l > \ + (ssize_t)(max)) { \ + malloc_conf_error( \ + "Out-of-range conf value", \ + k, klen, v, vlen); \ + } else { \ + o = l; \ + } \ + continue; \ + } +#define CONF_HANDLE_CHAR_P(o, n, d) \ + if (CONF_MATCH(n)) { \ + size_t cpylen = (vlen <= \ + sizeof(o)-1) ? 
vlen : \ + sizeof(o)-1; \ + strncpy(o, v, cpylen); \ + o[cpylen] = '\0'; \ + continue; \ + } + + CONF_HANDLE_BOOL(opt_abort, "abort") + CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") + if (strncmp("metadata_thp", k, klen) == 0) { + int i; + bool match = false; + for (i = 0; i < metadata_thp_mode_limit; i++) { + if (strncmp(metadata_thp_mode_names[i], + v, vlen) == 0) { + opt_metadata_thp = i; + match = true; + break; + } + } + if (!match) { + malloc_conf_error("Invalid conf value", + k, klen, v, vlen); + } + continue; + } + CONF_HANDLE_BOOL(opt_retain, "retain") + if (strncmp("dss", k, klen) == 0) { + int i; + bool match = false; + for (i = 0; i < dss_prec_limit; i++) { + if (strncmp(dss_prec_names[i], v, vlen) + == 0) { + if (extent_dss_prec_set(i)) { + malloc_conf_error( + "Error setting dss", + k, klen, v, vlen); + } else { + opt_dss = + dss_prec_names[i]; + match = true; + break; + } + } + } + if (!match) { + malloc_conf_error("Invalid conf value", + k, klen, v, vlen); + } + continue; + } + CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, + UINT_MAX, yes, no, false) + CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, + "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < + QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : + SSIZE_MAX); + CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms, + "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < + QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : + SSIZE_MAX); + CONF_HANDLE_BOOL(opt_stats_print, "stats_print") + if (CONF_MATCH("stats_print_opts")) { + init_opt_stats_print_opts(v, vlen); + continue; + } + if (config_fill) { + if (CONF_MATCH("junk")) { + if (CONF_MATCH_VALUE("true")) { + opt_junk = "true"; + opt_junk_alloc = opt_junk_free = + true; + } else if (CONF_MATCH_VALUE("false")) { + opt_junk = "false"; + opt_junk_alloc = opt_junk_free = + false; + } else if (CONF_MATCH_VALUE("alloc")) { + opt_junk = "alloc"; + opt_junk_alloc = true; + opt_junk_free = false; + } else if (CONF_MATCH_VALUE("free")) { + opt_junk = "free"; + opt_junk_alloc = false; + opt_junk_free = true; + } else { + malloc_conf_error( + "Invalid conf value", k, + klen, v, vlen); + } + continue; + } + CONF_HANDLE_BOOL(opt_zero, "zero") + } + if (config_utrace) { + CONF_HANDLE_BOOL(opt_utrace, "utrace") + } + if (config_xmalloc) { + CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") + } + CONF_HANDLE_BOOL(opt_tcache, "tcache") + CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit, + "lg_extent_max_active_fit", 0, + (sizeof(size_t) << 3), yes, yes, false) + CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", + -1, (sizeof(size_t) << 3) - 1) + if (strncmp("percpu_arena", k, klen) == 0) { + bool match = false; + for (int i = percpu_arena_mode_names_base; i < + percpu_arena_mode_names_limit; i++) { + if (strncmp(percpu_arena_mode_names[i], + v, vlen) == 0) { + if (!have_percpu_arena) { + malloc_conf_error( + "No getcpu support", + k, klen, v, vlen); + } + opt_percpu_arena = i; + match = true; + break; + } + } + if (!match) { + malloc_conf_error("Invalid conf value", + k, klen, v, vlen); + } + continue; + } + CONF_HANDLE_BOOL(opt_background_thread, + "background_thread"); + CONF_HANDLE_SIZE_T(opt_max_background_threads, + "max_background_threads", 1, + opt_max_background_threads, yes, yes, + true); + if (config_prof) { + CONF_HANDLE_BOOL(opt_prof, "prof") + CONF_HANDLE_CHAR_P(opt_prof_prefix, + "prof_prefix", "jeprof") + CONF_HANDLE_BOOL(opt_prof_active, "prof_active") + CONF_HANDLE_BOOL(opt_prof_thread_active_init, + "prof_thread_active_init") + CONF_HANDLE_SIZE_T(opt_lg_prof_sample, + "lg_prof_sample", 0, 
(sizeof(uint64_t) << 3) + - 1, no, yes, true) + CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") + CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, + "lg_prof_interval", -1, + (sizeof(uint64_t) << 3) - 1) + CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") + CONF_HANDLE_BOOL(opt_prof_final, "prof_final") + CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") + } + if (config_log) { + if (CONF_MATCH("log")) { + size_t cpylen = ( + vlen <= sizeof(log_var_names) ? + vlen : sizeof(log_var_names) - 1); + strncpy(log_var_names, v, cpylen); + log_var_names[cpylen] = '\0'; + continue; + } + } + if (CONF_MATCH("thp")) { + bool match = false; + for (int i = 0; i < thp_mode_names_limit; i++) { + if (strncmp(thp_mode_names[i],v, vlen) + == 0) { + if (!have_madvise_huge) { + malloc_conf_error( + "No THP support", + k, klen, v, vlen); + } + opt_thp = i; + match = true; + break; + } + } + if (!match) { + malloc_conf_error("Invalid conf value", + k, klen, v, vlen); + } + continue; + } + malloc_conf_error("Invalid conf pair", k, klen, v, + vlen); +#undef CONF_MATCH +#undef CONF_MATCH_VALUE +#undef CONF_HANDLE_BOOL +#undef CONF_MIN_no +#undef CONF_MIN_yes +#undef CONF_MAX_no +#undef CONF_MAX_yes +#undef CONF_HANDLE_T_U +#undef CONF_HANDLE_UNSIGNED +#undef CONF_HANDLE_SIZE_T +#undef CONF_HANDLE_SSIZE_T +#undef CONF_HANDLE_CHAR_P + } + if (opt_abort_conf && had_conf_error) { + malloc_abort_invalid_conf(); + } + } + atomic_store_b(&log_init_done, true, ATOMIC_RELEASE); +} + +static bool +malloc_init_hard_needed(void) { + if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == + malloc_init_recursible)) { + /* + * Another thread initialized the allocator before this one + * acquired init_lock, or this thread is the initializing + * thread, and it is recursively allocating. + */ + return false; + } +#ifdef JEMALLOC_THREADED_INIT + if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { + /* Busy-wait until the initializing thread completes. */ + spin_t spinner = SPIN_INITIALIZER; + do { + malloc_mutex_unlock(TSDN_NULL, &init_lock); + spin_adaptive(&spinner); + malloc_mutex_lock(TSDN_NULL, &init_lock); + } while (!malloc_initialized()); + return false; + } +#endif + return true; +} + +static bool +malloc_init_hard_a0_locked() { + malloc_initializer = INITIALIZER; + + if (config_prof) { + prof_boot0(); + } + malloc_conf_init(); + if (opt_stats_print) { + /* Print statistics at exit. */ + if (atexit(stats_print_atexit) != 0) { + malloc_write(": Error in atexit()\n"); + if (opt_abort) { + abort(); + } + } + } + if (pages_boot()) { + return true; + } + if (base_boot(TSDN_NULL)) { + return true; + } + if (extent_boot()) { + return true; + } + if (ctl_boot()) { + return true; + } + if (config_prof) { + prof_boot1(); + } + arena_boot(); + if (tcache_boot(TSDN_NULL)) { + return true; + } + if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, + malloc_mutex_rank_exclusive)) { + return true; + } + /* + * Create enough scaffolding to allow recursive allocation in + * malloc_ncpus(). + */ + narenas_auto = 1; + memset(arenas, 0, sizeof(arena_t *) * narenas_auto); + /* + * Initialize one arena here. The rest are lazily created in + * arena_choose_hard(). 
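Every option accepted by the dispatch above is re-exported read-only under the documented opt. mallctl namespace once boot finishes, which is the practical way to verify what a given MALLOC_CONF string actually took effect:

    #include <jemalloc/jemalloc.h>
    #include <stdbool.h>
    #include <stdio.h>

    int
    main(void) {
        bool retain;
        unsigned narenas;
        size_t sz;

        sz = sizeof(retain);
        mallctl("opt.retain", (void *)&retain, &sz, NULL, 0);
        sz = sizeof(narenas);
        mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0);
        printf("retain:%d narenas:%u\n", retain, narenas);
        return 0;
    }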
+ */ + if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) + == NULL) { + return true; + } + a0 = arena_get(TSDN_NULL, 0, false); + malloc_init_state = malloc_init_a0_initialized; + + return false; +} + +static bool +malloc_init_hard_a0(void) { + bool ret; + + malloc_mutex_lock(TSDN_NULL, &init_lock); + ret = malloc_init_hard_a0_locked(); + malloc_mutex_unlock(TSDN_NULL, &init_lock); + return ret; +} + +/* Initialize data structures which may trigger recursive allocation. */ +static bool +malloc_init_hard_recursible(void) { + malloc_init_state = malloc_init_recursible; + + ncpus = malloc_ncpus(); + +#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ + && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ + !defined(__native_client__)) + /* LinuxThreads' pthread_atfork() allocates. */ + if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, + jemalloc_postfork_child) != 0) { + malloc_write(": Error in pthread_atfork()\n"); + if (opt_abort) { + abort(); + } + return true; + } +#endif + + if (background_thread_boot0()) { + return true; + } + + return false; +} + +static unsigned +malloc_narenas_default(void) { + assert(ncpus > 0); + /* + * For SMP systems, create more than one arena per CPU by + * default. + */ + if (ncpus > 1) { + return ncpus << 2; + } else { + return 1; + } +} + +static percpu_arena_mode_t +percpu_arena_as_initialized(percpu_arena_mode_t mode) { + assert(!malloc_initialized()); + assert(mode <= percpu_arena_disabled); + + if (mode != percpu_arena_disabled) { + mode += percpu_arena_mode_enabled_base; + } + + return mode; +} + +static bool +malloc_init_narenas(void) { + assert(ncpus > 0); + + if (opt_percpu_arena != percpu_arena_disabled) { + if (!have_percpu_arena || malloc_getcpu() < 0) { + opt_percpu_arena = percpu_arena_disabled; + malloc_printf(": perCPU arena getcpu() not " + "available. Setting narenas to %u.\n", opt_narenas ? + opt_narenas : malloc_narenas_default()); + if (opt_abort) { + abort(); + } + } else { + if (ncpus >= MALLOCX_ARENA_LIMIT) { + malloc_printf(": narenas w/ percpu" + "arena beyond limit (%d)\n", ncpus); + if (opt_abort) { + abort(); + } + return true; + } + /* NB: opt_percpu_arena isn't fully initialized yet. */ + if (percpu_arena_as_initialized(opt_percpu_arena) == + per_phycpu_arena && ncpus % 2 != 0) { + malloc_printf(": invalid " + "configuration -- per physical CPU arena " + "with odd number (%u) of CPUs (no hyper " + "threading?).\n", ncpus); + if (opt_abort) + abort(); + } + unsigned n = percpu_arena_ind_limit( + percpu_arena_as_initialized(opt_percpu_arena)); + if (opt_narenas < n) { + /* + * If narenas is specified with percpu_arena + * enabled, actual narenas is set as the greater + * of the two. percpu_arena_choose will be free + * to use any of the arenas based on CPU + * id. This is conservative (at a small cost) + * but ensures correctness. + * + * If for some reason the ncpus determined at + * boot is not the actual number (e.g. because + * of affinity setting from numactl), reserving + * narenas this way provides a workaround for + * percpu_arena. + */ + opt_narenas = n; + } + } + } + if (opt_narenas == 0) { + opt_narenas = malloc_narenas_default(); + } + assert(opt_narenas > 0); + + narenas_auto = opt_narenas; + /* + * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). 
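background_thread_boot0() above only reserves the machinery; once initialization completes, the documented background_thread mallctl toggles it, the same path arena_new_create_background_thread() relies on when opt_background_thread is set. Illustrative helper:

    #include <jemalloc/jemalloc.h>
    #include <stdbool.h>

    static int
    enable_background_threads(void) {
        bool enable = true;
        return mallctl("background_thread", NULL, NULL, (void *)&enable,
            sizeof(enable));
    }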
+ */ + if (narenas_auto >= MALLOCX_ARENA_LIMIT) { + narenas_auto = MALLOCX_ARENA_LIMIT - 1; + malloc_printf(": Reducing narenas to limit (%d)\n", + narenas_auto); + } + narenas_total_set(narenas_auto); + + return false; +} + +static void +malloc_init_percpu(void) { + opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena); +} + +static bool +malloc_init_hard_finish(void) { + if (malloc_mutex_boot()) { + return true; + } + + malloc_init_state = malloc_init_initialized; + malloc_slow_flag_init(); + + return false; +} + +static void +malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) { + malloc_mutex_assert_owner(tsdn, &init_lock); + malloc_mutex_unlock(tsdn, &init_lock); + if (reentrancy_set) { + assert(!tsdn_null(tsdn)); + tsd_t *tsd = tsdn_tsd(tsdn); + assert(tsd_reentrancy_level_get(tsd) > 0); + post_reentrancy(tsd); + } +} + +static bool +malloc_init_hard(void) { + tsd_t *tsd; + +#if defined(_WIN32) && _WIN32_WINNT < 0x0600 + _init_init_lock(); +#endif + malloc_mutex_lock(TSDN_NULL, &init_lock); + +#define UNLOCK_RETURN(tsdn, ret, reentrancy) \ + malloc_init_hard_cleanup(tsdn, reentrancy); \ + return ret; + + if (!malloc_init_hard_needed()) { + UNLOCK_RETURN(TSDN_NULL, false, false) + } + + if (malloc_init_state != malloc_init_a0_initialized && + malloc_init_hard_a0_locked()) { + UNLOCK_RETURN(TSDN_NULL, true, false) + } + + malloc_mutex_unlock(TSDN_NULL, &init_lock); + /* Recursive allocation relies on functional tsd. */ + tsd = malloc_tsd_boot0(); + if (tsd == NULL) { + return true; + } + if (malloc_init_hard_recursible()) { + return true; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); + /* Set reentrancy level to 1 during init. */ + pre_reentrancy(tsd, NULL); + /* Initialize narenas before prof_boot2 (for allocation). */ + if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) { + UNLOCK_RETURN(tsd_tsdn(tsd), true, true) + } + if (config_prof && prof_boot2(tsd)) { + UNLOCK_RETURN(tsd_tsdn(tsd), true, true) + } + + malloc_init_percpu(); + + if (malloc_init_hard_finish()) { + UNLOCK_RETURN(tsd_tsdn(tsd), true, true) + } + post_reentrancy(tsd); + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); + + witness_assert_lockless(witness_tsd_tsdn( + tsd_witness_tsdp_get_unsafe(tsd))); + malloc_tsd_boot1(); + /* Update TSD after tsd_boot1. */ + tsd = tsd_fetch(); + if (opt_background_thread) { + assert(have_background_thread); + /* + * Need to finish init & unlock first before creating background + * threads (pthread_create depends on malloc). ctl_init (which + * sets isthreaded) needs to be called without holding any lock. + */ + background_thread_ctl_init(tsd_tsdn(tsd)); + + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + bool err = background_thread_create(tsd, 0); + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + if (err) { + return true; + } + } +#undef UNLOCK_RETURN + return false; +} + +/* + * End initialization functions. + */ +/******************************************************************************/ +/* + * Begin allocation-path internal functions and data structures. + */ + +/* + * Settings determined by the documented behavior of the allocation functions. + */ +typedef struct static_opts_s static_opts_t; +struct static_opts_s { + /* Whether or not allocation size may overflow. */ + bool may_overflow; + /* Whether or not allocations of size 0 should be treated as size 1. */ + bool bump_empty_alloc; + /* + * Whether to assert that allocations are not of size 0 (after any + * bumping). 
+ */ + bool assert_nonempty_alloc; + + /* + * Whether or not to modify the 'result' argument to malloc in case of + * error. + */ + bool null_out_result_on_error; + /* Whether to set errno when we encounter an error condition. */ + bool set_errno_on_error; + + /* + * The minimum valid alignment for functions requesting aligned storage. + */ + size_t min_alignment; + + /* The error string to use if we oom. */ + const char *oom_string; + /* The error string to use if the passed-in alignment is invalid. */ + const char *invalid_alignment_string; + + /* + * False if we're configured to skip some time-consuming operations. + * + * This isn't really a malloc "behavior", but it acts as a useful + * summary of several other static (or at least, static after program + * initialization) options. + */ + bool slow; +}; + +JEMALLOC_ALWAYS_INLINE void +static_opts_init(static_opts_t *static_opts) { + static_opts->may_overflow = false; + static_opts->bump_empty_alloc = false; + static_opts->assert_nonempty_alloc = false; + static_opts->null_out_result_on_error = false; + static_opts->set_errno_on_error = false; + static_opts->min_alignment = 0; + static_opts->oom_string = ""; + static_opts->invalid_alignment_string = ""; + static_opts->slow = false; +} + +/* + * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we + * should have one constant here per magic value there. Note however that the + * representations need not be related. + */ +#define TCACHE_IND_NONE ((unsigned)-1) +#define TCACHE_IND_AUTOMATIC ((unsigned)-2) +#define ARENA_IND_AUTOMATIC ((unsigned)-1) + +typedef struct dynamic_opts_s dynamic_opts_t; +struct dynamic_opts_s { + void **result; + size_t num_items; + size_t item_size; + size_t alignment; + bool zero; + unsigned tcache_ind; + unsigned arena_ind; +}; + +JEMALLOC_ALWAYS_INLINE void +dynamic_opts_init(dynamic_opts_t *dynamic_opts) { + dynamic_opts->result = NULL; + dynamic_opts->num_items = 0; + dynamic_opts->item_size = 0; + dynamic_opts->alignment = 0; + dynamic_opts->zero = false; + dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC; + dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC; +} + +/* ind is ignored if dopts->alignment > 0. */ +JEMALLOC_ALWAYS_INLINE void * +imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, + size_t size, size_t usize, szind_t ind) { + tcache_t *tcache; + arena_t *arena; + + /* Fill in the tcache. */ + if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) { + if (likely(!sopts->slow)) { + /* Getting tcache ptr unconditionally. */ + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else { + tcache = tcache_get(tsd); + } + } else if (dopts->tcache_ind == TCACHE_IND_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, dopts->tcache_ind); + } + + /* Fill in the arena. */ + if (dopts->arena_ind == ARENA_IND_AUTOMATIC) { + /* + * In case of automatic arena management, we defer arena + * computation until as late as we can, hoping to fill the + * allocation out of the tcache. 
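The TCACHE_IND_* and ARENA_IND_* constants above are the internal mirror of the public MALLOCX_* flag encodings. A hedged sketch of how a caller selects them through mallocx(), assuming the unprefixed API names:

    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* dopts->tcache_ind = TCACHE_IND_NONE: bypass the thread cache. */
        void *a = mallocx(256, MALLOCX_TCACHE_NONE);
        /* dopts->arena_ind = 0: pin the allocation to arena 0. */
        void *b = mallocx(256, MALLOCX_ARENA(0));
        dallocx(a, MALLOCX_TCACHE_NONE);
        dallocx(b, 0);
        return 0;
    }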
+ */ + arena = NULL; + } else { + arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true); + } + + if (unlikely(dopts->alignment != 0)) { + return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment, + dopts->zero, tcache, arena); + } + + return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false, + arena, sopts->slow); +} + +JEMALLOC_ALWAYS_INLINE void * +imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, + size_t usize, szind_t ind) { + void *ret; + + /* + * For small allocations, sampling bumps the usize. If so, we allocate + * from the ind_large bucket. + */ + szind_t ind_large; + size_t bumped_usize = usize; + + if (usize <= SMALL_MAXCLASS) { + assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) : + sz_sa2u(LARGE_MINCLASS, dopts->alignment)) + == LARGE_MINCLASS); + ind_large = sz_size2index(LARGE_MINCLASS); + bumped_usize = sz_s2u(LARGE_MINCLASS); + ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, + bumped_usize, ind_large); + if (unlikely(ret == NULL)) { + return NULL; + } + arena_prof_promote(tsd_tsdn(tsd), ret, usize); + } else { + ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind); + } + + return ret; +} + +/* + * Returns true if the allocation will overflow, and false otherwise. Sets + * *size to the product either way. + */ +JEMALLOC_ALWAYS_INLINE bool +compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, + size_t *size) { + /* + * This function is just num_items * item_size, except that we may have + * to check for overflow. + */ + + if (!may_overflow) { + assert(dopts->num_items == 1); + *size = dopts->item_size; + return false; + } + + /* A size_t with its high-half bits all set to 1. */ + static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2); + + *size = dopts->item_size * dopts->num_items; + + if (unlikely(*size == 0)) { + return (dopts->num_items != 0 && dopts->item_size != 0); + } + + /* + * We got a non-zero size, but we don't know if we overflowed to get + * there. To avoid having to do a divide, we'll be clever and note that + * if both A and B can be represented in N/2 bits, then their product + * can be represented in N bits (without the possibility of overflow). + */ + if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) { + return false; + } + if (likely(*size / dopts->item_size == dopts->num_items)) { + return false; + } + return true; +} + +JEMALLOC_ALWAYS_INLINE int +imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { + /* Where the actual allocated memory will live. */ + void *allocation = NULL; + /* Filled in by compute_size_with_overflow below. */ + size_t size = 0; + /* + * For unaligned allocations, we need only ind. For aligned + * allocations, or in case of stats or profiling we need usize. + * + * These are actually dead stores, in that their values are reset before + * any branch on their value is taken. Sometimes though, it's + * convenient to pass them as arguments before this point. To avoid + * undefined behavior then, we initialize them with dummy stores. + */ + szind_t ind = 0; + size_t usize = 0; + + /* Reentrancy is only checked on slow path. */ + int8_t reentrancy_level; + + /* Compute the amount of memory the user wants. */ + if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, + &size))) { + goto label_oom; + } + + /* Validate the user input. 
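The half-word trick in compute_size_with_overflow() deserves a standalone illustration: if both factors fit in the low half of a size_t, their product cannot wrap, so the divide is only needed in the rare mixed case. A self-contained version, with SIZE_MAX standing in for the internal SIZE_T_MAX:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static int
    mul_overflows(size_t n, size_t sz, size_t *out) {
        static const size_t high_bits = SIZE_MAX << (sizeof(size_t) * 8 / 2);

        *out = n * sz;
        if (*out == 0) {
            /* A zero product overflowed only if neither factor was zero. */
            return n != 0 && sz != 0;
        }
        if ((high_bits & (n | sz)) == 0) {
            return 0;   /* both factors fit in a half-word: no wrap possible */
        }
        return *out / sz != n;   /* rare case: fall back to one divide */
    }

    int main(void) {
        size_t r;
        assert(!mul_overflows(1000, 1000, &r) && r == 1000 * 1000);
        assert(mul_overflows(SIZE_MAX / 2, 3, &r));
        assert(!mul_overflows(0, SIZE_MAX, &r) && r == 0);
        return 0;
    }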
*/ + if (sopts->bump_empty_alloc) { + if (unlikely(size == 0)) { + size = 1; + } + } + + if (sopts->assert_nonempty_alloc) { + assert (size != 0); + } + + if (unlikely(dopts->alignment < sopts->min_alignment + || (dopts->alignment & (dopts->alignment - 1)) != 0)) { + goto label_invalid_alignment; + } + + /* This is the beginning of the "core" algorithm. */ + + if (dopts->alignment == 0) { + ind = sz_size2index(size); + if (unlikely(ind >= NSIZES)) { + goto label_oom; + } + if (config_stats || (config_prof && opt_prof)) { + usize = sz_index2size(ind); + assert(usize > 0 && usize <= LARGE_MAXCLASS); + } + } else { + usize = sz_sa2u(size, dopts->alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + goto label_oom; + } + } + + check_entry_exit_locking(tsd_tsdn(tsd)); + + /* + * If we need to handle reentrancy, we can do it out of a + * known-initialized arena (i.e. arena 0). + */ + reentrancy_level = tsd_reentrancy_level_get(tsd); + if (sopts->slow && unlikely(reentrancy_level > 0)) { + /* + * We should never specify particular arenas or tcaches from + * within our internal allocations. + */ + assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC || + dopts->tcache_ind == TCACHE_IND_NONE); + assert(dopts->arena_ind == ARENA_IND_AUTOMATIC); + dopts->tcache_ind = TCACHE_IND_NONE; + /* We know that arena 0 has already been initialized. */ + dopts->arena_ind = 0; + } + + /* If profiling is on, get our profiling context. */ + if (config_prof && opt_prof) { + /* + * Note that if we're going down this path, usize must have been + * initialized in the previous if statement. + */ + prof_tctx_t *tctx = prof_alloc_prep( + tsd, usize, prof_active_get_unlocked(), true); + + alloc_ctx_t alloc_ctx; + if (likely((uintptr_t)tctx == (uintptr_t)1U)) { + alloc_ctx.slab = (usize <= SMALL_MAXCLASS); + allocation = imalloc_no_sample( + sopts, dopts, tsd, usize, usize, ind); + } else if ((uintptr_t)tctx > (uintptr_t)1U) { + /* + * Note that ind might still be 0 here. This is fine; + * imalloc_sample ignores ind if dopts->alignment > 0. + */ + allocation = imalloc_sample( + sopts, dopts, tsd, usize, ind); + alloc_ctx.slab = false; + } else { + allocation = NULL; + } + + if (unlikely(allocation == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + goto label_oom; + } + prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx); + } else { + /* + * If dopts->alignment > 0, then ind is still 0, but usize was + * computed in the previous if statement. Down the positive + * alignment path, imalloc_no_sample ignores ind and size + * (relying only on usize). + */ + allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize, + ind); + if (unlikely(allocation == NULL)) { + goto label_oom; + } + } + + /* + * Allocation has been done at this point. We still have some + * post-allocation work to do though. + */ + assert(dopts->alignment == 0 + || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0)); + + if (config_stats) { + assert(usize == isalloc(tsd_tsdn(tsd), allocation)); + *tsd_thread_allocatedp_get(tsd) += usize; + } + + if (sopts->slow) { + UTRACE(0, size, allocation); + } + + /* Success! 
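The prof_alloc_prep()/prof_malloc() calls above only fire when heap profiling is compiled in and enabled. A hedged usage sketch, assuming the unprefixed API:

    #include <stdlib.h>
    #include <jemalloc/jemalloc.h>

    /* Run as: MALLOC_CONF="prof:true,lg_prof_sample:19" ./a.out
     * (samples roughly one allocation per 512 KiB of activity). */
    int main(void) {
        void *p = malloc(1 << 20);
        /* Write a heap profile now; returns nonzero unless prof:true. */
        mallctl("prof.dump", NULL, NULL, NULL, 0);
        free(p);
        return 0;
    }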
*/ + check_entry_exit_locking(tsd_tsdn(tsd)); + *dopts->result = allocation; + return 0; + +label_oom: + if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(sopts->oom_string); + abort(); + } + + if (sopts->slow) { + UTRACE(NULL, size, NULL); + } + + check_entry_exit_locking(tsd_tsdn(tsd)); + + if (sopts->set_errno_on_error) { + set_errno(ENOMEM); + } + + if (sopts->null_out_result_on_error) { + *dopts->result = NULL; + } + + return ENOMEM; + + /* + * This label is only jumped to by one goto; we move it out of line + * anyways to avoid obscuring the non-error paths, and for symmetry with + * the oom case. + */ +label_invalid_alignment: + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(sopts->invalid_alignment_string); + abort(); + } + + if (sopts->set_errno_on_error) { + set_errno(EINVAL); + } + + if (sopts->slow) { + UTRACE(NULL, size, NULL); + } + + check_entry_exit_locking(tsd_tsdn(tsd)); + + if (sopts->null_out_result_on_error) { + *dopts->result = NULL; + } + + return EINVAL; +} + +/* Returns the errno-style error code of the allocation. */ +JEMALLOC_ALWAYS_INLINE int +imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { + if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(sopts->oom_string); + abort(); + } + UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); + set_errno(ENOMEM); + *dopts->result = NULL; + + return ENOMEM; + } + + /* We always need the tsd. Let's grab it right away. */ + tsd_t *tsd = tsd_fetch(); + assert(tsd); + if (likely(tsd_fast(tsd))) { + /* Fast and common path. */ + tsd_assert_fast(tsd); + sopts->slow = false; + return imalloc_body(sopts, dopts, tsd); + } else { + sopts->slow = true; + return imalloc_body(sopts, dopts, tsd); + } +} +/******************************************************************************/ +/* + * Begin malloc(3)-compatible functions. 
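The label_oom and label_invalid_alignment exits above give imalloc() an errno-style contract that each public wrapper surfaces differently: malloc() returns NULL and sets errno, while posix_memalign() returns the code directly. A small demonstration, assuming jemalloc (or any POSIX-conforming allocator) is linked:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        void *p = NULL;
        /* alignment 3 is below sizeof(void *) and not a power of two, so
         * this takes label_invalid_alignment and returns EINVAL. */
        int err = posix_memalign(&p, 3, 64);
        printf("posix_memalign: %d (EINVAL=%d)\n", err, EINVAL);

        errno = 0;
        void *q = malloc((size_t)-1);   /* hopeless request */
        if (q == NULL) {
            printf("malloc: errno=%d (ENOMEM=%d)\n", errno, ENOMEM);
        }
        return 0;
    }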
+ */
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
+je_malloc(size_t size) {
+	void *ret;
+	static_opts_t sopts;
+	dynamic_opts_t dopts;
+
+	LOG("core.malloc.entry", "size: %zu", size);
+
+	static_opts_init(&sopts);
+	dynamic_opts_init(&dopts);
+
+	sopts.bump_empty_alloc = true;
+	sopts.null_out_result_on_error = true;
+	sopts.set_errno_on_error = true;
+	sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
+
+	dopts.result = &ret;
+	dopts.num_items = 1;
+	dopts.item_size = size;
+
+	imalloc(&sopts, &dopts);
+
+	LOG("core.malloc.exit", "result: %p", ret);
+
+	return ret;
+}
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+JEMALLOC_ATTR(nonnull(1))
+je_posix_memalign(void **memptr, size_t alignment, size_t size) {
+	int ret;
+	static_opts_t sopts;
+	dynamic_opts_t dopts;
+
+	LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
+	    "size: %zu", memptr, alignment, size);
+
+	static_opts_init(&sopts);
+	dynamic_opts_init(&dopts);
+
+	sopts.bump_empty_alloc = true;
+	sopts.min_alignment = sizeof(void *);
+	sopts.oom_string =
+	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
+	sopts.invalid_alignment_string =
+	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
+
+	dopts.result = memptr;
+	dopts.num_items = 1;
+	dopts.item_size = size;
+	dopts.alignment = alignment;
+
+	ret = imalloc(&sopts, &dopts);
+
+	LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
+	    *memptr);
+
+	return ret;
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
+je_aligned_alloc(size_t alignment, size_t size) {
+	void *ret;
+
+	static_opts_t sopts;
+	dynamic_opts_t dopts;
+
+	LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
+	    alignment, size);
+
+	static_opts_init(&sopts);
+	dynamic_opts_init(&dopts);
+
+	sopts.bump_empty_alloc = true;
+	sopts.null_out_result_on_error = true;
+	sopts.set_errno_on_error = true;
+	sopts.min_alignment = 1;
+	sopts.oom_string =
+	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
+	sopts.invalid_alignment_string =
+	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
+
+	dopts.result = &ret;
+	dopts.num_items = 1;
+	dopts.item_size = size;
+	dopts.alignment = alignment;
+
+	imalloc(&sopts, &dopts);
+
+	LOG("core.aligned_alloc.exit", "result: %p", ret);
+
+	return ret;
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
+je_calloc(size_t num, size_t size) {
+	void *ret;
+	static_opts_t sopts;
+	dynamic_opts_t dopts;
+
+	LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);
+
+	static_opts_init(&sopts);
+	dynamic_opts_init(&dopts);
+
+	sopts.may_overflow = true;
+	sopts.bump_empty_alloc = true;
+	sopts.null_out_result_on_error = true;
+	sopts.set_errno_on_error = true;
+	sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
+
+	dopts.result = &ret;
+	dopts.num_items = num;
+	dopts.item_size = size;
+	dopts.zero = true;
+
+	imalloc(&sopts, &dopts);
+
+	LOG("core.calloc.exit", "result: %p", ret);
+
+	return ret;
+}
+
+static void *
+irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
+    prof_tctx_t *tctx) {
+	void *p;
+
+	if (tctx == NULL) {
+		return NULL;
+	}
+	if (usize <= SMALL_MAXCLASS) {
+		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
+		if (p == NULL) {
+			return NULL;
+		}
+		arena_prof_promote(tsd_tsdn(tsd), p, usize);
+	}
else { + p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); + } + + return p; +} + +JEMALLOC_ALWAYS_INLINE void * +irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, + alloc_ctx_t *alloc_ctx) { + void *p; + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + prof_active = prof_active_get_unlocked(); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); + tctx = prof_alloc_prep(tsd, usize, prof_active, true); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); + } else { + p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); + } + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + return NULL; + } + prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, + old_tctx); + + return p; +} + +JEMALLOC_ALWAYS_INLINE void +ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { + if (!slow_path) { + tsd_assert_fast(tsd); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + if (tsd_reentrancy_level_get(tsd) != 0) { + assert(slow_path); + } + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + + size_t usize; + if (config_prof && opt_prof) { + usize = sz_index2size(alloc_ctx.szind); + prof_free(tsd, ptr, usize, &alloc_ctx); + } else if (config_stats) { + usize = sz_index2size(alloc_ctx.szind); + } + if (config_stats) { + *tsd_thread_deallocatedp_get(tsd) += usize; + } + + if (likely(!slow_path)) { + idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, + false); + } else { + idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, + true); + } +} + +JEMALLOC_ALWAYS_INLINE void +isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { + if (!slow_path) { + tsd_assert_fast(tsd); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + if (tsd_reentrancy_level_get(tsd) != 0) { + assert(slow_path); + } + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + alloc_ctx_t alloc_ctx, *ctx; + if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) { + /* + * When cache_oblivious is disabled and ptr is not page aligned, + * the allocation was not sampled -- usize can be used to + * determine szind directly. 
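isfree() above is the sized-deallocation path: because the caller vouches for the size, the common case can often skip the radix-tree metadata lookup. Its public face is sdallocx(); a hedged pairing sketch (unprefixed API assumed):

    #include <jemalloc/jemalloc.h>

    int main(void) {
        void *p = mallocx(1000, 0);
        /* The size handed to sdallocx() must be the original request (or a
         * value in the same size class, e.g. nallocx(1000, 0)). */
        sdallocx(p, 1000, 0);
        return 0;
    }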
+ */ + alloc_ctx.szind = sz_size2index(usize); + alloc_ctx.slab = true; + ctx = &alloc_ctx; + if (config_debug) { + alloc_ctx_t dbg_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, + rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind, + &dbg_ctx.slab); + assert(dbg_ctx.szind == alloc_ctx.szind); + assert(dbg_ctx.slab == alloc_ctx.slab); + } + } else if (config_prof && opt_prof) { + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind == sz_size2index(usize)); + ctx = &alloc_ctx; + } else { + ctx = NULL; + } + + if (config_prof && opt_prof) { + prof_free(tsd, ptr, usize, ctx); + } + if (config_stats) { + *tsd_thread_deallocatedp_get(tsd) += usize; + } + + if (likely(!slow_path)) { + isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false); + } else { + isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); + } +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +je_realloc(void *ptr, size_t size) { + void *ret; + tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + size_t old_usize = 0; + + LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); + + if (unlikely(size == 0)) { + if (ptr != NULL) { + /* realloc(ptr, 0) is equivalent to free(ptr). */ + UTRACE(ptr, 0, 0); + tcache_t *tcache; + tsd_t *tsd = tsd_fetch(); + if (tsd_reentrancy_level_get(tsd) == 0) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + ifree(tsd, ptr, tcache, true); + + LOG("core.realloc.exit", "result: %p", NULL); + return NULL; + } + size = 1; + } + + if (likely(ptr != NULL)) { + assert(malloc_initialized() || IS_INITIALIZER); + tsd_t *tsd = tsd_fetch(); + + check_entry_exit_locking(tsd_tsdn(tsd)); + + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + old_usize = sz_index2size(alloc_ctx.szind); + assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); + if (config_prof && opt_prof) { + usize = sz_s2u(size); + ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ? + NULL : irealloc_prof(tsd, ptr, old_usize, usize, + &alloc_ctx); + } else { + if (config_stats) { + usize = sz_s2u(size); + } + ret = iralloc(tsd, ptr, old_usize, size, 0, false); + } + tsdn = tsd_tsdn(tsd); + } else { + /* realloc(NULL, size) is equivalent to malloc(size). */ + void *ret = je_malloc(size); + LOG("core.realloc.exit", "result: %p", ret); + return ret; + } + + if (unlikely(ret == NULL)) { + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(": Error in realloc(): " + "out of memory\n"); + abort(); + } + set_errno(ENOMEM); + } + if (config_stats && likely(ret != NULL)) { + tsd_t *tsd; + + assert(usize == isalloc(tsdn, ret)); + tsd = tsdn_tsd(tsdn); + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; + } + UTRACE(ptr, size, ret); + check_entry_exit_locking(tsdn); + + LOG("core.realloc.exit", "result: %p", ret); + return ret; +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_free(void *ptr) { + LOG("core.free.entry", "ptr: %p", ptr); + + UTRACE(ptr, 0, 0); + if (likely(ptr != NULL)) { + /* + * We avoid setting up tsd fully (e.g. 
tcache, arena binding) + * based on only free() calls -- other activities trigger the + * minimal to full transition. This is because free() may + * happen during thread shutdown after tls deallocation: if a + * thread never had any malloc activities until then, a + * fully-setup tsd won't be destructed properly. + */ + tsd_t *tsd = tsd_fetch_min(); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache; + if (likely(tsd_fast(tsd))) { + tsd_assert_fast(tsd); + /* Unconditionally get tcache ptr on fast path. */ + tcache = tsd_tcachep_get(tsd); + ifree(tsd, ptr, tcache, false); + } else { + if (likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + ifree(tsd, ptr, tcache, true); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + } + LOG("core.free.exit", ""); +} + +/* + * End malloc(3)-compatible functions. + */ +/******************************************************************************/ +/* + * Begin non-standard override functions. + */ + +#ifdef JEMALLOC_OVERRIDE_MEMALIGN +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) +je_memalign(size_t alignment, size_t size) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment, + size); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.min_alignment = 1; + sopts.oom_string = + ": Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + ": Error allocating aligned memory: invalid alignment\n"; + sopts.null_out_result_on_error = true; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + dopts.alignment = alignment; + + imalloc(&sopts, &dopts); + + LOG("core.memalign.exit", "result: %p", ret); + return ret; +} +#endif + +#ifdef JEMALLOC_OVERRIDE_VALLOC +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) +je_valloc(size_t size) { + void *ret; + + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.valloc.entry", "size: %zu\n", size); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.min_alignment = PAGE; + sopts.oom_string = + ": Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + ": Error allocating aligned memory: invalid alignment\n"; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + dopts.alignment = PAGE; + + imalloc(&sopts, &dopts); + + LOG("core.valloc.exit", "result: %p\n", ret); + return ret; +} +#endif + +#if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) +/* + * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible + * to inconsistently reference libc's malloc(3)-compatible functions + * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). + * + * These definitions interpose hooks in glibc. The functions are actually + * passed an extra argument for the caller return address, which will be + * ignored. 
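Whichever interposition route takes effect (the hook variables, the __libc_* aliases below, or plain symbol replacement), the result can be verified at run time through the mallctl interface that appears later in this file. A small check, assuming the unprefixed API:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        const char *v;
        size_t len = sizeof(v);
        /* "version" is a documented read-only mallctl name. */
        if (mallctl("version", &v, &len, NULL, 0) == 0) {
            printf("allocations are served by jemalloc %s\n", v);
        }
        return 0;
    }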
+ */ +JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; +JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; +JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; +# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK +JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = + je_memalign; +# endif + +# ifdef CPU_COUNT +/* + * To enable static linking with glibc, the libc specific malloc interface must + * be implemented also, so none of glibc's malloc.o functions are added to the + * link. + */ +# define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) +/* To force macro expansion of je_ prefix before stringification. */ +# define PREALIAS(je_fn) ALIAS(je_fn) +# ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC +void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_FREE +void __libc_free(void* ptr) PREALIAS(je_free); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC +void *__libc_malloc(size_t size) PREALIAS(je_malloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN +void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC +void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC +void *__libc_valloc(size_t size) PREALIAS(je_valloc); +# endif +# ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN +int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign); +# endif +# undef PREALIAS +# undef ALIAS +# endif +#endif + +/* + * End non-standard override functions. + */ +/******************************************************************************/ +/* + * Begin non-standard functions. + */ + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) +je_mallocx(size_t size, int flags) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags); + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.assert_nonempty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.oom_string = ": Error in mallocx(): out of memory\n"; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + if (unlikely(flags != 0)) { + if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { + dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); + } + + dopts.zero = MALLOCX_ZERO_GET(flags); + + if ((flags & MALLOCX_TCACHE_MASK) != 0) { + if ((flags & MALLOCX_TCACHE_MASK) + == MALLOCX_TCACHE_NONE) { + dopts.tcache_ind = TCACHE_IND_NONE; + } else { + dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); + } + } else { + dopts.tcache_ind = TCACHE_IND_AUTOMATIC; + } + + if ((flags & MALLOCX_ARENA_MASK) != 0) + dopts.arena_ind = MALLOCX_ARENA_GET(flags); + } + + imalloc(&sopts, &dopts); + + LOG("core.mallocx.exit", "result: %p", ret); + return ret; +} + +static void * +irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, + size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, + prof_tctx_t *tctx) { + void *p; + + if (tctx == NULL) { + return NULL; + } + if (usize <= SMALL_MAXCLASS) { + p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS, + alignment, zero, tcache, arena); + if (p == NULL) { + return NULL; + } + arena_prof_promote(tsdn, p, usize); + } else { + p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, + tcache, arena); + } + + return p; +} + +JEMALLOC_ALWAYS_INLINE void * 
+irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, + size_t alignment, size_t *usize, bool zero, tcache_t *tcache, + arena_t *arena, alloc_ctx_t *alloc_ctx) { + void *p; + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + prof_active = prof_active_get_unlocked(); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); + tctx = prof_alloc_prep(tsd, *usize, prof_active, false); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, + *usize, alignment, zero, tcache, arena, tctx); + } else { + p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, + zero, tcache, arena); + } + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, false); + return NULL; + } + + if (p == old_ptr && alignment != 0) { + /* + * The allocation did not move, so it is possible that the size + * class is smaller than would guarantee the requested + * alignment, and that the alignment constraint was + * serendipitously satisfied. Additionally, old_usize may not + * be the same as the current usize because of in-place large + * reallocation. Therefore, query the actual value of usize. + */ + *usize = isalloc(tsd_tsdn(tsd), p); + } + prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, + old_usize, old_tctx); + + return p; +} + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +je_rallocx(void *ptr, size_t size, int flags) { + void *p; + tsd_t *tsd; + size_t usize; + size_t old_usize; + size_t alignment = MALLOCX_ALIGN_GET(flags); + bool zero = flags & MALLOCX_ZERO; + arena_t *arena; + tcache_t *tcache; + + LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, + size, flags); + + + assert(ptr != NULL); + assert(size != 0); + assert(malloc_initialized() || IS_INITIALIZER); + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + + if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { + unsigned arena_ind = MALLOCX_ARENA_GET(flags); + arena = arena_get(tsd_tsdn(tsd), arena_ind, true); + if (unlikely(arena == NULL)) { + goto label_oom; + } + } else { + arena = NULL; + } + + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } + } else { + tcache = tcache_get(tsd); + } + + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + old_usize = sz_index2size(alloc_ctx.szind); + assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); + if (config_prof && opt_prof) { + usize = (alignment == 0) ? 
+ sz_s2u(size) : sz_sa2u(size, alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + goto label_oom; + } + p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, + zero, tcache, arena, &alloc_ctx); + if (unlikely(p == NULL)) { + goto label_oom; + } + } else { + p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment, + zero, tcache, arena); + if (unlikely(p == NULL)) { + goto label_oom; + } + if (config_stats) { + usize = isalloc(tsd_tsdn(tsd), p); + } + } + assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); + + if (config_stats) { + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; + } + UTRACE(ptr, size, p); + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.rallocx.exit", "result: %p", p); + return p; +label_oom: + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(": Error in rallocx(): out of memory\n"); + abort(); + } + UTRACE(ptr, size, 0); + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.rallocx.exit", "result: %p", NULL); + return NULL; +} + +JEMALLOC_ALWAYS_INLINE size_t +ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero) { + size_t usize; + + if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) { + return old_usize; + } + usize = isalloc(tsdn, ptr); + + return usize; +} + +static size_t +ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { + size_t usize; + + if (tctx == NULL) { + return old_usize; + } + usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, + zero); + + return usize; +} + +JEMALLOC_ALWAYS_INLINE size_t +ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) { + size_t usize_max, usize; + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + prof_active = prof_active_get_unlocked(); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); + /* + * usize isn't knowable before ixalloc() returns when extra is non-zero. + * Therefore, compute its maximum possible value and use that in + * prof_alloc_prep() to decide whether to capture a backtrace. + * prof_realloc() will use the actual usize to decide whether to sample. + */ + if (alignment == 0) { + usize_max = sz_s2u(size+extra); + assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS); + } else { + usize_max = sz_sa2u(size+extra, alignment); + if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) { + /* + * usize_max is out of range, and chances are that + * allocation will fail, but use the maximum possible + * value and carry on with prof_alloc_prep(), just in + * case allocation succeeds. 
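ixallocx_prof() above backs the public xallocx(), whose contract is "resize in place or do nothing": it never moves the pointer and returns the resulting usable size. A typical caller pairs it with rallocx() as the moving fallback; a hedged sketch:

    #include <jemalloc/jemalloc.h>

    int main(void) {
        size_t want = 2 * 4096;
        void *p = mallocx(4096, 0);
        if (xallocx(p, want, 0, 0) < want) {
            /* In-place growth failed; rallocx() may move (or return NULL). */
            void *q = rallocx(p, want, 0);
            if (q != NULL) {
                p = q;
            }
        }
        dallocx(p, 0);
        return 0;
    }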
+ */ + usize_max = LARGE_MAXCLASS; + } + } + tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); + + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, + size, extra, alignment, zero, tctx); + } else { + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); + } + if (usize == old_usize) { + prof_alloc_rollback(tsd, tctx, false); + return usize; + } + prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, + old_tctx); + + return usize; +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +je_xallocx(void *ptr, size_t size, size_t extra, int flags) { + tsd_t *tsd; + size_t usize, old_usize; + size_t alignment = MALLOCX_ALIGN_GET(flags); + bool zero = flags & MALLOCX_ZERO; + + LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, " + "flags: %d", ptr, size, extra, flags); + + assert(ptr != NULL); + assert(size != 0); + assert(SIZE_T_MAX - size >= extra); + assert(malloc_initialized() || IS_INITIALIZER); + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + old_usize = sz_index2size(alloc_ctx.szind); + assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); + /* + * The API explicitly absolves itself of protecting against (size + + * extra) numerical overflow, but we may need to clamp extra to avoid + * exceeding LARGE_MAXCLASS. + * + * Ordinarily, size limit checking is handled deeper down, but here we + * have to check as part of (size + extra) clamping, since we need the + * clamped value in the above helper functions. 
+ */ + if (unlikely(size > LARGE_MAXCLASS)) { + usize = old_usize; + goto label_not_resized; + } + if (unlikely(LARGE_MAXCLASS - size < extra)) { + extra = LARGE_MAXCLASS - size; + } + + if (config_prof && opt_prof) { + usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, + alignment, zero, &alloc_ctx); + } else { + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); + } + if (unlikely(usize == old_usize)) { + goto label_not_resized; + } + + if (config_stats) { + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; + } +label_not_resized: + UTRACE(ptr, size, ptr); + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.xallocx.exit", "result: %zu", usize); + return usize; +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +JEMALLOC_ATTR(pure) +je_sallocx(const void *ptr, UNUSED int flags) { + size_t usize; + tsdn_t *tsdn; + + LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags); + + assert(malloc_initialized() || IS_INITIALIZER); + assert(ptr != NULL); + + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); + + if (config_debug || force_ivsalloc) { + usize = ivsalloc(tsdn, ptr); + assert(force_ivsalloc || usize != 0); + } else { + usize = isalloc(tsdn, ptr); + } + + check_entry_exit_locking(tsdn); + + LOG("core.sallocx.exit", "result: %zu", usize); + return usize; +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_dallocx(void *ptr, int flags) { + LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags); + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + tsd_t *tsd = tsd_fetch(); + bool fast = tsd_fast(tsd); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache; + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + /* Not allowed to be reentrant and specify a custom tcache. */ + assert(tsd_reentrancy_level_get(tsd) == 0); + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } + } else { + if (likely(fast)) { + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else { + if (likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + } + } + + UTRACE(ptr, 0, 0); + if (likely(fast)) { + tsd_assert_fast(tsd); + ifree(tsd, ptr, tcache, false); + } else { + ifree(tsd, ptr, tcache, true); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.dallocx.exit", ""); +} + +JEMALLOC_ALWAYS_INLINE size_t +inallocx(tsdn_t *tsdn, size_t size, int flags) { + check_entry_exit_locking(tsdn); + + size_t usize; + if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { + usize = sz_s2u(size); + } else { + usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); + } + check_entry_exit_locking(tsdn); + return usize; +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_sdallocx(void *ptr, size_t size, int flags) { + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr, + size, flags); + + tsd_t *tsd = tsd_fetch(); + bool fast = tsd_fast(tsd); + size_t usize = inallocx(tsd_tsdn(tsd), size, flags); + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache; + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + /* Not allowed to be reentrant and specify a custom tcache. 
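inallocx() above is the internal core of the public nallocx()/sdallocx() pair: nallocx() reports the usable size a request would receive without allocating, which is handy for accounting and for keeping sdallocx() sizes honest. A hedged sketch:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        size_t req = 1000;
        size_t usable = nallocx(req, 0);   /* e.g. 1024: the next size class */
        void *p = mallocx(req, 0);
        printf("requested %zu, usable %zu\n", req, usable);
        sdallocx(p, req, 0);
        return 0;
    }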
*/ + assert(tsd_reentrancy_level_get(tsd) == 0); + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } + } else { + if (likely(fast)) { + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else { + if (likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + } + } + + UTRACE(ptr, 0, 0); + if (likely(fast)) { + tsd_assert_fast(tsd); + isfree(tsd, ptr, usize, tcache, false); + } else { + isfree(tsd, ptr, usize, tcache, true); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.sdallocx.exit", ""); +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +JEMALLOC_ATTR(pure) +je_nallocx(size_t size, int flags) { + size_t usize; + tsdn_t *tsdn; + + assert(size != 0); + + if (unlikely(malloc_init())) { + LOG("core.nallocx.exit", "result: %zu", ZU(0)); + return 0; + } + + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); + + usize = inallocx(tsdn, size, flags); + if (unlikely(usize > LARGE_MAXCLASS)) { + LOG("core.nallocx.exit", "result: %zu", ZU(0)); + return 0; + } + + check_entry_exit_locking(tsdn); + LOG("core.nallocx.exit", "result: %zu", usize); + return usize; +} + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) { + int ret; + tsd_t *tsd; + + LOG("core.mallctl.entry", "name: %s", name); + + if (unlikely(malloc_init())) { + LOG("core.mallctl.exit", "result: %d", EAGAIN); + return EAGAIN; + } + + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.mallctl.exit", "result: %d", ret); + return ret; +} + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { + int ret; + + LOG("core.mallctlnametomib.entry", "name: %s", name); + + if (unlikely(malloc_init())) { + LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN); + return EAGAIN; + } + + tsd_t *tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + ret = ctl_nametomib(tsd, name, mibp, miblenp); + check_entry_exit_locking(tsd_tsdn(tsd)); + + LOG("core.mallctlnametomib.exit", "result: %d", ret); + return ret; +} + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) { + int ret; + tsd_t *tsd; + + LOG("core.mallctlbymib.entry", ""); + + if (unlikely(malloc_init())) { + LOG("core.mallctlbymib.exit", "result: %d", EAGAIN); + return EAGAIN; + } + + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); + check_entry_exit_locking(tsd_tsdn(tsd)); + LOG("core.mallctlbymib.exit", "result: %d", ret); + return ret; +} + +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts) { + tsdn_t *tsdn; + + LOG("core.malloc_stats_print.entry", ""); + + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); + stats_print(write_cb, cbopaque, opts); + check_entry_exit_locking(tsdn); + LOG("core.malloc_stats_print.exit", ""); +} + +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { + size_t ret; + tsdn_t *tsdn; + + LOG("core.malloc_usable_size.entry", "ptr: %p", ptr); + + assert(malloc_initialized() || IS_INITIALIZER); + + tsdn = 
tsdn_fetch(); + check_entry_exit_locking(tsdn); + + if (unlikely(ptr == NULL)) { + ret = 0; + } else { + if (config_debug || force_ivsalloc) { + ret = ivsalloc(tsdn, ptr); + assert(force_ivsalloc || ret != 0); + } else { + ret = isalloc(tsdn, ptr); + } + } + + check_entry_exit_locking(tsdn); + LOG("core.malloc_usable_size.exit", "result: %zu", ret); + return ret; +} + +/* + * End non-standard functions. + */ +/******************************************************************************/ +/* + * The following functions are used by threading libraries for protection of + * malloc during fork(). + */ + +/* + * If an application creates a thread before doing any allocation in the main + * thread, then calls fork(2) in the main thread followed by memory allocation + * in the child process, a race can occur that results in deadlock within the + * child: the main thread may have forked while the created thread had + * partially initialized the allocator. Ordinarily jemalloc prevents + * fork/malloc races via the following functions it registers during + * initialization using pthread_atfork(), but of course that does no good if + * the allocator isn't fully initialized at fork time. The following library + * constructor is a partial solution to this problem. It may still be possible + * to trigger the deadlock described above, but doing so would involve forking + * via a library constructor that runs before jemalloc's runs. + */ +#ifndef JEMALLOC_JET +JEMALLOC_ATTR(constructor) +static void +jemalloc_constructor(void) { + malloc_init(); +} +#endif + +#ifndef JEMALLOC_MUTEX_INIT_CB +void +jemalloc_prefork(void) +#else +JEMALLOC_EXPORT void +_malloc_prefork(void) +#endif +{ + tsd_t *tsd; + unsigned i, j, narenas; + arena_t *arena; + +#ifdef JEMALLOC_MUTEX_INIT_CB + if (!malloc_initialized()) { + return; + } +#endif + assert(malloc_initialized()); + + tsd = tsd_fetch(); + + narenas = narenas_total_get(); + + witness_prefork(tsd_witness_tsdp_get(tsd)); + /* Acquire all mutexes in a safe order. */ + ctl_prefork(tsd_tsdn(tsd)); + tcache_prefork(tsd_tsdn(tsd)); + malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); + if (have_background_thread) { + background_thread_prefork0(tsd_tsdn(tsd)); + } + prof_prefork0(tsd_tsdn(tsd)); + if (have_background_thread) { + background_thread_prefork1(tsd_tsdn(tsd)); + } + /* Break arena prefork into stages to preserve lock order. */ + for (i = 0; i < 8; i++) { + for (j = 0; j < narenas; j++) { + if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != + NULL) { + switch (i) { + case 0: + arena_prefork0(tsd_tsdn(tsd), arena); + break; + case 1: + arena_prefork1(tsd_tsdn(tsd), arena); + break; + case 2: + arena_prefork2(tsd_tsdn(tsd), arena); + break; + case 3: + arena_prefork3(tsd_tsdn(tsd), arena); + break; + case 4: + arena_prefork4(tsd_tsdn(tsd), arena); + break; + case 5: + arena_prefork5(tsd_tsdn(tsd), arena); + break; + case 6: + arena_prefork6(tsd_tsdn(tsd), arena); + break; + case 7: + arena_prefork7(tsd_tsdn(tsd), arena); + break; + default: not_reached(); + } + } + } + } + prof_prefork1(tsd_tsdn(tsd)); +} + +#ifndef JEMALLOC_MUTEX_INIT_CB +void +jemalloc_postfork_parent(void) +#else +JEMALLOC_EXPORT void +_malloc_postfork(void) +#endif +{ + tsd_t *tsd; + unsigned i, narenas; + +#ifdef JEMALLOC_MUTEX_INIT_CB + if (!malloc_initialized()) { + return; + } +#endif + assert(malloc_initialized()); + + tsd = tsd_fetch(); + + witness_postfork_parent(tsd_witness_tsdp_get(tsd)); + /* Release all mutexes, now that fork() has completed. 
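The staged prefork/postfork handlers above exist for exactly this scenario: one thread forks while another holds allocator locks. Without the pthread_atfork() registration, the child's first allocation could deadlock on a mutex whose owner no longer exists. An illustrative, deliberately hostile program (build with -pthread):

    #include <pthread.h>
    #include <stdlib.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static void *hammer(void *arg) {
        (void)arg;
        for (;;) {
            free(malloc(128));    /* keeps arena/tcache locks busy */
        }
        return NULL;
    }

    int main(void) {
        pthread_t t;
        pthread_create(&t, NULL, hammer, NULL);
        pid_t pid = fork();       /* prefork handlers quiesce the mutexes */
        if (pid == 0) {
            void *p = malloc(64); /* safe in the child thanks to postfork */
            free(p);
            _exit(0);
        }
        waitpid(pid, NULL, 0);
        return 0;
    }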
*/
+	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+		arena_t *arena;
+
+		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
+			arena_postfork_parent(tsd_tsdn(tsd), arena);
+		}
+	}
+	prof_postfork_parent(tsd_tsdn(tsd));
+	if (have_background_thread) {
+		background_thread_postfork_parent(tsd_tsdn(tsd));
+	}
+	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
+	tcache_postfork_parent(tsd_tsdn(tsd));
+	ctl_postfork_parent(tsd_tsdn(tsd));
+}
+
+void
+jemalloc_postfork_child(void) {
+	tsd_t *tsd;
+	unsigned i, narenas;
+
+	assert(malloc_initialized());
+
+	tsd = tsd_fetch();
+
+	witness_postfork_child(tsd_witness_tsdp_get(tsd));
+	/* Release all mutexes, now that fork() has completed. */
+	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+		arena_t *arena;
+
+		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
+			arena_postfork_child(tsd_tsdn(tsd), arena);
+		}
+	}
+	prof_postfork_child(tsd_tsdn(tsd));
+	if (have_background_thread) {
+		background_thread_postfork_child(tsd_tsdn(tsd));
+	}
+	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
+	tcache_postfork_child(tsd_tsdn(tsd));
+	ctl_postfork_child(tsd_tsdn(tsd));
+}
+
+/******************************************************************************/
+
+/*
+ * Helps the application decide whether a pointer is worth re-allocating in
+ * order to reduce fragmentation.
+ * Returns 0 if the allocation is in the currently active run, or if it is
+ * not causing any fragmentation issue (large or huge bin); otherwise returns
+ * the bin utilization and run utilization, both in fixed point 16:16.
+ * If the application decides to re-allocate, it should use
+ * MALLOCX_TCACHE_NONE when doing so.
+ */
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
+	assert(ptr != NULL);
+	return iget_defrag_hint(TSDN_NULL, ptr, bin_util, run_util);
+}
diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/jemalloc_cpp.cpp b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/jemalloc_cpp.cpp
new file mode 100644
index 0000000..f0cedda
--- /dev/null
+++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/jemalloc_cpp.cpp
@@ -0,0 +1,141 @@
+#include <mutex>
+#include <new>
+
+#define JEMALLOC_CPP_CPP_
+#ifdef __cplusplus
extern "C" {
+#endif
+
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+// All operators in this file are exported.
+
+// Possibly alias hidden versions of malloc and sdallocx to avoid an extra plt
+// thunk?
+//
+// extern __typeof (sdallocx) sdallocx_int
+//     __attribute ((alias ("sdallocx"),
+//         visibility ("hidden")));
+//
+// ... but it needs to work with jemalloc namespaces.
+
+void	*operator new(std::size_t size);
+void	*operator new[](std::size_t size);
+void	*operator new(std::size_t size, const std::nothrow_t &) noexcept;
+void	*operator new[](std::size_t size, const std::nothrow_t &) noexcept;
+void	operator delete(void *ptr) noexcept;
+void	operator delete[](void *ptr) noexcept;
+void	operator delete(void *ptr, const std::nothrow_t &) noexcept;
+void	operator delete[](void *ptr, const std::nothrow_t &) noexcept;
+
+#if __cpp_sized_deallocation >= 201309
+/* C++14's sized-delete operators. */
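The get_defrag_hint() entry point defined above (just before this jemalloc_cpp.cpp hunk) is what redis's active defragmentation polls, typically through the prefixed name je_get_defrag_hint. A hypothetical caller, with the helper name and policy invented for illustration and the declaration assumed visible:

    #include <string.h>
    #include <jemalloc/jemalloc.h>

    /* Move ptr only when the hint reports its run is emptier than the bin
     * average, i.e. when copying is likely to free a run. */
    void *maybe_defrag(void *ptr, size_t size) {
        int bin_util, run_util;
        if (get_defrag_hint(ptr, &bin_util, &run_util) == 0) {
            return ptr;            /* active run, or large/huge: leave it */
        }
        if (run_util > bin_util) {
            return ptr;            /* run already well utilized */
        }
        /* Re-allocate with MALLOCX_TCACHE_NONE, as the comment advises. */
        void *fresh = mallocx(size, MALLOCX_TCACHE_NONE);
        if (fresh == NULL) {
            return ptr;
        }
        memcpy(fresh, ptr, size);
        dallocx(ptr, MALLOCX_TCACHE_NONE);
        return fresh;
    }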
+void	operator delete(void *ptr, std::size_t size) noexcept;
+void	operator delete[](void *ptr, std::size_t size) noexcept;
+#endif
+
+JEMALLOC_NOINLINE
+static void *
+handleOOM(std::size_t size, bool nothrow) {
+	void *ptr = nullptr;
+
+	while (ptr == nullptr) {
+		std::new_handler handler;
+		// GCC-4.8 and clang 4.0 do not have std::get_new_handler.
+		{
+			static std::mutex mtx;
+			std::lock_guard<std::mutex> lock(mtx);
+
+			handler = std::set_new_handler(nullptr);
+			std::set_new_handler(handler);
+		}
+		if (handler == nullptr)
+			break;
+
+		try {
+			handler();
+		} catch (const std::bad_alloc &) {
+			break;
+		}
+
+		ptr = je_malloc(size);
+	}
+
+	if (ptr == nullptr && !nothrow)
+		std::__throw_bad_alloc();
+	return ptr;
+}
+
+template <bool IsNoExcept>
+JEMALLOC_ALWAYS_INLINE
+void *
+newImpl(std::size_t size) noexcept(IsNoExcept) {
+	void *ptr = je_malloc(size);
+	if (likely(ptr != nullptr))
+		return ptr;
+
+	return handleOOM(size, IsNoExcept);
+}
+
+void *
+operator new(std::size_t size) {
+	return newImpl<false>(size);
+}
+
+void *
+operator new[](std::size_t size) {
+	return newImpl<false>(size);
+}
+
+void *
+operator new(std::size_t size, const std::nothrow_t &) noexcept {
+	return newImpl<true>(size);
+}
+
+void *
+operator new[](std::size_t size, const std::nothrow_t &) noexcept {
+	return newImpl<true>(size);
+}
+
+void
+operator delete(void *ptr) noexcept {
+	je_free(ptr);
+}
+
+void
+operator delete[](void *ptr) noexcept {
+	je_free(ptr);
+}
+
+void
+operator delete(void *ptr, const std::nothrow_t &) noexcept {
+	je_free(ptr);
+}
+
+void operator delete[](void *ptr, const std::nothrow_t &) noexcept {
+	je_free(ptr);
+}
+
+#if __cpp_sized_deallocation >= 201309
+
+void
+operator delete(void *ptr, std::size_t size) noexcept {
+	if (unlikely(ptr == nullptr)) {
+		return;
+	}
+	je_sdallocx(ptr, size, /*flags=*/0);
+}
+
+void operator delete[](void *ptr, std::size_t size) noexcept {
+	if (unlikely(ptr == nullptr)) {
+		return;
+	}
+	je_sdallocx(ptr, size, /*flags=*/0);
+}
+
+#endif  // __cpp_sized_deallocation
diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/large.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/large.c
new file mode 100644
index 0000000..27a2c67
--- /dev/null
+++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/large.c
@@ -0,0 +1,371 @@
+#define JEMALLOC_LARGE_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/util.h"
+
+/******************************************************************************/
+
+void *
+large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
+	assert(usize == sz_s2u(usize));
+
+	return large_palloc(tsdn, arena, usize, CACHELINE, zero);
+}
+
+void *
+large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+    bool zero) {
+	size_t ausize;
+	extent_t *extent;
+	bool is_zeroed;
+	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
+
+	assert(!tsdn_null(tsdn) || arena != NULL);
+
+	ausize = sz_sa2u(usize, alignment);
+	if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) {
+		return NULL;
+	}
+
+	if (config_fill && unlikely(opt_zero)) {
+		zero = true;
+	}
+	/*
+	 * Copy zero into is_zeroed and pass the copy when allocating the
+	 * extent, so that it is possible to make correct junk/zero fill
+	 * decisions below, even if is_zeroed ends up true when zero is false.
+ */ + is_zeroed = zero; + if (likely(!tsdn_null(tsdn))) { + arena = arena_choose(tsdn_tsd(tsdn), arena); + } + if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn, + arena, usize, alignment, &is_zeroed)) == NULL) { + return NULL; + } + + /* See comments in arena_bin_slabs_full_insert(). */ + if (!arena_is_auto(arena)) { + /* Insert extent into large. */ + malloc_mutex_lock(tsdn, &arena->large_mtx); + extent_list_append(&arena->large, extent); + malloc_mutex_unlock(tsdn, &arena->large_mtx); + } + if (config_prof && arena_prof_accum(tsdn, arena, usize)) { + prof_idump(tsdn); + } + + if (zero) { + assert(is_zeroed); + } else if (config_fill && unlikely(opt_junk_alloc)) { + memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK, + extent_usize_get(extent)); + } + + arena_decay_tick(tsdn, arena); + return extent_addr_get(extent); +} + +static void +large_dalloc_junk_impl(void *ptr, size_t size) { + memset(ptr, JEMALLOC_FREE_JUNK, size); +} +large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl; + +static void +large_dalloc_maybe_junk_impl(void *ptr, size_t size) { + if (config_fill && have_dss && unlikely(opt_junk_free)) { + /* + * Only bother junk filling if the extent isn't about to be + * unmapped. + */ + if (opt_retain || (have_dss && extent_in_dss(ptr))) { + large_dalloc_junk(ptr, size); + } + } +} +large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk = + large_dalloc_maybe_junk_impl; + +static bool +large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) { + arena_t *arena = extent_arena_get(extent); + size_t oldusize = extent_usize_get(extent); + extent_hooks_t *extent_hooks = extent_hooks_get(arena); + size_t diff = extent_size_get(extent) - (usize + sz_large_pad); + + assert(oldusize > usize); + + if (extent_hooks->split == NULL) { + return true; + } + + /* Split excess pages. */ + if (diff != 0) { + extent_t *trail = extent_split_wrapper(tsdn, arena, + &extent_hooks, extent, usize + sz_large_pad, + sz_size2index(usize), false, diff, NSIZES, false); + if (trail == NULL) { + return true; + } + + if (config_fill && unlikely(opt_junk_free)) { + large_dalloc_maybe_junk(extent_addr_get(trail), + extent_size_get(trail)); + } + + arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail); + } + + arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize); + + return false; +} + +static bool +large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, + bool zero) { + arena_t *arena = extent_arena_get(extent); + size_t oldusize = extent_usize_get(extent); + extent_hooks_t *extent_hooks = extent_hooks_get(arena); + size_t trailsize = usize - oldusize; + + if (extent_hooks->merge == NULL) { + return true; + } + + if (config_fill && unlikely(opt_zero)) { + zero = true; + } + /* + * Copy zero into is_zeroed_trail and pass the copy when allocating the + * extent, so that it is possible to make correct junk/zero fill + * decisions below, even if is_zeroed_trail ends up true when zero is + * false. 
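The opt_junk_alloc/opt_junk_free decisions above implement the "junk" debugging option: in a fill-enabled build, freshly allocated large memory is painted with 0xa5 (JEMALLOC_ALLOC_JUNK) and freed memory with 0x5a (JEMALLOC_FREE_JUNK). A quick check, assuming fill support is compiled into the linked jemalloc:

    #include <stdio.h>
    #include <stdlib.h>

    /* Run as: MALLOC_CONF="junk:true" ./a.out */
    int main(void) {
        unsigned char *p = malloc(100000);    /* a large size class */
        printf("first byte: 0x%02x\n", p[0]); /* expect 0xa5 with junk:true */
        free(p);
        return 0;
    }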
+ */ + bool is_zeroed_trail = zero; + bool commit = true; + extent_t *trail; + bool new_mapping; + if ((trail = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_dirty, extent_past_get(extent), trailsize, 0, + CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL + || (trail = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_muzzy, extent_past_get(extent), trailsize, 0, + CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL) { + if (config_stats) { + new_mapping = false; + } + } else { + if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks, + extent_past_get(extent), trailsize, 0, CACHELINE, false, + NSIZES, &is_zeroed_trail, &commit)) == NULL) { + return true; + } + if (config_stats) { + new_mapping = true; + } + } + + if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) { + extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail); + return true; + } + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + szind_t szind = sz_size2index(usize); + extent_szind_set(extent, szind); + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_addr_get(extent), szind, false); + + if (config_stats && new_mapping) { + arena_stats_mapped_add(tsdn, &arena->stats, trailsize); + } + + if (zero) { + if (config_cache_oblivious) { + /* + * Zero the trailing bytes of the original allocation's + * last page, since they are in an indeterminate state. + * There will always be trailing bytes, because ptr's + * offset from the beginning of the extent is a multiple + * of CACHELINE in [0 .. PAGE). + */ + void *zbase = (void *) + ((uintptr_t)extent_addr_get(extent) + oldusize); + void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + + PAGE)); + size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; + assert(nzero > 0); + memset(zbase, 0, nzero); + } + assert(is_zeroed_trail); + } else if (config_fill && unlikely(opt_junk_alloc)) { + memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize), + JEMALLOC_ALLOC_JUNK, usize - oldusize); + } + + arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize); + + return false; +} + +bool +large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, + size_t usize_max, bool zero) { + size_t oldusize = extent_usize_get(extent); + + /* The following should have been caught by callers. */ + assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS); + /* Both allocation sizes must be large to avoid a move. */ + assert(oldusize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS); + + if (usize_max > oldusize) { + /* Attempt to expand the allocation in-place. */ + if (!large_ralloc_no_move_expand(tsdn, extent, usize_max, + zero)) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + /* Try again, this time with usize_min. */ + if (usize_min < usize_max && usize_min > oldusize && + large_ralloc_no_move_expand(tsdn, extent, usize_min, + zero)) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + } + + /* + * Avoid moving the allocation if the existing extent size accommodates + * the new size. + */ + if (oldusize >= usize_min && oldusize <= usize_max) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + + /* Attempt to shrink the allocation in-place. 
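+ * (Shrinking splits the now-unused trailing pages off as a separate + * extent, optionally junk-fills them, and hands them back to the + * arena's dirty extents; see large_ralloc_no_move_shrink() above.)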
*/ + if (oldusize > usize_max) { + if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + } + return true; +} + +static void * +large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero) { + if (alignment <= CACHELINE) { + return large_malloc(tsdn, arena, usize, zero); + } + return large_palloc(tsdn, arena, usize, alignment, zero); +} + +void * +large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, + size_t alignment, bool zero, tcache_t *tcache) { + size_t oldusize = extent_usize_get(extent); + + /* The following should have been caught by callers. */ + assert(usize > 0 && usize <= LARGE_MAXCLASS); + /* Both allocation sizes must be large to avoid a move. */ + assert(oldusize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS); + + /* Try to avoid moving the allocation. */ + if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) { + return extent_addr_get(extent); + } + + /* + * usize and old size are different enough that we need to use a + * different size class. In that case, fall back to allocating new + * space and copying. + */ + void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, + zero); + if (ret == NULL) { + return NULL; + } + + size_t copysize = (usize < oldusize) ? usize : oldusize; + memcpy(ret, extent_addr_get(extent), copysize); + isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true); + return ret; +} + +/* + * junked_locked indicates whether the extent's data have been junk-filled, and + * whether the arena's large_mtx is currently held. + */ +static void +large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + bool junked_locked) { + if (!junked_locked) { + /* See comments in arena_bin_slabs_full_insert(). 
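+ * (Roughly: only manually created arenas can be destroyed or reset, so + * only they need the arena->large list to enumerate live large extents; + * automatic arenas skip both the list and the large_mtx acquisition.)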
*/ + if (!arena_is_auto(arena)) { + malloc_mutex_lock(tsdn, &arena->large_mtx); + extent_list_remove(&arena->large, extent); + malloc_mutex_unlock(tsdn, &arena->large_mtx); + } + large_dalloc_maybe_junk(extent_addr_get(extent), + extent_usize_get(extent)); + } else { + malloc_mutex_assert_owner(tsdn, &arena->large_mtx); + if (!arena_is_auto(arena)) { + extent_list_remove(&arena->large, extent); + } + } + arena_extent_dalloc_large_prep(tsdn, arena, extent); +} + +static void +large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent); +} + +void +large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) { + large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true); +} + +void +large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) { + large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent); +} + +void +large_dalloc(tsdn_t *tsdn, extent_t *extent) { + arena_t *arena = extent_arena_get(extent); + large_dalloc_prep_impl(tsdn, arena, extent, false); + large_dalloc_finish_impl(tsdn, arena, extent); + arena_decay_tick(tsdn, arena); +} + +size_t +large_salloc(tsdn_t *tsdn, const extent_t *extent) { + return extent_usize_get(extent); +} + +prof_tctx_t * +large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) { + return extent_prof_tctx_get(extent); +} + +void +large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) { + extent_prof_tctx_set(extent, tctx); +} + +void +large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) { + large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/log.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/log.c new file mode 100644 index 0000000..778902f --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/log.c @@ -0,0 +1,78 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/log.h" + +char log_var_names[JEMALLOC_LOG_VAR_BUFSIZE]; +atomic_b_t log_init_done = ATOMIC_INIT(false); + +/* + * Returns a pointer to the first character after the end of the current + * segment, i.e. either the '|' delimiter or the terminating '\0'. + */ +static const char * +log_var_extract_segment(const char* segment_begin) { + const char *end; + for (end = segment_begin; *end != '\0' && *end != '|'; end++) { + } + return end; +} + +static bool +log_var_matches_segment(const char *segment_begin, const char *segment_end, + const char *log_var_begin, const char *log_var_end) { + assert(segment_begin <= segment_end); + assert(log_var_begin < log_var_end); + + ptrdiff_t segment_len = segment_end - segment_begin; + ptrdiff_t log_var_len = log_var_end - log_var_begin; + /* The special '.' segment matches everything. */ + if (segment_len == 1 && *segment_begin == '.') { + return true; + } + if (segment_len == log_var_len) { + return strncmp(segment_begin, log_var_begin, segment_len) == 0; + } else if (segment_len < log_var_len) { + return strncmp(segment_begin, log_var_begin, segment_len) == 0 + && log_var_begin[segment_len] == '.'; + } else { + return false; + } +} + +unsigned +log_var_update_state(log_var_t *log_var) { + const char *log_var_begin = log_var->name; + const char *log_var_end = log_var->name + strlen(log_var->name); + + /* Pointer to the beginning of the current segment.
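+ * Matching example (hypothetical conf value): with log_var_names == + * "prof|extent", the log var "prof.backtrace" is enabled via the segment + * "prof" (a full leading segment followed by '.'), whereas "profiling" + * is not, since the character after the matched prefix is not '.'.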
*/ + const char *segment_begin = log_var_names; + + /* + * If log_init_done is false, we haven't parsed the malloc conf yet. To + * avoid log-spew, we default to not displaying anything. + */ + if (!atomic_load_b(&log_init_done, ATOMIC_ACQUIRE)) { + return LOG_INITIALIZED_NOT_ENABLED; + } + + while (true) { + const char *segment_end = log_var_extract_segment( + segment_begin); + assert(segment_end < log_var_names + JEMALLOC_LOG_VAR_BUFSIZE); + if (log_var_matches_segment(segment_begin, segment_end, + log_var_begin, log_var_end)) { + atomic_store_u(&log_var->state, LOG_ENABLED, + ATOMIC_RELAXED); + return LOG_ENABLED; + } + if (*segment_end == '\0') { + /* Hit the end of the segment string with no match. */ + atomic_store_u(&log_var->state, + LOG_INITIALIZED_NOT_ENABLED, ATOMIC_RELAXED); + return LOG_INITIALIZED_NOT_ENABLED; + } + /* Otherwise, skip the delimiter and continue. */ + segment_begin = segment_end + 1; + } +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/malloc_io.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/malloc_io.c new file mode 100644 index 0000000..7bdc13f --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/malloc_io.c @@ -0,0 +1,676 @@ +#define JEMALLOC_MALLOC_IO_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/util.h" + +#ifdef assert +# undef assert +#endif +#ifdef not_reached +# undef not_reached +#endif +#ifdef not_implemented +# undef not_implemented +#endif +#ifdef assert_not_implemented +# undef assert_not_implemented +#endif + +/* + * Define simple versions of assertion macros that won't recurse in case + * of assertion failures in malloc_*printf(). + */ +#define assert(e) do { \ + if (config_debug && !(e)) { \ + malloc_write("<jemalloc>: Failed assertion\n"); \ + abort(); \ + } \ +} while (0) + +#define not_reached() do { \ + if (config_debug) { \ + malloc_write("<jemalloc>: Unreachable code reached\n"); \ + abort(); \ + } \ + unreachable(); \ +} while (0) + +#define not_implemented() do { \ + if (config_debug) { \ + malloc_write("<jemalloc>: Not implemented\n"); \ + abort(); \ + } \ +} while (0) + +#define assert_not_implemented(e) do { \ + if (unlikely(config_debug && !(e))) { \ + not_implemented(); \ + } \ +} while (0) + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static void wrtmessage(void *cbopaque, const char *s); +#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) +static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, + size_t *slen_p); +#define D2S_BUFSIZE (1 + U2S_BUFSIZE) +static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); +#define O2S_BUFSIZE (1 + U2S_BUFSIZE) +static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); +#define X2S_BUFSIZE (2 + U2S_BUFSIZE) +static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, + size_t *slen_p); + +/******************************************************************************/ + +/* malloc_message() setup. */ +static void +wrtmessage(void *cbopaque, const char *s) { + malloc_write_fd(STDERR_FILENO, s, strlen(s)); +} + +JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); + +/* + * Wrapper around malloc_message() that avoids the need for + * je_malloc_message(...) throughout the code.
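+ * (An embedder that assigns je_malloc_message before the first + * diagnostic is emitted redirects everything routed through + * malloc_write(); left unset, output falls back to wrtmessage() and + * STDERR_FILENO.)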
+ */ +void +malloc_write(const char *s) { + if (je_malloc_message != NULL) { + je_malloc_message(NULL, s); + } else { + wrtmessage(NULL, s); + } +} + +/* + * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so + * provide a wrapper. + */ +int +buferror(int err, char *buf, size_t buflen) { +#ifdef _WIN32 + FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, + (LPSTR)buf, (DWORD)buflen, NULL); + return 0; +#elif defined(JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE) && defined(_GNU_SOURCE) + char *b = strerror_r(err, buf, buflen); + if (b != buf) { + strncpy(buf, b, buflen); + buf[buflen-1] = '\0'; + } + return 0; +#else + return strerror_r(err, buf, buflen); +#endif +} + +uintmax_t +malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) { + uintmax_t ret, digit; + unsigned b; + bool neg; + const char *p, *ns; + + p = nptr; + if (base < 0 || base == 1 || base > 36) { + ns = p; + set_errno(EINVAL); + ret = UINTMAX_MAX; + goto label_return; + } + b = base; + + /* Swallow leading whitespace and get sign, if any. */ + neg = false; + while (true) { + switch (*p) { + case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': + p++; + break; + case '-': + neg = true; + /* Fall through. */ + case '+': + p++; + /* Fall through. */ + default: + goto label_prefix; + } + } + + /* Get prefix, if any. */ + label_prefix: + /* + * Note where the first non-whitespace/sign character is so that it is + * possible to tell whether any digits are consumed (e.g., " 0" vs. + * " -x"). + */ + ns = p; + if (*p == '0') { + switch (p[1]) { + case '0': case '1': case '2': case '3': case '4': case '5': + case '6': case '7': + if (b == 0) { + b = 8; + } + if (b == 8) { + p++; + } + break; + case 'X': case 'x': + switch (p[2]) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + if (b == 0) { + b = 16; + } + if (b == 16) { + p += 2; + } + break; + default: + break; + } + break; + default: + p++; + ret = 0; + goto label_return; + } + } + if (b == 0) { + b = 10; + } + + /* Convert. */ + ret = 0; + while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b) + || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b) + || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) { + uintmax_t pret = ret; + ret *= b; + ret += digit; + if (ret < pret) { + /* Overflow. */ + set_errno(ERANGE); + ret = UINTMAX_MAX; + goto label_return; + } + p++; + } + if (neg) { + ret = (uintmax_t)(-((intmax_t)ret)); + } + + if (p == ns) { + /* No conversion performed. */ + set_errno(EINVAL); + ret = UINTMAX_MAX; + goto label_return; + } + +label_return: + if (endptr != NULL) { + if (p == ns) { + /* No characters were converted. */ + *endptr = (char *)nptr; + } else { + *endptr = (char *)p; + } + } + return ret; +} + +static char * +u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) { + unsigned i; + + i = U2S_BUFSIZE - 1; + s[i] = '\0'; + switch (base) { + case 10: + do { + i--; + s[i] = "0123456789"[x % (uint64_t)10]; + x /= (uint64_t)10; + } while (x > 0); + break; + case 16: { + const char *digits = (uppercase) + ? "0123456789ABCDEF" + : "0123456789abcdef"; + + do { + i--; + s[i] = digits[x & 0xf]; + x >>= 4; + } while (x > 0); + break; + } default: { + const char *digits = (uppercase) + ? 
"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + : "0123456789abcdefghijklmnopqrstuvwxyz"; + + assert(base >= 2 && base <= 36); + do { + i--; + s[i] = digits[x % (uint64_t)base]; + x /= (uint64_t)base; + } while (x > 0); + }} + + *slen_p = U2S_BUFSIZE - 1 - i; + return &s[i]; +} + +static char * +d2s(intmax_t x, char sign, char *s, size_t *slen_p) { + bool neg; + + if ((neg = (x < 0))) { + x = -x; + } + s = u2s(x, 10, false, s, slen_p); + if (neg) { + sign = '-'; + } + switch (sign) { + case '-': + if (!neg) { + break; + } + /* Fall through. */ + case ' ': + case '+': + s--; + (*slen_p)++; + *s = sign; + break; + default: not_reached(); + } + return s; +} + +static char * +o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) { + s = u2s(x, 8, false, s, slen_p); + if (alt_form && *s != '0') { + s--; + (*slen_p)++; + *s = '0'; + } + return s; +} + +static char * +x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) { + s = u2s(x, 16, uppercase, s, slen_p); + if (alt_form) { + s -= 2; + (*slen_p) += 2; + memcpy(s, uppercase ? "0X" : "0x", 2); + } + return s; +} + +size_t +malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { + size_t i; + const char *f; + +#define APPEND_C(c) do { \ + if (i < size) { \ + str[i] = (c); \ + } \ + i++; \ +} while (0) +#define APPEND_S(s, slen) do { \ + if (i < size) { \ + size_t cpylen = (slen <= size - i) ? slen : size - i; \ + memcpy(&str[i], s, cpylen); \ + } \ + i += slen; \ +} while (0) +#define APPEND_PADDED_S(s, slen, width, left_justify) do { \ + /* Left padding. */ \ + size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \ + (size_t)width - slen : 0); \ + if (!left_justify && pad_len != 0) { \ + size_t j; \ + for (j = 0; j < pad_len; j++) { \ + APPEND_C(' '); \ + } \ + } \ + /* Value. */ \ + APPEND_S(s, slen); \ + /* Right padding. */ \ + if (left_justify && pad_len != 0) { \ + size_t j; \ + for (j = 0; j < pad_len; j++) { \ + APPEND_C(' '); \ + } \ + } \ +} while (0) +#define GET_ARG_NUMERIC(val, len) do { \ + switch (len) { \ + case '?': \ + val = va_arg(ap, int); \ + break; \ + case '?' | 0x80: \ + val = va_arg(ap, unsigned int); \ + break; \ + case 'l': \ + val = va_arg(ap, long); \ + break; \ + case 'l' | 0x80: \ + val = va_arg(ap, unsigned long); \ + break; \ + case 'q': \ + val = va_arg(ap, long long); \ + break; \ + case 'q' | 0x80: \ + val = va_arg(ap, unsigned long long); \ + break; \ + case 'j': \ + val = va_arg(ap, intmax_t); \ + break; \ + case 'j' | 0x80: \ + val = va_arg(ap, uintmax_t); \ + break; \ + case 't': \ + val = va_arg(ap, ptrdiff_t); \ + break; \ + case 'z': \ + val = va_arg(ap, ssize_t); \ + break; \ + case 'z' | 0x80: \ + val = va_arg(ap, size_t); \ + break; \ + case 'p': /* Synthetic; used for %p. */ \ + val = va_arg(ap, uintptr_t); \ + break; \ + default: \ + not_reached(); \ + val = 0; \ + } \ +} while (0) + + i = 0; + f = format; + while (true) { + switch (*f) { + case '\0': goto label_out; + case '%': { + bool alt_form = false; + bool left_justify = false; + bool plus_space = false; + bool plus_plus = false; + int prec = -1; + int width = -1; + unsigned char len = '?'; + char *s; + size_t slen; + + f++; + /* Flags. */ + while (true) { + switch (*f) { + case '#': + assert(!alt_form); + alt_form = true; + break; + case '-': + assert(!left_justify); + left_justify = true; + break; + case ' ': + assert(!plus_space); + plus_space = true; + break; + case '+': + assert(!plus_plus); + plus_plus = true; + break; + default: goto label_width; + } + f++; + } + /* Width. 
*/ + label_width: + switch (*f) { + case '*': + width = va_arg(ap, int); + f++; + if (width < 0) { + left_justify = true; + width = -width; + } + break; + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': { + uintmax_t uwidth; + set_errno(0); + uwidth = malloc_strtoumax(f, (char **)&f, 10); + assert(uwidth != UINTMAX_MAX || get_errno() != + ERANGE); + width = (int)uwidth; + break; + } default: + break; + } + /* Width/precision separator. */ + if (*f == '.') { + f++; + } else { + goto label_length; + } + /* Precision. */ + switch (*f) { + case '*': + prec = va_arg(ap, int); + f++; + break; + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': { + uintmax_t uprec; + set_errno(0); + uprec = malloc_strtoumax(f, (char **)&f, 10); + assert(uprec != UINTMAX_MAX || get_errno() != + ERANGE); + prec = (int)uprec; + break; + } + default: break; + } + /* Length. */ + label_length: + switch (*f) { + case 'l': + f++; + if (*f == 'l') { + len = 'q'; + f++; + } else { + len = 'l'; + } + break; + case 'q': case 'j': case 't': case 'z': + len = *f; + f++; + break; + default: break; + } + /* Conversion specifier. */ + switch (*f) { + case '%': + /* %% */ + APPEND_C(*f); + f++; + break; + case 'd': case 'i': { + intmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[D2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len); + s = d2s(val, (plus_plus ? '+' : (plus_space ? + ' ' : '-')), buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'o': { + uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[O2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len | 0x80); + s = o2s(val, alt_form, buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'u': { + uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[U2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len | 0x80); + s = u2s(val, 10, false, buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'x': case 'X': { + uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[X2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len | 0x80); + s = x2s(val, alt_form, *f == 'X', buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'c': { + unsigned char val; + char buf[2]; + + assert(len == '?' || len == 'l'); + assert_not_implemented(len != 'l'); + val = va_arg(ap, int); + buf[0] = val; + buf[1] = '\0'; + APPEND_PADDED_S(buf, 1, width, left_justify); + f++; + break; + } case 's': + assert(len == '?' || len == 'l'); + assert_not_implemented(len != 'l'); + s = va_arg(ap, char *); + slen = (prec < 0) ? strlen(s) : (size_t)prec; + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + case 'p': { + uintmax_t val; + char buf[X2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, 'p'); + s = x2s(val, true, false, buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } default: not_reached(); + } + break; + } default: { + APPEND_C(*f); + f++; + break; + }} + } + label_out: + if (i < size) { + str[i] = '\0'; + } else { + str[size - 1] = '\0'; + } + +#undef APPEND_C +#undef APPEND_S +#undef APPEND_PADDED_S +#undef GET_ARG_NUMERIC + return i; +} + +JEMALLOC_FORMAT_PRINTF(3, 4) +size_t +malloc_snprintf(char *str, size_t size, const char *format, ...) 
{ + size_t ret; + va_list ap; + + va_start(ap, format); + ret = malloc_vsnprintf(str, size, format, ap); + va_end(ap); + + return ret; +} + +void +malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, va_list ap) { + char buf[MALLOC_PRINTF_BUFSIZE]; + + if (write_cb == NULL) { + /* + * The caller did not provide an alternate write_cb callback + * function, so use the default one. malloc_write() is an + * inline function, so use malloc_message() directly here. + */ + write_cb = (je_malloc_message != NULL) ? je_malloc_message : + wrtmessage; + cbopaque = NULL; + } + + malloc_vsnprintf(buf, sizeof(buf), format, ap); + write_cb(cbopaque, buf); +} + +/* + * Print to a callback function in such a way as to (hopefully) avoid memory + * allocation. + */ +JEMALLOC_FORMAT_PRINTF(3, 4) +void +malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, ...) { + va_list ap; + + va_start(ap, format); + malloc_vcprintf(write_cb, cbopaque, format, ap); + va_end(ap); +} + +/* Print to stderr in such a way as to avoid memory allocation. */ +JEMALLOC_FORMAT_PRINTF(1, 2) +void +malloc_printf(const char *format, ...) { + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); +} + +/* + * Restore normal assertion macros, in order to make it possible to compile all + * C files as a single concatenation. + */ +#undef assert +#undef not_reached +#undef not_implemented +#undef assert_not_implemented +#include "jemalloc/internal/assert.h" diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/mb.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/mb.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/mb.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/mb.c diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/mutex.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/mutex.c new file mode 100644 index 0000000..30222b3 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/mutex.c @@ -0,0 +1,224 @@ +#define JEMALLOC_MUTEX_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/spin.h" + +#ifndef _CRT_SPINCOUNT +#define _CRT_SPINCOUNT 4000 +#endif + +/******************************************************************************/ +/* Data. */ + +#ifdef JEMALLOC_LAZY_LOCK +bool isthreaded = false; +#endif +#ifdef JEMALLOC_MUTEX_INIT_CB +static bool postpone_init = true; +static malloc_mutex_t *postponed_mutexes = NULL; +#endif + +/******************************************************************************/ +/* + * We intercept pthread_create() calls in order to toggle isthreaded if the + * process goes multi-threaded. 
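+ * (Under JEMALLOC_LAZY_LOCK, mutex operations are roughly no-ops while + * the process is single-threaded; the wrapper below flips isthreaded + * before the first additional thread can touch an allocator mutex.)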
+ */ + +#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) +JEMALLOC_EXPORT int +pthread_create(pthread_t *__restrict thread, + const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), + void *__restrict arg) { + return pthread_create_wrapper(thread, attr, start_routine, arg); +} +#endif + +/******************************************************************************/ + +#ifdef JEMALLOC_MUTEX_INIT_CB +JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, + void *(calloc_cb)(size_t, size_t)); +#endif + +void +malloc_mutex_lock_slow(malloc_mutex_t *mutex) { + mutex_prof_data_t *data = &mutex->prof_data; + UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER; + + if (ncpus == 1) { + goto label_spin_done; + } + + int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN; + do { + spin_cpu_spinwait(); + if (!malloc_mutex_trylock_final(mutex)) { + data->n_spin_acquired++; + return; + } + } while (cnt++ < max_cnt); + + if (!config_stats) { + /* Only spin is useful when stats is off. */ + malloc_mutex_lock_final(mutex); + return; + } +label_spin_done: + nstime_update(&before); + /* Copy before to after to avoid clock skews. */ + nstime_t after; + nstime_copy(&after, &before); + uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1, + ATOMIC_RELAXED) + 1; + /* One last try as above two calls may take quite some cycles. */ + if (!malloc_mutex_trylock_final(mutex)) { + atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED); + data->n_spin_acquired++; + return; + } + + /* True slow path. */ + malloc_mutex_lock_final(mutex); + /* Update more slow-path only counters. */ + atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED); + nstime_update(&after); + + nstime_t delta; + nstime_copy(&delta, &after); + nstime_subtract(&delta, &before); + + data->n_wait_times++; + nstime_add(&data->tot_wait_time, &delta); + if (nstime_compare(&data->max_wait_time, &delta) < 0) { + nstime_copy(&data->max_wait_time, &delta); + } + if (n_thds > data->max_n_thds) { + data->max_n_thds = n_thds; + } +} + +static void +mutex_prof_data_init(mutex_prof_data_t *data) { + memset(data, 0, sizeof(mutex_prof_data_t)); + nstime_init(&data->max_wait_time, 0); + nstime_init(&data->tot_wait_time, 0); + data->prev_owner = NULL; +} + +void +malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) { + malloc_mutex_assert_owner(tsdn, mutex); + mutex_prof_data_init(&mutex->prof_data); +} + +static int +mutex_addr_comp(const witness_t *witness1, void *mutex1, + const witness_t *witness2, void *mutex2) { + assert(mutex1 != NULL); + assert(mutex2 != NULL); + uintptr_t mu1int = (uintptr_t)mutex1; + uintptr_t mu2int = (uintptr_t)mutex2; + if (mu1int < mu2int) { + return -1; + } else if (mu1int == mu2int) { + return 0; + } else { + return 1; + } +} + +bool +malloc_mutex_init(malloc_mutex_t *mutex, const char *name, + witness_rank_t rank, malloc_mutex_lock_order_t lock_order) { + mutex_prof_data_init(&mutex->prof_data); +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + InitializeSRWLock(&mutex->lock); +# else + if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, + _CRT_SPINCOUNT)) { + return true; + } +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + mutex->lock = OS_UNFAIR_LOCK_INIT; +#elif (defined(JEMALLOC_OSSPIN)) + mutex->lock = 0; +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) + if (postpone_init) { + mutex->postponed_next = postponed_mutexes; + postponed_mutexes = mutex; + } else { + if (_pthread_mutex_init_calloc_cb(&mutex->lock, + bootstrap_calloc) != 0) { + return true; + } + } +#else + 
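+ /* + * Portable fallback: a plain pthreads mutex of type MALLOC_MUTEX_TYPE + * (platform-dependent, e.g. adaptive where available). + */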
pthread_mutexattr_t attr; + + if (pthread_mutexattr_init(&attr) != 0) { + return true; + } + pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); + if (pthread_mutex_init(&mutex->lock, &attr) != 0) { + pthread_mutexattr_destroy(&attr); + return true; + } + pthread_mutexattr_destroy(&attr); +#endif + if (config_debug) { + mutex->lock_order = lock_order; + if (lock_order == malloc_mutex_address_ordered) { + witness_init(&mutex->witness, name, rank, + mutex_addr_comp, mutex); + } else { + witness_init(&mutex->witness, name, rank, NULL, NULL); + } + } + return false; +} + +void +malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) { + malloc_mutex_lock(tsdn, mutex); +} + +void +malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) { + malloc_mutex_unlock(tsdn, mutex); +} + +void +malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) { +#ifdef JEMALLOC_MUTEX_INIT_CB + malloc_mutex_unlock(tsdn, mutex); +#else + if (malloc_mutex_init(mutex, mutex->witness.name, + mutex->witness.rank, mutex->lock_order)) { + malloc_printf("<jemalloc>: Error re-initializing mutex in " + "child\n"); + if (opt_abort) { + abort(); + } + } +#endif +} + +bool +malloc_mutex_boot(void) { +#ifdef JEMALLOC_MUTEX_INIT_CB + postpone_init = false; + while (postponed_mutexes != NULL) { + if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, + bootstrap_calloc) != 0) { + return true; + } + postponed_mutexes = postponed_mutexes->postponed_next; + } +#endif + return false; +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/mutex_pool.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/mutex_pool.c new file mode 100644 index 0000000..f24d10e --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/mutex_pool.c @@ -0,0 +1,18 @@ +#define JEMALLOC_MUTEX_POOL_C_ + +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_pool.h" + +bool +mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) { + for (int i = 0; i < MUTEX_POOL_SIZE; ++i) { + if (malloc_mutex_init(&pool->mutexes[i], name, rank, + malloc_mutex_address_ordered)) { + return true; + } + } + return false; +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/nstime.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/nstime.c new file mode 100644 index 0000000..71db353 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/nstime.c @@ -0,0 +1,170 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/nstime.h" + +#include "jemalloc/internal/assert.h" + +#define BILLION UINT64_C(1000000000) +#define MILLION UINT64_C(1000000) + +void +nstime_init(nstime_t *time, uint64_t ns) { + time->ns = ns; +} + +void +nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) { + time->ns = sec * BILLION + nsec; +} + +uint64_t +nstime_ns(const nstime_t *time) { + return time->ns; +} + +uint64_t +nstime_msec(const nstime_t *time) { + return time->ns / MILLION; +} + +uint64_t +nstime_sec(const nstime_t *time) { + return time->ns / BILLION; +} + +uint64_t +nstime_nsec(const nstime_t *time) { + return time->ns % BILLION; +} + +void +nstime_copy(nstime_t *time, const nstime_t *source) { + *time = *source; +} + +int +nstime_compare(const nstime_t *a, const nstime_t *b) { + return (a->ns > b->ns) - (a->ns < b->ns); +} + +void +nstime_add(nstime_t
*time, const nstime_t *addend) { + assert(UINT64_MAX - time->ns >= addend->ns); + + time->ns += addend->ns; +} + +void +nstime_iadd(nstime_t *time, uint64_t addend) { + assert(UINT64_MAX - time->ns >= addend); + + time->ns += addend; +} + +void +nstime_subtract(nstime_t *time, const nstime_t *subtrahend) { + assert(nstime_compare(time, subtrahend) >= 0); + + time->ns -= subtrahend->ns; +} + +void +nstime_isubtract(nstime_t *time, uint64_t subtrahend) { + assert(time->ns >= subtrahend); + + time->ns -= subtrahend; +} + +void +nstime_imultiply(nstime_t *time, uint64_t multiplier) { + assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) << + 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns)); + + time->ns *= multiplier; +} + +void +nstime_idivide(nstime_t *time, uint64_t divisor) { + assert(divisor != 0); + + time->ns /= divisor; +} + +uint64_t +nstime_divide(const nstime_t *time, const nstime_t *divisor) { + assert(divisor->ns != 0); + + return time->ns / divisor->ns; +} + +#ifdef _WIN32 +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + FILETIME ft; + uint64_t ticks_100ns; + + GetSystemTimeAsFileTime(&ft); + ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; + + nstime_init(time, ticks_100ns * 100); +} +#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE) +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); + nstime_init2(time, ts.tv_sec, ts.tv_nsec); +} +#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC) +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); + nstime_init2(time, ts.tv_sec, ts.tv_nsec); +} +#elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME) +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + nstime_init(time, mach_absolute_time()); +} +#else +# define NSTIME_MONOTONIC false +static void +nstime_get(nstime_t *time) { + struct timeval tv; + + gettimeofday(&tv, NULL); + nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000); +} +#endif + +static bool +nstime_monotonic_impl(void) { + return NSTIME_MONOTONIC; +#undef NSTIME_MONOTONIC +} +nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl; + +static bool +nstime_update_impl(nstime_t *time) { + nstime_t old_time; + + nstime_copy(&old_time, time); + nstime_get(time); + + /* Handle non-monotonic clocks. */ + if (unlikely(nstime_compare(&old_time, time) > 0)) { + nstime_copy(time, &old_time); + return true; + } + + return false; +} +nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl; diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/pages.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/pages.c new file mode 100644 index 0000000..2600269 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/pages.c @@ -0,0 +1,606 @@ +#define JEMALLOC_PAGES_C_ +#include "jemalloc/internal/jemalloc_preamble.h" + +#include "jemalloc/internal/pages.h" + +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/malloc_io.h" + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT +#include <sys/sysctl.h> +#ifdef __FreeBSD__ +#include <vm/vm_param.h> +#endif +#endif + +/******************************************************************************/ +/* Data. */ + +/* Actual operating system page size, detected during bootstrap, <= PAGE.
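+ * jemalloc's own PAGE is fixed at build time from LG_PAGE, so + * pages_boot() below refuses to start when the runtime page size turns + * out to be larger than the compiled-in one.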
*/ +static size_t os_page; + +#ifndef _WIN32 +# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE) +# define PAGES_PROT_DECOMMIT (PROT_NONE) +static int mmap_flags; +#endif +static bool os_overcommits; + +const char *thp_mode_names[] = { + "default", + "always", + "never", + "not supported" +}; +thp_mode_t opt_thp = THP_MODE_DEFAULT; +thp_mode_t init_system_thp_mode; + +/* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */ +static bool pages_can_purge_lazy_runtime = true; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +static void os_pages_unmap(void *addr, size_t size); + +/******************************************************************************/ + +static void * +os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) { + assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); + assert(ALIGNMENT_CEILING(size, os_page) == size); + assert(size != 0); + + if (os_overcommits) { + *commit = true; + } + + void *ret; +#ifdef _WIN32 + /* + * If VirtualAlloc can't allocate at the given address when one is + * given, it fails and returns NULL. + */ + ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0), + PAGE_READWRITE); +#else + /* + * We don't use MAP_FIXED here, because it can cause the *replacement* + * of existing mappings, and we only want to create new mappings. + */ + { + int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; + + ret = mmap(addr, size, prot, mmap_flags, -1, 0); + } + assert(ret != NULL); + + if (ret == MAP_FAILED) { + ret = NULL; + } else if (addr != NULL && ret != addr) { + /* + * We succeeded in mapping memory, but not in the right place. + */ + os_pages_unmap(ret, size); + ret = NULL; + } +#endif + assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL && + ret == addr)); + return ret; +} + +static void * +os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, + bool *commit) { + void *ret = (void *)((uintptr_t)addr + leadsize); + + assert(alloc_size >= leadsize + size); +#ifdef _WIN32 + os_pages_unmap(addr, alloc_size); + void *new_addr = os_pages_map(ret, size, PAGE, commit); + if (new_addr == ret) { + return ret; + } + if (new_addr != NULL) { + os_pages_unmap(new_addr, size); + } + return NULL; +#else + size_t trailsize = alloc_size - leadsize - size; + + if (leadsize != 0) { + os_pages_unmap(addr, leadsize); + } + if (trailsize != 0) { + os_pages_unmap((void *)((uintptr_t)ret + size), trailsize); + } + return ret; +#endif +} + +static void +os_pages_unmap(void *addr, size_t size) { + assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); + assert(ALIGNMENT_CEILING(size, os_page) == size); + +#ifdef _WIN32 + if (VirtualFree(addr, 0, MEM_RELEASE) == 0) +#else + if (munmap(addr, size) == -1) +#endif + { + char buf[BUFERROR_BUF]; + + buferror(get_errno(), buf, sizeof(buf)); + malloc_printf("<jemalloc>: Error in " +#ifdef _WIN32 + "VirtualFree" +#else + "munmap" +#endif + "(): %s\n", buf); + if (opt_abort) { + abort(); + } + } +} + +static void * +pages_map_slow(size_t size, size_t alignment, bool *commit) { + size_t alloc_size = size + alignment - os_page; + /* Beware size_t wrap-around.
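+ * e.g. size == SIZE_MAX - PAGE with alignment == 4 * PAGE makes + * size + alignment - os_page wrap to a tiny value, so the alloc_size < + * size check below rejects the request before mmap() is attempted.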
*/ + if (alloc_size < size) { + return NULL; + } + + void *ret; + do { + void *pages = os_pages_map(NULL, alloc_size, alignment, commit); + if (pages == NULL) { + return NULL; + } + size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) + - (uintptr_t)pages; + ret = os_pages_trim(pages, alloc_size, leadsize, size, commit); + } while (ret == NULL); + + assert(ret != NULL); + assert(PAGE_ADDR2BASE(ret) == ret); + return ret; +} + +void * +pages_map(void *addr, size_t size, size_t alignment, bool *commit) { + assert(alignment >= PAGE); + assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr); + + /* + * Ideally, there would be a way to specify alignment to mmap() (like + * NetBSD has), but in the absence of such a feature, we have to work + * hard to efficiently create aligned mappings. The reliable, but + * slow method is to create a mapping that is over-sized, then trim the + * excess. However, that always results in one or two calls to + * os_pages_unmap(), and it can leave holes in the process's virtual + * memory map if memory grows downward. + * + * Optimistically try mapping precisely the right amount before falling + * back to the slow method, with the expectation that the optimistic + * approach works most of the time. + */ + + void *ret = os_pages_map(addr, size, os_page, commit); + if (ret == NULL || ret == addr) { + return ret; + } + assert(addr == NULL); + if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) { + os_pages_unmap(ret, size); + return pages_map_slow(size, alignment, commit); + } + + assert(PAGE_ADDR2BASE(ret) == ret); + return ret; +} + +void +pages_unmap(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + + os_pages_unmap(addr, size); +} + +static bool +pages_commit_impl(void *addr, size_t size, bool commit) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + + if (os_overcommits) { + return true; + } + +#ifdef _WIN32 + return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT, + PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT))); +#else + { + int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; + void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED, + -1, 0); + if (result == MAP_FAILED) { + return true; + } + if (result != addr) { + /* + * We succeeded in mapping memory, but not in the right + * place. + */ + os_pages_unmap(result, size); + return true; + } + return false; + } +#endif +} + +bool +pages_commit(void *addr, size_t size) { + return pages_commit_impl(addr, size, true); +} + +bool +pages_decommit(void *addr, size_t size) { + return pages_commit_impl(addr, size, false); +} + +bool +pages_purge_lazy(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + + if (!pages_can_purge_lazy) { + return true; + } + if (!pages_can_purge_lazy_runtime) { + /* + * Built with lazy purge enabled, but detected it was not + * supported on the current system. 
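+ * (e.g. a binary built with MADV_FREE in its headers but run on an + * older kernel; pages_boot() probes a single page at startup and clears + * pages_can_purge_lazy_runtime when that madvise() call fails.)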
+ */ + return true; + } + +#ifdef _WIN32 + VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); + return false; +#elif defined(JEMALLOC_PURGE_MADVISE_FREE) + return (madvise(addr, size, +# ifdef MADV_FREE + MADV_FREE +# else + JEMALLOC_MADV_FREE +# endif + ) != 0); +#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ + !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) + return (madvise(addr, size, MADV_DONTNEED) != 0); +#else + not_reached(); +#endif +} + +bool +pages_purge_forced(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + + if (!pages_can_purge_forced) { + return true; + } + +#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ + defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) + return (madvise(addr, size, MADV_DONTNEED) != 0); +#elif defined(JEMALLOC_MAPS_COALESCE) + /* Try to overlay a new demand-zeroed mapping. */ + return pages_commit(addr, size); +#else + not_reached(); +#endif +} + +static bool +pages_huge_impl(void *addr, size_t size, bool aligned) { + if (aligned) { + assert(HUGEPAGE_ADDR2BASE(addr) == addr); + assert(HUGEPAGE_CEILING(size) == size); + } +#ifdef JEMALLOC_HAVE_MADVISE_HUGE + return (madvise(addr, size, MADV_HUGEPAGE) != 0); +#else + return true; +#endif +} + +bool +pages_huge(void *addr, size_t size) { + return pages_huge_impl(addr, size, true); +} + +static bool +pages_huge_unaligned(void *addr, size_t size) { + return pages_huge_impl(addr, size, false); +} + +static bool +pages_nohuge_impl(void *addr, size_t size, bool aligned) { + if (aligned) { + assert(HUGEPAGE_ADDR2BASE(addr) == addr); + assert(HUGEPAGE_CEILING(size) == size); + } + +#ifdef JEMALLOC_HAVE_MADVISE_HUGE + return (madvise(addr, size, MADV_NOHUGEPAGE) != 0); +#else + return false; +#endif +} + +bool +pages_nohuge(void *addr, size_t size) { + return pages_nohuge_impl(addr, size, true); +} + +static bool +pages_nohuge_unaligned(void *addr, size_t size) { + return pages_nohuge_impl(addr, size, false); +} + +bool +pages_dontdump(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); +#ifdef JEMALLOC_MADVISE_DONTDUMP + return madvise(addr, size, MADV_DONTDUMP) != 0; +#else + return false; +#endif +} + +bool +pages_dodump(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); +#ifdef JEMALLOC_MADVISE_DONTDUMP + return madvise(addr, size, MADV_DODUMP) != 0; +#else + return false; +#endif +} + + +static size_t +os_page_detect(void) { +#ifdef _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + return si.dwPageSize; +#elif defined(__FreeBSD__) + return getpagesize(); +#else + long result = sysconf(_SC_PAGESIZE); + if (result == -1) { + return LG_PAGE; + } + return (size_t)result; +#endif +} + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT +static bool +os_overcommits_sysctl(void) { + int vm_overcommit; + size_t sz; + + sz = sizeof(vm_overcommit); +#if defined(__FreeBSD__) && defined(VM_OVERCOMMIT) + int mib[2]; + + mib[0] = CTL_VM; + mib[1] = VM_OVERCOMMIT; + if (sysctl(mib, 2, &vm_overcommit, &sz, NULL, 0) != 0) { + return false; /* Error. */ + } +#else + if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) { + return false; /* Error. */ + } +#endif + + return ((vm_overcommit & 0x3) == 0); +} +#endif + +#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY +/* + * Use syscall(2) rather than {open,read,close}(2) when possible to avoid + * reentry during bootstrapping if another library has interposed system call + * wrappers. 
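+ * An interposed open(3) may itself allocate, re-entering jemalloc + * before bootstrapping completes; issuing the raw syscall avoids that + * cycle.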
+ */ +static bool +os_overcommits_proc(void) { + int fd; + char buf[1]; + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) + #if defined(O_CLOEXEC) + fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY | + O_CLOEXEC); + #else + fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY); + if (fd != -1) { + fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); + } + #endif +#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat) + #if defined(O_CLOEXEC) + fd = (int)syscall(SYS_openat, + AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); + #else + fd = (int)syscall(SYS_openat, + AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY); + if (fd != -1) { + fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); + } + #endif +#else + #if defined(O_CLOEXEC) + fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); + #else + fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY); + if (fd != -1) { + fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC); + } + #endif +#endif + + if (fd == -1) { + return false; /* Error. */ + } + + ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf)); +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close) + syscall(SYS_close, fd); +#else + close(fd); +#endif + + if (nread < 1) { + return false; /* Error. */ + } + /* + * /proc/sys/vm/overcommit_memory meanings: + * 0: Heuristic overcommit. + * 1: Always overcommit. + * 2: Never overcommit. + */ + return (buf[0] == '0' || buf[0] == '1'); +} +#endif + +void +pages_set_thp_state (void *ptr, size_t size) { + if (opt_thp == thp_mode_default || opt_thp == init_system_thp_mode) { + return; + } + assert(opt_thp != thp_mode_not_supported && + init_system_thp_mode != thp_mode_not_supported); + + if (opt_thp == thp_mode_always + && init_system_thp_mode != thp_mode_never) { + assert(init_system_thp_mode == thp_mode_default); + pages_huge_unaligned(ptr, size); + } else if (opt_thp == thp_mode_never) { + assert(init_system_thp_mode == thp_mode_default || + init_system_thp_mode == thp_mode_always); + pages_nohuge_unaligned(ptr, size); + } +} + +static void +init_thp_state(void) { + if (!have_madvise_huge) { + if (metadata_thp_enabled() && opt_abort) { + malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n"); + abort(); + } + goto label_error; + } + + static const char sys_state_madvise[] = "always [madvise] never\n"; + static const char sys_state_always[] = "[always] madvise never\n"; + static const char sys_state_never[] = "always madvise [never]\n"; + char buf[sizeof(sys_state_madvise)]; + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) + int fd = (int)syscall(SYS_open, + "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY); +#else + int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY); +#endif + if (fd == -1) { + goto label_error; + } + + ssize_t nread = malloc_read_fd(fd, &buf, sizeof(buf)); +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close) + syscall(SYS_close, fd); +#else + close(fd); +#endif + + if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) { + init_system_thp_mode = thp_mode_default; + } else if (strncmp(buf, sys_state_always, (size_t)nread) == 0) { + init_system_thp_mode = thp_mode_always; + } else if (strncmp(buf, sys_state_never, (size_t)nread) == 0) { + init_system_thp_mode = thp_mode_never; + } else { + goto label_error; + } + return; +label_error: + opt_thp = init_system_thp_mode = thp_mode_not_supported; +} + +bool +pages_boot(void) { + os_page = os_page_detect(); + if (os_page > PAGE) { + malloc_write("<jemalloc>: Unsupported
system page size\n"); + if (opt_abort) { + abort(); + } + return true; + } + +#ifndef _WIN32 + mmap_flags = MAP_PRIVATE | MAP_ANON; +#endif + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT + os_overcommits = os_overcommits_sysctl(); +#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY) + os_overcommits = os_overcommits_proc(); +# ifdef MAP_NORESERVE + if (os_overcommits) { + mmap_flags |= MAP_NORESERVE; + } +# endif +#else + os_overcommits = false; +#endif + + init_thp_state(); + + /* Detect lazy purge runtime support. */ + if (pages_can_purge_lazy) { + bool committed = false; + void *madv_free_page = os_pages_map(NULL, PAGE, PAGE, &committed); + if (madv_free_page == NULL) { + return true; + } + assert(pages_can_purge_lazy_runtime); + if (pages_purge_lazy(madv_free_page, PAGE)) { + pages_can_purge_lazy_runtime = false; + } + os_pages_unmap(madv_free_page, PAGE); + } + + return false; +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/prng.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/prng.c new file mode 100644 index 0000000..83c04bf --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/prng.c @@ -0,0 +1,3 @@ +#define JEMALLOC_PRNG_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/prof.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/prof.c similarity index 53% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/prof.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/prof.c index 5d2b959..13df641 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/prof.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/prof.c @@ -1,14 +1,29 @@ -#define JEMALLOC_PROF_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_PROF_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex.h" + /******************************************************************************/ #ifdef JEMALLOC_PROF_LIBUNWIND -#define UNW_LOCAL_ONLY +#define UNW_LOCAL_ONLY #include #endif #ifdef JEMALLOC_PROF_LIBGCC +/* + * We have a circular dependency -- jemalloc_internal.h tells us if we should + * use libgcc's unwinding functionality, but after we've included that, we've + * already hooked _Unwind_Backtrace. We'll temporarily disable hooking. + */ +#undef _Unwind_Backtrace #include +#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook) #endif /******************************************************************************/ @@ -63,7 +78,7 @@ size_t lg_prof_sample; * creating/destroying mutexes. */ static malloc_mutex_t *gctx_locks; -static unsigned cum_gctxs; /* Atomic counter. */ +static atomic_u_t cum_gctxs; /* Atomic counter. */ /* * Table of mutexes that are shared among tdata's. No operations require @@ -78,7 +93,8 @@ static malloc_mutex_t *tdata_locks; * structure that knows about all backtraces currently captured. */ static ckh_t bt2gctx; -static malloc_mutex_t bt2gctx_mtx; +/* Non static to enable profiling. 
*/ +malloc_mutex_t bt2gctx_mtx; /* * Tree of all extant prof_tdata_t structures, regardless of state, @@ -109,7 +125,7 @@ static char prof_dump_buf[ 1 #endif ]; -static unsigned prof_dump_buf_end; +static size_t prof_dump_buf_end; static int prof_dump_fd; /* Do not dump any profiles until bootstrapping is complete. */ @@ -121,20 +137,19 @@ static bool prof_booted = false; * definition. */ -static bool prof_tctx_should_destroy(prof_tctx_t *tctx); +static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); -static bool prof_tdata_should_destroy(prof_tdata_t *tdata, +static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached); static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached); -static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name); +static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); /******************************************************************************/ /* Red-black trees. */ -JEMALLOC_INLINE_C int -prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) -{ +static int +prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { uint64_t a_thr_uid = a->thr_uid; uint64_t b_thr_uid = b->thr_uid; int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); @@ -150,30 +165,29 @@ prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) b_tctx_uid); } } - return (ret); + return ret; } rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, tctx_link, prof_tctx_comp) -JEMALLOC_INLINE_C int -prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) -{ +static int +prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { unsigned a_len = a->bt.len; unsigned b_len = b->bt.len; unsigned comp_len = (a_len < b_len) ? a_len : b_len; int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); - if (ret == 0) + if (ret == 0) { ret = (a_len > b_len) - (a_len < b_len); - return (ret); + } + return ret; } rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, prof_gctx_comp) -JEMALLOC_INLINE_C int -prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) -{ +static int +prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { int ret; uint64_t a_uid = a->thr_uid; uint64_t b_uid = b->thr_uid; @@ -185,7 +199,7 @@ prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); } - return (ret); + return ret; } rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, @@ -194,8 +208,7 @@ rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, /******************************************************************************/ void -prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) -{ +prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { prof_tdata_t *tdata; cassert(config_prof); @@ -208,27 +221,28 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) * programs. 
*/ tdata = prof_tdata_get(tsd, true); - if (tdata != NULL) + if (tdata != NULL) { prof_sample_threshold_update(tdata); + } } if ((uintptr_t)tctx > (uintptr_t)1U) { - malloc_mutex_lock(tctx->tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); tctx->prepared = false; - if (prof_tctx_should_destroy(tctx)) + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { prof_tctx_destroy(tsd, tctx); - else - malloc_mutex_unlock(tctx->tdata->lock); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); + } } } void -prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx) -{ +prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx) { + prof_tctx_set(tsdn, ptr, usize, NULL, tctx); - prof_tctx_set(ptr, usize, tctx); - - malloc_mutex_lock(tctx->tdata->lock); + malloc_mutex_lock(tsdn, tctx->tdata->lock); tctx->cnts.curobjs++; tctx->cnts.curbytes += usize; if (opt_prof_accum) { @@ -236,39 +250,34 @@ prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx) tctx->cnts.accumbytes += usize; } tctx->prepared = false; - malloc_mutex_unlock(tctx->tdata->lock); + malloc_mutex_unlock(tsdn, tctx->tdata->lock); } void -prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) -{ - - malloc_mutex_lock(tctx->tdata->lock); +prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) { + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); assert(tctx->cnts.curobjs > 0); assert(tctx->cnts.curbytes >= usize); tctx->cnts.curobjs--; tctx->cnts.curbytes -= usize; - if (prof_tctx_should_destroy(tctx)) + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { prof_tctx_destroy(tsd, tctx); - else - malloc_mutex_unlock(tctx->tdata->lock); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); + } } void -bt_init(prof_bt_t *bt, void **vec) -{ - +bt_init(prof_bt_t *bt, void **vec) { cassert(config_prof); bt->vec = vec; bt->len = 0; } -JEMALLOC_INLINE_C void -prof_enter(tsd_t *tsd, prof_tdata_t *tdata) -{ - +static void +prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); @@ -277,17 +286,15 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tdata) tdata->enq = true; } - malloc_mutex_lock(&bt2gctx_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); } -JEMALLOC_INLINE_C void -prof_leave(tsd_t *tsd, prof_tdata_t *tdata) -{ - +static void +prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); - malloc_mutex_unlock(&bt2gctx_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); if (tdata != NULL) { bool idump, gdump; @@ -299,17 +306,18 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata) gdump = tdata->enq_gdump; tdata->enq_gdump = false; - if (idump) - prof_idump(); - if (gdump) - prof_gdump(); + if (idump) { + prof_idump(tsd_tsdn(tsd)); + } + if (gdump) { + prof_gdump(tsd_tsdn(tsd)); + } } } #ifdef JEMALLOC_PROF_LIBUNWIND void -prof_backtrace(prof_bt_t *bt) -{ +prof_backtrace(prof_bt_t *bt) { int nframes; cassert(config_prof); @@ -317,42 +325,41 @@ prof_backtrace(prof_bt_t *bt) assert(bt->vec != NULL); nframes = unw_backtrace(bt->vec, PROF_BT_MAX); - if (nframes <= 0) + if (nframes <= 0) { return; + } bt->len = nframes; } #elif (defined(JEMALLOC_PROF_LIBGCC)) static _Unwind_Reason_Code -prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) -{ - +prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { cassert(config_prof); - return (_URC_NO_REASON); + return 
_URC_NO_REASON; } static _Unwind_Reason_Code -prof_unwind_callback(struct _Unwind_Context *context, void *arg) -{ +prof_unwind_callback(struct _Unwind_Context *context, void *arg) { prof_unwind_data_t *data = (prof_unwind_data_t *)arg; void *ip; cassert(config_prof); ip = (void *)_Unwind_GetIP(context); - if (ip == NULL) - return (_URC_END_OF_STACK); + if (ip == NULL) { + return _URC_END_OF_STACK; + } data->bt->vec[data->bt->len] = ip; data->bt->len++; - if (data->bt->len == data->max) - return (_URC_END_OF_STACK); + if (data->bt->len == data->max) { + return _URC_END_OF_STACK; + } - return (_URC_NO_REASON); + return _URC_NO_REASON; } void -prof_backtrace(prof_bt_t *bt) -{ +prof_backtrace(prof_bt_t *bt) { prof_unwind_data_t data = {bt, PROF_BT_MAX}; cassert(config_prof); @@ -361,20 +368,22 @@ prof_backtrace(prof_bt_t *bt) } #elif (defined(JEMALLOC_PROF_GCC)) void -prof_backtrace(prof_bt_t *bt) -{ -#define BT_FRAME(i) \ +prof_backtrace(prof_bt_t *bt) { +#define BT_FRAME(i) \ if ((i) < PROF_BT_MAX) { \ void *p; \ - if (__builtin_frame_address(i) == 0) \ + if (__builtin_frame_address(i) == 0) { \ return; \ + } \ p = __builtin_return_address(i); \ - if (p == NULL) \ + if (p == NULL) { \ return; \ + } \ bt->vec[(i)] = p; \ bt->len = (i) + 1; \ - } else \ - return; + } else { \ + return; \ + } cassert(config_prof); @@ -522,40 +531,36 @@ prof_backtrace(prof_bt_t *bt) } #else void -prof_backtrace(prof_bt_t *bt) -{ - +prof_backtrace(prof_bt_t *bt) { cassert(config_prof); not_reached(); } #endif static malloc_mutex_t * -prof_gctx_mutex_choose(void) -{ - unsigned ngctxs = atomic_add_u(&cum_gctxs, 1); +prof_gctx_mutex_choose(void) { + unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED); - return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]); + return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]; } static malloc_mutex_t * -prof_tdata_mutex_choose(uint64_t thr_uid) -{ - - return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]); +prof_tdata_mutex_choose(uint64_t thr_uid) { + return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS]; } static prof_gctx_t * -prof_gctx_create(tsd_t *tsd, prof_bt_t *bt) -{ +prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { /* * Create a single allocation that has space for vec of length bt->len. */ - prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t, - vec) + (bt->len * sizeof(void *)), false, tcache_get(tsd, true), - true, NULL); - if (gctx == NULL) - return (NULL); + size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); + prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, + sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), + true); + if (gctx == NULL) { + return NULL; + } gctx->lock = prof_gctx_mutex_choose(); /* * Set nlimbo to 1, in order to avoid a race condition with @@ -567,14 +572,12 @@ prof_gctx_create(tsd_t *tsd, prof_bt_t *bt) memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); gctx->bt.vec = gctx->vec; gctx->bt.len = bt->len; - return (gctx); + return gctx; } static void prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, - prof_tdata_t *tdata) -{ - + prof_tdata_t *tdata) { cassert(config_prof); /* @@ -585,62 +588,66 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, * into this function. */ prof_enter(tsd, tdata_self); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); assert(gctx->nlimbo != 0); if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { /* Remove gctx from bt2gctx. 
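prof_gctx_create now computes the allocation size explicitly: offsetof(prof_gctx_t, vec) plus room for bt->len pointers, so the copied backtrace vector shares a single allocation with its header. The same pattern in isolation (bt_copy_t is a made-up type standing in for prof_gctx_t):

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    size_t len;
    void *vec[];    /* C99 flexible array member, sized at allocation time. */
} bt_copy_t;

static bt_copy_t *
bt_copy_new(void *const *frames, size_t len) {
    /* One allocation holds both the header and the frame vector. */
    size_t size = offsetof(bt_copy_t, vec) + len * sizeof(void *);
    bt_copy_t *copy = malloc(size);
    if (copy == NULL) {
        return NULL;
    }
    copy->len = len;
    memcpy(copy->vec, frames, len * sizeof(void *));
    return copy;
}

int
main(void) {
    void *frames[3] = {(void *)1, (void *)2, (void *)3};
    free(bt_copy_new(frames, 3));
    return 0;
}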
*/ - if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) + if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { not_reached(); + } prof_leave(tsd, tdata_self); /* Destroy gctx. */ - malloc_mutex_unlock(gctx->lock); - idalloctm(tsd, gctx, tcache_get(tsd, false), true); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true); } else { /* * Compensate for increment in prof_tctx_destroy() or * prof_lookup(). */ gctx->nlimbo--; - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); prof_leave(tsd, tdata_self); } } -/* tctx->tdata->lock must be held. */ static bool -prof_tctx_should_destroy(prof_tctx_t *tctx) -{ +prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - if (opt_prof_accum) - return (false); - if (tctx->cnts.curobjs != 0) - return (false); - if (tctx->prepared) - return (false); - return (true); + if (opt_prof_accum) { + return false; + } + if (tctx->cnts.curobjs != 0) { + return false; + } + if (tctx->prepared) { + return false; + } + return true; } static bool -prof_gctx_should_destroy(prof_gctx_t *gctx) -{ - - if (opt_prof_accum) - return (false); - if (!tctx_tree_empty(&gctx->tctxs)) - return (false); - if (gctx->nlimbo != 0) - return (false); - return (true); +prof_gctx_should_destroy(prof_gctx_t *gctx) { + if (opt_prof_accum) { + return false; + } + if (!tctx_tree_empty(&gctx->tctxs)) { + return false; + } + if (gctx->nlimbo != 0) { + return false; + } + return true; } -/* tctx->tdata->lock is held upon entry, and released before return. */ static void -prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) -{ +prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { prof_tdata_t *tdata = tctx->tdata; prof_gctx_t *gctx = tctx->gctx; bool destroy_tdata, destroy_tctx, destroy_gctx; + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + assert(tctx->cnts.curobjs == 0); assert(tctx->cnts.curbytes == 0); assert(!opt_prof_accum); @@ -648,10 +655,10 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) assert(tctx->cnts.accumbytes == 0); ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); - destroy_tdata = prof_tdata_should_destroy(tdata, false); - malloc_mutex_unlock(tdata->lock); + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); switch (tctx->state) { case prof_tctx_state_nominal: tctx_tree_remove(&gctx->tctxs, tctx); @@ -673,8 +680,9 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) */ gctx->nlimbo++; destroy_gctx = true; - } else + } else { destroy_gctx = false; + } break; case prof_tctx_state_dumping: /* @@ -691,27 +699,30 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) destroy_tctx = false; destroy_gctx = false; } - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); if (destroy_gctx) { prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, tdata); } - if (destroy_tdata) + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + if (destroy_tdata) { prof_tdata_destroy(tsd, tdata, false); + } - if (destroy_tctx) - idalloctm(tsd, tctx, tcache_get(tsd, false), true); + if (destroy_tctx) { + idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true); + } } static bool prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, - void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) -{ + void **p_btkey, prof_gctx_t **p_gctx, 
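prof_gctx_mutex_choose and prof_tdata_mutex_choose map an ID onto a fixed array of PROF_NCTX_LOCKS / PROF_NTDATA_LOCKS mutexes, so unrelated contexts rarely contend while the lock table stays bounded. Lock striping in miniature (NSTRIPES and the names are invented for the example):

#include <pthread.h>
#include <stdint.h>

#define NSTRIPES 64 /* A power of two keeps key % NSTRIPES cheap. */

static pthread_mutex_t stripes[NSTRIPES];

static pthread_mutex_t *
stripe_choose(uint64_t key) {
    return &stripes[key % NSTRIPES];
}

int
main(void) {
    for (unsigned i = 0; i < NSTRIPES; i++) {
        pthread_mutex_init(&stripes[i], NULL);
    }
    pthread_mutex_lock(stripe_choose(42));
    pthread_mutex_unlock(stripe_choose(42));
    return 0;
}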
bool *p_new_gctx) { union { prof_gctx_t *p; void *v; - } gctx; + } gctx, tgctx; union { prof_bt_t *p; void *v; @@ -721,40 +732,57 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, prof_enter(tsd, tdata); if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { /* bt has never been seen before. Insert it. */ - gctx.p = prof_gctx_create(tsd, bt); - if (gctx.v == NULL) { - prof_leave(tsd, tdata); - return (true); + prof_leave(tsd, tdata); + tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); + if (tgctx.v == NULL) { + return true; } - btkey.p = &gctx.p->bt; - if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { - /* OOM. */ - prof_leave(tsd, tdata); - idalloctm(tsd, gctx.v, tcache_get(tsd, false), true); - return (true); + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { + gctx.p = tgctx.p; + btkey.p = &gctx.p->bt; + if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { + /* OOM. */ + prof_leave(tsd, tdata); + idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL, + true, true); + return true; + } + new_gctx = true; + } else { + new_gctx = false; } - new_gctx = true; } else { + tgctx.v = NULL; + new_gctx = false; + } + + if (!new_gctx) { /* * Increment nlimbo, in order to avoid a race condition with * prof_tctx_destroy()/prof_gctx_try_destroy(). */ - malloc_mutex_lock(gctx.p->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); gctx.p->nlimbo++; - malloc_mutex_unlock(gctx.p->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); new_gctx = false; + + if (tgctx.v != NULL) { + /* Lost race to insert. */ + idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, + true); + } } prof_leave(tsd, tdata); *p_btkey = btkey.v; *p_gctx = gctx.p; *p_new_gctx = new_gctx; - return (false); + return false; } prof_tctx_t * -prof_lookup(tsd_t *tsd, prof_bt_t *bt) -{ +prof_lookup(tsd_t *tsd, prof_bt_t *bt) { union { prof_tctx_t *p; void *v; @@ -765,16 +793,17 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) cassert(config_prof); tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) - return (NULL); + if (tdata == NULL) { + return NULL; + } - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); - if (!not_found) /* Note double negative! */ + if (!not_found) { /* Note double negative! */ ret.p->prepared = true; - malloc_mutex_unlock(tdata->lock); + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (not_found) { - tcache_t *tcache; void *btkey; prof_gctx_t *gctx; bool new_gctx, error; @@ -784,17 +813,19 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) * cache. */ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, - &new_gctx)) - return (NULL); + &new_gctx)) { + return NULL; + } /* Link a prof_tctx_t into gctx for this thread. 
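The rewritten prof_lookup_global no longer allocates while holding bt2gctx_mtx: it drops the lock, builds a candidate gctx, retakes the lock, re-searches, and discards the candidate if another thread inserted first. The lock/allocate/recheck shape, reduced to a one-slot "table" (the_slot stands in for the bt2gctx hash):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t table_mtx = PTHREAD_MUTEX_INITIALIZER;
static void *the_slot;    /* One-entry stand-in for the real hash table. */

static void *
lookup_or_create(void) {
    pthread_mutex_lock(&table_mtx);
    void *val = the_slot;
    pthread_mutex_unlock(&table_mtx);
    if (val != NULL) {
        return val;
    }
    /* Allocate outside the lock; creation may be expensive. */
    void *candidate = malloc(64);
    if (candidate == NULL) {
        return NULL;
    }
    pthread_mutex_lock(&table_mtx);
    if (the_slot == NULL) {
        the_slot = candidate;    /* We won the race to insert. */
        candidate = NULL;
    }
    val = the_slot;
    pthread_mutex_unlock(&table_mtx);
    free(candidate);    /* Lost the race: discard ours (free(NULL) is a no-op). */
    return val;
}

int
main(void) {
    void *v = lookup_or_create();
    free(v);    /* In real code the table would own this. */
    return 0;
}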
*/ - tcache = tcache_get(tsd, true); - ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, tcache, true, - NULL); + ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), + sz_size2index(sizeof(prof_tctx_t)), false, NULL, true, + arena_ichoose(tsd, NULL), true); if (ret.p == NULL) { - if (new_gctx) + if (new_gctx) { prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - return (NULL); + } + return NULL; } ret.p->tdata = tdata; ret.p->thr_uid = tdata->thr_uid; @@ -804,47 +835,48 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) ret.p->tctx_uid = tdata->tctx_uid_next++; ret.p->prepared = true; ret.p->state = prof_tctx_state_initializing; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); - malloc_mutex_unlock(tdata->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); if (error) { - if (new_gctx) + if (new_gctx) { prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - idalloctm(tsd, ret.v, tcache, true); - return (NULL); + } + idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true); + return NULL; } - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); ret.p->state = prof_tctx_state_nominal; tctx_tree_insert(&gctx->tctxs, ret.p); gctx->nlimbo--; - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } - return (ret.p); + return ret.p; } +/* + * The bodies of this function and prof_leakcheck() are compiled out unless heap + * profiling is enabled, so that it is possible to compile jemalloc with + * floating point support completely disabled. Avoiding floating point code is + * important on memory-constrained systems, but it also enables a workaround for + * versions of glibc that don't properly save/restore floating point registers + * during dynamic lazy symbol loading (which internally calls into whatever + * malloc implementation happens to be integrated into the application). Note + * that some compilers (e.g. gcc 4.8) may use floating point registers for fast + * memory moves, so jemalloc must be compiled with such optimizations disabled + * (e.g. + * -mno-sse) in order for the workaround to be complete. + */ void -prof_sample_threshold_update(prof_tdata_t *tdata) -{ - /* - * The body of this function is compiled out unless heap profiling is - * enabled, so that it is possible to compile jemalloc with floating - * point support completely disabled. Avoiding floating point code is - * important on memory-constrained systems, but it also enables a - * workaround for versions of glibc that don't properly save/restore - * floating point registers during dynamic lazy symbol loading (which - * internally calls into whatever malloc implementation happens to be - * integrated into the application). Note that some compilers (e.g. - * gcc 4.8) may use floating point registers for fast memory moves, so - * jemalloc must be compiled with such optimizations disabled (e.g. - * -mno-sse) in order for the workaround to be complete. 
- */ +prof_sample_threshold_update(prof_tdata_t *tdata) { #ifdef JEMALLOC_PROF uint64_t r; double u; - if (!config_prof) + if (!config_prof) { return; + } if (lg_prof_sample == 0) { tdata->bytes_until_sample = 0; @@ -869,8 +901,7 @@ prof_sample_threshold_update(prof_tdata_t *tdata) * pp 500 * (http://luc.devroye.org/rnbookindex.html) */ - prng64(r, 53, tdata->prng_state, UINT64_C(6364136223846793005), - UINT64_C(1442695040888963407)); + r = prng_lg_range_u64(&tdata->prng_state, 53); u = (double)r * (1.0/9007199254740992.0L); tdata->bytes_until_sample = (uint64_t)(log(u) / log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) @@ -880,101 +911,91 @@ prof_sample_threshold_update(prof_tdata_t *tdata) #ifdef JEMALLOC_JET static prof_tdata_t * -prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) -{ +prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { size_t *tdata_count = (size_t *)arg; (*tdata_count)++; - return (NULL); + return NULL; } size_t -prof_tdata_count(void) -{ +prof_tdata_count(void) { size_t tdata_count = 0; + tsdn_t *tsdn; - malloc_mutex_lock(&tdatas_mtx); + tsdn = tsdn_fetch(); + malloc_mutex_lock(tsdn, &tdatas_mtx); tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, (void *)&tdata_count); - malloc_mutex_unlock(&tdatas_mtx); + malloc_mutex_unlock(tsdn, &tdatas_mtx); - return (tdata_count); + return tdata_count; } -#endif -#ifdef JEMALLOC_JET size_t -prof_bt_count(void) -{ +prof_bt_count(void) { size_t bt_count; tsd_t *tsd; prof_tdata_t *tdata; tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) - return (0); + if (tdata == NULL) { + return 0; + } - malloc_mutex_lock(&bt2gctx_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); bt_count = ckh_count(&bt2gctx); - malloc_mutex_unlock(&bt2gctx_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - return (bt_count); + return bt_count; } #endif -#ifdef JEMALLOC_JET -#undef prof_dump_open -#define prof_dump_open JEMALLOC_N(prof_dump_open_impl) -#endif static int -prof_dump_open(bool propagate_err, const char *filename) -{ +prof_dump_open_impl(bool propagate_err, const char *filename) { int fd; fd = creat(filename, 0644); if (fd == -1 && !propagate_err) { malloc_printf(": creat(\"%s\"), 0644) failed\n", filename); - if (opt_abort) + if (opt_abort) { abort(); + } } - return (fd); + return fd; } -#ifdef JEMALLOC_JET -#undef prof_dump_open -#define prof_dump_open JEMALLOC_N(prof_dump_open) -prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl); -#endif +prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl; static bool -prof_dump_flush(bool propagate_err) -{ +prof_dump_flush(bool propagate_err) { bool ret = false; ssize_t err; cassert(config_prof); - err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); + err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); if (err == -1) { if (!propagate_err) { malloc_write(": write() failed during heap " "profile flush\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } ret = true; } prof_dump_buf_end = 0; - return (ret); + return ret; } static bool -prof_dump_close(bool propagate_err) -{ +prof_dump_close(bool propagate_err) { bool ret; assert(prof_dump_fd != -1); @@ -982,13 +1003,12 @@ prof_dump_close(bool propagate_err) close(prof_dump_fd); prof_dump_fd = -1; - return (ret); + return ret; } static bool -prof_dump_write(bool propagate_err, const char *s) -{ - unsigned i, slen, n; +prof_dump_write(bool propagate_err, const char *s) { + size_t i, slen, 
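The threshold update above now draws its 53-bit uniform value from prng_lg_range_u64 instead of the old prng64 macro, but the interval is still the geometric-distribution draw from Devroye cited in the comment. A freestanding version of that computation (rand() is a crude stand-in for jemalloc's PRNG; link with -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Bytes until the next sample: geometric draw with mean ~2^lg_sample. */
static uint64_t
sample_interval(unsigned lg_sample) {
    /* 53 random bits -> uniform double in (0, 1); +0.5 keeps u nonzero. */
    uint64_t r = ((uint64_t)rand() << 33) ^ ((uint64_t)rand() << 12);
    double u = ((double)(r & (((uint64_t)1 << 53) - 1)) + 0.5) *
        (1.0 / 9007199254740992.0);
    return (uint64_t)(log(u) /
        log(1.0 - (1.0 / (double)((uint64_t)1 << lg_sample)))) + 1;
}

int
main(void) {
    printf("%llu\n", (unsigned long long)sample_interval(19));
    return 0;
}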
n; cassert(config_prof); @@ -996,9 +1016,11 @@ prof_dump_write(bool propagate_err, const char *s) slen = strlen(s); while (i < slen) { /* Flush the buffer if it is full. */ - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) - if (prof_dump_flush(propagate_err) && propagate_err) - return (true); + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + if (prof_dump_flush(propagate_err) && propagate_err) { + return true; + } + } if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) { /* Finish writing. */ @@ -1012,13 +1034,12 @@ prof_dump_write(bool propagate_err, const char *s) i += n; } - return (false); + return false; } JEMALLOC_FORMAT_PRINTF(2, 3) static bool -prof_dump_printf(bool propagate_err, const char *format, ...) -{ +prof_dump_printf(bool propagate_err, const char *format, ...) { bool ret; va_list ap; char buf[PROF_PRINTF_BUFSIZE]; @@ -1028,23 +1049,22 @@ prof_dump_printf(bool propagate_err, const char *format, ...) va_end(ap); ret = prof_dump_write(propagate_err, buf); - return (ret); + return ret; } -/* tctx->tdata->lock is held. */ static void -prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata) -{ +prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - malloc_mutex_lock(tctx->gctx->lock); + malloc_mutex_lock(tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_initializing: - malloc_mutex_unlock(tctx->gctx->lock); + malloc_mutex_unlock(tsdn, tctx->gctx->lock); return; case prof_tctx_state_nominal: tctx->state = prof_tctx_state_dumping; - malloc_mutex_unlock(tctx->gctx->lock); + malloc_mutex_unlock(tsdn, tctx->gctx->lock); memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); @@ -1063,10 +1083,9 @@ prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata) } } -/* gctx->lock is held. */ static void -prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx) -{ +prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { + malloc_mutex_assert_owner(tsdn, gctx->lock); gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; @@ -1076,10 +1095,11 @@ prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx) } } -/* tctx->gctx is held. */ static prof_tctx_t * -prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) -{ +prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_nominal: @@ -1087,20 +1107,26 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: - prof_tctx_merge_gctx(tctx, tctx->gctx); + prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); break; default: not_reached(); } - return (NULL); + return NULL; } -/* gctx->lock is held. 
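prof_dump_write's counters are widened from unsigned to size_t here, closing a potential truncation for long strings; the function itself is a classic fixed-buffer writer that flushes to a file descriptor whenever the buffer fills. The same loop, standalone (BUFSZ and dump_fd are example stand-ins, with error handling elided):

#include <string.h>
#include <unistd.h>

#define BUFSZ 65536

static char dump_buf[BUFSZ];
static size_t dump_buf_end;
static int dump_fd = 1;    /* stdout, for the example */

static void
dump_flush(void) {
    ssize_t r = write(dump_fd, dump_buf, dump_buf_end);
    (void)r;    /* real code propagates or aborts on error */
    dump_buf_end = 0;
}

static void
dump_write(const char *s) {
    size_t i = 0, slen = strlen(s);
    while (i < slen) {
        if (dump_buf_end == BUFSZ) {
            dump_flush();
        }
        /* Copy as much as fits in the remaining buffer space. */
        size_t n = slen - i;
        if (n > BUFSZ - dump_buf_end) {
            n = BUFSZ - dump_buf_end;
        }
        memcpy(&dump_buf[dump_buf_end], &s[i], n);
        dump_buf_end += n;
        i += n;
    }
}

int
main(void) {
    dump_write("hello, heap profile\n");
    dump_flush();
    return 0;
}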
*/ +struct prof_tctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + static prof_tctx_t * -prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) -{ - bool propagate_err = *(bool *)arg; +prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { + struct prof_tctx_dump_iter_arg_s *arg = + (struct prof_tctx_dump_iter_arg_s *)opaque; + + malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); switch (tctx->state) { case prof_tctx_state_initializing: @@ -1109,25 +1135,27 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) break; case prof_tctx_state_dumping: case prof_tctx_state_purgatory: - if (prof_dump_printf(propagate_err, + if (prof_dump_printf(arg->propagate_err, " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, - tctx->dump_cnts.accumbytes)) - return (tctx); + tctx->dump_cnts.accumbytes)) { + return tctx; + } break; default: not_reached(); } - return (NULL); + return NULL; } -/* tctx->gctx is held. */ static prof_tctx_t * -prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) -{ +prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; prof_tctx_t *ret; + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + switch (tctx->state) { case prof_tctx_state_nominal: /* New since dumping started; ignore. */ @@ -1144,16 +1172,14 @@ prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) ret = NULL; label_return: - return (ret); + return ret; } static void -prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) -{ - +prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { cassert(config_prof); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsdn, gctx->lock); /* * Increment nlimbo so that gctx won't go away before dump. 
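From here on, iterator callbacks that used to smuggle a single value through their void * argument (a bool, a size_t counter) instead receive a small struct carrying both a tsdn_t and the accumulator, since the callbacks now need the thread context for locking. The pattern in generic form (sum_iter_arg_s and for_each are made up for the example):

#include <stddef.h>
#include <stdio.h>

struct sum_iter_arg_s {
    void *ctx;      /* stands in for the tsdn_t * the callbacks need */
    size_t total;   /* the accumulator that used to be passed alone */
};

static int
sum_iter(int value, void *opaque) {
    struct sum_iter_arg_s *arg = (struct sum_iter_arg_s *)opaque;
    arg->total += (size_t)value;
    return 0;   /* nonzero would stop iteration */
}

static void
for_each(const int *v, size_t n, int (*cb)(int, void *), void *opaque) {
    for (size_t i = 0; i < n; i++) {
        if (cb(v[i], opaque) != 0) {
            break;
        }
    }
}

int
main(void) {
    int v[] = {1, 2, 3};
    struct sum_iter_arg_s arg = {NULL, 0};
    for_each(v, 3, sum_iter, &arg);
    printf("%zu\n", arg.total);
    return 0;
}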
@@ -1165,26 +1191,32 @@ prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsdn, gctx->lock); } -static prof_gctx_t * -prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg) -{ - size_t *leak_ngctx = (size_t *)arg; +struct prof_gctx_merge_iter_arg_s { + tsdn_t *tsdn; + size_t leak_ngctx; +}; - malloc_mutex_lock(gctx->lock); - tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL); - if (gctx->cnt_summed.curobjs != 0) - (*leak_ngctx)++; - malloc_mutex_unlock(gctx->lock); +static prof_gctx_t * +prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { + struct prof_gctx_merge_iter_arg_s *arg = + (struct prof_gctx_merge_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, gctx->lock); + tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, + (void *)arg->tsdn); + if (gctx->cnt_summed.curobjs != 0) { + arg->leak_ngctx++; + } + malloc_mutex_unlock(arg->tsdn, gctx->lock); - return (NULL); + return NULL; } static void -prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) -{ +prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { prof_tdata_t *tdata = prof_tdata_get(tsd, false); prof_gctx_t *gctx; @@ -1196,7 +1228,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) */ while ((gctx = gctx_tree_first(gctxs)) != NULL) { gctx_tree_remove(gctxs, gctx); - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); { prof_tctx_t *next; @@ -1204,34 +1236,43 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) do { prof_tctx_t *to_destroy = tctx_tree_iter(&gctx->tctxs, next, - prof_tctx_finish_iter, NULL); + prof_tctx_finish_iter, + (void *)tsd_tsdn(tsd)); if (to_destroy != NULL) { next = tctx_tree_next(&gctx->tctxs, to_destroy); tctx_tree_remove(&gctx->tctxs, to_destroy); - idalloctm(tsd, to_destroy, - tcache_get(tsd, false), true); - } else + idalloctm(tsd_tsdn(tsd), to_destroy, + NULL, NULL, true, true); + } else { next = NULL; + } } while (next != NULL); } gctx->nlimbo--; if (prof_gctx_should_destroy(gctx)) { gctx->nlimbo++; - malloc_mutex_unlock(gctx->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); prof_gctx_try_destroy(tsd, tdata, gctx, tdata); - } else - malloc_mutex_unlock(gctx->lock); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + } } } +struct prof_tdata_merge_iter_arg_s { + tsdn_t *tsdn; + prof_cnt_t cnt_all; +}; + static prof_tdata_t * -prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) -{ - prof_cnt_t *cnt_all = (prof_cnt_t *)arg; +prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *opaque) { + struct prof_tdata_merge_iter_arg_s *arg = + (struct prof_tdata_merge_iter_arg_s *)opaque; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(arg->tsdn, tdata->lock); if (!tdata->expired) { size_t tabind; union { @@ -1242,29 +1283,32 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) tdata->dumping = true; memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, - &tctx.v);) - prof_tctx_merge_tdata(tctx.p, tdata); + &tctx.v);) { + prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); + } - cnt_all->curobjs += tdata->cnt_summed.curobjs; - cnt_all->curbytes += tdata->cnt_summed.curbytes; + arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; + arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; if (opt_prof_accum) { - cnt_all->accumobjs += 
tdata->cnt_summed.accumobjs; - cnt_all->accumbytes += tdata->cnt_summed.accumbytes; + arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; + arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; } - } else + } else { tdata->dumping = false; - malloc_mutex_unlock(tdata->lock); + } + malloc_mutex_unlock(arg->tsdn, tdata->lock); - return (NULL); + return NULL; } static prof_tdata_t * -prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) -{ +prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { bool propagate_err = *(bool *)arg; - if (!tdata->dumping) - return (NULL); + if (!tdata->dumping) { + return NULL; + } if (prof_dump_printf(propagate_err, " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", @@ -1272,48 +1316,42 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, tdata->cnt_summed.accumbytes, (tdata->thread_name != NULL) ? " " : "", - (tdata->thread_name != NULL) ? tdata->thread_name : "")) - return (tdata); - return (NULL); + (tdata->thread_name != NULL) ? tdata->thread_name : "")) { + return tdata; + } + return NULL; } -#ifdef JEMALLOC_JET -#undef prof_dump_header -#define prof_dump_header JEMALLOC_N(prof_dump_header_impl) -#endif static bool -prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all) -{ +prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, + const prof_cnt_t *cnt_all) { bool ret; if (prof_dump_printf(propagate_err, "heap_v2/%"FMTu64"\n" " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, - cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) - return (true); + cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) { + return true; + } - malloc_mutex_lock(&tdatas_mtx); + malloc_mutex_lock(tsdn, &tdatas_mtx); ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, (void *)&propagate_err) != NULL); - malloc_mutex_unlock(&tdatas_mtx); - return (ret); + malloc_mutex_unlock(tsdn, &tdatas_mtx); + return ret; } -#ifdef JEMALLOC_JET -#undef prof_dump_header -#define prof_dump_header JEMALLOC_N(prof_dump_header) -prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl); -#endif +prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl; -/* gctx->lock is held. */ static bool -prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt, - prof_gctx_tree_t *gctxs) -{ +prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, + const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { bool ret; unsigned i; + struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; cassert(config_prof); + malloc_mutex_assert_owner(tsdn, gctx->lock); /* Avoid dumping such gctx's that have no useful data. */ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || @@ -1347,21 +1385,23 @@ prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt, goto label_return; } + prof_tctx_dump_iter_arg.tsdn = tsdn; + prof_tctx_dump_iter_arg.propagate_err = propagate_err; if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, - (void *)&propagate_err) != NULL) { + (void *)&prof_tctx_dump_iter_arg) != NULL) { ret = true; goto label_return; } ret = false; label_return: - return (ret); + return ret; } +#ifndef _WIN32 JEMALLOC_FORMAT_PRINTF(1, 2) static int -prof_open_maps(const char *format, ...) -{ +prof_open_maps(const char *format, ...) 
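prof_dump_open and prof_dump_header lose their #ifdef JEMALLOC_JET rename dance in favor of the JET_MUTABLE function-pointer pattern: the default implementation stays static, and test builds swap the pointer. A sketch of that indirection (here JET_MUTABLE is assumed to expand to nothing under testing and to const otherwise):

#include <stdio.h>

/* In test builds the pointer stays writable; otherwise it is const. */
#ifdef TESTING
# define JET_MUTABLE
#else
# define JET_MUTABLE const
#endif

typedef int (dump_open_t)(const char *);

static int
dump_open_impl(const char *filename) {
    printf("open %s\n", filename);
    return 0;
}
dump_open_t *JET_MUTABLE dump_open = dump_open_impl;

int
main(void) {
    return dump_open("example.heap");
}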
{ int mfd; va_list ap; char filename[PATH_MAX + 1]; @@ -1369,27 +1409,47 @@ prof_open_maps(const char *format, ...) va_start(ap, format); malloc_vsnprintf(filename, sizeof(filename), format, ap); va_end(ap); + +#if defined(O_CLOEXEC) + mfd = open(filename, O_RDONLY | O_CLOEXEC); +#else mfd = open(filename, O_RDONLY); + if (mfd != -1) { + fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC); + } +#endif + + return mfd; +} +#endif - return (mfd); +static int +prof_getpid(void) { +#ifdef _WIN32 + return GetCurrentProcessId(); +#else + return getpid(); +#endif } static bool -prof_dump_maps(bool propagate_err) -{ +prof_dump_maps(bool propagate_err) { bool ret; int mfd; cassert(config_prof); #ifdef __FreeBSD__ mfd = prof_open_maps("/proc/curproc/map"); +#elif defined(_WIN32) + mfd = -1; // Not implemented #else { - int pid = getpid(); + int pid = prof_getpid(); mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); - if (mfd == -1) + if (mfd == -1) { mfd = prof_open_maps("/proc/%d/maps", pid); + } } #endif if (mfd != -1) { @@ -1411,8 +1471,9 @@ prof_dump_maps(bool propagate_err) goto label_return; } } - nread = read(mfd, &prof_dump_buf[prof_dump_buf_end], - PROF_DUMP_BUFSIZE - prof_dump_buf_end); + nread = malloc_read_fd(mfd, + &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE + - prof_dump_buf_end); } while (nread > 0); } else { ret = true; @@ -1421,152 +1482,263 @@ prof_dump_maps(bool propagate_err) ret = false; label_return: - if (mfd != -1) + if (mfd != -1) { close(mfd); - return (ret); + } + return ret; } +/* + * See prof_sample_threshold_update() comment for why the body of this function + * is conditionally compiled. + */ static void prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, - const char *filename) -{ - + const char *filename) { +#ifdef JEMALLOC_PROF + /* + * Scaling is equivalent AdjustSamples() in jeprof, but the result may + * differ slightly from what jeprof reports, because here we scale the + * summary values, whereas jeprof scales each context individually and + * reports the sums of the scaled values. + */ if (cnt_all->curbytes != 0) { - malloc_printf(": Leak summary: %"FMTu64" byte%s, %" - FMTu64" object%s, %zu context%s\n", - cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "", - cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "", - leak_ngctx, (leak_ngctx != 1) ? "s" : ""); + double sample_period = (double)((uint64_t)1 << lg_prof_sample); + double ratio = (((double)cnt_all->curbytes) / + (double)cnt_all->curobjs) / sample_period; + double scale_factor = 1.0 / (1.0 - exp(-ratio)); + uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) + * scale_factor); + uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * + scale_factor); + + malloc_printf(": Leak approximation summary: ~%"FMTu64 + " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", + curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != + 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? 
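prof_open_maps gains close-on-exec handling: open with O_RDONLY | O_CLOEXEC where the flag exists, otherwise fall back to setting FD_CLOEXEC with fcntl after the open. That fallback, isolated:

#include <fcntl.h>
#include <unistd.h>

static int
open_cloexec(const char *path) {
    int fd;
#if defined(O_CLOEXEC)
    fd = open(path, O_RDONLY | O_CLOEXEC);
#else
    /* Racy against a concurrent fork+exec, but the best pre-O_CLOEXEC option. */
    fd = open(path, O_RDONLY);
    if (fd != -1) {
        fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
    }
#endif
    return fd;
}

int
main(void) {
    int fd = open_cloexec("/proc/self/maps");
    if (fd != -1) {
        close(fd);
    }
    return 0;
}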
"s" : ""); malloc_printf( ": Run jeprof on \"%s\" for leak detail\n", filename); } +#endif } +struct prof_gctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + static prof_gctx_t * -prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg) -{ +prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { prof_gctx_t *ret; - bool propagate_err = *(bool *)arg; + struct prof_gctx_dump_iter_arg_s *arg = + (struct prof_gctx_dump_iter_arg_s *)opaque; - malloc_mutex_lock(gctx->lock); + malloc_mutex_lock(arg->tsdn, gctx->lock); - if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) { + if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, + gctxs)) { ret = gctx; goto label_return; } ret = NULL; label_return: - malloc_mutex_unlock(gctx->lock); - return (ret); + malloc_mutex_unlock(arg->tsdn, gctx->lock); + return ret; } -static bool -prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck) -{ - prof_tdata_t *tdata; - prof_cnt_t cnt_all; +static void +prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, + struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, + struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, + prof_gctx_tree_t *gctxs) { size_t tabind; union { prof_gctx_t *p; void *v; } gctx; - size_t leak_ngctx; - prof_gctx_tree_t gctxs; - - cassert(config_prof); - tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) - return (true); - - malloc_mutex_lock(&prof_dump_mtx); prof_enter(tsd, tdata); /* * Put gctx's in limbo and clear their counters in preparation for * summing. */ - gctx_tree_new(&gctxs); - for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) - prof_dump_gctx_prep(gctx.p, &gctxs); + gctx_tree_new(gctxs); + for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) { + prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs); + } /* * Iterate over tdatas, and for the non-expired ones snapshot their tctx * stats and merge them into the associated gctx's. */ - memset(&cnt_all, 0, sizeof(prof_cnt_t)); - malloc_mutex_lock(&tdatas_mtx); - tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all); - malloc_mutex_unlock(&tdatas_mtx); + prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd); + memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t)); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, + (void *)prof_tdata_merge_iter_arg); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); /* Merge tctx stats into gctx's. */ - leak_ngctx = 0; - gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx); + prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd); + prof_gctx_merge_iter_arg->leak_ngctx = 0; + gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter, + (void *)prof_gctx_merge_iter_arg); prof_leave(tsd, tdata); +} +static bool +prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck, prof_tdata_t *tdata, + struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, + struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, + struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg, + prof_gctx_tree_t *gctxs) { /* Create dump file. */ - if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) - goto label_open_close_error; + if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) { + return true; + } /* Dump profile header. 
*/ - if (prof_dump_header(propagate_err, &cnt_all)) + if (prof_dump_header(tsd_tsdn(tsd), propagate_err, + &prof_tdata_merge_iter_arg->cnt_all)) { goto label_write_error; + } /* Dump per gctx profile stats. */ - if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, - (void *)&propagate_err) != NULL) + prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd); + prof_gctx_dump_iter_arg->propagate_err = propagate_err; + if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter, + (void *)prof_gctx_dump_iter_arg) != NULL) { goto label_write_error; + } /* Dump /proc//maps if possible. */ - if (prof_dump_maps(propagate_err)) + if (prof_dump_maps(propagate_err)) { goto label_write_error; + } + + if (prof_dump_close(propagate_err)) { + return true; + } + + return false; +label_write_error: + prof_dump_close(propagate_err); + return true; +} - if (prof_dump_close(propagate_err)) - goto label_open_close_error; +static bool +prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck) { + cassert(config_prof); + assert(tsd_reentrancy_level_get(tsd) == 0); + prof_tdata_t * tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return true; + } + + pre_reentrancy(tsd, NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + + prof_gctx_tree_t gctxs; + struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg; + prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, + &prof_gctx_merge_iter_arg, &gctxs); + bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata, + &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg, + &prof_gctx_dump_iter_arg, &gctxs); prof_gctx_finish(tsd, &gctxs); - malloc_mutex_unlock(&prof_dump_mtx); - if (leakcheck) - prof_leakcheck(&cnt_all, leak_ngctx, filename); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); + post_reentrancy(tsd); - return (false); -label_write_error: - prof_dump_close(propagate_err); -label_open_close_error: + if (err) { + return true; + } + + if (leakcheck) { + prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all, + prof_gctx_merge_iter_arg.leak_ngctx, filename); + } + return false; +} + +#ifdef JEMALLOC_JET +void +prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, + uint64_t *accumbytes) { + tsd_t *tsd; + prof_tdata_t *tdata; + struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + prof_gctx_tree_t gctxs; + + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + if (curobjs != NULL) { + *curobjs = 0; + } + if (curbytes != NULL) { + *curbytes = 0; + } + if (accumobjs != NULL) { + *accumobjs = 0; + } + if (accumbytes != NULL) { + *accumbytes = 0; + } + return; + } + + prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, + &prof_gctx_merge_iter_arg, &gctxs); prof_gctx_finish(tsd, &gctxs); - malloc_mutex_unlock(&prof_dump_mtx); - return (true); + + if (curobjs != NULL) { + *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs; + } + if (curbytes != NULL) { + *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes; + } + if (accumobjs != NULL) { + *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs; + } + if (accumbytes != NULL) { + *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes; + } } +#endif -#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) -#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) +#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) +#define VSEQ_INVALID 
UINT64_C(0xffffffffffffffff) static void -prof_dump_filename(char *filename, char v, uint64_t vseq) -{ - +prof_dump_filename(char *filename, char v, uint64_t vseq) { cassert(config_prof); if (vseq != VSEQ_INVALID) { /* "...v.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"FMTu64".%c%"FMTu64".heap", - opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq); + opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); } else { /* "....heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, "%s.%d.%"FMTu64".%c.heap", - opt_prof_prefix, (int)getpid(), prof_dump_seq, v); + opt_prof_prefix, prof_getpid(), prof_dump_seq, v); } prof_dump_seq++; } static void -prof_fdump(void) -{ +prof_fdump(void) { tsd_t *tsd; char filename[DUMP_FILENAME_BUFSIZE]; @@ -1574,30 +1746,53 @@ prof_fdump(void) assert(opt_prof_final); assert(opt_prof_prefix[0] != '\0'); - if (!prof_booted) + if (!prof_booted) { return; + } tsd = tsd_fetch(); + assert(tsd_reentrancy_level_get(tsd) == 0); - malloc_mutex_lock(&prof_dump_seq_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename, 'f', VSEQ_INVALID); - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump(tsd, false, filename, opt_prof_leak); } +bool +prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) { + cassert(config_prof); + +#ifndef JEMALLOC_ATOMIC_U64 + if (malloc_mutex_init(&prof_accum->mtx, "prof_accum", + WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) { + return true; + } + prof_accum->accumbytes = 0; +#else + atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED); +#endif + return false; +} + void -prof_idump(void) -{ +prof_idump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); - if (!prof_booted) + if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { return; - tsd = tsd_fetch(); + } + tsd = tsdn_tsd(tsdn); + if (tsd_reentrancy_level_get(tsd) > 0) { + return; + } + tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) + if (tdata == NULL) { return; + } if (tdata->enq) { tdata->enq_idump = true; return; @@ -1605,53 +1800,56 @@ prof_idump(void) if (opt_prof_prefix[0] != '\0') { char filename[PATH_MAX + 1]; - malloc_mutex_lock(&prof_dump_seq_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename, 'i', prof_dump_iseq); prof_dump_iseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); } } bool -prof_mdump(const char *filename) -{ - tsd_t *tsd; - char filename_buf[DUMP_FILENAME_BUFSIZE]; - +prof_mdump(tsd_t *tsd, const char *filename) { cassert(config_prof); + assert(tsd_reentrancy_level_get(tsd) == 0); - if (!opt_prof || !prof_booted) - return (true); - tsd = tsd_fetch(); - + if (!opt_prof || !prof_booted) { + return true; + } + char filename_buf[DUMP_FILENAME_BUFSIZE]; if (filename == NULL) { /* No filename specified, so automatically generate one. 
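prof_accum_init above picks between a genuine 64-bit atomic and a mutex-protected counter depending on whether the platform defines JEMALLOC_ATOMIC_U64. A C11 sketch of the same compile-time fallback (HAVE_ATOMIC_U64 is an invented feature macro for the example):

#include <pthread.h>
#include <stdint.h>

#if !defined(__STDC_NO_ATOMICS__)
# define HAVE_ATOMIC_U64
# include <stdatomic.h>
#endif

typedef struct {
#ifdef HAVE_ATOMIC_U64
    _Atomic uint64_t accumbytes;
#else
    pthread_mutex_t mtx;
    uint64_t accumbytes;
#endif
} accum_t;

static int
accum_init(accum_t *a) {
#ifdef HAVE_ATOMIC_U64
    atomic_store_explicit(&a->accumbytes, 0, memory_order_relaxed);
    return 0;
#else
    if (pthread_mutex_init(&a->mtx, NULL) != 0) {
        return 1;    /* mirrors prof_accum_init returning true on error */
    }
    a->accumbytes = 0;
    return 0;
#endif
}

int
main(void) {
    accum_t a;
    return accum_init(&a);
}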
*/ - if (opt_prof_prefix[0] == '\0') - return (true); - malloc_mutex_lock(&prof_dump_seq_mtx); + if (opt_prof_prefix[0] == '\0') { + return true; + } + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename_buf, 'm', prof_dump_mseq); prof_dump_mseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); filename = filename_buf; } - return (prof_dump(tsd, true, filename, false)); + return prof_dump(tsd, true, filename, false); } void -prof_gdump(void) -{ +prof_gdump(tsdn_t *tsdn) { tsd_t *tsd; prof_tdata_t *tdata; cassert(config_prof); - if (!prof_booted) + if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) { return; - tsd = tsd_fetch(); + } + tsd = tsdn_tsd(tsdn); + if (tsd_reentrancy_level_get(tsd) > 0) { + return; + } + tdata = prof_tdata_get(tsd, false); - if (tdata == NULL) + if (tdata == NULL) { return; + } if (tdata->enq) { tdata->enq_gdump = true; return; @@ -1659,17 +1857,16 @@ prof_gdump(void) if (opt_prof_prefix[0] != '\0') { char filename[DUMP_FILENAME_BUFSIZE]; - malloc_mutex_lock(&prof_dump_seq_mtx); + malloc_mutex_lock(tsdn, &prof_dump_seq_mtx); prof_dump_filename(filename, 'u', prof_dump_useq); prof_dump_useq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx); prof_dump(tsd, false, filename, false); } } static void -prof_bt_hash(const void *key, size_t r_hash[2]) -{ +prof_bt_hash(const void *key, size_t r_hash[2]) { prof_bt_t *bt = (prof_bt_t *)key; cassert(config_prof); @@ -1678,46 +1875,44 @@ prof_bt_hash(const void *key, size_t r_hash[2]) } static bool -prof_bt_keycomp(const void *k1, const void *k2) -{ +prof_bt_keycomp(const void *k1, const void *k2) { const prof_bt_t *bt1 = (prof_bt_t *)k1; const prof_bt_t *bt2 = (prof_bt_t *)k2; cassert(config_prof); - if (bt1->len != bt2->len) - return (false); + if (bt1->len != bt2->len) { + return false; + } return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); } -JEMALLOC_INLINE_C uint64_t -prof_thr_uid_alloc(void) -{ +static uint64_t +prof_thr_uid_alloc(tsdn_t *tsdn) { uint64_t thr_uid; - malloc_mutex_lock(&next_thr_uid_mtx); + malloc_mutex_lock(tsdn, &next_thr_uid_mtx); thr_uid = next_thr_uid; next_thr_uid++; - malloc_mutex_unlock(&next_thr_uid_mtx); + malloc_mutex_unlock(tsdn, &next_thr_uid_mtx); - return (thr_uid); + return thr_uid; } static prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, - char *thread_name, bool active) -{ + char *thread_name, bool active) { prof_tdata_t *tdata; - tcache_t *tcache; cassert(config_prof); /* Initialize an empty cache for this thread. 
*/ - tcache = tcache_get(tsd, true); - tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false, - tcache, true, NULL); - if (tdata == NULL) - return (NULL); + tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), + sz_size2index(sizeof(prof_tdata_t)), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (tdata == NULL) { + return NULL; + } tdata->lock = prof_tdata_mutex_choose(thr_uid); tdata->thr_uid = thr_uid; @@ -1727,10 +1922,10 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, tdata->expired = false; tdata->tctx_uid_next = 0; - if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, - prof_bt_hash, prof_bt_keycomp)) { - idalloctm(tsd, tdata, tcache, true); - return (NULL); + if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) { + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); + return NULL; } tdata->prng_state = (uint64_t)(uintptr_t)tdata; @@ -1743,328 +1938,326 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, tdata->dumping = false; tdata->active = active; - malloc_mutex_lock(&tdatas_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); tdata_tree_insert(&tdatas, tdata); - malloc_mutex_unlock(&tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); - return (tdata); + return tdata; } prof_tdata_t * -prof_tdata_init(tsd_t *tsd) -{ +prof_tdata_init(tsd_t *tsd) { + return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, + NULL, prof_thread_active_init_get(tsd_tsdn(tsd))); +} - return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL, - prof_thread_active_init_get())); +static bool +prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { + if (tdata->attached && !even_if_attached) { + return false; + } + if (ckh_count(&tdata->bt2tctx) != 0) { + return false; + } + return true; } -/* tdata->lock must be held. */ static bool -prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached) -{ +prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached) { + malloc_mutex_assert_owner(tsdn, tdata->lock); - if (tdata->attached && !even_if_attached) - return (false); - if (ckh_count(&tdata->bt2tctx) != 0) - return (false); - return (true); + return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); } -/* tdatas_mtx must be held. 
*/ static void prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, - bool even_if_attached) -{ - tcache_t *tcache; - - assert(prof_tdata_should_destroy(tdata, even_if_attached)); - assert(tsd_prof_tdata_get(tsd) != tdata); + bool even_if_attached) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); tdata_tree_remove(&tdatas, tdata); - tcache = tcache_get(tsd, false); - if (tdata->thread_name != NULL) - idalloctm(tsd, tdata->thread_name, tcache, true); + assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); + + if (tdata->thread_name != NULL) { + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, + true); + } ckh_delete(tsd, &tdata->bt2tctx); - idalloctm(tsd, tdata, tcache, true); + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); } static void -prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) -{ - - malloc_mutex_lock(&tdatas_mtx); +prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); prof_tdata_destroy_locked(tsd, tdata, even_if_attached); - malloc_mutex_unlock(&tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); } static void -prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) -{ +prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { bool destroy_tdata; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); if (tdata->attached) { - destroy_tdata = prof_tdata_should_destroy(tdata, true); + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, + true); /* * Only detach if !destroy_tdata, because detaching would allow * another thread to win the race to destroy tdata. */ - if (!destroy_tdata) + if (!destroy_tdata) { tdata->attached = false; + } tsd_prof_tdata_set(tsd, NULL); - } else + } else { destroy_tdata = false; - malloc_mutex_unlock(tdata->lock); - if (destroy_tdata) + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (destroy_tdata) { prof_tdata_destroy(tsd, tdata, true); + } } prof_tdata_t * -prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) -{ +prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { uint64_t thr_uid = tdata->thr_uid; uint64_t thr_discrim = tdata->thr_discrim + 1; char *thread_name = (tdata->thread_name != NULL) ? - prof_thread_name_alloc(tsd, tdata->thread_name) : NULL; + prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL; bool active = tdata->active; prof_tdata_detach(tsd, tdata); - return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, - active)); + return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, + active); } static bool -prof_tdata_expire(prof_tdata_t *tdata) -{ +prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { bool destroy_tdata; - malloc_mutex_lock(tdata->lock); + malloc_mutex_lock(tsdn, tdata->lock); if (!tdata->expired) { tdata->expired = true; destroy_tdata = tdata->attached ? false : - prof_tdata_should_destroy(tdata, false); - } else + prof_tdata_should_destroy(tsdn, tdata, false); + } else { destroy_tdata = false; - malloc_mutex_unlock(tdata->lock); + } + malloc_mutex_unlock(tsdn, tdata->lock); - return (destroy_tdata); + return destroy_tdata; } static prof_tdata_t * -prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg) -{ +prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; - return (prof_tdata_expire(tdata) ? tdata : NULL); + return (prof_tdata_expire(tsdn, tdata) ? 
tdata : NULL); } void -prof_reset(tsd_t *tsd, size_t lg_sample) -{ +prof_reset(tsd_t *tsd, size_t lg_sample) { prof_tdata_t *next; assert(lg_sample < (sizeof(uint64_t) << 3)); - malloc_mutex_lock(&prof_dump_mtx); - malloc_mutex_lock(&tdatas_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); lg_prof_sample = lg_sample; next = NULL; do { prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, - prof_tdata_reset_iter, NULL); + prof_tdata_reset_iter, (void *)tsd); if (to_destroy != NULL) { next = tdata_tree_next(&tdatas, to_destroy); prof_tdata_destroy_locked(tsd, to_destroy, false); - } else + } else { next = NULL; + } } while (next != NULL); - malloc_mutex_unlock(&tdatas_mtx); - malloc_mutex_unlock(&prof_dump_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); } void -prof_tdata_cleanup(tsd_t *tsd) -{ +prof_tdata_cleanup(tsd_t *tsd) { prof_tdata_t *tdata; - if (!config_prof) + if (!config_prof) { return; + } tdata = tsd_prof_tdata_get(tsd); - if (tdata != NULL) + if (tdata != NULL) { prof_tdata_detach(tsd, tdata); + } } bool -prof_active_get(void) -{ +prof_active_get(tsdn_t *tsdn) { bool prof_active_current; - malloc_mutex_lock(&prof_active_mtx); + malloc_mutex_lock(tsdn, &prof_active_mtx); prof_active_current = prof_active; - malloc_mutex_unlock(&prof_active_mtx); - return (prof_active_current); + malloc_mutex_unlock(tsdn, &prof_active_mtx); + return prof_active_current; } bool -prof_active_set(bool active) -{ +prof_active_set(tsdn_t *tsdn, bool active) { bool prof_active_old; - malloc_mutex_lock(&prof_active_mtx); + malloc_mutex_lock(tsdn, &prof_active_mtx); prof_active_old = prof_active; prof_active = active; - malloc_mutex_unlock(&prof_active_mtx); - return (prof_active_old); + malloc_mutex_unlock(tsdn, &prof_active_mtx); + return prof_active_old; } const char * -prof_thread_name_get(void) -{ - tsd_t *tsd; +prof_thread_name_get(tsd_t *tsd) { prof_tdata_t *tdata; - tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) - return (""); + if (tdata == NULL) { + return ""; + } return (tdata->thread_name != NULL ? tdata->thread_name : ""); } static char * -prof_thread_name_alloc(tsd_t *tsd, const char *thread_name) -{ +prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { char *ret; size_t size; - if (thread_name == NULL) - return (NULL); + if (thread_name == NULL) { + return NULL; + } size = strlen(thread_name) + 1; - if (size == 1) - return (""); + if (size == 1) { + return ""; + } - ret = iallocztm(tsd, size, false, tcache_get(tsd, true), true, NULL); - if (ret == NULL) - return (NULL); + ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (ret == NULL) { + return NULL; + } memcpy(ret, thread_name, size); - return (ret); + return ret; } int -prof_thread_name_set(tsd_t *tsd, const char *thread_name) -{ +prof_thread_name_set(tsd_t *tsd, const char *thread_name) { prof_tdata_t *tdata; unsigned i; char *s; tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) - return (EAGAIN); + if (tdata == NULL) { + return EAGAIN; + } /* Validate input. 
*/ - if (thread_name == NULL) - return (EFAULT); + if (thread_name == NULL) { + return EFAULT; + } for (i = 0; thread_name[i] != '\0'; i++) { char c = thread_name[i]; - if (!isgraph(c) && !isblank(c)) - return (EFAULT); + if (!isgraph(c) && !isblank(c)) { + return EFAULT; + } } - s = prof_thread_name_alloc(tsd, thread_name); - if (s == NULL) - return (EAGAIN); + s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name); + if (s == NULL) { + return EAGAIN; + } if (tdata->thread_name != NULL) { - idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false), + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, true); tdata->thread_name = NULL; } - if (strlen(s) > 0) + if (strlen(s) > 0) { tdata->thread_name = s; - return (0); + } + return 0; } bool -prof_thread_active_get(void) -{ - tsd_t *tsd; +prof_thread_active_get(tsd_t *tsd) { prof_tdata_t *tdata; - tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) - return (false); - return (tdata->active); + if (tdata == NULL) { + return false; + } + return tdata->active; } bool -prof_thread_active_set(bool active) -{ - tsd_t *tsd; +prof_thread_active_set(tsd_t *tsd, bool active) { prof_tdata_t *tdata; - tsd = tsd_fetch(); tdata = prof_tdata_get(tsd, true); - if (tdata == NULL) - return (true); + if (tdata == NULL) { + return true; + } tdata->active = active; - return (false); + return false; } bool -prof_thread_active_init_get(void) -{ +prof_thread_active_init_get(tsdn_t *tsdn) { bool active_init; - malloc_mutex_lock(&prof_thread_active_init_mtx); + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); active_init = prof_thread_active_init; - malloc_mutex_unlock(&prof_thread_active_init_mtx); - return (active_init); + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); + return active_init; } bool -prof_thread_active_init_set(bool active_init) -{ +prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) { bool active_init_old; - malloc_mutex_lock(&prof_thread_active_init_mtx); + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); active_init_old = prof_thread_active_init; prof_thread_active_init = active_init; - malloc_mutex_unlock(&prof_thread_active_init_mtx); - return (active_init_old); + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); + return active_init_old; } bool -prof_gdump_get(void) -{ +prof_gdump_get(tsdn_t *tsdn) { bool prof_gdump_current; - malloc_mutex_lock(&prof_gdump_mtx); + malloc_mutex_lock(tsdn, &prof_gdump_mtx); prof_gdump_current = prof_gdump_val; - malloc_mutex_unlock(&prof_gdump_mtx); - return (prof_gdump_current); + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); + return prof_gdump_current; } bool -prof_gdump_set(bool gdump) -{ +prof_gdump_set(tsdn_t *tsdn, bool gdump) { bool prof_gdump_old; - malloc_mutex_lock(&prof_gdump_mtx); + malloc_mutex_lock(tsdn, &prof_gdump_mtx); prof_gdump_old = prof_gdump_val; prof_gdump_val = gdump; - malloc_mutex_unlock(&prof_gdump_mtx); - return (prof_gdump_old); + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); + return prof_gdump_old; } void -prof_boot0(void) -{ - +prof_boot0(void) { cassert(config_prof); memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, @@ -2072,9 +2265,7 @@ prof_boot0(void) } void -prof_boot1(void) -{ - +prof_boot1(void) { cassert(config_prof); /* @@ -2098,72 +2289,98 @@ prof_boot1(void) } bool -prof_boot2(void) -{ - +prof_boot2(tsd_t *tsd) { cassert(config_prof); if (opt_prof) { - tsd_t *tsd; unsigned i; lg_prof_sample = opt_lg_prof_sample; prof_active = opt_prof_active; - if (malloc_mutex_init(&prof_active_mtx)) - return (true); + if 
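Every mutex created in prof_boot2 now carries a name and a WITNESS_RANK_* value, which lets debug builds verify that locks are always acquired in increasing rank order. A toy version of rank checking with a thread-local high-water mark (the ranks and names are invented; real witnesses keep a stack of held locks rather than a single value):

#include <assert.h>
#include <pthread.h>

typedef struct {
    pthread_mutex_t mtx;
    unsigned rank;
} ranked_mutex_t;

static _Thread_local unsigned held_rank;  /* highest rank currently held */

static void
ranked_lock(ranked_mutex_t *m) {
    /* Acquiring a lower-or-equal rank after a higher one risks deadlock. */
    assert(m->rank > held_rank);
    pthread_mutex_lock(&m->mtx);
    held_rank = m->rank;
}

static void
ranked_unlock(ranked_mutex_t *m) {
    held_rank = 0;    /* simplification; see the note above */
    pthread_mutex_unlock(&m->mtx);
}

int
main(void) {
    static ranked_mutex_t a = {PTHREAD_MUTEX_INITIALIZER, 1};
    static ranked_mutex_t b = {PTHREAD_MUTEX_INITIALIZER, 2};
    ranked_lock(&a);
    ranked_lock(&b);    /* OK: rank increases */
    ranked_unlock(&b);
    ranked_unlock(&a);
    return 0;
}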
(malloc_mutex_init(&prof_active_mtx, "prof_active", + WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) { + return true; + } prof_gdump_val = opt_prof_gdump; - if (malloc_mutex_init(&prof_gdump_mtx)) - return (true); + if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", + WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) { + return true; + } prof_thread_active_init = opt_prof_thread_active_init; - if (malloc_mutex_init(&prof_thread_active_init_mtx)) - return (true); + if (malloc_mutex_init(&prof_thread_active_init_mtx, + "prof_thread_active_init", + WITNESS_RANK_PROF_THREAD_ACTIVE_INIT, + malloc_mutex_rank_exclusive)) { + return true; + } - tsd = tsd_fetch(); if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, - prof_bt_keycomp)) - return (true); - if (malloc_mutex_init(&bt2gctx_mtx)) - return (true); + prof_bt_keycomp)) { + return true; + } + if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", + WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) { + return true; + } tdata_tree_new(&tdatas); - if (malloc_mutex_init(&tdatas_mtx)) - return (true); + if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", + WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) { + return true; + } next_thr_uid = 0; - if (malloc_mutex_init(&next_thr_uid_mtx)) - return (true); + if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid", + WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) { + return true; + } - if (malloc_mutex_init(&prof_dump_seq_mtx)) - return (true); - if (malloc_mutex_init(&prof_dump_mtx)) - return (true); + if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq", + WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) { + return true; + } + if (malloc_mutex_init(&prof_dump_mtx, "prof_dump", + WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) { + return true; + } if (opt_prof_final && opt_prof_prefix[0] != '\0' && atexit(prof_fdump) != 0) { malloc_write(": Error in atexit()\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } - gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS * - sizeof(malloc_mutex_t)); - if (gctx_locks == NULL) - return (true); + gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), + b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), + CACHELINE); + if (gctx_locks == NULL) { + return true; + } for (i = 0; i < PROF_NCTX_LOCKS; i++) { - if (malloc_mutex_init(&gctx_locks[i])) - return (true); + if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", + WITNESS_RANK_PROF_GCTX, + malloc_mutex_rank_exclusive)) { + return true; + } } - tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS * - sizeof(malloc_mutex_t)); - if (tdata_locks == NULL) - return (true); + tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), + b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), + CACHELINE); + if (tdata_locks == NULL) { + return true; + } for (i = 0; i < PROF_NTDATA_LOCKS; i++) { - if (malloc_mutex_init(&tdata_locks[i])) - return (true); + if (malloc_mutex_init(&tdata_locks[i], "prof_tdata", + WITNESS_RANK_PROF_TDATA, + malloc_mutex_rank_exclusive)) { + return true; + } } } @@ -2177,60 +2394,79 @@ prof_boot2(void) prof_booted = true; - return (false); + return false; } void -prof_prefork(void) -{ - - if (opt_prof) { +prof_prefork0(tsdn_t *tsdn) { + if (config_prof && opt_prof) { unsigned i; - malloc_mutex_prefork(&tdatas_mtx); - malloc_mutex_prefork(&bt2gctx_mtx); - malloc_mutex_prefork(&next_thr_uid_mtx); - malloc_mutex_prefork(&prof_dump_seq_mtx); - for (i = 0; i < PROF_NCTX_LOCKS; i++) - 
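gctx_locks and tdata_locks are now carved out of the b0 base allocator with explicit CACHELINE alignment, so the lock arrays start on their own cache line instead of sharing one with unrelated data. A similar effect with standard C11 (the CACHELINE value of 64 is an assumption for the example):

#include <pthread.h>
#include <stdlib.h>

#define CACHELINE 64
#define NLOCKS 8

int
main(void) {
    /* aligned_alloc requires the size to be a multiple of the alignment. */
    size_t size = NLOCKS * sizeof(pthread_mutex_t);
    size = (size + CACHELINE - 1) / CACHELINE * CACHELINE;
    pthread_mutex_t *locks = aligned_alloc(CACHELINE, size);
    if (locks == NULL) {
        return 1;
    }
    for (unsigned i = 0; i < NLOCKS; i++) {
        pthread_mutex_init(&locks[i], NULL);
    }
    free(locks);
    return 0;
}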
malloc_mutex_prefork(&gctx_locks[i]); - for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_prefork(&tdata_locks[i]); + malloc_mutex_prefork(tsdn, &prof_dump_mtx); + malloc_mutex_prefork(tsdn, &bt2gctx_mtx); + malloc_mutex_prefork(tsdn, &tdatas_mtx); + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + malloc_mutex_prefork(tsdn, &tdata_locks[i]); + } + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + malloc_mutex_prefork(tsdn, &gctx_locks[i]); + } } } void -prof_postfork_parent(void) -{ +prof_prefork1(tsdn_t *tsdn) { + if (config_prof && opt_prof) { + malloc_mutex_prefork(tsdn, &prof_active_mtx); + malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx); + malloc_mutex_prefork(tsdn, &prof_gdump_mtx); + malloc_mutex_prefork(tsdn, &next_thr_uid_mtx); + malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx); + } +} - if (opt_prof) { +void +prof_postfork_parent(tsdn_t *tsdn) { + if (config_prof && opt_prof) { unsigned i; - for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_postfork_parent(&tdata_locks[i]); - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_parent(&gctx_locks[i]); - malloc_mutex_postfork_parent(&prof_dump_seq_mtx); - malloc_mutex_postfork_parent(&next_thr_uid_mtx); - malloc_mutex_postfork_parent(&bt2gctx_mtx); - malloc_mutex_postfork_parent(&tdatas_mtx); + malloc_mutex_postfork_parent(tsdn, + &prof_thread_active_init_mtx); + malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_active_mtx); + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]); + } + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]); + } + malloc_mutex_postfork_parent(tsdn, &tdatas_mtx); + malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx); } } void -prof_postfork_child(void) -{ - - if (opt_prof) { +prof_postfork_child(tsdn_t *tsdn) { + if (config_prof && opt_prof) { unsigned i; - for (i = 0; i < PROF_NTDATA_LOCKS; i++) - malloc_mutex_postfork_child(&tdata_locks[i]); - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_child(&gctx_locks[i]); - malloc_mutex_postfork_child(&prof_dump_seq_mtx); - malloc_mutex_postfork_child(&next_thr_uid_mtx); - malloc_mutex_postfork_child(&bt2gctx_mtx); - malloc_mutex_postfork_child(&tdatas_mtx); + malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx); + malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_child(tsdn, &prof_active_mtx); + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + malloc_mutex_postfork_child(tsdn, &gctx_locks[i]); + } + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + malloc_mutex_postfork_child(tsdn, &tdata_locks[i]); + } + malloc_mutex_postfork_child(tsdn, &tdatas_mtx); + malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_mtx); } } diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/quarantine.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/quarantine.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/quarantine.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/quarantine.c diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/rtree.c 
b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/rtree.c new file mode 100644 index 0000000..53702cf --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/rtree.c @@ -0,0 +1,320 @@ +#define JEMALLOC_RTREE_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/mutex.h" + +/* + * Only the most significant bits of keys passed to rtree_{read,write}() are + * used. + */ +bool +rtree_new(rtree_t *rtree, bool zeroed) { +#ifdef JEMALLOC_JET + if (!zeroed) { + memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */ + } +#else + assert(zeroed); +#endif + + if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE, + malloc_mutex_rank_exclusive)) { + return true; + } + + return false; +} + +static rtree_node_elm_t * +rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms * + sizeof(rtree_node_elm_t), CACHELINE); +} +rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl; + +static void +rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) { + /* Nodes are never deleted during normal operation. */ + not_reached(); +} +UNUSED rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc = + rtree_node_dalloc_impl; + +static rtree_leaf_elm_t * +rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms * + sizeof(rtree_leaf_elm_t), CACHELINE); +} +rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl; + +static void +rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) { + /* Leaves are never deleted during normal operation. */ + not_reached(); +} +UNUSED rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc = + rtree_leaf_dalloc_impl; + +#ifdef JEMALLOC_JET +# if RTREE_HEIGHT > 1 +static void +rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree, + unsigned level) { + size_t nchildren = ZU(1) << rtree_levels[level].bits; + if (level + 2 < RTREE_HEIGHT) { + for (size_t i = 0; i < nchildren; i++) { + rtree_node_elm_t *node = + (rtree_node_elm_t *)atomic_load_p(&subtree[i].child, + ATOMIC_RELAXED); + if (node != NULL) { + rtree_delete_subtree(tsdn, rtree, node, level + + 1); + } + } + } else { + for (size_t i = 0; i < nchildren; i++) { + rtree_leaf_elm_t *leaf = + (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child, + ATOMIC_RELAXED); + if (leaf != NULL) { + rtree_leaf_dalloc(tsdn, rtree, leaf); + } + } + } + + if (subtree != rtree->root) { + rtree_node_dalloc(tsdn, rtree, subtree); + } +} +# endif + +void +rtree_delete(tsdn_t *tsdn, rtree_t *rtree) { +# if RTREE_HEIGHT > 1 + rtree_delete_subtree(tsdn, rtree, rtree->root, 0); +# endif +} +#endif + +static rtree_node_elm_t * +rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, + atomic_p_t *elmp) { + malloc_mutex_lock(tsdn, &rtree->init_lock); + /* + * If *elmp is non-null, then it was initialized with the init lock + * held, so we can get by with 'relaxed' here. + */ + rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED); + if (node == NULL) { + node = rtree_node_alloc(tsdn, rtree, ZU(1) << + rtree_levels[level].bits); + if (node == NULL) { + malloc_mutex_unlock(tsdn, &rtree->init_lock); + return NULL; + } + /* + * Even though we hold the lock, a later reader might not; we + * need release semantics. 
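 * [Editorial sketch] This is double-checked initialization: a relaxed
 * load is safe under the init lock, and the release store guarantees
 * that lock-free readers performing an acquire load observe a fully
 * written node. A self-contained C11 rendering of the same shape
 * (illustrative; the lock is elided here, and it is exactly that lock
 * in the real code that prevents two threads from both allocating):
 *
 *     #include <stdatomic.h>
 *
 *     static _Atomic(void *) slot;
 *
 *     void *get_or_init(void *(*make)(void)) {
 *         void *p = atomic_load_explicit(&slot, memory_order_acquire);
 *         if (p == NULL) {
 *             p = make();
 *             atomic_store_explicit(&slot, p, memory_order_release);
 *         }
 *         return p;
 *     }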
+ */ + atomic_store_p(elmp, node, ATOMIC_RELEASE); + } + malloc_mutex_unlock(tsdn, &rtree->init_lock); + + return node; +} + +static rtree_leaf_elm_t * +rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) { + malloc_mutex_lock(tsdn, &rtree->init_lock); + /* + * If *elmp is non-null, then it was initialized with the init lock + * held, so we can get by with 'relaxed' here. + */ + rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED); + if (leaf == NULL) { + leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) << + rtree_levels[RTREE_HEIGHT-1].bits); + if (leaf == NULL) { + malloc_mutex_unlock(tsdn, &rtree->init_lock); + return NULL; + } + /* + * Even though we hold the lock, a later reader might not; we + * need release semantics. + */ + atomic_store_p(elmp, leaf, ATOMIC_RELEASE); + } + malloc_mutex_unlock(tsdn, &rtree->init_lock); + + return leaf; +} + +static bool +rtree_node_valid(rtree_node_elm_t *node) { + return ((uintptr_t)node != (uintptr_t)0); +} + +static bool +rtree_leaf_valid(rtree_leaf_elm_t *leaf) { + return ((uintptr_t)leaf != (uintptr_t)0); +} + +static rtree_node_elm_t * +rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) { + rtree_node_elm_t *node; + + if (dependent) { + node = (rtree_node_elm_t *)atomic_load_p(&elm->child, + ATOMIC_RELAXED); + } else { + node = (rtree_node_elm_t *)atomic_load_p(&elm->child, + ATOMIC_ACQUIRE); + } + + assert(!dependent || node != NULL); + return node; +} + +static rtree_node_elm_t * +rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, + unsigned level, bool dependent) { + rtree_node_elm_t *node; + + node = rtree_child_node_tryread(elm, dependent); + if (!dependent && unlikely(!rtree_node_valid(node))) { + node = rtree_node_init(tsdn, rtree, level + 1, &elm->child); + } + assert(!dependent || node != NULL); + return node; +} + +static rtree_leaf_elm_t * +rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) { + rtree_leaf_elm_t *leaf; + + if (dependent) { + leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, + ATOMIC_RELAXED); + } else { + leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, + ATOMIC_ACQUIRE); + } + + assert(!dependent || leaf != NULL); + return leaf; +} + +static rtree_leaf_elm_t * +rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, + unsigned level, bool dependent) { + rtree_leaf_elm_t *leaf; + + leaf = rtree_child_leaf_tryread(elm, dependent); + if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { + leaf = rtree_leaf_init(tsdn, rtree, &elm->child); + } + assert(!dependent || leaf != NULL); + return leaf; +} + +rtree_leaf_elm_t * +rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, bool init_missing) { + rtree_node_elm_t *node; + rtree_leaf_elm_t *leaf; +#if RTREE_HEIGHT > 1 + node = rtree->root; +#else + leaf = rtree->root; +#endif + + if (config_debug) { + uintptr_t leafkey = rtree_leafkey(key); + for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) { + assert(rtree_ctx->cache[i].leafkey != leafkey); + } + for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) { + assert(rtree_ctx->l2_cache[i].leafkey != leafkey); + } + } + +#define RTREE_GET_CHILD(level) { \ + assert(level < RTREE_HEIGHT-1); \ + if (level != 0 && !dependent && \ + unlikely(!rtree_node_valid(node))) { \ + return NULL; \ + } \ + uintptr_t subkey = rtree_subkey(key, level); \ + if (level + 2 < RTREE_HEIGHT) { \ + node = init_missing ? 
\ + rtree_child_node_read(tsdn, rtree, \ + &node[subkey], level, dependent) : \ + rtree_child_node_tryread(&node[subkey], \ + dependent); \ + } else { \ + leaf = init_missing ? \ + rtree_child_leaf_read(tsdn, rtree, \ + &node[subkey], level, dependent) : \ + rtree_child_leaf_tryread(&node[subkey], \ + dependent); \ + } \ + } + /* + * Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss): + * (1) evict last entry in L2 cache; (2) move the collision slot from L1 + * cache down to L2; and 3) fill L1. + */ +#define RTREE_GET_LEAF(level) { \ + assert(level == RTREE_HEIGHT-1); \ + if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \ + return NULL; \ + } \ + if (RTREE_CTX_NCACHE_L2 > 1) { \ + memmove(&rtree_ctx->l2_cache[1], \ + &rtree_ctx->l2_cache[0], \ + sizeof(rtree_ctx_cache_elm_t) * \ + (RTREE_CTX_NCACHE_L2 - 1)); \ + } \ + size_t slot = rtree_cache_direct_map(key); \ + rtree_ctx->l2_cache[0].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[0].leaf = \ + rtree_ctx->cache[slot].leaf; \ + uintptr_t leafkey = rtree_leafkey(key); \ + rtree_ctx->cache[slot].leafkey = leafkey; \ + rtree_ctx->cache[slot].leaf = leaf; \ + uintptr_t subkey = rtree_subkey(key, level); \ + return &leaf[subkey]; \ + } + if (RTREE_HEIGHT > 1) { + RTREE_GET_CHILD(0) + } + if (RTREE_HEIGHT > 2) { + RTREE_GET_CHILD(1) + } + if (RTREE_HEIGHT > 3) { + for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) { + RTREE_GET_CHILD(i) + } + } + RTREE_GET_LEAF(RTREE_HEIGHT-1) +#undef RTREE_GET_CHILD +#undef RTREE_GET_LEAF + not_reached(); +} + +void +rtree_ctx_data_init(rtree_ctx_t *ctx) { + for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) { + rtree_ctx_cache_elm_t *cache = &ctx->cache[i]; + cache->leafkey = RTREE_LEAFKEY_INVALID; + cache->leaf = NULL; + } + for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) { + rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i]; + cache->leafkey = RTREE_LEAFKEY_INVALID; + cache->leaf = NULL; + } +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/stats.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/stats.c new file mode 100644 index 0000000..08b9507 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/stats.c @@ -0,0 +1,1286 @@ +#define JEMALLOC_STATS_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/emitter.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_prof.h" + +const char *global_mutex_names[mutex_prof_num_global_mutexes] = { +#define OP(mtx) #mtx, + MUTEX_PROF_GLOBAL_MUTEXES +#undef OP +}; + +const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = { +#define OP(mtx) #mtx, + MUTEX_PROF_ARENA_MUTEXES +#undef OP +}; + +#define CTL_GET(n, v, t) do { \ + size_t sz = sizeof(t); \ + xmallctl(n, (void *)v, &sz, NULL, 0); \ +} while (0) + +#define CTL_M2_GET(n, i, v, t) do { \ + size_t mib[CTL_MAX_DEPTH]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = (i); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ +} while (0) + +#define CTL_M2_M4_GET(n, i, j, v, t) do { \ + size_t mib[CTL_MAX_DEPTH]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = (i); \ + mib[4] = (j); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ +} while (0) + 
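/*
 * [Editorial sketch] The CTL_* macros above wrap jemalloc's MIB
 * interface: translate a dotted name once with mallctlnametomib(),
 * patch in the numeric index components, then fetch the value with
 * mallctlbymib(). Equivalent application-level usage of the public
 * API (error handling omitted):
 *
 *     #include <jemalloc/jemalloc.h>
 *
 *     size_t mib[4];
 *     size_t miblen = sizeof(mib) / sizeof(size_t);
 *     size_t pactive, sz = sizeof(pactive);
 *     mallctlnametomib("stats.arenas.0.pactive", mib, &miblen);
 *     mib[2] = 0;  // arena index, like CTL_M2_GET's 'i'
 *     mallctlbymib(mib, miblen, &pactive, &sz, NULL, 0);
 */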
+/******************************************************************************/ +/* Data. */ + +bool opt_stats_print = false; +char opt_stats_print_opts[stats_print_tot_num_options+1] = ""; + +/******************************************************************************/ + +/* Calculate x.yyy and output a string (takes a fixed sized char array). */ +static bool +get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) { + if (divisor == 0 || dividend > divisor) { + /* The rate is not supposed to be greater than 1. */ + return true; + } + if (dividend > 0) { + assert(UINT64_MAX / dividend >= 1000); + } + + unsigned n = (unsigned)((dividend * 1000) / divisor); + if (n < 10) { + malloc_snprintf(str, 6, "0.00%u", n); + } else if (n < 100) { + malloc_snprintf(str, 6, "0.0%u", n); + } else if (n < 1000) { + malloc_snprintf(str, 6, "0.%u", n); + } else { + malloc_snprintf(str, 6, "1"); + } + + return false; +} + +#define MUTEX_CTL_STR_MAX_LENGTH 128 +static void +gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix, + const char *mutex, const char *counter) { + malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter); +} + +static void +mutex_stats_init_cols(emitter_row_t *row, const char *table_name, + emitter_col_t *name, + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { + mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0; + mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0; + + emitter_col_t *col; + + if (name != NULL) { + emitter_col_init(name, row); + name->justify = emitter_justify_left; + name->width = 21; + name->type = emitter_type_title; + name->str_val = table_name; + } + +#define WIDTH_uint32_t 12 +#define WIDTH_uint64_t 16 +#define OP(counter, counter_type, human) \ + col = &col_##counter_type[k_##counter_type]; \ + ++k_##counter_type; \ + emitter_col_init(col, row); \ + col->justify = emitter_justify_right; \ + col->width = WIDTH_##counter_type; \ + col->type = emitter_type_title; \ + col->str_val = human; + MUTEX_PROF_COUNTERS +#undef OP +#undef WIDTH_uint32_t +#undef WIDTH_uint64_t +} + +static void +mutex_stats_read_global(const char *name, emitter_col_t *col_name, + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { + char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + + col_name->str_val = name; + + emitter_col_t *dst; +#define EMITTER_TYPE_uint32_t emitter_type_uint32 +#define EMITTER_TYPE_uint64_t emitter_type_uint64 +#define OP(counter, counter_type, human) \ + dst = &col_##counter_type[mutex_counter_##counter]; \ + dst->type = EMITTER_TYPE_##counter_type; \ + gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ + "mutexes", name, #counter); \ + CTL_GET(cmd, (counter_type *)&dst->bool_val, counter_type); + MUTEX_PROF_COUNTERS +#undef OP +#undef EMITTER_TYPE_uint32_t +#undef EMITTER_TYPE_uint64_t +} + +static void +mutex_stats_read_arena(unsigned arena_ind, mutex_prof_arena_ind_t mutex_ind, + const char *name, emitter_col_t *col_name, + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { + char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + + col_name->str_val = name; + + emitter_col_t *dst; +#define EMITTER_TYPE_uint32_t emitter_type_uint32 +#define EMITTER_TYPE_uint64_t emitter_type_uint64 +#define OP(counter, counter_type, human) \ + dst = &col_##counter_type[mutex_counter_##counter]; \ + dst->type = EMITTER_TYPE_##counter_type; \ + 
gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ + "arenas.0.mutexes", arena_mutex_names[mutex_ind], #counter);\ + CTL_M2_GET(cmd, arena_ind, \ + (counter_type *)&dst->bool_val, counter_type); + MUTEX_PROF_COUNTERS +#undef OP +#undef EMITTER_TYPE_uint32_t +#undef EMITTER_TYPE_uint64_t +} + +static void +mutex_stats_read_arena_bin(unsigned arena_ind, unsigned bin_ind, + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { + char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + emitter_col_t *dst; + +#define EMITTER_TYPE_uint32_t emitter_type_uint32 +#define EMITTER_TYPE_uint64_t emitter_type_uint64 +#define OP(counter, counter_type, human) \ + dst = &col_##counter_type[mutex_counter_##counter]; \ + dst->type = EMITTER_TYPE_##counter_type; \ + gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ + "arenas.0.bins.0","mutex", #counter); \ + CTL_M2_M4_GET(cmd, arena_ind, bin_ind, \ + (counter_type *)&dst->bool_val, counter_type); + MUTEX_PROF_COUNTERS +#undef OP +#undef EMITTER_TYPE_uint32_t +#undef EMITTER_TYPE_uint64_t +} + +/* "row" can be NULL to avoid emitting in table mode. */ +static void +mutex_stats_emit(emitter_t *emitter, emitter_row_t *row, + emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters], + emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters]) { + if (row != NULL) { + emitter_table_row(emitter, row); + } + + mutex_prof_uint64_t_counter_ind_t k_uint64_t = 0; + mutex_prof_uint32_t_counter_ind_t k_uint32_t = 0; + + emitter_col_t *col; + +#define EMITTER_TYPE_uint32_t emitter_type_uint32 +#define EMITTER_TYPE_uint64_t emitter_type_uint64 +#define OP(counter, type, human) \ + col = &col_##type[k_##type]; \ + ++k_##type; \ + emitter_json_kv(emitter, #counter, EMITTER_TYPE_##type, \ + (const void *)&col->bool_val); + MUTEX_PROF_COUNTERS; +#undef OP +#undef EMITTER_TYPE_uint32_t +#undef EMITTER_TYPE_uint64_t +} + +static void +stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i) { + size_t page; + bool in_gap, in_gap_prev; + unsigned nbins, j; + + CTL_GET("arenas.page", &page, size_t); + + CTL_GET("arenas.nbins", &nbins, unsigned); + + emitter_row_t header_row; + emitter_row_init(&header_row); + + emitter_row_t row; + emitter_row_init(&row); +#define COL(name, left_or_right, col_width, etype) \ + emitter_col_t col_##name; \ + emitter_col_init(&col_##name, &row); \ + col_##name.justify = emitter_justify_##left_or_right; \ + col_##name.width = col_width; \ + col_##name.type = emitter_type_##etype; \ + emitter_col_t header_col_##name; \ + emitter_col_init(&header_col_##name, &header_row); \ + header_col_##name.justify = emitter_justify_##left_or_right; \ + header_col_##name.width = col_width; \ + header_col_##name.type = emitter_type_title; \ + header_col_##name.str_val = #name; + + COL(size, right, 20, size) + COL(ind, right, 4, unsigned) + COL(allocated, right, 13, uint64) + COL(nmalloc, right, 13, uint64) + COL(ndalloc, right, 13, uint64) + COL(nrequests, right, 13, uint64) + COL(curregs, right, 13, size) + COL(curslabs, right, 13, size) + COL(regs, right, 5, unsigned) + COL(pgs, right, 4, size) + /* To buffer a right- and left-justified column. */ + COL(justify_spacer, right, 1, title) + COL(util, right, 6, title) + COL(nfills, right, 13, uint64) + COL(nflushes, right, 13, uint64) + COL(nslabs, right, 13, uint64) + COL(nreslabs, right, 13, uint64) +#undef COL + + /* Don't want to actually print the name. 
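 * [Editorial aside] The COL() macro a few lines up is an X-macro: one
 * invocation stamps out both the data column and its matching header
 * column. The trick in miniature (illustrative only):
 *
 *     #define COLUMNS C(size) C(count) C(util)
 *     #define C(name) int col_##name;
 *     struct row { COLUMNS };   // expands to three int fields
 *     #undef C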
*/ + header_col_justify_spacer.str_val = " "; + col_justify_spacer.str_val = " "; + + + emitter_col_t col_mutex64[mutex_prof_num_uint64_t_counters]; + emitter_col_t col_mutex32[mutex_prof_num_uint32_t_counters]; + + emitter_col_t header_mutex64[mutex_prof_num_uint64_t_counters]; + emitter_col_t header_mutex32[mutex_prof_num_uint32_t_counters]; + + if (mutex) { + mutex_stats_init_cols(&row, NULL, NULL, col_mutex64, + col_mutex32); + mutex_stats_init_cols(&header_row, NULL, NULL, header_mutex64, + header_mutex32); + } + + /* + * We print a "bins:" header as part of the table row; we need to adjust + * the header size column to compensate. + */ + header_col_size.width -=5; + emitter_table_printf(emitter, "bins:"); + emitter_table_row(emitter, &header_row); + emitter_json_arr_begin(emitter, "bins"); + + for (j = 0, in_gap = false; j < nbins; j++) { + uint64_t nslabs; + size_t reg_size, slab_size, curregs; + size_t curslabs; + uint32_t nregs; + uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; + uint64_t nreslabs; + + CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs, + uint64_t); + in_gap_prev = in_gap; + in_gap = (nslabs == 0); + + if (in_gap_prev && !in_gap) { + emitter_table_printf(emitter, + " ---\n"); + } + + CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); + CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); + CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t); + + CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs, + size_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, + &nrequests, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs, + size_t); + + if (mutex) { + mutex_stats_read_arena_bin(i, j, col_mutex64, + col_mutex32); + } + + emitter_json_arr_obj_begin(emitter); + emitter_json_kv(emitter, "nmalloc", emitter_type_uint64, + &nmalloc); + emitter_json_kv(emitter, "ndalloc", emitter_type_uint64, + &ndalloc); + emitter_json_kv(emitter, "curregs", emitter_type_size, + &curregs); + emitter_json_kv(emitter, "nrequests", emitter_type_uint64, + &nrequests); + emitter_json_kv(emitter, "nfills", emitter_type_uint64, + &nfills); + emitter_json_kv(emitter, "nflushes", emitter_type_uint64, + &nflushes); + emitter_json_kv(emitter, "nreslabs", emitter_type_uint64, + &nreslabs); + emitter_json_kv(emitter, "curslabs", emitter_type_size, + &curslabs); + if (mutex) { + emitter_json_dict_begin(emitter, "mutex"); + mutex_stats_emit(emitter, NULL, col_mutex64, + col_mutex32); + emitter_json_dict_end(emitter); + } + emitter_json_arr_obj_end(emitter); + + size_t availregs = nregs * curslabs; + char util[6]; + if (get_rate_str((uint64_t)curregs, (uint64_t)availregs, util)) + { + if (availregs == 0) { + malloc_snprintf(util, sizeof(util), "1"); + } else if (curregs > availregs) { + /* + * Race detected: the counters were read in + * separate mallctl calls and concurrent + * operations happened in between. In this case + * no meaningful utilization can be computed. 
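 * [Editorial aside] Worked example of the utilization math above: with
 * nregs = 512 regions per slab and curslabs = 4, availregs is
 * 512 * 4 = 2048; if curregs = 1536, get_rate_str() prints
 * 1536/2048 = 0.750. The race branch fires only when curregs, read in
 * one mallctl call, exceeds availregs computed from values read in
 * other calls.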
+ */ + malloc_snprintf(util, sizeof(util), " race"); + } else { + not_reached(); + } + } + + col_size.size_val = reg_size; + col_ind.unsigned_val = j; + col_allocated.size_val = curregs * reg_size; + col_nmalloc.uint64_val = nmalloc; + col_ndalloc.uint64_val = ndalloc; + col_nrequests.uint64_val = nrequests; + col_curregs.size_val = curregs; + col_curslabs.size_val = curslabs; + col_regs.unsigned_val = nregs; + col_pgs.size_val = slab_size / page; + col_util.str_val = util; + col_nfills.uint64_val = nfills; + col_nflushes.uint64_val = nflushes; + col_nslabs.uint64_val = nslabs; + col_nreslabs.uint64_val = nreslabs; + + /* + * Note that mutex columns were initialized above, if mutex == + * true. + */ + + emitter_table_row(emitter, &row); + } + emitter_json_arr_end(emitter); /* Close "bins". */ + + if (in_gap) { + emitter_table_printf(emitter, " ---\n"); + } +} + +static void +stats_arena_lextents_print(emitter_t *emitter, unsigned i) { + unsigned nbins, nlextents, j; + bool in_gap, in_gap_prev; + + CTL_GET("arenas.nbins", &nbins, unsigned); + CTL_GET("arenas.nlextents", &nlextents, unsigned); + + emitter_row_t header_row; + emitter_row_init(&header_row); + emitter_row_t row; + emitter_row_init(&row); + +#define COL(name, left_or_right, col_width, etype) \ + emitter_col_t header_##name; \ + emitter_col_init(&header_##name, &header_row); \ + header_##name.justify = emitter_justify_##left_or_right; \ + header_##name.width = col_width; \ + header_##name.type = emitter_type_title; \ + header_##name.str_val = #name; \ + \ + emitter_col_t col_##name; \ + emitter_col_init(&col_##name, &row); \ + col_##name.justify = emitter_justify_##left_or_right; \ + col_##name.width = col_width; \ + col_##name.type = emitter_type_##etype; + + COL(size, right, 20, size) + COL(ind, right, 4, unsigned) + COL(allocated, right, 13, size) + COL(nmalloc, right, 13, uint64) + COL(ndalloc, right, 13, uint64) + COL(nrequests, right, 13, uint64) + COL(curlextents, right, 13, size) +#undef COL + + /* As with bins, we label the large extents table. */ + header_size.width -= 6; + emitter_table_printf(emitter, "large:"); + emitter_table_row(emitter, &header_row); + emitter_json_arr_begin(emitter, "lextents"); + + for (j = 0, in_gap = false; j < nlextents; j++) { + uint64_t nmalloc, ndalloc, nrequests; + size_t lextent_size, curlextents; + + CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j, + &nmalloc, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j, + &ndalloc, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j, + &nrequests, uint64_t); + in_gap_prev = in_gap; + in_gap = (nrequests == 0); + + if (in_gap_prev && !in_gap) { + emitter_table_printf(emitter, + " ---\n"); + } + + CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t); + CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j, + &curlextents, size_t); + + emitter_json_arr_obj_begin(emitter); + emitter_json_kv(emitter, "curlextents", emitter_type_size, + &curlextents); + emitter_json_arr_obj_end(emitter); + + col_size.size_val = lextent_size; + col_ind.unsigned_val = nbins + j; + col_allocated.size_val = curlextents * lextent_size; + col_nmalloc.uint64_val = nmalloc; + col_ndalloc.uint64_val = ndalloc; + col_nrequests.uint64_val = nrequests; + col_curlextents.size_val = curlextents; + + if (!in_gap) { + emitter_table_row(emitter, &row); + } + } + emitter_json_arr_end(emitter); /* Close "lextents". 
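 * [Editorial sketch] Both the bins and lextents tables compress runs
 * of empty size classes into a single "---" marker line. The
 * in_gap/in_gap_prev dance, in miniature (illustrative):
 *
 *     bool gap = false;
 *     for (unsigned j = 0; j < n; j++) {
 *         bool prev = gap;
 *         gap = (rows[j].nrequests == 0);
 *         if (prev && !gap) puts("                     ---");
 *         if (!gap) print_row(&rows[j]);
 *     }
 *     if (gap) puts("                     ---");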
*/ + if (in_gap) { + emitter_table_printf(emitter, " ---\n"); + } +} + +static void +stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind) { + emitter_row_t row; + emitter_col_t col_name; + emitter_col_t col64[mutex_prof_num_uint64_t_counters]; + emitter_col_t col32[mutex_prof_num_uint32_t_counters]; + + emitter_row_init(&row); + mutex_stats_init_cols(&row, "", &col_name, col64, col32); + + emitter_json_dict_begin(emitter, "mutexes"); + emitter_table_row(emitter, &row); + + for (mutex_prof_arena_ind_t i = 0; i < mutex_prof_num_arena_mutexes; + i++) { + const char *name = arena_mutex_names[i]; + emitter_json_dict_begin(emitter, name); + mutex_stats_read_arena(arena_ind, i, name, &col_name, col64, + col32); + mutex_stats_emit(emitter, &row, col64, col32); + emitter_json_dict_end(emitter); /* Close the mutex dict. */ + } + emitter_json_dict_end(emitter); /* End "mutexes". */ +} + +static void +stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large, + bool mutex) { + unsigned nthreads; + const char *dss; + ssize_t dirty_decay_ms, muzzy_decay_ms; + size_t page, pactive, pdirty, pmuzzy, mapped, retained; + size_t base, internal, resident, metadata_thp; + uint64_t dirty_npurge, dirty_nmadvise, dirty_purged; + uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged; + size_t small_allocated; + uint64_t small_nmalloc, small_ndalloc, small_nrequests; + size_t large_allocated; + uint64_t large_nmalloc, large_ndalloc, large_nrequests; + size_t tcache_bytes; + uint64_t uptime; + + CTL_GET("arenas.page", &page, size_t); + + CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); + emitter_kv(emitter, "nthreads", "assigned threads", + emitter_type_unsigned, &nthreads); + + CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t); + emitter_kv(emitter, "uptime_ns", "uptime", emitter_type_uint64, + &uptime); + + CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); + emitter_kv(emitter, "dss", "dss allocation precedence", + emitter_type_string, &dss); + + CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms, + ssize_t); + CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms, + ssize_t); + CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); + CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); + CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t); + CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t); + CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise, + uint64_t); + CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t); + CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t); + CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise, + uint64_t); + CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t); + + emitter_row_t decay_row; + emitter_row_init(&decay_row); + + /* JSON-style emission. 
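 * [Editorial aside] The emitter used throughout this file renders one
 * logical stream in two modes: emitter_kv() writes in whichever mode
 * is active, while emitter_json_kv() and emitter_table_printf() are
 * mode-specific, which is why the decay stats appear twice here. A
 * minimal sketch of that dispatch (illustrative, not the real API):
 *
 *     typedef enum { OUT_TABLE, OUT_JSON } out_t;
 *
 *     static void
 *     emit_kv(out_t mode, const char *k, size_t v) {
 *         if (mode == OUT_JSON) printf("\t\"%s\": %zu,\n", k, v);
 *         else                  printf("%s: %zu\n", k, v);
 *     }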
*/ + emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize, + &dirty_decay_ms); + emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize, + &muzzy_decay_ms); + + emitter_json_kv(emitter, "pactive", emitter_type_size, &pactive); + emitter_json_kv(emitter, "pdirty", emitter_type_size, &pdirty); + emitter_json_kv(emitter, "pmuzzy", emitter_type_size, &pmuzzy); + + emitter_json_kv(emitter, "dirty_npurge", emitter_type_uint64, + &dirty_npurge); + emitter_json_kv(emitter, "dirty_nmadvise", emitter_type_uint64, + &dirty_nmadvise); + emitter_json_kv(emitter, "dirty_purged", emitter_type_uint64, + &dirty_purged); + + emitter_json_kv(emitter, "muzzy_npurge", emitter_type_uint64, + &muzzy_npurge); + emitter_json_kv(emitter, "muzzy_nmadvise", emitter_type_uint64, + &muzzy_nmadvise); + emitter_json_kv(emitter, "muzzy_purged", emitter_type_uint64, + &muzzy_purged); + + /* Table-style emission. */ + emitter_col_t decay_type; + emitter_col_init(&decay_type, &decay_row); + decay_type.justify = emitter_justify_right; + decay_type.width = 9; + decay_type.type = emitter_type_title; + decay_type.str_val = "decaying:"; + + emitter_col_t decay_time; + emitter_col_init(&decay_time, &decay_row); + decay_time.justify = emitter_justify_right; + decay_time.width = 6; + decay_time.type = emitter_type_title; + decay_time.str_val = "time"; + + emitter_col_t decay_npages; + emitter_col_init(&decay_npages, &decay_row); + decay_npages.justify = emitter_justify_right; + decay_npages.width = 13; + decay_npages.type = emitter_type_title; + decay_npages.str_val = "npages"; + + emitter_col_t decay_sweeps; + emitter_col_init(&decay_sweeps, &decay_row); + decay_sweeps.justify = emitter_justify_right; + decay_sweeps.width = 13; + decay_sweeps.type = emitter_type_title; + decay_sweeps.str_val = "sweeps"; + + emitter_col_t decay_madvises; + emitter_col_init(&decay_madvises, &decay_row); + decay_madvises.justify = emitter_justify_right; + decay_madvises.width = 13; + decay_madvises.type = emitter_type_title; + decay_madvises.str_val = "madvises"; + + emitter_col_t decay_purged; + emitter_col_init(&decay_purged, &decay_row); + decay_purged.justify = emitter_justify_right; + decay_purged.width = 13; + decay_purged.type = emitter_type_title; + decay_purged.str_val = "purged"; + + /* Title row. */ + emitter_table_row(emitter, &decay_row); + + /* Dirty row. */ + decay_type.str_val = "dirty:"; + + if (dirty_decay_ms >= 0) { + decay_time.type = emitter_type_ssize; + decay_time.ssize_val = dirty_decay_ms; + } else { + decay_time.type = emitter_type_title; + decay_time.str_val = "N/A"; + } + + decay_npages.type = emitter_type_size; + decay_npages.size_val = pdirty; + + decay_sweeps.type = emitter_type_uint64; + decay_sweeps.uint64_val = dirty_npurge; + + decay_madvises.type = emitter_type_uint64; + decay_madvises.uint64_val = dirty_nmadvise; + + decay_purged.type = emitter_type_uint64; + decay_purged.uint64_val = dirty_purged; + + emitter_table_row(emitter, &decay_row); + + /* Muzzy row. 
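 * [Editorial aside] "Dirty" pages are unused but still backed by
 * physical memory; after dirty_decay_ms they are demoted to "muzzy"
 * (lazily purged, e.g. MADV_FREE), and after muzzy_decay_ms returned
 * to the OS entirely. Both knobs are ordinary runtime options, e.g.:
 *
 *     MALLOC_CONF="dirty_decay_ms:10000,muzzy_decay_ms:0" ./app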
*/ + decay_type.str_val = "muzzy:"; + + if (muzzy_decay_ms >= 0) { + decay_time.type = emitter_type_ssize; + decay_time.ssize_val = muzzy_decay_ms; + } else { + decay_time.type = emitter_type_title; + decay_time.str_val = "N/A"; + } + + decay_npages.type = emitter_type_size; + decay_npages.size_val = pmuzzy; + + decay_sweeps.type = emitter_type_uint64; + decay_sweeps.uint64_val = muzzy_npurge; + + decay_madvises.type = emitter_type_uint64; + decay_madvises.uint64_val = muzzy_nmadvise; + + decay_purged.type = emitter_type_uint64; + decay_purged.uint64_val = muzzy_purged; + + emitter_table_row(emitter, &decay_row); + + /* Small / large / total allocation counts. */ + emitter_row_t alloc_count_row; + emitter_row_init(&alloc_count_row); + + emitter_col_t alloc_count_title; + emitter_col_init(&alloc_count_title, &alloc_count_row); + alloc_count_title.justify = emitter_justify_left; + alloc_count_title.width = 25; + alloc_count_title.type = emitter_type_title; + alloc_count_title.str_val = ""; + + emitter_col_t alloc_count_allocated; + emitter_col_init(&alloc_count_allocated, &alloc_count_row); + alloc_count_allocated.justify = emitter_justify_right; + alloc_count_allocated.width = 12; + alloc_count_allocated.type = emitter_type_title; + alloc_count_allocated.str_val = "allocated"; + + emitter_col_t alloc_count_nmalloc; + emitter_col_init(&alloc_count_nmalloc, &alloc_count_row); + alloc_count_nmalloc.justify = emitter_justify_right; + alloc_count_nmalloc.width = 12; + alloc_count_nmalloc.type = emitter_type_title; + alloc_count_nmalloc.str_val = "nmalloc"; + + emitter_col_t alloc_count_ndalloc; + emitter_col_init(&alloc_count_ndalloc, &alloc_count_row); + alloc_count_ndalloc.justify = emitter_justify_right; + alloc_count_ndalloc.width = 12; + alloc_count_ndalloc.type = emitter_type_title; + alloc_count_ndalloc.str_val = "ndalloc"; + + emitter_col_t alloc_count_nrequests; + emitter_col_init(&alloc_count_nrequests, &alloc_count_row); + alloc_count_nrequests.justify = emitter_justify_right; + alloc_count_nrequests.width = 12; + alloc_count_nrequests.type = emitter_type_title; + alloc_count_nrequests.str_val = "nrequests"; + + emitter_table_row(emitter, &alloc_count_row); + +#define GET_AND_EMIT_ALLOC_STAT(small_or_large, name, valtype) \ + CTL_M2_GET("stats.arenas.0." #small_or_large "." #name, i, \ + &small_or_large##_##name, valtype##_t); \ + emitter_json_kv(emitter, #name, emitter_type_##valtype, \ + &small_or_large##_##name); \ + alloc_count_##name.type = emitter_type_##valtype; \ + alloc_count_##name.valtype##_val = small_or_large##_##name; + + emitter_json_dict_begin(emitter, "small"); + alloc_count_title.str_val = "small:"; + + GET_AND_EMIT_ALLOC_STAT(small, allocated, size) + GET_AND_EMIT_ALLOC_STAT(small, nmalloc, uint64) + GET_AND_EMIT_ALLOC_STAT(small, ndalloc, uint64) + GET_AND_EMIT_ALLOC_STAT(small, nrequests, uint64) + + emitter_table_row(emitter, &alloc_count_row); + emitter_json_dict_end(emitter); /* Close "small". */ + + emitter_json_dict_begin(emitter, "large"); + alloc_count_title.str_val = "large:"; + + GET_AND_EMIT_ALLOC_STAT(large, allocated, size) + GET_AND_EMIT_ALLOC_STAT(large, nmalloc, uint64) + GET_AND_EMIT_ALLOC_STAT(large, ndalloc, uint64) + GET_AND_EMIT_ALLOC_STAT(large, nrequests, uint64) + + emitter_table_row(emitter, &alloc_count_row); + emitter_json_dict_end(emitter); /* Close "large". */ + +#undef GET_AND_EMIT_ALLOC_STAT + + /* Aggregated small + large stats are emitter only in table mode. 
*/ + alloc_count_title.str_val = "total:"; + alloc_count_allocated.size_val = small_allocated + large_allocated; + alloc_count_nmalloc.uint64_val = small_nmalloc + large_nmalloc; + alloc_count_ndalloc.uint64_val = small_ndalloc + large_ndalloc; + alloc_count_nrequests.uint64_val = small_nrequests + large_nrequests; + emitter_table_row(emitter, &alloc_count_row); + + emitter_row_t mem_count_row; + emitter_row_init(&mem_count_row); + + emitter_col_t mem_count_title; + emitter_col_init(&mem_count_title, &mem_count_row); + mem_count_title.justify = emitter_justify_left; + mem_count_title.width = 25; + mem_count_title.type = emitter_type_title; + mem_count_title.str_val = ""; + + emitter_col_t mem_count_val; + emitter_col_init(&mem_count_val, &mem_count_row); + mem_count_val.justify = emitter_justify_right; + mem_count_val.width = 12; + mem_count_val.type = emitter_type_title; + mem_count_val.str_val = ""; + + emitter_table_row(emitter, &mem_count_row); + mem_count_val.type = emitter_type_size; + + /* Active count in bytes is emitted only in table mode. */ + mem_count_title.str_val = "active:"; + mem_count_val.size_val = pactive * page; + emitter_table_row(emitter, &mem_count_row); + +#define GET_AND_EMIT_MEM_STAT(stat) \ + CTL_M2_GET("stats.arenas.0."#stat, i, &stat, size_t); \ + emitter_json_kv(emitter, #stat, emitter_type_size, &stat); \ + mem_count_title.str_val = #stat":"; \ + mem_count_val.size_val = stat; \ + emitter_table_row(emitter, &mem_count_row); + + GET_AND_EMIT_MEM_STAT(mapped) + GET_AND_EMIT_MEM_STAT(retained) + GET_AND_EMIT_MEM_STAT(base) + GET_AND_EMIT_MEM_STAT(internal) + GET_AND_EMIT_MEM_STAT(metadata_thp) + GET_AND_EMIT_MEM_STAT(tcache_bytes) + GET_AND_EMIT_MEM_STAT(resident) +#undef GET_AND_EMIT_MEM_STAT + + if (mutex) { + stats_arena_mutexes_print(emitter, i); + } + if (bins) { + stats_arena_bins_print(emitter, mutex, i); + } + if (large) { + stats_arena_lextents_print(emitter, i); + } +} + +static void +stats_general_print(emitter_t *emitter) { + const char *cpv; + bool bv, bv2; + unsigned uv; + uint32_t u32v; + uint64_t u64v; + ssize_t ssv, ssv2; + size_t sv, bsz, usz, ssz, sssz, cpsz; + + bsz = sizeof(bool); + usz = sizeof(unsigned); + ssz = sizeof(size_t); + sssz = sizeof(ssize_t); + cpsz = sizeof(const char *); + + CTL_GET("version", &cpv, const char *); + emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv); + + /* config. */ + emitter_dict_begin(emitter, "config", "Build-time option settings"); +#define CONFIG_WRITE_BOOL(name) \ + do { \ + CTL_GET("config."#name, &bv, bool); \ + emitter_kv(emitter, #name, "config."#name, \ + emitter_type_bool, &bv); \ + } while (0) + + CONFIG_WRITE_BOOL(cache_oblivious); + CONFIG_WRITE_BOOL(debug); + CONFIG_WRITE_BOOL(fill); + CONFIG_WRITE_BOOL(lazy_lock); + emitter_kv(emitter, "malloc_conf", "config.malloc_conf", + emitter_type_string, &config_malloc_conf); + + CONFIG_WRITE_BOOL(prof); + CONFIG_WRITE_BOOL(prof_libgcc); + CONFIG_WRITE_BOOL(prof_libunwind); + CONFIG_WRITE_BOOL(stats); + CONFIG_WRITE_BOOL(utrace); + CONFIG_WRITE_BOOL(xmalloc); +#undef CONFIG_WRITE_BOOL + emitter_dict_end(emitter); /* Close "config" dict. */ + + /* opt. 
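 * [Editorial sketch] The OPT_WRITE* macros below probe each "opt.*"
 * mallctl and emit the value only when the option exists in this
 * build. The same probe, hand-written against the public API:
 *
 *     #include <stdio.h>
 *     #include <jemalloc/jemalloc.h>
 *
 *     bool bv;
 *     size_t bsz = sizeof(bv);
 *     if (mallctl("opt.abort", &bv, &bsz, NULL, 0) == 0) {
 *         printf("opt.abort: %s\n", bv ? "true" : "false");
 *     }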
*/ +#define OPT_WRITE(name, var, size, emitter_type) \ + if (je_mallctl("opt."name, (void *)&var, &size, NULL, 0) == \ + 0) { \ + emitter_kv(emitter, name, "opt."name, emitter_type, \ + &var); \ + } + +#define OPT_WRITE_MUTABLE(name, var1, var2, size, emitter_type, \ + altname) \ + if (je_mallctl("opt."name, (void *)&var1, &size, NULL, 0) == \ + 0 && je_mallctl(altname, (void *)&var2, &size, NULL, 0) \ + == 0) { \ + emitter_kv_note(emitter, name, "opt."name, \ + emitter_type, &var1, altname, emitter_type, \ + &var2); \ + } + +#define OPT_WRITE_BOOL(name) OPT_WRITE(name, bv, bsz, emitter_type_bool) +#define OPT_WRITE_BOOL_MUTABLE(name, altname) \ + OPT_WRITE_MUTABLE(name, bv, bv2, bsz, emitter_type_bool, altname) + +#define OPT_WRITE_UNSIGNED(name) \ + OPT_WRITE(name, uv, usz, emitter_type_unsigned) + +#define OPT_WRITE_SSIZE_T(name) \ + OPT_WRITE(name, ssv, sssz, emitter_type_ssize) +#define OPT_WRITE_SSIZE_T_MUTABLE(name, altname) \ + OPT_WRITE_MUTABLE(name, ssv, ssv2, sssz, emitter_type_ssize, \ + altname) + +#define OPT_WRITE_CHAR_P(name) \ + OPT_WRITE(name, cpv, cpsz, emitter_type_string) + + emitter_dict_begin(emitter, "opt", "Run-time option settings"); + + OPT_WRITE_BOOL("abort") + OPT_WRITE_BOOL("abort_conf") + OPT_WRITE_BOOL("retain") + OPT_WRITE_CHAR_P("dss") + OPT_WRITE_UNSIGNED("narenas") + OPT_WRITE_CHAR_P("percpu_arena") + OPT_WRITE_CHAR_P("metadata_thp") + OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread") + OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms") + OPT_WRITE_SSIZE_T_MUTABLE("muzzy_decay_ms", "arenas.muzzy_decay_ms") + OPT_WRITE_UNSIGNED("lg_extent_max_active_fit") + OPT_WRITE_CHAR_P("junk") + OPT_WRITE_BOOL("zero") + OPT_WRITE_BOOL("utrace") + OPT_WRITE_BOOL("xmalloc") + OPT_WRITE_BOOL("tcache") + OPT_WRITE_SSIZE_T("lg_tcache_max") + OPT_WRITE_CHAR_P("thp") + OPT_WRITE_BOOL("prof") + OPT_WRITE_CHAR_P("prof_prefix") + OPT_WRITE_BOOL_MUTABLE("prof_active", "prof.active") + OPT_WRITE_BOOL_MUTABLE("prof_thread_active_init", + "prof.thread_active_init") + OPT_WRITE_SSIZE_T_MUTABLE("lg_prof_sample", "prof.lg_sample") + OPT_WRITE_BOOL("prof_accum") + OPT_WRITE_SSIZE_T("lg_prof_interval") + OPT_WRITE_BOOL("prof_gdump") + OPT_WRITE_BOOL("prof_final") + OPT_WRITE_BOOL("prof_leak") + OPT_WRITE_BOOL("stats_print") + OPT_WRITE_CHAR_P("stats_print_opts") + + emitter_dict_end(emitter); + +#undef OPT_WRITE +#undef OPT_WRITE_MUTABLE +#undef OPT_WRITE_BOOL +#undef OPT_WRITE_BOOL_MUTABLE +#undef OPT_WRITE_UNSIGNED +#undef OPT_WRITE_SSIZE_T +#undef OPT_WRITE_SSIZE_T_MUTABLE +#undef OPT_WRITE_CHAR_P + + /* prof. */ + if (config_prof) { + emitter_dict_begin(emitter, "prof", "Profiling settings"); + + CTL_GET("prof.thread_active_init", &bv, bool); + emitter_kv(emitter, "thread_active_init", + "prof.thread_active_init", emitter_type_bool, &bv); + + CTL_GET("prof.active", &bv, bool); + emitter_kv(emitter, "active", "prof.active", emitter_type_bool, + &bv); + + CTL_GET("prof.gdump", &bv, bool); + emitter_kv(emitter, "gdump", "prof.gdump", emitter_type_bool, + &bv); + + CTL_GET("prof.interval", &u64v, uint64_t); + emitter_kv(emitter, "interval", "prof.interval", + emitter_type_uint64, &u64v); + + CTL_GET("prof.lg_sample", &ssv, ssize_t); + emitter_kv(emitter, "lg_sample", "prof.lg_sample", + emitter_type_ssize, &ssv); + + emitter_dict_end(emitter); /* Close "prof". */ + } + + /* arenas. */ + /* + * The json output sticks arena info into an "arenas" dict; the table + * output puts them at the top-level. 
+ */ + emitter_json_dict_begin(emitter, "arenas"); + + CTL_GET("arenas.narenas", &uv, unsigned); + emitter_kv(emitter, "narenas", "Arenas", emitter_type_unsigned, &uv); + + /* + * Decay settings are emitted only in json mode; in table mode, they're + * emitted as notes with the opt output, above. + */ + CTL_GET("arenas.dirty_decay_ms", &ssv, ssize_t); + emitter_json_kv(emitter, "dirty_decay_ms", emitter_type_ssize, &ssv); + + CTL_GET("arenas.muzzy_decay_ms", &ssv, ssize_t); + emitter_json_kv(emitter, "muzzy_decay_ms", emitter_type_ssize, &ssv); + + CTL_GET("arenas.quantum", &sv, size_t); + emitter_kv(emitter, "quantum", "Quantum size", emitter_type_size, &sv); + + CTL_GET("arenas.page", &sv, size_t); + emitter_kv(emitter, "page", "Page size", emitter_type_size, &sv); + + if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) { + emitter_kv(emitter, "tcache_max", + "Maximum thread-cached size class", emitter_type_size, &sv); + } + + unsigned nbins; + CTL_GET("arenas.nbins", &nbins, unsigned); + emitter_kv(emitter, "nbins", "Number of bin size classes", + emitter_type_unsigned, &nbins); + + unsigned nhbins; + CTL_GET("arenas.nhbins", &nhbins, unsigned); + emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes", + emitter_type_unsigned, &nhbins); + + /* + * We do enough mallctls in a loop that we actually want to omit them + * (not just omit the printing). + */ + if (emitter->output == emitter_output_json) { + emitter_json_arr_begin(emitter, "bin"); + for (unsigned i = 0; i < nbins; i++) { + emitter_json_arr_obj_begin(emitter); + + CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t); + emitter_json_kv(emitter, "size", emitter_type_size, + &sv); + + CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t); + emitter_json_kv(emitter, "nregs", emitter_type_uint32, + &u32v); + + CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t); + emitter_json_kv(emitter, "slab_size", emitter_type_size, + &sv); + + emitter_json_arr_obj_end(emitter); + } + emitter_json_arr_end(emitter); /* Close "bin". */ + } + + unsigned nlextents; + CTL_GET("arenas.nlextents", &nlextents, unsigned); + emitter_kv(emitter, "nlextents", "Number of large size classes", + emitter_type_unsigned, &nlextents); + + if (emitter->output == emitter_output_json) { + emitter_json_arr_begin(emitter, "lextent"); + for (unsigned i = 0; i < nlextents; i++) { + emitter_json_arr_obj_begin(emitter); + + CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t); + emitter_json_kv(emitter, "size", emitter_type_size, + &sv); + + emitter_json_arr_obj_end(emitter); + } + emitter_json_arr_end(emitter); /* Close "lextent". */ + } + + emitter_json_dict_end(emitter); /* Close "arenas" */ +} + +static void +stats_print_helper(emitter_t *emitter, bool merged, bool destroyed, + bool unmerged, bool bins, bool large, bool mutex) { + /* + * These should be deleted. We keep them around for a while, to aid in + * the transition to the emitter code. 
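 * [Editorial sketch] Reading any "stats.*" value from application code
 * requires bumping the "epoch" mallctl first so the snapshot is
 * refreshed, exactly as stats_print() itself does further down:
 *
 *     uint64_t epoch = 1;
 *     size_t esz = sizeof(epoch);
 *     mallctl("epoch", &epoch, &esz, &epoch, esz);   // refresh snapshot
 *
 *     size_t allocated, sz = sizeof(allocated);
 *     mallctl("stats.allocated", &allocated, &sz, NULL, 0);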
+ */ + size_t allocated, active, metadata, metadata_thp, resident, mapped, + retained; + size_t num_background_threads; + uint64_t background_thread_num_runs, background_thread_run_interval; + + CTL_GET("stats.allocated", &allocated, size_t); + CTL_GET("stats.active", &active, size_t); + CTL_GET("stats.metadata", &metadata, size_t); + CTL_GET("stats.metadata_thp", &metadata_thp, size_t); + CTL_GET("stats.resident", &resident, size_t); + CTL_GET("stats.mapped", &mapped, size_t); + CTL_GET("stats.retained", &retained, size_t); + + if (have_background_thread) { + CTL_GET("stats.background_thread.num_threads", + &num_background_threads, size_t); + CTL_GET("stats.background_thread.num_runs", + &background_thread_num_runs, uint64_t); + CTL_GET("stats.background_thread.run_interval", + &background_thread_run_interval, uint64_t); + } else { + num_background_threads = 0; + background_thread_num_runs = 0; + background_thread_run_interval = 0; + } + + /* Generic global stats. */ + emitter_json_dict_begin(emitter, "stats"); + emitter_json_kv(emitter, "allocated", emitter_type_size, &allocated); + emitter_json_kv(emitter, "active", emitter_type_size, &active); + emitter_json_kv(emitter, "metadata", emitter_type_size, &metadata); + emitter_json_kv(emitter, "metadata_thp", emitter_type_size, + &metadata_thp); + emitter_json_kv(emitter, "resident", emitter_type_size, &resident); + emitter_json_kv(emitter, "mapped", emitter_type_size, &mapped); + emitter_json_kv(emitter, "retained", emitter_type_size, &retained); + + emitter_table_printf(emitter, "Allocated: %zu, active: %zu, " + "metadata: %zu (n_thp %zu), resident: %zu, mapped: %zu, " + "retained: %zu\n", allocated, active, metadata, metadata_thp, + resident, mapped, retained); + + /* Background thread stats. */ + emitter_json_dict_begin(emitter, "background_thread"); + emitter_json_kv(emitter, "num_threads", emitter_type_size, + &num_background_threads); + emitter_json_kv(emitter, "num_runs", emitter_type_uint64, + &background_thread_num_runs); + emitter_json_kv(emitter, "run_interval", emitter_type_uint64, + &background_thread_run_interval); + emitter_json_dict_end(emitter); /* Close "background_thread". */ + + emitter_table_printf(emitter, "Background threads: %zu, " + "num_runs: %"FMTu64", run_interval: %"FMTu64" ns\n", + num_background_threads, background_thread_num_runs, + background_thread_run_interval); + + if (mutex) { + emitter_row_t row; + emitter_col_t name; + emitter_col_t col64[mutex_prof_num_uint64_t_counters]; + emitter_col_t col32[mutex_prof_num_uint32_t_counters]; + + emitter_row_init(&row); + mutex_stats_init_cols(&row, "", &name, col64, col32); + + emitter_table_row(emitter, &row); + emitter_json_dict_begin(emitter, "mutexes"); + + for (int i = 0; i < mutex_prof_num_global_mutexes; i++) { + mutex_stats_read_global(global_mutex_names[i], &name, + col64, col32); + emitter_json_dict_begin(emitter, global_mutex_names[i]); + mutex_stats_emit(emitter, &row, col64, col32); + emitter_json_dict_end(emitter); + } + + emitter_json_dict_end(emitter); /* Close "mutexes". */ + } + + emitter_json_dict_end(emitter); /* Close "stats". 
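 * [Editorial aside] MALLCTL_ARENAS_ALL and MALLCTL_ARENAS_DESTROYED
 * (used just below) are public sentinel arena indices: they slot into
 * the same "stats.arenas.<i>.*" namespace as real arena numbers, so
 * merged and destroyed stats reuse the per-arena code path, e.g.:
 *
 *     size_t pactive, sz = sizeof(pactive);
 *     char name[64];
 *     snprintf(name, sizeof(name), "stats.arenas.%u.pactive",
 *         (unsigned)MALLCTL_ARENAS_ALL);
 *     mallctl(name, &pactive, &sz, NULL, 0);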
*/ + + if (merged || destroyed || unmerged) { + unsigned narenas; + + emitter_json_dict_begin(emitter, "stats.arenas"); + + CTL_GET("arenas.narenas", &narenas, unsigned); + size_t mib[3]; + size_t miblen = sizeof(mib) / sizeof(size_t); + size_t sz; + VARIABLE_ARRAY(bool, initialized, narenas); + bool destroyed_initialized; + unsigned i, j, ninitialized; + + xmallctlnametomib("arena.0.initialized", mib, &miblen); + for (i = ninitialized = 0; i < narenas; i++) { + mib[1] = i; + sz = sizeof(bool); + xmallctlbymib(mib, miblen, &initialized[i], &sz, + NULL, 0); + if (initialized[i]) { + ninitialized++; + } + } + mib[1] = MALLCTL_ARENAS_DESTROYED; + sz = sizeof(bool); + xmallctlbymib(mib, miblen, &destroyed_initialized, &sz, + NULL, 0); + + /* Merged stats. */ + if (merged && (ninitialized > 1 || !unmerged)) { + /* Print merged arena stats. */ + emitter_table_printf(emitter, "Merged arenas stats:\n"); + emitter_json_dict_begin(emitter, "merged"); + stats_arena_print(emitter, MALLCTL_ARENAS_ALL, bins, + large, mutex); + emitter_json_dict_end(emitter); /* Close "merged". */ + } + + /* Destroyed stats. */ + if (destroyed_initialized && destroyed) { + /* Print destroyed arena stats. */ + emitter_table_printf(emitter, + "Destroyed arenas stats:\n"); + emitter_json_dict_begin(emitter, "destroyed"); + stats_arena_print(emitter, MALLCTL_ARENAS_DESTROYED, + bins, large, mutex); + emitter_json_dict_end(emitter); /* Close "destroyed". */ + } + + /* Unmerged stats. */ + if (unmerged) { + for (i = j = 0; i < narenas; i++) { + if (initialized[i]) { + char arena_ind_str[20]; + malloc_snprintf(arena_ind_str, + sizeof(arena_ind_str), "%u", i); + emitter_json_dict_begin(emitter, + arena_ind_str); + emitter_table_printf(emitter, + "arenas[%s]:\n", arena_ind_str); + stats_arena_print(emitter, i, bins, + large, mutex); + /* Close "". */ + emitter_json_dict_end(emitter); + } + } + } + emitter_json_dict_end(emitter); /* Close "stats.arenas". */ + } +} + +void +stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts) { + int err; + uint64_t epoch; + size_t u64sz; +#define OPTION(o, v, d, s) bool v = d; + STATS_PRINT_OPTIONS +#undef OPTION + + /* + * Refresh stats, in case mallctl() was called by the application. + * + * Check for OOM here, since refreshing the ctl cache can trigger + * allocation. In practice, none of the subsequent mallctl()-related + * calls in this function will cause OOM if this one succeeds. + * */ + epoch = 1; + u64sz = sizeof(uint64_t); + err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch, + sizeof(uint64_t)); + if (err != 0) { + if (err == EAGAIN) { + malloc_write(": Memory allocation failure in " + "mallctl(\"epoch\", ...)\n"); + return; + } + malloc_write(": Failure in mallctl(\"epoch\", " + "...)\n"); + abort(); + } + + if (opts != NULL) { + for (unsigned i = 0; opts[i] != '\0'; i++) { + switch (opts[i]) { +#define OPTION(o, v, d, s) case o: v = s; break; + STATS_PRINT_OPTIONS +#undef OPTION + default:; + } + } + } + + emitter_t emitter; + emitter_init(&emitter, + json ? emitter_output_json : emitter_output_table, write_cb, + cbopaque); + emitter_begin(&emitter); + emitter_table_printf(&emitter, "___ Begin jemalloc statistics ___\n"); + emitter_json_dict_begin(&emitter, "jemalloc"); + + if (general) { + stats_general_print(&emitter); + } + if (config_stats) { + stats_print_helper(&emitter, merged, destroyed, unmerged, + bins, large, mutex); + } + + emitter_json_dict_end(&emitter); /* Closes the "jemalloc" dict. 
*/ + emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n"); + emitter_end(&emitter); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/sz.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/sz.c new file mode 100644 index 0000000..9de77e4 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/sz.c @@ -0,0 +1,107 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/sz.h" + +JEMALLOC_ALIGNED(CACHELINE) +const size_t sz_pind2sz_tab[NPSIZES+1] = { +#define PSZ_yes(lg_grp, ndelta, lg_delta) \ + (((ZU(1)<next_gc_bin; + + cache_bin_t *tbin; + if (binind < NBINS) { + tbin = tcache_small_bin_get(tcache, binind); + } else { + tbin = tcache_large_bin_get(tcache, binind); + } + if (tbin->low_water > 0) { + /* + * Flush (ceiling) 3/4 of the objects below the low water mark. + */ + if (binind < NBINS) { + tcache_bin_flush_small(tsd, tcache, tbin, binind, + tbin->ncached - tbin->low_water + (tbin->low_water + >> 2)); + /* + * Reduce fill count by 2X. Limit lg_fill_div such that + * the fill count is always at least 1. + */ + cache_bin_info_t *tbin_info = &tcache_bin_info[binind]; + if ((tbin_info->ncached_max >> + (tcache->lg_fill_div[binind] + 1)) >= 1) { + tcache->lg_fill_div[binind]++; + } + } else { + tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached + - tbin->low_water + (tbin->low_water >> 2), tcache); + } + } else if (tbin->low_water < 0) { + /* + * Increase fill count by 2X for small bins. Make sure + * lg_fill_div stays greater than 0. + */ + if (binind < NBINS && tcache->lg_fill_div[binind] > 1) { + tcache->lg_fill_div[binind]--; + } + } + tbin->low_water = tbin->ncached; + + tcache->next_gc_bin++; + if (tcache->next_gc_bin == nhbins) { + tcache->next_gc_bin = 0; + } +} + +void * +tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + cache_bin_t *tbin, szind_t binind, bool *tcache_success) { + void *ret; + + assert(tcache->arena != NULL); + arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind, + config_prof ? tcache->prof_accumbytes : 0); + if (config_prof) { + tcache->prof_accumbytes = 0; + } + ret = cache_bin_alloc_easy(tbin, tcache_success); + + return ret; +} + +void +tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin, + szind_t binind, unsigned rem) { + bool merged_stats = false; + + assert(binind < NBINS); + assert((cache_bin_sz_t)rem <= tbin->ncached); + + arena_t *arena = tcache->arena; + assert(arena != NULL); + unsigned nflush = tbin->ncached - rem; + VARIABLE_ARRAY(extent_t *, item_extent, nflush); + /* Look up extent once per item. */ + for (unsigned i = 0 ; i < nflush; i++) { + item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i)); + } + + while (nflush > 0) { + /* Lock the arena bin associated with the first object. 
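 * [Editorial sketch] The flush loop below is "group by owner": lock
 * the owner of the first remaining object, free every object with the
 * same owner in this pass, stash the rest at the front of the array,
 * and repeat until nothing is deferred. The shape in miniature
 * (owner_of/free_into are illustrative stand-ins):
 *
 *     while (n > 0) {
 *         owner_t *o = owner_of(items[0]);
 *         lock(o);
 *         unsigned kept = 0;
 *         for (unsigned i = 0; i < n; i++) {
 *             if (owner_of(items[i]) == o)
 *                 free_into(o, items[i]);
 *             else
 *                 items[kept++] = items[i];   // defer to next pass
 *         }
 *         unlock(o);
 *         n = kept;
 *     }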
*/ + extent_t *extent = item_extent[0]; + arena_t *bin_arena = extent_arena_get(extent); + bin_t *bin = &bin_arena->bins[binind]; + + if (config_prof && bin_arena == arena) { + if (arena_prof_accum(tsd_tsdn(tsd), arena, + tcache->prof_accumbytes)) { + prof_idump(tsd_tsdn(tsd)); + } + tcache->prof_accumbytes = 0; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + if (config_stats && bin_arena == arena) { + assert(!merged_stats); + merged_stats = true; + bin->stats.nflushes++; + bin->stats.nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + } + unsigned ndeferred = 0; + for (unsigned i = 0; i < nflush; i++) { + void *ptr = *(tbin->avail - 1 - i); + extent = item_extent[i]; + assert(ptr != NULL && extent != NULL); + + if (extent_arena_get(extent) == bin_arena) { + arena_dalloc_bin_junked_locked(tsd_tsdn(tsd), + bin_arena, extent, ptr); + } else { + /* + * This object was allocated via a different + * arena bin than the one that is currently + * locked. Stash the object, so that it can be + * handled in a future pass. + */ + *(tbin->avail - 1 - ndeferred) = ptr; + item_extent[ndeferred] = extent; + ndeferred++; + } + } + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred); + nflush = ndeferred; + } + if (config_stats && !merged_stats) { + /* + * The flush loop didn't happen to flush to this thread's + * arena, so the stats didn't get merged. Manually do so now. + */ + bin_t *bin = &arena->bins[binind]; + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + bin->stats.nflushes++; + bin->stats.nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + } + + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * + sizeof(void *)); + tbin->ncached = rem; + if (tbin->ncached < tbin->low_water) { + tbin->low_water = tbin->ncached; + } +} + +void +tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind, + unsigned rem, tcache_t *tcache) { + bool merged_stats = false; + + assert(binind < nhbins); + assert((cache_bin_sz_t)rem <= tbin->ncached); + + arena_t *arena = tcache->arena; + assert(arena != NULL); + unsigned nflush = tbin->ncached - rem; + VARIABLE_ARRAY(extent_t *, item_extent, nflush); + /* Look up extent once per item. */ + for (unsigned i = 0 ; i < nflush; i++) { + item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i)); + } + + while (nflush > 0) { + /* Lock the arena associated with the first object. 
*/ + extent_t *extent = item_extent[0]; + arena_t *locked_arena = extent_arena_get(extent); + UNUSED bool idump; + + if (config_prof) { + idump = false; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx); + for (unsigned i = 0; i < nflush; i++) { + void *ptr = *(tbin->avail - 1 - i); + assert(ptr != NULL); + extent = item_extent[i]; + if (extent_arena_get(extent) == locked_arena) { + large_dalloc_prep_junked_locked(tsd_tsdn(tsd), + extent); + } + } + if ((config_prof || config_stats) && locked_arena == arena) { + if (config_prof) { + idump = arena_prof_accum(tsd_tsdn(tsd), arena, + tcache->prof_accumbytes); + tcache->prof_accumbytes = 0; + } + if (config_stats) { + merged_stats = true; + arena_stats_large_nrequests_add(tsd_tsdn(tsd), + &arena->stats, binind, + tbin->tstats.nrequests); + tbin->tstats.nrequests = 0; + } + } + malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx); + + unsigned ndeferred = 0; + for (unsigned i = 0; i < nflush; i++) { + void *ptr = *(tbin->avail - 1 - i); + extent = item_extent[i]; + assert(ptr != NULL && extent != NULL); + + if (extent_arena_get(extent) == locked_arena) { + large_dalloc_finish(tsd_tsdn(tsd), extent); + } else { + /* + * This object was allocated via a different + * arena than the one that is currently locked. + * Stash the object, so that it can be handled + * in a future pass. + */ + *(tbin->avail - 1 - ndeferred) = ptr; + item_extent[ndeferred] = extent; + ndeferred++; + } + } + if (config_prof && idump) { + prof_idump(tsd_tsdn(tsd)); + } + arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush - + ndeferred); + nflush = ndeferred; + } + if (config_stats && !merged_stats) { + /* + * The flush loop didn't happen to flush to this thread's + * arena, so the stats didn't get merged. Manually do so now. + */ + arena_stats_large_nrequests_add(tsd_tsdn(tsd), &arena->stats, + binind, tbin->tstats.nrequests); + tbin->tstats.nrequests = 0; + } + + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * + sizeof(void *)); + tbin->ncached = rem; + if (tbin->ncached < tbin->low_water) { + tbin->low_water = tbin->ncached; + } +} + +void +tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { + assert(tcache->arena == NULL); + tcache->arena = arena; + + if (config_stats) { + /* Link into list of extant tcaches. */ + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); + + ql_elm_new(tcache, link); + ql_tail_insert(&arena->tcache_ql, tcache, link); + cache_bin_array_descriptor_init( + &tcache->cache_bin_array_descriptor, tcache->bins_small, + tcache->bins_large); + ql_tail_insert(&arena->cache_bin_array_descriptor_ql, + &tcache->cache_bin_array_descriptor, link); + + malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); + } +} + +static void +tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) { + arena_t *arena = tcache->arena; + assert(arena != NULL); + if (config_stats) { + /* Unlink from list of extant tcaches. 
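*/
+		/*
+		 * Editor's note (illustrative, not part of the upstream patch):
+		 * the tcache_stats_merge() call below runs before tcache_ql_mtx is
+		 * released, so the per-bin request counts accumulated by this
+		 * tcache are folded into the arena before the tcache disappears
+		 * from the list.
+		 *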
*/ + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); + if (config_debug) { + bool in_ql = false; + tcache_t *iter; + ql_foreach(iter, &arena->tcache_ql, link) { + if (iter == tcache) { + in_ql = true; + break; + } + } + assert(in_ql); + } + ql_remove(&arena->tcache_ql, tcache, link); + ql_remove(&arena->cache_bin_array_descriptor_ql, + &tcache->cache_bin_array_descriptor, link); + tcache_stats_merge(tsdn, tcache, arena); + malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); + } + tcache->arena = NULL; +} + +void +tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { + tcache_arena_dissociate(tsdn, tcache); + tcache_arena_associate(tsdn, tcache, arena); +} + +bool +tsd_tcache_enabled_data_init(tsd_t *tsd) { + /* Called upon tsd initialization. */ + tsd_tcache_enabled_set(tsd, opt_tcache); + tsd_slow_update(tsd); + + if (opt_tcache) { + /* Trigger tcache init. */ + tsd_tcache_data_init(tsd); + } + + return false; +} + +/* Initialize auto tcache (embedded in TSD). */ +static void +tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) { + memset(&tcache->link, 0, sizeof(ql_elm(tcache_t))); + tcache->prof_accumbytes = 0; + tcache->next_gc_bin = 0; + tcache->arena = NULL; + + ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR); + + size_t stack_offset = 0; + assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); + memset(tcache->bins_small, 0, sizeof(cache_bin_t) * NBINS); + memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - NBINS)); + unsigned i = 0; + for (; i < NBINS; i++) { + tcache->lg_fill_div[i] = 1; + stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + /* + * avail points past the available space. Allocations will + * access the slots toward higher addresses (for the benefit of + * prefetch). + */ + tcache_small_bin_get(tcache, i)->avail = + (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset); + } + for (; i < nhbins; i++) { + stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + tcache_large_bin_get(tcache, i)->avail = + (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset); + } + assert(stack_offset == stack_nelms * sizeof(void *)); +} + +/* Initialize auto tcache (embedded in TSD). */ +bool +tsd_tcache_data_init(tsd_t *tsd) { + tcache_t *tcache = tsd_tcachep_get_unsafe(tsd); + assert(tcache_small_bin_get(tcache, 0)->avail == NULL); + size_t size = stack_nelms * sizeof(void *); + /* Avoid false cacheline sharing. */ + size = sz_sa2u(size, CACHELINE); + + void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, + NULL, true, arena_get(TSDN_NULL, 0, true)); + if (avail_array == NULL) { + return true; + } + + tcache_init(tsd, tcache, avail_array); + /* + * Initialization is a bit tricky here. After malloc init is done, all + * threads can rely on arena_choose and associate tcache accordingly. + * However, the thread that does actual malloc bootstrapping relies on + * functional tsd, and it can only rely on a0. In that case, we + * associate its tcache to a0 temporarily, and later on + * arena_choose_hard() will re-associate properly. + */ + tcache->arena = NULL; + arena_t *arena; + if (!malloc_initialized()) { + /* If in initialization, assign to a0. */ + arena = arena_get(tsd_tsdn(tsd), 0, false); + tcache_arena_associate(tsd_tsdn(tsd), tcache, arena); + } else { + arena = arena_choose(tsd, NULL); + /* This may happen if thread.tcache.enabled is used. 
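*/
+		/*
+		 * Editor's note (illustrative, not part of the upstream patch):
+		 * tcache_init() above lays all bins' pointer stacks out in one
+		 * flat array, with each bin's avail set one past its slice (slot i
+		 * is read as *(avail - 1 - i)); roughly:
+		 *
+		 *     off = 0;
+		 *     for (b = 0; b < nhbins; b++) {
+		 *         off += tcache_bin_info[b].ncached_max * sizeof(void *);
+		 *         bin(b)->avail = (void **)((uintptr_t)stack + off);
+		 *     }
+		 *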
*/ + if (tcache->arena == NULL) { + tcache_arena_associate(tsd_tsdn(tsd), tcache, arena); + } + } + assert(arena == tcache->arena); + + return false; +} + +/* Created manual tcache for tcache.create mallctl. */ +tcache_t * +tcache_create_explicit(tsd_t *tsd) { + tcache_t *tcache; + size_t size, stack_offset; + + size = sizeof(tcache_t); + /* Naturally align the pointer stacks. */ + size = PTR_CEILING(size); + stack_offset = size; + size += stack_nelms * sizeof(void *); + /* Avoid false cacheline sharing. */ + size = sz_sa2u(size, CACHELINE); + + tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true, + arena_get(TSDN_NULL, 0, true)); + if (tcache == NULL) { + return NULL; + } + + tcache_init(tsd, tcache, + (void *)((uintptr_t)tcache + (uintptr_t)stack_offset)); + tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL)); + + return tcache; +} + +static void +tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) { + assert(tcache->arena != NULL); + + for (unsigned i = 0; i < NBINS; i++) { + cache_bin_t *tbin = tcache_small_bin_get(tcache, i); + tcache_bin_flush_small(tsd, tcache, tbin, i, 0); + + if (config_stats) { + assert(tbin->tstats.nrequests == 0); + } + } + for (unsigned i = NBINS; i < nhbins; i++) { + cache_bin_t *tbin = tcache_large_bin_get(tcache, i); + tcache_bin_flush_large(tsd, tbin, i, 0, tcache); + + if (config_stats) { + assert(tbin->tstats.nrequests == 0); + } + } + + if (config_prof && tcache->prof_accumbytes > 0 && + arena_prof_accum(tsd_tsdn(tsd), tcache->arena, + tcache->prof_accumbytes)) { + prof_idump(tsd_tsdn(tsd)); + } +} + +void +tcache_flush(tsd_t *tsd) { + assert(tcache_available(tsd)); + tcache_flush_cache(tsd, tsd_tcachep_get(tsd)); +} + +static void +tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) { + tcache_flush_cache(tsd, tcache); + tcache_arena_dissociate(tsd_tsdn(tsd), tcache); + + if (tsd_tcache) { + /* Release the avail array for the TSD embedded auto tcache. */ + void *avail_array = + (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail - + (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *)); + idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true); + } else { + /* Release both the tcache struct and avail array. */ + idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true); + } +} + +/* For auto tcache (embedded in TSD) only. */ +void +tcache_cleanup(tsd_t *tsd) { + tcache_t *tcache = tsd_tcachep_get(tsd); + if (!tcache_available(tsd)) { + assert(tsd_tcache_enabled_get(tsd) == false); + if (config_debug) { + assert(tcache_small_bin_get(tcache, 0)->avail == NULL); + } + return; + } + assert(tsd_tcache_enabled_get(tsd)); + assert(tcache_small_bin_get(tcache, 0)->avail != NULL); + + tcache_destroy(tsd, tcache, true); + if (config_debug) { + tcache_small_bin_get(tcache, 0)->avail = NULL; + } +} + +void +tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { + unsigned i; + + cassert(config_stats); + + /* Merge and reset tcache stats. 
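*/
+	/*
+	 * Editor's note (illustrative, not part of the upstream patch): for
+	 * explicit tcaches, tcache_create_explicit() above carves the struct
+	 * and all pointer stacks out of a single allocation,
+	 *
+	 *     | tcache_t | pad to PTR_CEILING | stack_nelms * sizeof(void *) |
+	 *
+	 * which is why tcache_destroy() frees the tcache pointer itself in that
+	 * case, but only the avail array for the TSD-embedded auto tcache.
+	 *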
*/ + for (i = 0; i < NBINS; i++) { + bin_t *bin = &arena->bins[i]; + cache_bin_t *tbin = tcache_small_bin_get(tcache, i); + malloc_mutex_lock(tsdn, &bin->lock); + bin->stats.nrequests += tbin->tstats.nrequests; + malloc_mutex_unlock(tsdn, &bin->lock); + tbin->tstats.nrequests = 0; + } + + for (; i < nhbins; i++) { + cache_bin_t *tbin = tcache_large_bin_get(tcache, i); + arena_stats_large_nrequests_add(tsdn, &arena->stats, i, + tbin->tstats.nrequests); + tbin->tstats.nrequests = 0; + } +} + +static bool +tcaches_create_prep(tsd_t *tsd) { + bool err; + + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + + if (tcaches == NULL) { + tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *) + * (MALLOCX_TCACHE_MAX+1), CACHELINE); + if (tcaches == NULL) { + err = true; + goto label_return; + } + } + + if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) { + err = true; + goto label_return; + } + + err = false; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + return err; +} + +bool +tcaches_create(tsd_t *tsd, unsigned *r_ind) { + witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); + + bool err; + + if (tcaches_create_prep(tsd)) { + err = true; + goto label_return; + } + + tcache_t *tcache = tcache_create_explicit(tsd); + if (tcache == NULL) { + err = true; + goto label_return; + } + + tcaches_t *elm; + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + if (tcaches_avail != NULL) { + elm = tcaches_avail; + tcaches_avail = tcaches_avail->next; + elm->tcache = tcache; + *r_ind = (unsigned)(elm - tcaches); + } else { + elm = &tcaches[tcaches_past]; + elm->tcache = tcache; + *r_ind = tcaches_past; + tcaches_past++; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + + err = false; +label_return: + witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); + return err; +} + +static tcache_t * +tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx); + + if (elm->tcache == NULL) { + return NULL; + } + tcache_t *tcache = elm->tcache; + elm->tcache = NULL; + return tcache; +} + +void +tcaches_flush(tsd_t *tsd, unsigned ind) { + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind]); + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + if (tcache != NULL) { + tcache_destroy(tsd, tcache, false); + } +} + +void +tcaches_destroy(tsd_t *tsd, unsigned ind) { + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + tcaches_t *elm = &tcaches[ind]; + tcache_t *tcache = tcaches_elm_remove(tsd, elm); + elm->next = tcaches_avail; + tcaches_avail = elm; + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + if (tcache != NULL) { + tcache_destroy(tsd, tcache, false); + } +} + +bool +tcache_boot(tsdn_t *tsdn) { + /* If necessary, clamp opt_lg_tcache_max. */ + if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < + SMALL_MAXCLASS) { + tcache_maxclass = SMALL_MAXCLASS; + } else { + tcache_maxclass = (ZU(1) << opt_lg_tcache_max); + } + + if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES, + malloc_mutex_rank_exclusive)) { + return true; + } + + nhbins = sz_size2index(tcache_maxclass) + 1; + + /* Initialize tcache_bin_info. 
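*/
+	/*
+	 * Editor's note (illustrative, not part of the upstream patch): the
+	 * sizing loop below amounts to a clamp for small bins,
+	 *
+	 *     ncached_max = CLAMP(2 * bin_infos[i].nregs,
+	 *         TCACHE_NSLOTS_SMALL_MIN, TCACHE_NSLOTS_SMALL_MAX);
+	 *
+	 * while large bins are fixed at TCACHE_NSLOTS_LARGE.
+	 *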
*/ + tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins + * sizeof(cache_bin_info_t), CACHELINE); + if (tcache_bin_info == NULL) { + return true; + } + stack_nelms = 0; + unsigned i; + for (i = 0; i < NBINS; i++) { + if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { + tcache_bin_info[i].ncached_max = + TCACHE_NSLOTS_SMALL_MIN; + } else if ((bin_infos[i].nregs << 1) <= + TCACHE_NSLOTS_SMALL_MAX) { + tcache_bin_info[i].ncached_max = + (bin_infos[i].nregs << 1); + } else { + tcache_bin_info[i].ncached_max = + TCACHE_NSLOTS_SMALL_MAX; + } + stack_nelms += tcache_bin_info[i].ncached_max; + } + for (; i < nhbins; i++) { + tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; + stack_nelms += tcache_bin_info[i].ncached_max; + } + + return false; +} + +void +tcache_prefork(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_prefork(tsdn, &tcaches_mtx); + } +} + +void +tcache_postfork_parent(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_postfork_parent(tsdn, &tcaches_mtx); + } +} + +void +tcache_postfork_child(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_postfork_child(tsdn, &tcaches_mtx); + } +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/ticker.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/ticker.c new file mode 100644 index 0000000..d7b8cd2 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/ticker.c @@ -0,0 +1,3 @@ +#define JEMALLOC_TICKER_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/tsd.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/tsd.c new file mode 100644 index 0000000..c143068 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/tsd.c @@ -0,0 +1,351 @@ +#define JEMALLOC_TSD_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" + +/******************************************************************************/ +/* Data. */ + +static unsigned ncleanups; +static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; + +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER; +__thread bool JEMALLOC_TLS_MODEL tsd_initialized = false; +bool tsd_booted = false; +#elif (defined(JEMALLOC_TLS)) +__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER; +pthread_key_t tsd_tsd; +bool tsd_booted = false; +#elif (defined(_WIN32)) +DWORD tsd_tsd; +tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER}; +bool tsd_booted = false; +#else + +/* + * This contains a mutex, but it's pretty convenient to allow the mutex code to + * have a dependency on tsd. So we define the struct here, and only refer to it + * by pointer in the header. 
+ */ +struct tsd_init_head_s { + ql_head(tsd_init_block_t) blocks; + malloc_mutex_t lock; +}; + +pthread_key_t tsd_tsd; +tsd_init_head_t tsd_init_head = { + ql_head_initializer(blocks), + MALLOC_MUTEX_INITIALIZER +}; +tsd_wrapper_t tsd_boot_wrapper = { + false, + TSD_INITIALIZER +}; +bool tsd_booted = false; +#endif + + +/******************************************************************************/ + +void +tsd_slow_update(tsd_t *tsd) { + if (tsd_nominal(tsd)) { + if (malloc_slow || !tsd_tcache_enabled_get(tsd) || + tsd_reentrancy_level_get(tsd) > 0) { + tsd->state = tsd_state_nominal_slow; + } else { + tsd->state = tsd_state_nominal; + } + } +} + +static bool +tsd_data_init(tsd_t *tsd) { + /* + * We initialize the rtree context first (before the tcache), since the + * tcache initialization depends on it. + */ + rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd)); + + /* + * A nondeterministic seed based on the address of tsd reduces + * the likelihood of lockstep non-uniform cache index + * utilization among identical concurrent processes, but at the + * cost of test repeatability. For debug builds, instead use a + * deterministic seed. + */ + *tsd_offset_statep_get(tsd) = config_debug ? 0 : + (uint64_t)(uintptr_t)tsd; + + return tsd_tcache_enabled_data_init(tsd); +} + +static void +assert_tsd_data_cleanup_done(tsd_t *tsd) { + assert(!tsd_nominal(tsd)); + assert(*tsd_arenap_get_unsafe(tsd) == NULL); + assert(*tsd_iarenap_get_unsafe(tsd) == NULL); + assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true); + assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL); + assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false); + assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL); +} + +static bool +tsd_data_init_nocleanup(tsd_t *tsd) { + assert(tsd->state == tsd_state_reincarnated || + tsd->state == tsd_state_minimal_initialized); + /* + * During reincarnation, there is no guarantee that the cleanup function + * will be called (deallocation may happen after all tsd destructors). + * We set up tsd in a way that no cleanup is needed. + */ + rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd)); + *tsd_arenas_tdata_bypassp_get(tsd) = true; + *tsd_tcache_enabledp_get_unsafe(tsd) = false; + *tsd_reentrancy_levelp_get(tsd) = 1; + assert_tsd_data_cleanup_done(tsd); + + return false; +} + +tsd_t * +tsd_fetch_slow(tsd_t *tsd, bool minimal) { + assert(!tsd_fast(tsd)); + + if (tsd->state == tsd_state_nominal_slow) { + /* On slow path but no work needed. */ + assert(malloc_slow || !tsd_tcache_enabled_get(tsd) || + tsd_reentrancy_level_get(tsd) > 0 || + *tsd_arenas_tdata_bypassp_get(tsd)); + } else if (tsd->state == tsd_state_uninitialized) { + if (!minimal) { + tsd->state = tsd_state_nominal; + tsd_slow_update(tsd); + /* Trigger cleanup handler registration. */ + tsd_set(tsd); + tsd_data_init(tsd); + } else { + tsd->state = tsd_state_minimal_initialized; + tsd_set(tsd); + tsd_data_init_nocleanup(tsd); + } + } else if (tsd->state == tsd_state_minimal_initialized) { + if (!minimal) { + /* Switch to fully initialized. 
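*/
+	/*
+	 * Editor's note (illustrative, not part of the upstream patch): the
+	 * branches in this function walk a small state machine, roughly
+	 *
+	 *     uninitialized        -> nominal | minimal_initialized
+	 *     minimal_initialized  -> nominal        (first non-minimal fetch)
+	 *     purgatory            -> reincarnated   (fetch after cleanup)
+	 *
+	 * with nominal_slow acting as a flagged variant of nominal, kept in
+	 * sync by tsd_slow_update() above.
+	 *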
*/ + tsd->state = tsd_state_nominal; + assert(*tsd_reentrancy_levelp_get(tsd) >= 1); + (*tsd_reentrancy_levelp_get(tsd))--; + tsd_slow_update(tsd); + tsd_data_init(tsd); + } else { + assert_tsd_data_cleanup_done(tsd); + } + } else if (tsd->state == tsd_state_purgatory) { + tsd->state = tsd_state_reincarnated; + tsd_set(tsd); + tsd_data_init_nocleanup(tsd); + } else { + assert(tsd->state == tsd_state_reincarnated); + } + + return tsd; +} + +void * +malloc_tsd_malloc(size_t size) { + return a0malloc(CACHELINE_CEILING(size)); +} + +void +malloc_tsd_dalloc(void *wrapper) { + a0dalloc(wrapper); +} + +#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) +#ifndef _WIN32 +JEMALLOC_EXPORT +#endif +void +_malloc_thread_cleanup(void) { + bool pending[MALLOC_TSD_CLEANUPS_MAX], again; + unsigned i; + + for (i = 0; i < ncleanups; i++) { + pending[i] = true; + } + + do { + again = false; + for (i = 0; i < ncleanups; i++) { + if (pending[i]) { + pending[i] = cleanups[i](); + if (pending[i]) { + again = true; + } + } + } + } while (again); +} +#endif + +void +malloc_tsd_cleanup_register(bool (*f)(void)) { + assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); + cleanups[ncleanups] = f; + ncleanups++; +} + +static void +tsd_do_data_cleanup(tsd_t *tsd) { + prof_tdata_cleanup(tsd); + iarena_cleanup(tsd); + arena_cleanup(tsd); + arenas_tdata_cleanup(tsd); + tcache_cleanup(tsd); + witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd)); +} + +void +tsd_cleanup(void *arg) { + tsd_t *tsd = (tsd_t *)arg; + + switch (tsd->state) { + case tsd_state_uninitialized: + /* Do nothing. */ + break; + case tsd_state_minimal_initialized: + /* This implies the thread only did free() in its life time. */ + /* Fall through. */ + case tsd_state_reincarnated: + /* + * Reincarnated means another destructor deallocated memory + * after the destructor was called. Cleanup isn't required but + * is still called for testing and completeness. + */ + assert_tsd_data_cleanup_done(tsd); + /* Fall through. */ + case tsd_state_nominal: + case tsd_state_nominal_slow: + tsd_do_data_cleanup(tsd); + tsd->state = tsd_state_purgatory; + tsd_set(tsd); + break; + case tsd_state_purgatory: + /* + * The previous time this destructor was called, we set the + * state to tsd_state_purgatory so that other destructors + * wouldn't cause re-creation of the tsd. This time, do + * nothing, and do not request another callback. + */ + break; + default: + not_reached(); + } +#ifdef JEMALLOC_JET + test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd); + int *data = tsd_test_datap_get_unsafe(tsd); + if (test_callback != NULL) { + test_callback(data); + } +#endif +} + +tsd_t * +malloc_tsd_boot0(void) { + tsd_t *tsd; + + ncleanups = 0; + if (tsd_boot0()) { + return NULL; + } + tsd = tsd_fetch(); + *tsd_arenas_tdata_bypassp_get(tsd) = true; + return tsd; +} + +void +malloc_tsd_boot1(void) { + tsd_boot1(); + tsd_t *tsd = tsd_fetch(); + /* malloc_slow has been set properly. Update tsd_slow. */ + tsd_slow_update(tsd); + *tsd_arenas_tdata_bypassp_get(tsd) = false; +} + +#ifdef _WIN32 +static BOOL WINAPI +_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { + switch (fdwReason) { +#ifdef JEMALLOC_LAZY_LOCK + case DLL_THREAD_ATTACH: + isthreaded = true; + break; +#endif + case DLL_THREAD_DETACH: + _malloc_thread_cleanup(); + break; + default: + break; + } + return true; +} + +/* + * We need to be able to say "read" here (in the "pragma section"), but have + * hooked "read". 
We won't read for the rest of the file, so we can get away + * with unhooking. + */ +#ifdef read +# undef read +#endif + +#ifdef _MSC_VER +# ifdef _M_IX86 +# pragma comment(linker, "/INCLUDE:__tls_used") +# pragma comment(linker, "/INCLUDE:_tls_callback") +# else +# pragma comment(linker, "/INCLUDE:_tls_used") +# pragma comment(linker, "/INCLUDE:tls_callback") +# endif +# pragma section(".CRT$XLY",long,read) +#endif +JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) +BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, + DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; +#endif + +#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ + !defined(_WIN32)) +void * +tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) { + pthread_t self = pthread_self(); + tsd_init_block_t *iter; + + /* Check whether this thread has already inserted into the list. */ + malloc_mutex_lock(TSDN_NULL, &head->lock); + ql_foreach(iter, &head->blocks, link) { + if (iter->thread == self) { + malloc_mutex_unlock(TSDN_NULL, &head->lock); + return iter->data; + } + } + /* Insert block into list. */ + ql_elm_new(block, link); + block->thread = self; + ql_tail_insert(&head->blocks, block, link); + malloc_mutex_unlock(TSDN_NULL, &head->lock); + return NULL; +} + +void +tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) { + malloc_mutex_lock(TSDN_NULL, &head->lock); + ql_remove(&head->blocks, block, link); + malloc_mutex_unlock(TSDN_NULL, &head->lock); +} +#endif diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/util.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/util.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/util.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/util.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/valgrind.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/valgrind.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/src/valgrind.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/valgrind.c diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/witness.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/witness.c new file mode 100644 index 0000000..f42b72a --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/witness.c @@ -0,0 +1,100 @@ +#define JEMALLOC_WITNESS_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/malloc_io.h" + +void +witness_init(witness_t *witness, const char *name, witness_rank_t rank, + witness_comp_t *comp, void *opaque) { + witness->name = name; + witness->rank = rank; + witness->comp = comp; + witness->opaque = opaque; +} + +static void +witness_lock_error_impl(const witness_list_t *witnesses, + const witness_t *witness) { + witness_t *w; + + malloc_printf(": Lock rank order reversal:"); + ql_foreach(w, witnesses, link) { + malloc_printf(" %s(%u)", w->name, w->rank); + } + malloc_printf(" %s(%u)\n", witness->name, witness->rank); + abort(); +} +witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl; + +static void +witness_owner_error_impl(const witness_t *witness) { + malloc_printf(": Should own %s(%u)\n", witness->name, + witness->rank); + abort(); +} +witness_owner_error_t *JET_MUTABLE witness_owner_error = + 
witness_owner_error_impl; + +static void +witness_not_owner_error_impl(const witness_t *witness) { + malloc_printf(": Should not own %s(%u)\n", witness->name, + witness->rank); + abort(); +} +witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error = + witness_not_owner_error_impl; + +static void +witness_depth_error_impl(const witness_list_t *witnesses, + witness_rank_t rank_inclusive, unsigned depth) { + witness_t *w; + + malloc_printf(": Should own %u lock%s of rank >= %u:", depth, + (depth != 1) ? "s" : "", rank_inclusive); + ql_foreach(w, witnesses, link) { + malloc_printf(" %s(%u)", w->name, w->rank); + } + malloc_printf("\n"); + abort(); +} +witness_depth_error_t *JET_MUTABLE witness_depth_error = + witness_depth_error_impl; + +void +witnesses_cleanup(witness_tsd_t *witness_tsd) { + witness_assert_lockless(witness_tsd_tsdn(witness_tsd)); + + /* Do nothing. */ +} + +void +witness_prefork(witness_tsd_t *witness_tsd) { + if (!config_debug) { + return; + } + witness_tsd->forking = true; +} + +void +witness_postfork_parent(witness_tsd_t *witness_tsd) { + if (!config_debug) { + return; + } + witness_tsd->forking = false; +} + +void +witness_postfork_child(witness_tsd_t *witness_tsd) { + if (!config_debug) { + return; + } +#ifndef JEMALLOC_MUTEX_INIT_CB + witness_list_t *witnesses; + + witnesses = &witness_tsd->witnesses; + ql_new(witnesses); +#endif + witness_tsd->forking = false; +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/zone.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/zone.c new file mode 100644 index 0000000..23dfdd0 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/src/zone.c @@ -0,0 +1,469 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" + +#ifndef JEMALLOC_ZONE +# error "This source file is for zones on Darwin (OS X)." +#endif + +/* Definitions of the following structs in malloc/malloc.h might be too old + * for the built binary to run on newer versions of OSX. So use the newest + * possible version of those structs. 
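+ */
+/*
+ * Editor's note (illustrative, not part of the upstream patch): the
+ * "version" field below is how libmalloc decides which of these function
+ * pointers it may consult; zone_init() later sets jemalloc_zone.version = 9,
+ * new enough to expose memalign, free_definite_size and pressure_relief.
+ *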
+ */ +typedef struct _malloc_zone_t { + void *reserved1; + void *reserved2; + size_t (*size)(struct _malloc_zone_t *, const void *); + void *(*malloc)(struct _malloc_zone_t *, size_t); + void *(*calloc)(struct _malloc_zone_t *, size_t, size_t); + void *(*valloc)(struct _malloc_zone_t *, size_t); + void (*free)(struct _malloc_zone_t *, void *); + void *(*realloc)(struct _malloc_zone_t *, void *, size_t); + void (*destroy)(struct _malloc_zone_t *); + const char *zone_name; + unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned); + void (*batch_free)(struct _malloc_zone_t *, void **, unsigned); + struct malloc_introspection_t *introspect; + unsigned version; + void *(*memalign)(struct _malloc_zone_t *, size_t, size_t); + void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t); + size_t (*pressure_relief)(struct _malloc_zone_t *, size_t); +} malloc_zone_t; + +typedef struct { + vm_address_t address; + vm_size_t size; +} vm_range_t; + +typedef struct malloc_statistics_t { + unsigned blocks_in_use; + size_t size_in_use; + size_t max_size_in_use; + size_t size_allocated; +} malloc_statistics_t; + +typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **); + +typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned); + +typedef struct malloc_introspection_t { + kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t); + size_t (*good_size)(malloc_zone_t *, size_t); + boolean_t (*check)(malloc_zone_t *); + void (*print)(malloc_zone_t *, boolean_t); + void (*log)(malloc_zone_t *, void *); + void (*force_lock)(malloc_zone_t *); + void (*force_unlock)(malloc_zone_t *); + void (*statistics)(malloc_zone_t *, malloc_statistics_t *); + boolean_t (*zone_locked)(malloc_zone_t *); + boolean_t (*enable_discharge_checking)(malloc_zone_t *); + boolean_t (*disable_discharge_checking)(malloc_zone_t *); + void (*discharge)(malloc_zone_t *, void *); +#ifdef __BLOCKS__ + void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *)); +#else + void *enumerate_unavailable_without_blocks; +#endif + void (*reinit_lock)(malloc_zone_t *); +} malloc_introspection_t; + +extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *); + +extern malloc_zone_t *malloc_default_zone(void); + +extern void malloc_zone_register(malloc_zone_t *zone); + +extern void malloc_zone_unregister(malloc_zone_t *zone); + +/* + * The malloc_default_purgeable_zone() function is only available on >= 10.6. + * We need to check whether it is present at runtime, thus the weak_import. + */ +extern malloc_zone_t *malloc_default_purgeable_zone(void) +JEMALLOC_ATTR(weak_import); + +/******************************************************************************/ +/* Data. */ + +static malloc_zone_t *default_zone, *purgeable_zone; +static malloc_zone_t jemalloc_zone; +static struct malloc_introspection_t jemalloc_zone_introspect; +static pid_t zone_force_lock_pid = -1; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
*/ + +static size_t zone_size(malloc_zone_t *zone, const void *ptr); +static void *zone_malloc(malloc_zone_t *zone, size_t size); +static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); +static void *zone_valloc(malloc_zone_t *zone, size_t size); +static void zone_free(malloc_zone_t *zone, void *ptr); +static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); +static void *zone_memalign(malloc_zone_t *zone, size_t alignment, + size_t size); +static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, + size_t size); +static void zone_destroy(malloc_zone_t *zone); +static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, + void **results, unsigned num_requested); +static void zone_batch_free(struct _malloc_zone_t *zone, + void **to_be_freed, unsigned num_to_be_freed); +static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal); +static size_t zone_good_size(malloc_zone_t *zone, size_t size); +static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask, + vm_address_t zone_address, memory_reader_t reader, + vm_range_recorder_t recorder); +static boolean_t zone_check(malloc_zone_t *zone); +static void zone_print(malloc_zone_t *zone, boolean_t verbose); +static void zone_log(malloc_zone_t *zone, void *address); +static void zone_force_lock(malloc_zone_t *zone); +static void zone_force_unlock(malloc_zone_t *zone); +static void zone_statistics(malloc_zone_t *zone, + malloc_statistics_t *stats); +static boolean_t zone_locked(malloc_zone_t *zone); +static void zone_reinit_lock(malloc_zone_t *zone); + +/******************************************************************************/ +/* + * Functions. + */ + +static size_t +zone_size(malloc_zone_t *zone, const void *ptr) { + /* + * There appear to be places within Darwin (such as setenv(3)) that + * cause calls to this function with pointers that *no* zone owns. If + * we knew that all pointers were owned by *some* zone, we could split + * our zone into two parts, and use one as the default allocator and + * the other as the default deallocator/reallocator. Since that will + * not work in practice, we must check all pointers to assure that they + * reside within a mapped extent before determining size. + */ + return ivsalloc(tsdn_fetch(), ptr); +} + +static void * +zone_malloc(malloc_zone_t *zone, size_t size) { + return je_malloc(size); +} + +static void * +zone_calloc(malloc_zone_t *zone, size_t num, size_t size) { + return je_calloc(num, size); +} + +static void * +zone_valloc(malloc_zone_t *zone, size_t size) { + void *ret = NULL; /* Assignment avoids useless compiler warning. */ + + je_posix_memalign(&ret, PAGE, size); + + return ret; +} + +static void +zone_free(malloc_zone_t *zone, void *ptr) { + if (ivsalloc(tsdn_fetch(), ptr) != 0) { + je_free(ptr); + return; + } + + free(ptr); +} + +static void * +zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) { + if (ivsalloc(tsdn_fetch(), ptr) != 0) { + return je_realloc(ptr, size); + } + + return realloc(ptr, size); +} + +static void * +zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) { + void *ret = NULL; /* Assignment avoids useless compiler warning. 
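*/
+	/*
+	 * Editor's note (illustrative, not part of the upstream patch):
+	 * zone_free() and zone_realloc() above share one dispatch idiom --
+	 * ivsalloc() returns nonzero only for pointers jemalloc owns, so
+	 *
+	 *     if (ivsalloc(tsdn_fetch(), ptr) != 0) je_free(ptr);
+	 *     else free(ptr);   // defer to the system allocator
+	 *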
*/ + + je_posix_memalign(&ret, alignment, size); + + return ret; +} + +static void +zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) { + size_t alloc_size; + + alloc_size = ivsalloc(tsdn_fetch(), ptr); + if (alloc_size != 0) { + assert(alloc_size == size); + je_free(ptr); + return; + } + + free(ptr); +} + +static void +zone_destroy(malloc_zone_t *zone) { + /* This function should never be called. */ + not_reached(); +} + +static unsigned +zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, + unsigned num_requested) { + unsigned i; + + for (i = 0; i < num_requested; i++) { + results[i] = je_malloc(size); + if (!results[i]) + break; + } + + return i; +} + +static void +zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, + unsigned num_to_be_freed) { + unsigned i; + + for (i = 0; i < num_to_be_freed; i++) { + zone_free(zone, to_be_freed[i]); + to_be_freed[i] = NULL; + } +} + +static size_t +zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) { + return 0; +} + +static size_t +zone_good_size(malloc_zone_t *zone, size_t size) { + if (size == 0) { + size = 1; + } + return sz_s2u(size); +} + +static kern_return_t +zone_enumerator(task_t task, void *data, unsigned type_mask, + vm_address_t zone_address, memory_reader_t reader, + vm_range_recorder_t recorder) { + return KERN_SUCCESS; +} + +static boolean_t +zone_check(malloc_zone_t *zone) { + return true; +} + +static void +zone_print(malloc_zone_t *zone, boolean_t verbose) { +} + +static void +zone_log(malloc_zone_t *zone, void *address) { +} + +static void +zone_force_lock(malloc_zone_t *zone) { + if (isthreaded) { + /* + * See the note in zone_force_unlock, below, to see why we need + * this. + */ + assert(zone_force_lock_pid == -1); + zone_force_lock_pid = getpid(); + jemalloc_prefork(); + } +} + +static void +zone_force_unlock(malloc_zone_t *zone) { + /* + * zone_force_lock and zone_force_unlock are the entry points to the + * forking machinery on OS X. The tricky thing is, the child is not + * allowed to unlock mutexes locked in the parent, even if owned by the + * forking thread (and the mutex type we use in OS X will fail an assert + * if we try). In the child, we can get away with reinitializing all + * the mutexes, which has the effect of unlocking them. In the parent, + * doing this would mean we wouldn't wake any waiters blocked on the + * mutexes we unlock. So, we record the pid of the current thread in + * zone_force_lock, and use that to detect if we're in the parent or + * child here, to decide which unlock logic we need. + */ + if (isthreaded) { + assert(zone_force_lock_pid != -1); + if (getpid() == zone_force_lock_pid) { + jemalloc_postfork_parent(); + } else { + jemalloc_postfork_child(); + } + zone_force_lock_pid = -1; + } +} + +static void +zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) { + /* We make no effort to actually fill the values */ + stats->blocks_in_use = 0; + stats->size_in_use = 0; + stats->max_size_in_use = 0; + stats->size_allocated = 0; +} + +static boolean_t +zone_locked(malloc_zone_t *zone) { + /* Pretend no lock is being held */ + return false; +} + +static void +zone_reinit_lock(malloc_zone_t *zone) { + /* As of OSX 10.12, this function is only used when force_unlock would + * be used if the zone version were < 9. So just use force_unlock. 
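*/
+	/*
+	 * Editor's note (illustrative, not part of the upstream patch): the
+	 * parent/child split described in zone_force_unlock() above reduces to
+	 * a pid comparison against the value recorded at lock time:
+	 *
+	 *     getpid() == zone_force_lock_pid ? jemalloc_postfork_parent()
+	 *                                     : jemalloc_postfork_child();
+	 *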
*/ + zone_force_unlock(zone); +} + +static void +zone_init(void) { + jemalloc_zone.size = zone_size; + jemalloc_zone.malloc = zone_malloc; + jemalloc_zone.calloc = zone_calloc; + jemalloc_zone.valloc = zone_valloc; + jemalloc_zone.free = zone_free; + jemalloc_zone.realloc = zone_realloc; + jemalloc_zone.destroy = zone_destroy; + jemalloc_zone.zone_name = "jemalloc_zone"; + jemalloc_zone.batch_malloc = zone_batch_malloc; + jemalloc_zone.batch_free = zone_batch_free; + jemalloc_zone.introspect = &jemalloc_zone_introspect; + jemalloc_zone.version = 9; + jemalloc_zone.memalign = zone_memalign; + jemalloc_zone.free_definite_size = zone_free_definite_size; + jemalloc_zone.pressure_relief = zone_pressure_relief; + + jemalloc_zone_introspect.enumerator = zone_enumerator; + jemalloc_zone_introspect.good_size = zone_good_size; + jemalloc_zone_introspect.check = zone_check; + jemalloc_zone_introspect.print = zone_print; + jemalloc_zone_introspect.log = zone_log; + jemalloc_zone_introspect.force_lock = zone_force_lock; + jemalloc_zone_introspect.force_unlock = zone_force_unlock; + jemalloc_zone_introspect.statistics = zone_statistics; + jemalloc_zone_introspect.zone_locked = zone_locked; + jemalloc_zone_introspect.enable_discharge_checking = NULL; + jemalloc_zone_introspect.disable_discharge_checking = NULL; + jemalloc_zone_introspect.discharge = NULL; +#ifdef __BLOCKS__ + jemalloc_zone_introspect.enumerate_discharged_pointers = NULL; +#else + jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL; +#endif + jemalloc_zone_introspect.reinit_lock = zone_reinit_lock; +} + +static malloc_zone_t * +zone_default_get(void) { + malloc_zone_t **zones = NULL; + unsigned int num_zones = 0; + + /* + * On OSX 10.12, malloc_default_zone returns a special zone that is not + * present in the list of registered zones. That zone uses a "lite zone" + * if one is present (apparently enabled when malloc stack logging is + * enabled), or the first registered zone otherwise. In practice this + * means unless malloc stack logging is enabled, the first registered + * zone is the default. So get the list of zones to get the first one, + * instead of relying on malloc_default_zone. + */ + if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, + (vm_address_t**)&zones, &num_zones)) { + /* + * Reset the value in case the failure happened after it was + * set. + */ + num_zones = 0; + } + + if (num_zones) { + return zones[0]; + } + + return malloc_default_zone(); +} + +/* As written, this function can only promote jemalloc_zone. */ +static void +zone_promote(void) { + malloc_zone_t *zone; + + do { + /* + * Unregister and reregister the default zone. On OSX >= 10.6, + * unregistering takes the last registered zone and places it + * at the location of the specified zone. Unregistering the + * default zone thus makes the last registered one the default. + * On OSX < 10.6, unregistering shifts all registered zones. + * The first registered zone then becomes the default. + */ + malloc_zone_unregister(default_zone); + malloc_zone_register(default_zone); + + /* + * On OSX 10.6, having the default purgeable zone appear before + * the default zone makes some things crash because it thinks it + * owns the default zone allocated pointers. We thus + * unregister/re-register it in order to ensure it's always + * after the default zone. On OSX < 10.6, there is no purgeable + * zone, so this does nothing. On OSX >= 10.6, unregistering + * replaces the purgeable zone with the last registered zone + * above, i.e. the default zone. 
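+	 * (Editor's aside, illustrative, not part of the upstream patch: the
+	 * surrounding loop can be read as
+	 *     do { unregister(default); register(default); }
+	 *         while (zone_default_get() != &jemalloc_zone);
+	 * i.e. rotate the registered zones until jemalloc's comes first.)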
Registering it again then puts + * it at the end, obviously after the default zone. + */ + if (purgeable_zone != NULL) { + malloc_zone_unregister(purgeable_zone); + malloc_zone_register(purgeable_zone); + } + + zone = zone_default_get(); + } while (zone != &jemalloc_zone); +} + +JEMALLOC_ATTR(constructor) +void +zone_register(void) { + /* + * If something else replaced the system default zone allocator, don't + * register jemalloc's. + */ + default_zone = zone_default_get(); + if (!default_zone->zone_name || strcmp(default_zone->zone_name, + "DefaultMallocZone") != 0) { + return; + } + + /* + * The default purgeable zone is created lazily by OSX's libc. It uses + * the default zone when it is created for "small" allocations + * (< 15 KiB), but assumes the default zone is a scalable_zone. This + * obviously fails when the default zone is the jemalloc zone, so + * malloc_default_purgeable_zone() is called beforehand so that the + * default purgeable zone is created when the default zone is still + * a scalable_zone. As purgeable zones only exist on >= 10.6, we need + * to check for the existence of malloc_default_purgeable_zone() at + * run time. + */ + purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL : + malloc_default_purgeable_zone(); + + /* Register the custom zone. At this point it won't be the default. */ + zone_init(); + malloc_zone_register(&jemalloc_zone); + + /* Promote the custom zone to be default. */ + zone_promote(); +} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-alti.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-alti.h similarity index 96% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-alti.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-alti.h index 0005df6..a1885db 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-alti.h +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-alti.h @@ -33,8 +33,8 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -/** - * @file SFMT-alti.h +/** + * @file SFMT-alti.h * * @brief SIMD oriented Fast Mersenne Twister(SFMT) * pseudorandom number generator @@ -95,7 +95,7 @@ vector unsigned int vec_recursion(vector unsigned int a, * This function fills the internal state array with pseudorandom * integers. */ -JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { +static inline void gen_rand_all(sfmt_t *ctx) { int i; vector unsigned int r, r1, r2; @@ -119,10 +119,10 @@ JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { * This function fills the user-specified array with pseudorandom * integers. * - * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param array an 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pesudorandom numbers to be generated. */ -JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { +static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; vector unsigned int r, r1, r2; @@ -173,7 +173,7 @@ JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { * @param array an 128-bit array to be swaped. * @param size size of 128-bit array. 
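*/
+/*
+ * Editor's note (illustrative, not part of the upstream patch): the hunks in
+ * these SFMT test headers all make the same mechanical change -- jemalloc 5
+ * dropped the JEMALLOC_ENABLE_INLINE machinery, so each JEMALLOC_INLINE
+ * function becomes a plain "static inline" one.
+ *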
*/ -JEMALLOC_INLINE void swap(w128_t *array, int size) { +static inline void swap(w128_t *array, int size) { int i; const vector unsigned char perm = ALTI_SWAP; diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params11213.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params11213.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params11213.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params11213.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params1279.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params1279.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params1279.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params1279.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params132049.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params132049.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params132049.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params132049.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params19937.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params19937.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params19937.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params19937.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params216091.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params216091.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params216091.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params216091.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params2281.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params2281.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params2281.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params2281.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params4253.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params4253.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params4253.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params4253.h diff --git 
a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params44497.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params44497.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params44497.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params44497.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params607.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params607.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params607.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params607.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params86243.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params86243.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-params86243.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-params86243.h diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-sse2.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-sse2.h similarity index 97% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-sse2.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-sse2.h index 0314a16..169ad55 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT-sse2.h +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT-sse2.h @@ -33,7 +33,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -/** +/** * @file SFMT-sse2.h * @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2 * @@ -60,10 +60,10 @@ * @param mask 128-bit mask * @return output */ -JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, +JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, __m128i c, __m128i d, __m128i mask) { __m128i v, x, y, z; - + x = _mm_load_si128(a); y = _mm_srli_epi32(*b, SR1); z = _mm_srli_si128(c, SR2); @@ -81,7 +81,7 @@ JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, * This function fills the internal state array with pseudorandom * integers. */ -JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { +static inline void gen_rand_all(sfmt_t *ctx) { int i; __m128i r, r1, r2, mask; mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); @@ -108,10 +108,10 @@ JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { * This function fills the user-specified array with pseudorandom * integers. * - * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param array an 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pesudorandom numbers to be generated. 
*/ -JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { +static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; __m128i r, r1, r2, mask; mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT.h similarity index 83% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT.h index 09c1607..863fc55 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/SFMT.h +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/SFMT.h @@ -81,91 +81,66 @@ const char *get_idstring(void); int get_min_array_size32(void); int get_min_array_size64(void); -#ifndef JEMALLOC_ENABLE_INLINE -double to_real1(uint32_t v); -double genrand_real1(sfmt_t *ctx); -double to_real2(uint32_t v); -double genrand_real2(sfmt_t *ctx); -double to_real3(uint32_t v); -double genrand_real3(sfmt_t *ctx); -double to_res53(uint64_t v); -double to_res53_mix(uint32_t x, uint32_t y); -double genrand_res53(sfmt_t *ctx); -double genrand_res53_mix(sfmt_t *ctx); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_)) /* These real versions are due to Isaku Wada */ /** generates a random number on [0,1]-real-interval */ -JEMALLOC_INLINE double to_real1(uint32_t v) -{ +static inline double to_real1(uint32_t v) { return v * (1.0/4294967295.0); /* divided by 2^32-1 */ } /** generates a random number on [0,1]-real-interval */ -JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx) -{ +static inline double genrand_real1(sfmt_t *ctx) { return to_real1(gen_rand32(ctx)); } /** generates a random number on [0,1)-real-interval */ -JEMALLOC_INLINE double to_real2(uint32_t v) -{ +static inline double to_real2(uint32_t v) { return v * (1.0/4294967296.0); /* divided by 2^32 */ } /** generates a random number on [0,1)-real-interval */ -JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx) -{ +static inline double genrand_real2(sfmt_t *ctx) { return to_real2(gen_rand32(ctx)); } /** generates a random number on (0,1)-real-interval */ -JEMALLOC_INLINE double to_real3(uint32_t v) -{ +static inline double to_real3(uint32_t v) { return (((double)v) + 0.5)*(1.0/4294967296.0); /* divided by 2^32 */ } /** generates a random number on (0,1)-real-interval */ -JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx) -{ +static inline double genrand_real3(sfmt_t *ctx) { return to_real3(gen_rand32(ctx)); } /** These real versions are due to Isaku Wada */ /** generates a random number on [0,1) with 53-bit resolution*/ -JEMALLOC_INLINE double to_res53(uint64_t v) -{ +static inline double to_res53(uint64_t v) { return v * (1.0/18446744073709551616.0L); } /** generates a random number on [0,1) with 53-bit resolution from two * 32 bit integers */ -JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y) -{ +static inline double to_res53_mix(uint32_t x, uint32_t y) { return to_res53(x | ((uint64_t)y << 32)); } /** generates a random number on [0,1) with 53-bit resolution */ -JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx) -{ +static inline double genrand_res53(sfmt_t *ctx) { return to_res53(gen_rand64(ctx)); -} +} /** generates a random number on [0,1) with 53-bit resolution using 32bit integer. 
*/ -JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx) -{ +static inline double genrand_res53_mix(sfmt_t *ctx) { uint32_t x, y; x = gen_rand32(ctx); y = gen_rand32(ctx); return to_res53_mix(x, y); -} -#endif +} #endif diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/btalloc.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/btalloc.h similarity index 77% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/btalloc.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/btalloc.h index c3f9d4d..5877ea7 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/btalloc.h +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/btalloc.h @@ -1,20 +1,19 @@ /* btalloc() provides a mechanism for allocating via permuted backtraces. */ void *btalloc(size_t size, unsigned bits); -#define btalloc_n_proto(n) \ +#define btalloc_n_proto(n) \ void *btalloc_##n(size_t size, unsigned bits); btalloc_n_proto(0) btalloc_n_proto(1) -#define btalloc_n_gen(n) \ +#define btalloc_n_gen(n) \ void * \ -btalloc_##n(size_t size, unsigned bits) \ -{ \ +btalloc_##n(size_t size, unsigned bits) { \ void *p; \ \ - if (bits == 0) \ + if (bits == 0) { \ p = mallocx(size, 0); \ - else { \ + } else { \ switch (bits & 0x1U) { \ case 0: \ p = (btalloc_0(size, bits >> 1)); \ @@ -27,5 +26,5 @@ btalloc_##n(size_t size, unsigned bits) \ } \ /* Intentionally sabotage tail call optimization. */ \ assert_ptr_not_null(p, "Unexpected mallocx() failure"); \ - return (p); \ + return p; \ } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/extent_hooks.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/extent_hooks.h new file mode 100644 index 0000000..1f06201 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/extent_hooks.h @@ -0,0 +1,289 @@ +/* + * Boilerplate code used for testing extent hooks via interception and + * passthrough. 
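+ */
+/*
+ * Editor's note (illustrative, not part of the upstream patch): every hook
+ * below follows the same shape -- assert it was installed via &hooks, record
+ * called_<op>, bail out if try_<op> is false, else pass through to
+ * default_hooks and record did_<op>:
+ *
+ *     called_x = true;
+ *     if (!try_x) return <failure>;
+ *     err = default_hooks->x(default_hooks, ..., 0);
+ *     did_x = !err;
+ *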
+ */ + +static void *extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit, + unsigned arena_ind); +static bool extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static void extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static bool extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_purge_forced_hook(extent_hooks_t *extent_hooks, + void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_split_hook(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t size_a, size_t size_b, bool committed, + unsigned arena_ind); +static bool extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, + size_t size_a, void *addr_b, size_t size_b, bool committed, + unsigned arena_ind); + +static extent_hooks_t *default_hooks; +static extent_hooks_t hooks = { + extent_alloc_hook, + extent_dalloc_hook, + extent_destroy_hook, + extent_commit_hook, + extent_decommit_hook, + extent_purge_lazy_hook, + extent_purge_forced_hook, + extent_split_hook, + extent_merge_hook +}; + +/* Control whether hook functions pass calls through to default hooks. */ +static bool try_alloc = true; +static bool try_dalloc = true; +static bool try_destroy = true; +static bool try_commit = true; +static bool try_decommit = true; +static bool try_purge_lazy = true; +static bool try_purge_forced = true; +static bool try_split = true; +static bool try_merge = true; + +/* Set to false prior to operations, then introspect after operations. */ +static bool called_alloc; +static bool called_dalloc; +static bool called_destroy; +static bool called_commit; +static bool called_decommit; +static bool called_purge_lazy; +static bool called_purge_forced; +static bool called_split; +static bool called_merge; + +/* Set to false prior to operations, then introspect after operations. */ +static bool did_alloc; +static bool did_dalloc; +static bool did_destroy; +static bool did_commit; +static bool did_decommit; +static bool did_purge_lazy; +static bool did_purge_forced; +static bool did_split; +static bool did_merge; + +#if 0 +# define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__) +#else +# define TRACE_HOOK(fmt, ...) +#endif + +static void * +extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { + void *ret; + + TRACE_HOOK("%s(extent_hooks=%p, new_addr=%p, size=%zu, alignment=%zu, " + "*zero=%s, *commit=%s, arena_ind=%u)\n", __func__, extent_hooks, + new_addr, size, alignment, *zero ? "true" : "false", *commit ? 
+ "true" : "false", arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->alloc, extent_alloc_hook, + "Wrong hook function"); + called_alloc = true; + if (!try_alloc) { + return NULL; + } + ret = default_hooks->alloc(default_hooks, new_addr, size, alignment, + zero, commit, 0); + did_alloc = (ret != NULL); + return ret; +} + +static bool +extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " + "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? + "true" : "false", arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_hook, + "Wrong hook function"); + called_dalloc = true; + if (!try_dalloc) { + return true; + } + err = default_hooks->dalloc(default_hooks, addr, size, committed, 0); + did_dalloc = !err; + return err; +} + +static void +extent_destroy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " + "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? + "true" : "false", arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->destroy, extent_destroy_hook, + "Wrong hook function"); + called_destroy = true; + if (!try_destroy) { + return; + } + default_hooks->destroy(default_hooks, addr, size, committed, 0); + did_destroy = true; +} + +static bool +extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size, + offset, length, arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->commit, extent_commit_hook, + "Wrong hook function"); + called_commit = true; + if (!try_commit) { + return true; + } + err = default_hooks->commit(default_hooks, addr, size, offset, length, + 0); + did_commit = !err; + return err; +} + +static bool +extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu, arena_ind=%u)\n", __func__, extent_hooks, addr, size, + offset, length, arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->decommit, extent_decommit_hook, + "Wrong hook function"); + called_decommit = true; + if (!try_decommit) { + return true; + } + err = default_hooks->decommit(default_hooks, addr, size, offset, length, + 0); + did_decommit = !err; + return err; +} + +static bool +extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size, + offset, length, arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + 
assert_ptr_eq(extent_hooks->purge_lazy, extent_purge_lazy_hook, + "Wrong hook function"); + called_purge_lazy = true; + if (!try_purge_lazy) { + return true; + } + err = default_hooks->purge_lazy == NULL || + default_hooks->purge_lazy(default_hooks, addr, size, offset, length, + 0); + did_purge_lazy = !err; + return err; +} + +static bool +extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, offset=%zu, " + "length=%zu arena_ind=%u)\n", __func__, extent_hooks, addr, size, + offset, length, arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->purge_forced, extent_purge_forced_hook, + "Wrong hook function"); + called_purge_forced = true; + if (!try_purge_forced) { + return true; + } + err = default_hooks->purge_forced == NULL || + default_hooks->purge_forced(default_hooks, addr, size, offset, + length, 0); + did_purge_forced = !err; + return err; +} + +static bool +extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, size_a=%zu, " + "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks, + addr, size, size_a, size_b, committed ? "true" : "false", + arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->split, extent_split_hook, + "Wrong hook function"); + called_split = true; + if (!try_split) { + return true; + } + err = (default_hooks->split == NULL || + default_hooks->split(default_hooks, addr, size, size_a, size_b, + committed, 0)); + did_split = !err; + return err; +} + +static bool +extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, + void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { + bool err; + + TRACE_HOOK("%s(extent_hooks=%p, addr_a=%p, size_a=%zu, addr_b=%p " + "size_b=%zu, committed=%s, arena_ind=%u)\n", __func__, extent_hooks, + addr_a, size_a, addr_b, size_b, committed ? 
"true" : "false", + arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->merge, extent_merge_hook, + "Wrong hook function"); + assert_ptr_eq((void *)((uintptr_t)addr_a + size_a), addr_b, + "Extents not mergeable"); + called_merge = true; + if (!try_merge) { + return true; + } + err = (default_hooks->merge == NULL || + default_hooks->merge(default_hooks, addr_a, size_a, addr_b, size_b, + committed, 0)); + did_merge = !err; + return err; +} + +static void +extent_hooks_prep(void) { + size_t sz; + + sz = sizeof(default_hooks); + assert_d_eq(mallctl("arena.0.extent_hooks", (void *)&default_hooks, &sz, + NULL, 0), 0, "Unexpected mallctl() error"); +} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/jemalloc_test.h.in b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/jemalloc_test.h.in similarity index 77% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/jemalloc_test.h.in rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/jemalloc_test.h.in index 455569d..67caa86 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/jemalloc_test.h.in +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/jemalloc_test.h.in @@ -1,3 +1,7 @@ +#ifdef __cplusplus +extern "C" { +#endif + #include #ifndef SIZE_T_MAX # define SIZE_T_MAX SIZE_MAX @@ -11,7 +15,6 @@ #ifdef _WIN32 # include "msvc_compat/strings.h" #endif -#include #ifdef _WIN32 # include @@ -20,39 +23,6 @@ # include #endif -/******************************************************************************/ -/* - * Define always-enabled assertion macros, so that test assertions execute even - * if assertions are disabled in the library code. These definitions must - * exist prior to including "jemalloc/internal/util.h". - */ -#define assert(e) do { \ - if (!(e)) { \ - malloc_printf( \ - ": %s:%d: Failed assertion: \"%s\"\n", \ - __FILE__, __LINE__, #e); \ - abort(); \ - } \ -} while (0) - -#define not_reached() do { \ - malloc_printf( \ - ": %s:%d: Unreachable code reached\n", \ - __FILE__, __LINE__); \ - abort(); \ -} while (0) - -#define not_implemented() do { \ - malloc_printf(": %s:%d: Not implemented\n", \ - __FILE__, __LINE__); \ - abort(); \ -} while (0) - -#define assert_not_implemented(e) do { \ - if (!(e)) \ - not_implemented(); \ -} while (0) - #include "test/jemalloc_test_defs.h" #ifdef JEMALLOC_OSSPIN @@ -73,7 +43,8 @@ #ifdef JEMALLOC_UNIT_TEST # define JEMALLOC_JET # define JEMALLOC_MANGLE -# include "jemalloc/internal/jemalloc_internal.h" +# include "jemalloc/internal/jemalloc_preamble.h" +# include "jemalloc/internal/jemalloc_internal_includes.h" /******************************************************************************/ /* @@ -81,26 +52,34 @@ * expose the minimum necessary internal utility code (to avoid re-implementing * essentially identical code within the test infrastructure). 
*/ -#elif defined(JEMALLOC_INTEGRATION_TEST) +#elif defined(JEMALLOC_INTEGRATION_TEST) || \ + defined(JEMALLOC_INTEGRATION_CPP_TEST) # define JEMALLOC_MANGLE # include "jemalloc/jemalloc@install_suffix@.h" # include "jemalloc/internal/jemalloc_internal_defs.h" # include "jemalloc/internal/jemalloc_internal_macros.h" +static const bool config_debug = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; + # define JEMALLOC_N(n) @private_namespace@##n # include "jemalloc/internal/private_namespace.h" +# include "jemalloc/internal/hooks.h" -# define JEMALLOC_H_TYPES -# define JEMALLOC_H_STRUCTS -# define JEMALLOC_H_EXTERNS -# define JEMALLOC_H_INLINES +/* Hermetic headers. */ +# include "jemalloc/internal/assert.h" +# include "jemalloc/internal/malloc_io.h" +# include "jemalloc/internal/nstime.h" # include "jemalloc/internal/util.h" + +/* Non-hermetic headers. */ # include "jemalloc/internal/qr.h" # include "jemalloc/internal/ql.h" -# undef JEMALLOC_H_TYPES -# undef JEMALLOC_H_STRUCTS -# undef JEMALLOC_H_EXTERNS -# undef JEMALLOC_H_INLINES /******************************************************************************/ /* @@ -115,7 +94,8 @@ # include "jemalloc/jemalloc_protos_jet.h" # define JEMALLOC_JET -# include "jemalloc/internal/jemalloc_internal.h" +# include "jemalloc/internal/jemalloc_preamble.h" +# include "jemalloc/internal/jemalloc_internal_includes.h" # include "jemalloc/internal/public_unnamespace.h" # undef JEMALLOC_JET @@ -147,5 +127,47 @@ #include "test/test.h" #include "test/timer.h" #include "test/thd.h" -#define MEXP 19937 +#define MEXP 19937 #include "test/SFMT.h" + +/******************************************************************************/ +/* + * Define always-enabled assertion macros, so that test assertions execute even + * if assertions are disabled in the library code. 
+ */ +#undef assert +#undef not_reached +#undef not_implemented +#undef assert_not_implemented + +#define assert(e) do { \ + if (!(e)) { \ + malloc_printf( \ + ": %s:%d: Failed assertion: \"%s\"\n", \ + __FILE__, __LINE__, #e); \ + abort(); \ + } \ +} while (0) + +#define not_reached() do { \ + malloc_printf( \ + ": %s:%d: Unreachable code reached\n", \ + __FILE__, __LINE__); \ + abort(); \ +} while (0) + +#define not_implemented() do { \ + malloc_printf(": %s:%d: Not implemented\n", \ + __FILE__, __LINE__); \ + abort(); \ +} while (0) + +#define assert_not_implemented(e) do { \ + if (!(e)) { \ + not_implemented(); \ + } \ +} while (0) + +#ifdef __cplusplus +} +#endif diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/jemalloc_test_defs.h.in diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/math.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/math.h similarity index 84% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/math.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/math.h index b057b29..efba086 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/math.h +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/math.h @@ -1,12 +1,3 @@ -#ifndef JEMALLOC_ENABLE_INLINE -double ln_gamma(double x); -double i_gamma(double x, double p, double ln_gamma_p); -double pt_norm(double p); -double pt_chi2(double p, double df, double ln_gamma_df_2); -double pt_gamma(double p, double shape, double scale, double ln_gamma_shape); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(MATH_C_)) /* * Compute the natural log of Gamma(x), accurate to 10 decimal places. * @@ -15,9 +6,8 @@ double pt_gamma(double p, double shape, double scale, double ln_gamma_shape); * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function * [S14]. Communications of the ACM 9(9):684. */ -JEMALLOC_INLINE double -ln_gamma(double x) -{ +static inline double +ln_gamma(double x) { double f, z; assert(x > 0.0); @@ -31,14 +21,15 @@ ln_gamma(double x) } x = z; f = -log(f); - } else + } else { f = 0.0; + } z = 1.0 / (x * x); - return (f + (x-0.5) * log(x) - x + 0.918938533204673 + + return f + (x-0.5) * log(x) - x + 0.918938533204673 + (((-0.000595238095238 * z + 0.000793650793651) * z - - 0.002777777777778) * z + 0.083333333333333) / x); + 0.002777777777778) * z + 0.083333333333333) / x; } /* @@ -50,9 +41,8 @@ ln_gamma(double x) * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral. * Applied Statistics 19:285-287. 
*/ -JEMALLOC_INLINE double -i_gamma(double x, double p, double ln_gamma_p) -{ +static inline double +i_gamma(double x, double p, double ln_gamma_p) { double acu, factor, oflo, gin, term, rn, a, b, an, dif; double pn[6]; unsigned i; @@ -60,8 +50,9 @@ i_gamma(double x, double p, double ln_gamma_p) assert(p > 0.0); assert(x >= 0.0); - if (x == 0.0) - return (0.0); + if (x == 0.0) { + return 0.0; + } acu = 1.0e-10; oflo = 1.0e30; @@ -80,7 +71,7 @@ i_gamma(double x, double p, double ln_gamma_p) gin += term; if (term <= acu) { gin *= factor / p; - return (gin); + return gin; } } } else { @@ -99,23 +90,26 @@ i_gamma(double x, double p, double ln_gamma_p) b += 2.0; term += 1.0; an = a * term; - for (i = 0; i < 2; i++) + for (i = 0; i < 2; i++) { pn[i+4] = b * pn[i+2] - an * pn[i]; + } if (pn[5] != 0.0) { rn = pn[4] / pn[5]; dif = fabs(gin - rn); if (dif <= acu && dif <= acu * rn) { gin = 1.0 - factor * gin; - return (gin); + return gin; } gin = rn; } - for (i = 0; i < 4; i++) + for (i = 0; i < 4; i++) { pn[i] = pn[i+2]; + } if (fabs(pn[4]) >= oflo) { - for (i = 0; i < 4; i++) + for (i = 0; i < 4; i++) { pn[i] /= oflo; + } } } } @@ -131,9 +125,8 @@ i_gamma(double x, double p, double ln_gamma_p) * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal * distribution. Applied Statistics 37(3):477-484. */ -JEMALLOC_INLINE double -pt_norm(double p) -{ +static inline double +pt_norm(double p) { double q, r, ret; assert(p > 0.0 && p < 1.0); @@ -142,7 +135,7 @@ pt_norm(double p) if (fabs(q) <= 0.425) { /* p close to 1/2. */ r = 0.180625 - q * q; - return (q * (((((((2.5090809287301226727e3 * r + + return q * (((((((2.5090809287301226727e3 * r + 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) * r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2) @@ -151,12 +144,13 @@ pt_norm(double p) 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) * r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1) - * r + 1.0)); + * r + 1.0); } else { - if (q < 0.0) + if (q < 0.0) { r = p; - else + } else { r = 1.0 - p; + } assert(r > 0.0); r = sqrt(-log(r)); @@ -198,9 +192,10 @@ pt_norm(double p) 5.99832206555887937690e-1) * r + 1.0)); } - if (q < 0.0) + if (q < 0.0) { ret = -ret; - return (ret); + } + return ret; } } @@ -218,9 +213,8 @@ pt_norm(double p) * Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage * points of the Chi^2 distribution. Applied Statistics 40(1):233-235. */ -JEMALLOC_INLINE double -pt_chi2(double p, double df, double ln_gamma_df_2) -{ +static inline double +pt_chi2(double p, double df, double ln_gamma_df_2) { double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6; unsigned i; @@ -236,8 +230,9 @@ pt_chi2(double p, double df, double ln_gamma_df_2) if (df < -1.24 * log(p)) { /* Starting approximation for small Chi^2. */ ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx); - if (ch - e < 0.0) - return (ch); + if (ch - e < 0.0) { + return ch; + } } else { if (df > 0.32) { x = pt_norm(p); @@ -263,8 +258,9 @@ pt_chi2(double p, double df, double ln_gamma_df_2) * (13.32 + 3.0 * ch)) / p2; ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch + c * aa) * p2 / p1) / t; - if (fabs(q / ch - 1.0) - 0.01 <= 0.0) + if (fabs(q / ch - 1.0) - 0.01 <= 0.0) { break; + } } } } @@ -273,8 +269,9 @@ pt_chi2(double p, double df, double ln_gamma_df_2) /* Calculation of seven-term Taylor series. 
*/ q = ch; p1 = 0.5 * ch; - if (p1 < 0.0) - return (-1.0); + if (p1 < 0.0) { + return -1.0; + } p2 = p - i_gamma(p1, xx, ln_gamma_df_2); t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch)); b = t / ch; @@ -290,11 +287,12 @@ pt_chi2(double p, double df, double ln_gamma_df_2) s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0; ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3 - b * (s4 - b * (s5 - b * s6)))))); - if (fabs(q / ch - 1.0) <= e) + if (fabs(q / ch - 1.0) <= e) { break; + } } - return (ch); + return ch; } /* @@ -302,10 +300,7 @@ pt_chi2(double p, double df, double ln_gamma_df_2) * compute the upper limit on the definite integral from [0..z] that satisfies * p. */ -JEMALLOC_INLINE double -pt_gamma(double p, double shape, double scale, double ln_gamma_shape) -{ - - return (pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale); +static inline double +pt_gamma(double p, double shape, double scale, double ln_gamma_shape) { + return pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale; } -#endif diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/mq.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/mq.h similarity index 77% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/mq.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/mq.h index 7c4df49..af2c078 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/mq.h +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/mq.h @@ -26,9 +26,9 @@ void mq_nanosleep(unsigned ns); * does not perform any cleanup of messages, since it knows nothing of their * payloads. */ -#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) +#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) -#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ +#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ typedef struct { \ mtx_t lock; \ ql_head(a_mq_msg_type) msgs; \ @@ -37,31 +37,28 @@ typedef struct { \ a_attr bool \ a_prefix##init(a_mq_type *mq) { \ \ - if (mtx_init(&mq->lock)) \ - return (true); \ + if (mtx_init(&mq->lock)) { \ + return true; \ + } \ ql_new(&mq->msgs); \ mq->count = 0; \ - return (false); \ + return false; \ } \ a_attr void \ -a_prefix##fini(a_mq_type *mq) \ -{ \ - \ +a_prefix##fini(a_mq_type *mq) { \ mtx_fini(&mq->lock); \ } \ a_attr unsigned \ -a_prefix##count(a_mq_type *mq) \ -{ \ +a_prefix##count(a_mq_type *mq) { \ unsigned count; \ \ mtx_lock(&mq->lock); \ count = mq->count; \ mtx_unlock(&mq->lock); \ - return (count); \ + return count; \ } \ a_attr a_mq_msg_type * \ -a_prefix##tryget(a_mq_type *mq) \ -{ \ +a_prefix##tryget(a_mq_type *mq) { \ a_mq_msg_type *msg; \ \ mtx_lock(&mq->lock); \ @@ -71,35 +68,36 @@ a_prefix##tryget(a_mq_type *mq) \ mq->count--; \ } \ mtx_unlock(&mq->lock); \ - return (msg); \ + return msg; \ } \ a_attr a_mq_msg_type * \ -a_prefix##get(a_mq_type *mq) \ -{ \ +a_prefix##get(a_mq_type *mq) { \ a_mq_msg_type *msg; \ unsigned ns; \ \ msg = a_prefix##tryget(mq); \ - if (msg != NULL) \ - return (msg); \ + if (msg != NULL) { \ + return msg; \ + } \ \ ns = 1; \ while (true) { \ mq_nanosleep(ns); \ msg = a_prefix##tryget(mq); \ - if (msg != NULL) \ - return (msg); \ + if (msg != NULL) { \ + return msg; \ + } \ if (ns < 1000*1000*1000) { \ /* Double sleep time, up to max 1 second. 
*/ \ ns <<= 1; \ - if (ns > 1000*1000*1000) \ + if (ns > 1000*1000*1000) { \ ns = 1000*1000*1000; \ + } \ } \ } \ } \ a_attr void \ -a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) \ -{ \ +a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) { \ \ mtx_lock(&mq->lock); \ ql_elm_new(msg, a_field); \ diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/mtx.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/mtx.h similarity index 89% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/mtx.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/mtx.h index bbe822f..58afbc3 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/mtx.h +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/mtx.h @@ -8,6 +8,8 @@ typedef struct { #ifdef _WIN32 CRITICAL_SECTION lock; +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock lock; #elif (defined(JEMALLOC_OSSPIN)) OSSpinLock lock; #else diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/test.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/test.h new file mode 100644 index 0000000..fd0e526 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/test.h @@ -0,0 +1,338 @@ +#define ASSERT_BUFSIZE 256 + +#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \ + t a_ = (a); \ + t b_ = (b); \ + if (!(a_ cmp b_)) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) " #cmp " (%s) --> " \ + "%" pri " " #neg_cmp " %" pri ": ", \ + __func__, __FILE__, __LINE__, \ + #a, #b, a_, b_); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) + +#define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \ + !=, "p", __VA_ARGS__) +#define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \ + ==, "p", __VA_ARGS__) +#define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \ + !=, "p", __VA_ARGS__) +#define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \ + ==, "p", __VA_ARGS__) + +#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) +#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) +#define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__) +#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__) +#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__) +#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__) + +#define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) +#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) +#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) +#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) +#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) +#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) + +#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) +#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) +#define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__) +#define assert_d_le(a, b, ...) 
assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__) +#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__) +#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__) + +#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) +#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) +#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__) +#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__) +#define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__) +#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__) + +#define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \ + !=, "ld", __VA_ARGS__) +#define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \ + ==, "ld", __VA_ARGS__) +#define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \ + >=, "ld", __VA_ARGS__) +#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \ + >, "ld", __VA_ARGS__) +#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \ + <, "ld", __VA_ARGS__) +#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \ + <=, "ld", __VA_ARGS__) + +#define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \ + a, b, ==, !=, "lu", __VA_ARGS__) +#define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \ + a, b, !=, ==, "lu", __VA_ARGS__) +#define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \ + a, b, <, >=, "lu", __VA_ARGS__) +#define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \ + a, b, <=, >, "lu", __VA_ARGS__) +#define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \ + a, b, >=, <, "lu", __VA_ARGS__) +#define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \ + a, b, >, <=, "lu", __VA_ARGS__) + +#define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \ + !=, "qd", __VA_ARGS__) +#define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \ + ==, "qd", __VA_ARGS__) +#define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \ + >=, "qd", __VA_ARGS__) +#define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \ + >, "qd", __VA_ARGS__) +#define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \ + <, "qd", __VA_ARGS__) +#define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \ + <=, "qd", __VA_ARGS__) + +#define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \ + a, b, ==, !=, "qu", __VA_ARGS__) +#define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \ + a, b, !=, ==, "qu", __VA_ARGS__) +#define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \ + a, b, <, >=, "qu", __VA_ARGS__) +#define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \ + a, b, <=, >, "qu", __VA_ARGS__) +#define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \ + a, b, >=, <, "qu", __VA_ARGS__) +#define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \ + a, b, >, <=, "qu", __VA_ARGS__) + +#define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \ + !=, "jd", __VA_ARGS__) +#define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \ + ==, "jd", __VA_ARGS__) +#define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \ + >=, "jd", __VA_ARGS__) +#define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \ + >, "jd", __VA_ARGS__) +#define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \ + <, "jd", __VA_ARGS__) +#define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \ + <=, "jd", __VA_ARGS__) + +#define assert_ju_eq(a, b, ...) 
assert_cmp(uintmax_t, a, b, ==, \ + !=, "ju", __VA_ARGS__) +#define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \ + ==, "ju", __VA_ARGS__) +#define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \ + >=, "ju", __VA_ARGS__) +#define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \ + >, "ju", __VA_ARGS__) +#define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \ + <, "ju", __VA_ARGS__) +#define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \ + <=, "ju", __VA_ARGS__) + +#define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \ + !=, "zd", __VA_ARGS__) +#define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \ + ==, "zd", __VA_ARGS__) +#define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \ + >=, "zd", __VA_ARGS__) +#define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \ + >, "zd", __VA_ARGS__) +#define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \ + <, "zd", __VA_ARGS__) +#define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \ + <=, "zd", __VA_ARGS__) + +#define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \ + !=, "zu", __VA_ARGS__) +#define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \ + ==, "zu", __VA_ARGS__) +#define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \ + >=, "zu", __VA_ARGS__) +#define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \ + >, "zu", __VA_ARGS__) +#define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \ + <, "zu", __VA_ARGS__) +#define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \ + <=, "zu", __VA_ARGS__) + +#define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \ + !=, FMTd32, __VA_ARGS__) +#define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \ + ==, FMTd32, __VA_ARGS__) +#define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \ + >=, FMTd32, __VA_ARGS__) +#define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \ + >, FMTd32, __VA_ARGS__) +#define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \ + <, FMTd32, __VA_ARGS__) +#define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \ + <=, FMTd32, __VA_ARGS__) + +#define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \ + !=, FMTu32, __VA_ARGS__) +#define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \ + ==, FMTu32, __VA_ARGS__) +#define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \ + >=, FMTu32, __VA_ARGS__) +#define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \ + >, FMTu32, __VA_ARGS__) +#define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \ + <, FMTu32, __VA_ARGS__) +#define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \ + <=, FMTu32, __VA_ARGS__) + +#define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \ + !=, FMTd64, __VA_ARGS__) +#define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \ + ==, FMTd64, __VA_ARGS__) +#define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \ + >=, FMTd64, __VA_ARGS__) +#define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \ + >, FMTd64, __VA_ARGS__) +#define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \ + <, FMTd64, __VA_ARGS__) +#define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \ + <=, FMTd64, __VA_ARGS__) + +#define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \ + !=, FMTu64, __VA_ARGS__) +#define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \ + ==, FMTu64, __VA_ARGS__) +#define assert_u64_lt(a, b, ...) 
assert_cmp(uint64_t, a, b, <, \ + >=, FMTu64, __VA_ARGS__) +#define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \ + >, FMTu64, __VA_ARGS__) +#define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \ + <, FMTu64, __VA_ARGS__) +#define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \ + <=, FMTu64, __VA_ARGS__) + +#define assert_b_eq(a, b, ...) do { \ + bool a_ = (a); \ + bool b_ = (b); \ + if (!(a_ == b_)) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) == (%s) --> %s != %s: ", \ + __func__, __FILE__, __LINE__, \ + #a, #b, a_ ? "true" : "false", \ + b_ ? "true" : "false"); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) +#define assert_b_ne(a, b, ...) do { \ + bool a_ = (a); \ + bool b_ = (b); \ + if (!(a_ != b_)) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) != (%s) --> %s == %s: ", \ + __func__, __FILE__, __LINE__, \ + #a, #b, a_ ? "true" : "false", \ + b_ ? "true" : "false"); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) +#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__) +#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__) + +#define assert_str_eq(a, b, ...) do { \ + if (strcmp((a), (b))) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) same as (%s) --> " \ + "\"%s\" differs from \"%s\": ", \ + __func__, __FILE__, __LINE__, #a, #b, a, b); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) +#define assert_str_ne(a, b, ...) do { \ + if (!strcmp((a), (b))) { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Failed assertion: " \ + "(%s) differs from (%s) --> " \ + "\"%s\" same as \"%s\": ", \ + __func__, __FILE__, __LINE__, #a, #b, a, b); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ + } \ +} while (0) + +#define assert_not_reached(...) do { \ + char prefix[ASSERT_BUFSIZE]; \ + char message[ASSERT_BUFSIZE]; \ + malloc_snprintf(prefix, sizeof(prefix), \ + "%s:%s:%d: Unreachable code reached: ", \ + __func__, __FILE__, __LINE__); \ + malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ + p_test_fail(prefix, message); \ +} while (0) + +/* + * If this enum changes, corresponding changes in test/test.sh.in are also + * necessary. + */ +typedef enum { + test_status_pass = 0, + test_status_skip = 1, + test_status_fail = 2, + + test_status_count = 3 +} test_status_t; + +typedef void (test_t)(void); + +#define TEST_BEGIN(f) \ +static void \ +f(void) { \ + p_test_init(#f); + +#define TEST_END \ + goto label_test_end; \ +label_test_end: \ + p_test_fini(); \ +} + +#define test(...) \ + p_test(__VA_ARGS__, NULL) + +#define test_no_reentrancy(...) \ + p_test_no_reentrancy(__VA_ARGS__, NULL) + +#define test_no_malloc_init(...) 
\ + p_test_no_malloc_init(__VA_ARGS__, NULL) + +#define test_skip_if(e) do { \ + if (e) { \ + test_skip("%s:%s:%d: Test skipped: (%s)", \ + __func__, __FILE__, __LINE__, #e); \ + goto label_test_end; \ + } \ +} while (0) + +bool test_is_reentrant(); + +void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); +void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); + +/* For private use by macros. */ +test_status_t p_test(test_t *t, ...); +test_status_t p_test_no_reentrancy(test_t *t, ...); +test_status_t p_test_no_malloc_init(test_t *t, ...); +void p_test_init(const char *name); +void p_test_fini(void); +void p_test_fail(const char *prefix, const char *message); diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/thd.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/thd.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/include/test/thd.h rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/thd.h diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/timer.h b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/timer.h new file mode 100644 index 0000000..ace6191 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/include/test/timer.h @@ -0,0 +1,11 @@ +/* Simple timer, for use in benchmark reporting. */ + +typedef struct { + nstime_t t0; + nstime_t t1; +} timedelta_t; + +void timer_start(timedelta_t *timer); +void timer_stop(timedelta_t *timer); +uint64_t timer_usec(const timedelta_t *timer); +void timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen); diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/MALLOCX_ARENA.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/MALLOCX_ARENA.c similarity index 80% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/MALLOCX_ARENA.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/MALLOCX_ARENA.c index 30c203a..222164d 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/MALLOCX_ARENA.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/MALLOCX_ARENA.c @@ -1,6 +1,6 @@ #include "test/jemalloc_test.h" -#define NTHREADS 10 +#define NTHREADS 10 static bool have_dss = #ifdef JEMALLOC_DSS @@ -11,16 +11,15 @@ static bool have_dss = ; void * -thd_start(void *arg) -{ +thd_start(void *arg) { unsigned thread_ind = (unsigned)(uintptr_t)arg; unsigned arena_ind; void *p; size_t sz; sz = sizeof(arena_ind); - assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0, - "Error in arenas.extend"); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Error in arenas.create"); if (thread_ind % 4 != 3) { size_t mib[3]; @@ -42,11 +41,10 @@ thd_start(void *arg) assert_ptr_not_null(p, "Unexpected mallocx() error"); dallocx(p, 0); - return (NULL); + return NULL; } -TEST_BEGIN(test_MALLOCX_ARENA) -{ +TEST_BEGIN(test_MALLOCX_ARENA) { thd_t thds[NTHREADS]; unsigned i; @@ -55,15 +53,14 @@ TEST_BEGIN(test_MALLOCX_ARENA) (void *)(uintptr_t)i); } - for (i = 0; i < NTHREADS; i++) + for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); + } } TEST_END int -main(void) -{ - - return (test( - test_MALLOCX_ARENA)); +main(void) { + return test( + test_MALLOCX_ARENA); } diff --git 
a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/aligned_alloc.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/aligned_alloc.c similarity index 78% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/aligned_alloc.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/aligned_alloc.c index 6090014..536b67e 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/aligned_alloc.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/aligned_alloc.c @@ -1,12 +1,19 @@ #include "test/jemalloc_test.h" -#define CHUNK 0x400000 -/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ -#define MAXALIGN ((size_t)0x2000000LU) -#define NITER 4 +#define MAXALIGN (((size_t)1) << 23) -TEST_BEGIN(test_alignment_errors) -{ +/* + * On systems which can't merge extents, tests that call this function generate + * a lot of dirty memory very quickly. Purging between cycles mitigates + * potential OOM on e.g. 32-bit Windows. + */ +static void +purge(void) { + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl error"); +} + +TEST_BEGIN(test_alignment_errors) { size_t alignment; void *p; @@ -27,8 +34,7 @@ TEST_BEGIN(test_alignment_errors) } TEST_END -TEST_BEGIN(test_oom_errors) -{ +TEST_BEGIN(test_oom_errors) { size_t alignment, size; void *p; @@ -72,14 +78,15 @@ TEST_BEGIN(test_oom_errors) } TEST_END -TEST_BEGIN(test_alignment_and_size) -{ +TEST_BEGIN(test_alignment_and_size) { +#define NITER 4 size_t alignment, size, total; unsigned i; void *ps[NITER]; - for (i = 0; i < NITER; i++) + for (i = 0; i < NITER; i++) { ps[i] = NULL; + } for (alignment = 8; alignment <= MAXALIGN; @@ -100,8 +107,9 @@ TEST_BEGIN(test_alignment_and_size) alignment, size, size, buf); } total += malloc_usable_size(ps[i]); - if (total >= (MAXALIGN << 1)) + if (total >= (MAXALIGN << 1)) { break; + } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { @@ -110,16 +118,16 @@ TEST_BEGIN(test_alignment_and_size) } } } + purge(); } +#undef NITER } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_alignment_errors, test_oom_errors, - test_alignment_and_size)); + test_alignment_and_size); } diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/allocated.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/allocated.c similarity index 73% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/allocated.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/allocated.c index 3630e80..1425fd0 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/allocated.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/allocated.c @@ -9,8 +9,7 @@ static const bool config_stats = ; void * -thd_start(void *arg) -{ +thd_start(void *arg) { int err; void *p; uint64_t a0, a1, d0, d1; @@ -18,16 +17,18 @@ thd_start(void *arg) size_t sz, usize; sz = sizeof(a0); - if ((err = mallctl("thread.allocated", &a0, &sz, NULL, 0))) { - if (err == ENOENT) + if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) { + if (err == ENOENT) { goto label_ENOENT; + } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } sz = sizeof(ap0); - if ((err = mallctl("thread.allocatedp", &ap0, &sz, NULL, 0))) { - if (err == ENOENT) + if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 
0))) { + if (err == ENOENT) { goto label_ENOENT; + } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } @@ -36,16 +37,19 @@ thd_start(void *arg) "storage"); sz = sizeof(d0); - if ((err = mallctl("thread.deallocated", &d0, &sz, NULL, 0))) { - if (err == ENOENT) + if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) { + if (err == ENOENT) { goto label_ENOENT; + } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } sz = sizeof(dp0); - if ((err = mallctl("thread.deallocatedp", &dp0, &sz, NULL, 0))) { - if (err == ENOENT) + if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL, + 0))) { + if (err == ENOENT) { goto label_ENOENT; + } test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } @@ -57,9 +61,9 @@ thd_start(void *arg) assert_ptr_not_null(p, "Unexpected malloc() error"); sz = sizeof(a1); - mallctl("thread.allocated", &a1, &sz, NULL, 0); + mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0); sz = sizeof(ap1); - mallctl("thread.allocatedp", &ap1, &sz, NULL, 0); + mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0); assert_u64_eq(*ap1, a1, "Dereferenced \"thread.allocatedp\" value should equal " "\"thread.allocated\" value"); @@ -74,9 +78,9 @@ thd_start(void *arg) free(p); sz = sizeof(d1); - mallctl("thread.deallocated", &d1, &sz, NULL, 0); + mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0); sz = sizeof(dp1); - mallctl("thread.deallocatedp", &dp1, &sz, NULL, 0); + mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0); assert_u64_eq(*dp1, d1, "Dereferenced \"thread.deallocatedp\" value should equal " "\"thread.deallocated\" value"); @@ -87,23 +91,20 @@ thd_start(void *arg) "Deallocated memory counter should increase by at least the amount " "explicitly deallocated"); - return (NULL); + return NULL; label_ENOENT: assert_false(config_stats, "ENOENT should only be returned if stats are disabled"); test_skip("\"thread.allocated\" mallctl not available"); - return (NULL); + return NULL; } -TEST_BEGIN(test_main_thread) -{ - +TEST_BEGIN(test_main_thread) { thd_start(NULL); } TEST_END -TEST_BEGIN(test_subthread) -{ +TEST_BEGIN(test_subthread) { thd_t thd; thd_create(&thd, thd_start, NULL); @@ -112,14 +113,12 @@ TEST_BEGIN(test_subthread) TEST_END int -main(void) -{ - +main(void) { /* Run tests multiple times to check for bad interactions. 
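
The mallctls exercised in thd_start() are jemalloc's per-thread byte counters: "thread.allocated" and "thread.deallocated" are monotonically increasing totals, and the ...p variants hand back a stable pointer so the counters can be re-read without further mallctl() traffic. The measurement idiom the test is built around, in isolation (1024 is an arbitrary request size):

    uint64_t a0, a1;
    size_t sz = sizeof(uint64_t);

    assert_d_eq(mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0), 0,
        "Unexpected mallctl() failure");
    void *p = malloc(1024);
    assert_ptr_not_null(p, "Unexpected malloc() error");
    assert_d_eq(mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0), 0,
        "Unexpected mallctl() failure");
    /* The counter grows by at least the request (in fact the usable size). */
    assert_u64_ge(a1 - a0, 1024, "Counter should grow by >= request size");
    free(p);
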
*/ - return (test( + return test( test_main_thread, test_subthread, test_main_thread, test_subthread, - test_main_thread)); + test_main_thread); } diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/chunk.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/chunk.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/chunk.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/chunk.c diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/extent.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/extent.c new file mode 100644 index 0000000..b5db087 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/extent.c @@ -0,0 +1,248 @@ +#include "test/jemalloc_test.h" + +#include "test/extent_hooks.h" + +static bool +check_background_thread_enabled(void) { + bool enabled; + size_t sz = sizeof(bool); + int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0); + if (ret == ENOENT) { + return false; + } + assert_d_eq(ret, 0, "Unexpected mallctl error"); + return enabled; +} + +static void +test_extent_body(unsigned arena_ind) { + void *p; + size_t large0, large1, large2, sz; + size_t purge_mib[3]; + size_t purge_miblen; + int flags; + bool xallocx_success_a, xallocx_success_b, xallocx_success_c; + + flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + + /* Get large size classes. */ + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, + 0), 0, "Unexpected arenas.lextent.0.size failure"); + assert_d_eq(mallctl("arenas.lextent.1.size", (void *)&large1, &sz, NULL, + 0), 0, "Unexpected arenas.lextent.1.size failure"); + assert_d_eq(mallctl("arenas.lextent.2.size", (void *)&large2, &sz, NULL, + 0), 0, "Unexpected arenas.lextent.2.size failure"); + + /* Test dalloc/decommit/purge cascade. */ + purge_miblen = sizeof(purge_mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen), + 0, "Unexpected mallctlnametomib() failure"); + purge_mib[1] = (size_t)arena_ind; + called_alloc = false; + try_alloc = true; + try_dalloc = false; + try_decommit = false; + p = mallocx(large0 * 2, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + assert_true(called_alloc, "Expected alloc call"); + called_dalloc = false; + called_decommit = false; + did_purge_lazy = false; + did_purge_forced = false; + called_split = false; + xallocx_success_a = (xallocx(p, large0, 0, flags) == large0); + assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), + 0, "Unexpected arena.%u.purge error", arena_ind); + if (xallocx_success_a) { + assert_true(called_dalloc, "Expected dalloc call"); + assert_true(called_decommit, "Expected decommit call"); + assert_true(did_purge_lazy || did_purge_forced, + "Expected purge"); + } + assert_true(called_split, "Expected split call"); + dallocx(p, flags); + try_dalloc = true; + + /* Test decommit/commit and observe split/merge. 
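
test_extent_body() uses xallocx() rather than rallocx() deliberately: xallocx() resizes strictly in place and returns the usable size it achieved, so comparing the return value against the request is the success test, and a successful shrink proves the arena split the extent rather than moving the data. The contract in two lines (new_size is an illustrative target):

    /* Never moves p; returns the size it could realize in place. */
    size_t got = xallocx(p, new_size, 0, flags);
    bool shrunk_in_place = (got == new_size);
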
*/ + try_dalloc = false; + try_decommit = true; + p = mallocx(large0 * 2, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + did_decommit = false; + did_commit = false; + called_split = false; + did_split = false; + did_merge = false; + xallocx_success_b = (xallocx(p, large0, 0, flags) == large0); + assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), + 0, "Unexpected arena.%u.purge error", arena_ind); + if (xallocx_success_b) { + assert_true(did_split, "Expected split"); + } + xallocx_success_c = (xallocx(p, large0 * 2, 0, flags) == large0 * 2); + if (did_split) { + assert_b_eq(did_decommit, did_commit, + "Expected decommit/commit match"); + } + if (xallocx_success_b && xallocx_success_c) { + assert_true(did_merge, "Expected merge"); + } + dallocx(p, flags); + try_dalloc = true; + try_decommit = false; + + /* Make sure non-large allocation succeeds. */ + p = mallocx(42, flags); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + dallocx(p, flags); +} + +static void +test_manual_hook_auto_arena(void) { + unsigned narenas; + size_t old_size, new_size, sz; + size_t hooks_mib[3]; + size_t hooks_miblen; + extent_hooks_t *new_hooks, *old_hooks; + + extent_hooks_prep(); + + sz = sizeof(unsigned); + /* Get number of auto arenas. */ + assert_d_eq(mallctl("opt.narenas", (void *)&narenas, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + if (narenas == 1) { + return; + } + + /* Install custom extent hooks on arena 1 (might not be initialized). */ + hooks_miblen = sizeof(hooks_mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib, + &hooks_miblen), 0, "Unexpected mallctlnametomib() failure"); + hooks_mib[1] = 1; + old_size = sizeof(extent_hooks_t *); + new_hooks = &hooks; + new_size = sizeof(extent_hooks_t *); + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, + &old_size, (void *)&new_hooks, new_size), 0, + "Unexpected extent_hooks error"); + static bool auto_arena_created = false; + if (old_hooks != &hooks) { + assert_b_eq(auto_arena_created, false, + "Expected auto arena 1 created only once."); + auto_arena_created = true; + } +} + +static void +test_manual_hook_body(void) { + unsigned arena_ind; + size_t old_size, new_size, sz; + size_t hooks_mib[3]; + size_t hooks_miblen; + extent_hooks_t *new_hooks, *old_hooks; + + extent_hooks_prep(); + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + + /* Install custom extent hooks. 
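
The hook installation here leans on two mallctl conventions worth spelling out: mallctlnametomib() translates a template path once so that mib[1] can be re-pointed at any arena index cheaply, and a single mallctlbymib() call may read and write simultaneously — supplying both the old* and new* pairs swaps in the new hook table while returning the displaced one in the same call. Condensed (mib, miblen, and the hook variables as prepared in the surrounding code):

    mib[1] = (size_t)arena_ind;    /* retarget the "arena.0..." template */
    /* Swap: write new_hooks, read the displaced table into old_hooks. */
    assert_d_eq(mallctlbymib(mib, miblen, (void *)&old_hooks, &old_size,
        (void *)&new_hooks, new_size), 0, "Unexpected extent_hooks error");
    /* Read-only and write-only forms pass NULL for the unused pair. */
    assert_d_eq(mallctlbymib(mib, miblen, (void *)&old_hooks, &old_size,
        NULL, 0), 0, "Unexpected extent_hooks error");
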
*/ + hooks_miblen = sizeof(hooks_mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.extent_hooks", hooks_mib, + &hooks_miblen), 0, "Unexpected mallctlnametomib() failure"); + hooks_mib[1] = (size_t)arena_ind; + old_size = sizeof(extent_hooks_t *); + new_hooks = &hooks; + new_size = sizeof(extent_hooks_t *); + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, + &old_size, (void *)&new_hooks, new_size), 0, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->alloc, extent_alloc_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->dalloc, extent_dalloc_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->commit, extent_commit_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->decommit, extent_decommit_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->purge_lazy, extent_purge_lazy_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->purge_forced, extent_purge_forced_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->split, extent_split_hook, + "Unexpected extent_hooks error"); + assert_ptr_ne(old_hooks->merge, extent_merge_hook, + "Unexpected extent_hooks error"); + + if (!check_background_thread_enabled()) { + test_extent_body(arena_ind); + } + + /* Restore extent hooks. */ + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL, + (void *)&old_hooks, new_size), 0, "Unexpected extent_hooks error"); + assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, + &old_size, NULL, 0), 0, "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks, default_hooks, "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->alloc, default_hooks->alloc, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->dalloc, default_hooks->dalloc, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->commit, default_hooks->commit, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->decommit, default_hooks->decommit, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->purge_lazy, default_hooks->purge_lazy, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->purge_forced, default_hooks->purge_forced, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->split, default_hooks->split, + "Unexpected extent_hooks error"); + assert_ptr_eq(old_hooks->merge, default_hooks->merge, + "Unexpected extent_hooks error"); +} + +TEST_BEGIN(test_extent_manual_hook) { + test_manual_hook_auto_arena(); + test_manual_hook_body(); + + /* Test failure paths. 
*/ + try_split = false; + test_manual_hook_body(); + try_merge = false; + test_manual_hook_body(); + try_purge_lazy = false; + try_purge_forced = false; + test_manual_hook_body(); + + try_split = try_merge = try_purge_lazy = try_purge_forced = true; +} +TEST_END + +TEST_BEGIN(test_extent_auto_hook) { + unsigned arena_ind; + size_t new_size, sz; + extent_hooks_t *new_hooks; + + extent_hooks_prep(); + + sz = sizeof(unsigned); + new_hooks = &hooks; + new_size = sizeof(extent_hooks_t *); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, + (void *)&new_hooks, new_size), 0, "Unexpected mallctl() failure"); + + test_skip_if(check_background_thread_enabled()); + test_extent_body(arena_ind); +} +TEST_END + +int +main(void) { + return test( + test_extent_manual_hook, + test_extent_auto_hook); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/extent.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/extent.sh new file mode 100644 index 0000000..0cc2187 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/extent.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_fill}" = "x1" ] ; then + export MALLOC_CONF="junk:false" +fi diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/mallocx.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/mallocx.c new file mode 100644 index 0000000..fd960f3 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/mallocx.c @@ -0,0 +1,228 @@ +#include "test/jemalloc_test.h" + +static unsigned +get_nsizes_impl(const char *cmd) { + unsigned ret; + size_t z; + + z = sizeof(unsigned); + assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, + "Unexpected mallctl(\"%s\", ...) failure", cmd); + + return ret; +} + +static unsigned +get_nlarge(void) { + return get_nsizes_impl("arenas.nlextents"); +} + +static size_t +get_size_impl(const char *cmd, size_t ind) { + size_t ret; + size_t z; + size_t mib[4]; + size_t miblen = 4; + + z = sizeof(size_t); + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = ind; + z = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); + + return ret; +} + +static size_t +get_large_size(size_t ind) { + return get_size_impl("arenas.lextent.0.size", ind); +} + +/* + * On systems which can't merge extents, tests that call this function generate + * a lot of dirty memory very quickly. Purging between cycles mitigates + * potential OOM on e.g. 32-bit Windows. 
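
test_basic below pins down the size-introspection contract: nallocx() predicts, without allocating, the usable size mallocx() will return for a given size/flags pair, and sallocx() reports it for a live allocation — the two must agree exactly. In its minimal form (4097 is an arbitrary request size):

    size_t nsz = nallocx(4097, 0);    /* predicted usable size; 0 means error */
    assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
    void *p = mallocx(4097, 0);
    assert_ptr_not_null(p, "Unexpected mallocx() error");
    assert_zu_eq(nsz, sallocx(p, 0), "nallocx()/sallocx() size mismatch");
    dallocx(p, 0);
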
+ */ +static void +purge(void) { + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl error"); +} + +TEST_BEGIN(test_overflow) { + size_t largemax; + + largemax = get_large_size(get_nlarge()-1); + + assert_ptr_null(mallocx(largemax+1, 0), + "Expected OOM for mallocx(size=%#zx, 0)", largemax+1); + + assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0), + "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); + + assert_ptr_null(mallocx(SIZE_T_MAX, 0), + "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX); + + assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), + "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))", + ZU(PTRDIFF_MAX)+1); +} +TEST_END + +TEST_BEGIN(test_oom) { + size_t largemax; + bool oom; + void *ptrs[3]; + unsigned i; + + /* + * It should be impossible to allocate three objects that each consume + * nearly half the virtual address space. + */ + largemax = get_large_size(get_nlarge()-1); + oom = false; + for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { + ptrs[i] = mallocx(largemax, 0); + if (ptrs[i] == NULL) { + oom = true; + } + } + assert_true(oom, + "Expected OOM during series of calls to mallocx(size=%zu, 0)", + largemax); + for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { + if (ptrs[i] != NULL) { + dallocx(ptrs[i], 0); + } + } + purge(); + +#if LG_SIZEOF_PTR == 3 + assert_ptr_null(mallocx(0x8000000000000000ULL, + MALLOCX_ALIGN(0x8000000000000000ULL)), + "Expected OOM for mallocx()"); + assert_ptr_null(mallocx(0x8000000000000000ULL, + MALLOCX_ALIGN(0x80000000)), + "Expected OOM for mallocx()"); +#else + assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)), + "Expected OOM for mallocx()"); +#endif +} +TEST_END + +TEST_BEGIN(test_basic) { +#define MAXSZ (((size_t)1) << 23) + size_t sz; + + for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { + size_t nsz, rsz; + void *p; + nsz = nallocx(sz, 0); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + p = mallocx(sz, 0); + assert_ptr_not_null(p, + "Unexpected mallocx(size=%zx, flags=0) error", sz); + rsz = sallocx(p, 0); + assert_zu_ge(rsz, sz, "Real size smaller than expected"); + assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); + dallocx(p, 0); + + p = mallocx(sz, 0); + assert_ptr_not_null(p, + "Unexpected mallocx(size=%zx, flags=0) error", sz); + dallocx(p, 0); + + nsz = nallocx(sz, MALLOCX_ZERO); + assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); + p = mallocx(sz, MALLOCX_ZERO); + assert_ptr_not_null(p, + "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error", + nsz); + rsz = sallocx(p, 0); + assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); + dallocx(p, 0); + purge(); + } +#undef MAXSZ +} +TEST_END + +TEST_BEGIN(test_alignment_and_size) { + const char *percpu_arena; + size_t sz = sizeof(percpu_arena); + + if(mallctl("opt.percpu_arena", (void *)&percpu_arena, &sz, NULL, 0) || + strcmp(percpu_arena, "disabled") != 0) { + test_skip("test_alignment_and_size skipped: " + "not working with percpu arena."); + }; +#define MAXALIGN (((size_t)1) << 23) +#define NITER 4 + size_t nsz, rsz, alignment, total; + unsigned i; + void *ps[NITER]; + + for (i = 0; i < NITER; i++) { + ps[i] = NULL; + } + + for (alignment = 8; + alignment <= MAXALIGN; + alignment <<= 1) { + total = 0; + for (sz = 1; + sz < 3 * alignment && sz < (1U << 31); + sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { + for (i = 0; i < NITER; i++) { + nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + assert_zu_ne(nsz, 0, + "nallocx() error for alignment=%zu, " 
+ "size=%zu (%#zx)", alignment, sz, sz); + ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | + MALLOCX_ZERO); + assert_ptr_not_null(ps[i], + "mallocx() error for alignment=%zu, " + "size=%zu (%#zx)", alignment, sz, sz); + rsz = sallocx(ps[i], 0); + assert_zu_ge(rsz, sz, + "Real size smaller than expected for " + "alignment=%zu, size=%zu", alignment, sz); + assert_zu_eq(nsz, rsz, + "nallocx()/sallocx() size mismatch for " + "alignment=%zu, size=%zu", alignment, sz); + assert_ptr_null( + (void *)((uintptr_t)ps[i] & (alignment-1)), + "%p inadequately aligned for" + " alignment=%zu, size=%zu", ps[i], + alignment, sz); + total += rsz; + if (total >= (MAXALIGN << 1)) { + break; + } + } + for (i = 0; i < NITER; i++) { + if (ps[i] != NULL) { + dallocx(ps[i], 0); + ps[i] = NULL; + } + } + } + purge(); + } +#undef MAXALIGN +#undef NITER +} +TEST_END + +int +main(void) { + return test( + test_overflow, + test_oom, + test_basic, + test_alignment_and_size); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/mallocx.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/mallocx.sh new file mode 100644 index 0000000..0cc2187 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/mallocx.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_fill}" = "x1" ] ; then + export MALLOC_CONF="junk:false" +fi diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/overflow.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/overflow.c similarity index 68% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/overflow.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/overflow.c index 303d9b2..6a9785b 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/overflow.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/overflow.c @@ -1,24 +1,23 @@ #include "test/jemalloc_test.h" -TEST_BEGIN(test_overflow) -{ - unsigned nhchunks; +TEST_BEGIN(test_overflow) { + unsigned nlextents; size_t mib[4]; size_t sz, miblen, max_size_class; void *p; sz = sizeof(unsigned); - assert_d_eq(mallctl("arenas.nhchunks", &nhchunks, &sz, NULL, 0), 0, - "Unexpected mallctl() error"); + assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, + 0), 0, "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); - assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, + assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); - mib[2] = nhchunks - 1; + mib[2] = nlextents - 1; sz = sizeof(size_t); - assert_d_eq(mallctlbymib(mib, miblen, &max_size_class, &sz, NULL, 0), 0, - "Unexpected mallctlbymib() error"); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, + NULL, 0), 0, "Unexpected mallctlbymib() error"); assert_ptr_null(malloc(max_size_class + 1), "Expected OOM due to over-sized allocation request"); @@ -41,9 +40,7 @@ TEST_BEGIN(test_overflow) TEST_END int -main(void) -{ - - return (test( - test_overflow)); +main(void) { + return test( + test_overflow); } diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/posix_memalign.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/posix_memalign.c similarity index 77% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/posix_memalign.c rename to 
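Two details of the alignment loop above are worth a gloss: the assertion on (uintptr_t)ps[i] & (alignment-1) is the standard power-of-two alignment check, and purge() keeps dirty pages from piling up between cycles. A sketch of both, under the same jemalloc-build assumptions as above:

#include <stdint.h>
#include <jemalloc/jemalloc.h>

static int
is_aligned(const void *p, size_t alignment) {
    /* Power-of-two alignments only: all low bits must be clear. */
    return ((uintptr_t)p & (alignment - 1)) == 0;
}

int main(void) {
    void *p = mallocx(100, MALLOCX_ALIGN(4096));
    int ok = p != NULL && is_aligned(p, 4096);
    if (p != NULL) {
        dallocx(p, 0);
    }
    /* What purge() does between cycles: release arena 0's dirty pages. */
    (void)mallctl("arena.0.purge", NULL, NULL, NULL, 0);
    return ok ? 0 : 1;
}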
redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/posix_memalign.c index 19741c6..2c2726d 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/posix_memalign.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/posix_memalign.c @@ -1,12 +1,19 @@ #include "test/jemalloc_test.h" -#define CHUNK 0x400000 -/* #define MAXALIGN ((size_t)UINT64_C(0x80000000000)) */ -#define MAXALIGN ((size_t)0x2000000LU) -#define NITER 4 +#define MAXALIGN (((size_t)1) << 23) -TEST_BEGIN(test_alignment_errors) -{ +/* + * On systems which can't merge extents, tests that call this function generate + * a lot of dirty memory very quickly. Purging between cycles mitigates + * potential OOM on e.g. 32-bit Windows. + */ +static void +purge(void) { + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl error"); +} + +TEST_BEGIN(test_alignment_errors) { size_t alignment; void *p; @@ -25,8 +32,7 @@ TEST_BEGIN(test_alignment_errors) } TEST_END -TEST_BEGIN(test_oom_errors) -{ +TEST_BEGIN(test_oom_errors) { size_t alignment, size; void *p; @@ -64,15 +70,16 @@ TEST_BEGIN(test_oom_errors) } TEST_END -TEST_BEGIN(test_alignment_and_size) -{ +TEST_BEGIN(test_alignment_and_size) { +#define NITER 4 size_t alignment, size, total; unsigned i; int err; void *ps[NITER]; - for (i = 0; i < NITER; i++) + for (i = 0; i < NITER; i++) { ps[i] = NULL; + } for (alignment = 8; alignment <= MAXALIGN; @@ -94,8 +101,9 @@ TEST_BEGIN(test_alignment_and_size) alignment, size, size, buf); } total += malloc_usable_size(ps[i]); - if (total >= (MAXALIGN << 1)) + if (total >= (MAXALIGN << 1)) { break; + } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { @@ -104,16 +112,16 @@ TEST_BEGIN(test_alignment_and_size) } } } + purge(); } +#undef NITER } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_alignment_errors, test_oom_errors, - test_alignment_and_size)); + test_alignment_and_size); } diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/rallocx.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/rallocx.c similarity index 63% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/rallocx.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/rallocx.c index be1b27b..7821ca5 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/rallocx.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/rallocx.c @@ -1,14 +1,53 @@ #include "test/jemalloc_test.h" -TEST_BEGIN(test_grow_and_shrink) -{ +static unsigned +get_nsizes_impl(const char *cmd) { + unsigned ret; + size_t z; + + z = sizeof(unsigned); + assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, + "Unexpected mallctl(\"%s\", ...) failure", cmd); + + return ret; +} + +static unsigned +get_nlarge(void) { + return get_nsizes_impl("arenas.nlextents"); +} + +static size_t +get_size_impl(const char *cmd, size_t ind) { + size_t ret; + size_t z; + size_t mib[4]; + size_t miblen = 4; + + z = sizeof(size_t); + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = ind; + z = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) 
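The posix_memalign tests above encode the POSIX error contract: failures come back through the return value with errno untouched, EINVAL for a bad alignment and ENOMEM for an unsatisfiable request. A small illustration of what test_alignment_errors and test_oom_errors expect (assumes a POSIX platform where posix_memalign is declared in stdlib.h):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    void *p = NULL;
    /* Alignment must be a power of two multiple of sizeof(void *). */
    int err = posix_memalign(&p, 3, 64);
    printf("alignment 3 -> %s\n", err == EINVAL ? "EINVAL" : "unexpected");
    /* An unsatisfiable size reports ENOMEM the same way. */
    err = posix_memalign(&p, sizeof(void *), SIZE_MAX - 4096);
    printf("oversized   -> %s\n", err == ENOMEM ? "ENOMEM" : "unexpected");
    return 0;
}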
failure", cmd, ind); + + return ret; +} + +static size_t +get_large_size(size_t ind) { + return get_size_impl("arenas.lextent.0.size", ind); +} + +TEST_BEGIN(test_grow_and_shrink) { void *p, *q; size_t tsz; -#define NCYCLES 3 +#define NCYCLES 3 unsigned i, j; -#define NSZS 2500 +#define NSZS 1024 size_t szs[NSZS]; -#define MAXSZ ZU(12 * 1024 * 1024) +#define MAXSZ ZU(12 * 1024 * 1024) p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); @@ -46,8 +85,7 @@ TEST_BEGIN(test_grow_and_shrink) TEST_END static bool -validate_fill(const void *p, uint8_t c, size_t offset, size_t len) -{ +validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { bool ret = false; const uint8_t *buf = (const uint8_t *)p; size_t i; @@ -62,16 +100,15 @@ validate_fill(const void *p, uint8_t c, size_t offset, size_t len) } } - return (ret); + return ret; } -TEST_BEGIN(test_zero) -{ +TEST_BEGIN(test_zero) { void *p, *q; size_t psz, qsz, i, j; size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024}; -#define FILL_BYTE 0xaaU -#define RANGE 2048 +#define FILL_BYTE 0xaaU +#define RANGE 2048 for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) { size_t start_size = start_sizes[i]; @@ -110,11 +147,10 @@ TEST_BEGIN(test_zero) } TEST_END -TEST_BEGIN(test_align) -{ +TEST_BEGIN(test_align) { void *p, *q; size_t align; -#define MAX_ALIGN (ZU(1) << 25) +#define MAX_ALIGN (ZU(1) << 25) align = ZU(1); p = mallocx(1, MALLOCX_ALIGN(align)); @@ -135,25 +171,24 @@ TEST_BEGIN(test_align) } TEST_END -TEST_BEGIN(test_lg_align_and_zero) -{ +TEST_BEGIN(test_lg_align_and_zero) { void *p, *q; - size_t lg_align, sz; -#define MAX_LG_ALIGN 25 -#define MAX_VALIDATE (ZU(1) << 22) + unsigned lg_align; + size_t sz; +#define MAX_LG_ALIGN 25 +#define MAX_VALIDATE (ZU(1) << 22) - lg_align = ZU(0); + lg_align = 0; p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); assert_ptr_not_null(p, "Unexpected mallocx() error"); for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) { q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); assert_ptr_not_null(q, - "Unexpected rallocx() error for lg_align=%zu", lg_align); + "Unexpected rallocx() error for lg_align=%u", lg_align); assert_ptr_null( (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)), - "%p inadequately aligned for lg_align=%zu", - q, lg_align); + "%p inadequately aligned for lg_align=%u", q, lg_align); sz = sallocx(q, 0); if ((sz << 1) <= MAX_VALIDATE) { assert_false(validate_fill(q, 0, 0, sz), @@ -173,13 +208,38 @@ TEST_BEGIN(test_lg_align_and_zero) } TEST_END -int -main(void) -{ +TEST_BEGIN(test_overflow) { + size_t largemax; + void *p; + + largemax = get_large_size(get_nlarge()-1); - return (test( + p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_ptr_null(rallocx(p, largemax+1, 0), + "Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1); + + assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0), + "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); + + assert_ptr_null(rallocx(p, SIZE_T_MAX, 0), + "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX); + + assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), + "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))", + ZU(PTRDIFF_MAX)+1); + + dallocx(p, 0); +} +TEST_END + +int +main(void) { + return test( test_grow_and_shrink, test_zero, test_align, - test_lg_align_and_zero)); + test_lg_align_and_zero, + test_overflow); } diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/sdallocx.c 
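test_zero's validate_fill() checks both halves of the MALLOCX_ZERO promise on a grow: old bytes keep their fill pattern while bytes beyond the old usable size read as zero. A sketch of that check, assuming the rallocx() call really does enlarge the object's usable size:

#include <string.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    size_t sz = 1024;
    void *p = mallocx(sz, MALLOCX_ZERO);
    if (p == NULL) return 1;
    memset(p, 0xaa, sz);                    /* the test's FILL_BYTE pattern */
    size_t old_usable = sallocx(p, 0);
    p = rallocx(p, sz * 8, MALLOCX_ZERO);   /* grow; new tail must be zero */
    if (p == NULL) return 1;
    const unsigned char *b = (const unsigned char *)p;
    int ok = b[0] == 0xaa && b[old_usable] == 0x00;
    dallocx(p, 0);
    return ok ? 0 : 1;
}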
b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/sdallocx.c similarity index 74% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/sdallocx.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/sdallocx.c index b84817d..ca01448 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/sdallocx.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/sdallocx.c @@ -1,23 +1,22 @@ #include "test/jemalloc_test.h" -#define MAXALIGN (((size_t)1) << 25) -#define NITER 4 +#define MAXALIGN (((size_t)1) << 22) +#define NITER 3 -TEST_BEGIN(test_basic) -{ +TEST_BEGIN(test_basic) { void *ptr = mallocx(64, 0); sdallocx(ptr, 64, 0); } TEST_END -TEST_BEGIN(test_alignment_and_size) -{ +TEST_BEGIN(test_alignment_and_size) { size_t nsz, sz, alignment, total; unsigned i; void *ps[NITER]; - for (i = 0; i < NITER; i++) + for (i = 0; i < NITER; i++) { ps[i] = NULL; + } for (alignment = 8; alignment <= MAXALIGN; @@ -32,8 +31,9 @@ TEST_BEGIN(test_alignment_and_size) ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); total += nsz; - if (total >= (MAXALIGN << 1)) + if (total >= (MAXALIGN << 1)) { break; + } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { @@ -48,10 +48,8 @@ TEST_BEGIN(test_alignment_and_size) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test_no_reentrancy( test_basic, - test_alignment_and_size)); + test_alignment_and_size); } diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/thread_arena.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/thread_arena.c similarity index 53% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/thread_arena.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/thread_arena.c index 67be535..1e5ec05 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/thread_arena.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/thread_arena.c @@ -1,10 +1,9 @@ #include "test/jemalloc_test.h" -#define NTHREADS 10 +#define NTHREADS 10 void * -thd_start(void *arg) -{ +thd_start(void *arg) { unsigned main_arena_ind = *(unsigned *)arg; void *p; unsigned arena_ind; @@ -16,8 +15,8 @@ thd_start(void *arg) free(p); size = sizeof(arena_ind); - if ((err = mallctl("thread.arena", &arena_ind, &size, &main_arena_ind, - sizeof(main_arena_ind)))) { + if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, + (void *)&main_arena_ind, sizeof(main_arena_ind)))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); @@ -25,7 +24,8 @@ thd_start(void *arg) } size = sizeof(arena_ind); - if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { + if ((err = mallctl("thread.arena", (void *)&arena_ind, &size, NULL, + 0))) { char buf[BUFERROR_BUF]; buferror(err, buf, sizeof(buf)); @@ -34,14 +34,19 @@ thd_start(void *arg) assert_u_eq(arena_ind, main_arena_ind, "Arena index should be same as for main thread"); - return (NULL); + return NULL; } -TEST_BEGIN(test_thread_arena) -{ +static void +mallctl_failure(int err) { + char buf[BUFERROR_BUF]; + + buferror(err, buf, sizeof(buf)); + test_fail("Error in mallctl(): %s", buf); +} + +TEST_BEGIN(test_thread_arena) { void *p; - unsigned arena_ind; - size_t size; int err; thd_t thds[NTHREADS]; unsigned i; @@ -49,12 +54,15 @@ TEST_BEGIN(test_thread_arena) p = malloc(1); assert_ptr_not_null(p, "Error in malloc()"); - 
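The sdallocx() call in test_basic above is sized deallocation: the caller quotes the allocation size back, so the allocator can skip the size lookup that free() has to perform. In outline:

#include <jemalloc/jemalloc.h>

int main(void) {
    void *p = mallocx(64, 0);
    if (p == NULL) return 1;
    /* The size argument must lie between the request size and the usable
     * size; supplying it lets jemalloc avoid free()'s metadata lookup. */
    sdallocx(p, 64, 0);
    return 0;
}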
size = sizeof(arena_ind); - if ((err = mallctl("thread.arena", &arena_ind, &size, NULL, 0))) { - char buf[BUFERROR_BUF]; + unsigned arena_ind, old_arena_ind; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Arena creation failure"); - buferror(err, buf, sizeof(buf)); - test_fail("Error in mallctl(): %s", buf); + size_t size = sizeof(arena_ind); + if ((err = mallctl("thread.arena", (void *)&old_arena_ind, &size, + (void *)&arena_ind, sizeof(arena_ind))) != 0) { + mallctl_failure(err); } for (i = 0; i < NTHREADS; i++) { @@ -67,13 +75,12 @@ TEST_BEGIN(test_thread_arena) thd_join(thds[i], (void *)&join_ret); assert_zd_eq(join_ret, 0, "Unexpected thread join error"); } + free(p); } TEST_END int -main(void) -{ - - return (test( - test_thread_arena)); +main(void) { + return test( + test_thread_arena); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/thread_tcache_enabled.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/thread_tcache_enabled.c new file mode 100644 index 0000000..95c9acc --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/thread_tcache_enabled.c @@ -0,0 +1,87 @@ +#include "test/jemalloc_test.h" + +void * +thd_start(void *arg) { + bool e0, e1; + size_t sz = sizeof(bool); + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); + + if (e0) { + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + } + + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + free(malloc(1)); + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + free(malloc(1)); + e1 = true; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + free(malloc(1)); + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_true(e0, "tcache should be enabled"); + + free(malloc(1)); + e1 = false; + assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, + (void *)&e1, sz), 0, "Unexpected mallctl() error"); + assert_false(e0, "tcache should be disabled"); + + free(malloc(1)); + return NULL; +} + +TEST_BEGIN(test_main_thread) { + thd_start(NULL); +} +TEST_END + +TEST_BEGIN(test_subthread) { + thd_t thd; + + thd_create(&thd, thd_start, NULL); + thd_join(thd, NULL); +} +TEST_END + +int +main(void) { + /* Run tests multiple times to check for bad interactions. 
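The rewritten thread_arena test now creates a dedicated arena and rebinds the main thread to it in a single mallctl() call, reading the old binding through oldp while writing the new one through newp. Stripped to its essentials:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    unsigned new_ind, old_ind;
    size_t sz = sizeof(unsigned);
    /* Make a fresh arena; its index comes back through oldp. */
    if (mallctl("arenas.create", (void *)&new_ind, &sz, NULL, 0) != 0) {
        return 1;
    }
    /* One call both reads the current binding and installs the new one. */
    if (mallctl("thread.arena", (void *)&old_ind, &sz,
        (void *)&new_ind, sizeof(new_ind)) != 0) {
        return 1;
    }
    printf("rebound thread from arena %u to arena %u\n", old_ind, new_ind);
    return 0;
}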
*/ + return test( + test_main_thread, + test_subthread, + test_main_thread, + test_subthread, + test_main_thread); +} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/xallocx.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/xallocx.c similarity index 50% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/xallocx.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/xallocx.c index 3736252..cd0ca04 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/integration/xallocx.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/xallocx.c @@ -1,7 +1,24 @@ #include "test/jemalloc_test.h" -TEST_BEGIN(test_same_size) -{ +/* + * Use a separate arena for xallocx() extension/contraction tests so that + * internal allocation e.g. by heap profiling can't interpose allocations where + * xallocx() would ordinarily be able to extend. + */ +static unsigned +arena_ind(void) { + static unsigned ind = 0; + + if (ind == 0) { + size_t sz = sizeof(ind); + assert_d_eq(mallctl("arenas.create", (void *)&ind, &sz, NULL, + 0), 0, "Unexpected mallctl failure creating arena"); + } + + return ind; +} + +TEST_BEGIN(test_same_size) { void *p; size_t sz, tsz; @@ -16,8 +33,7 @@ TEST_BEGIN(test_same_size) } TEST_END -TEST_BEGIN(test_extra_no_move) -{ +TEST_BEGIN(test_extra_no_move) { void *p; size_t sz, tsz; @@ -32,8 +48,7 @@ TEST_BEGIN(test_extra_no_move) } TEST_END -TEST_BEGIN(test_no_move_fail) -{ +TEST_BEGIN(test_no_move_fail) { void *p; size_t sz, tsz; @@ -49,42 +64,29 @@ TEST_BEGIN(test_no_move_fail) TEST_END static unsigned -get_nsizes_impl(const char *cmd) -{ +get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); - assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0, + assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); - return (ret); -} - -static unsigned -get_nsmall(void) -{ - - return (get_nsizes_impl("arenas.nbins")); + return ret; } static unsigned -get_nlarge(void) -{ - - return (get_nsizes_impl("arenas.nlruns")); +get_nsmall(void) { + return get_nsizes_impl("arenas.nbins"); } static unsigned -get_nhuge(void) -{ - - return (get_nsizes_impl("arenas.nhchunks")); +get_nlarge(void) { + return get_nsizes_impl("arenas.nlextents"); } static size_t -get_size_impl(const char *cmd, size_t ind) -{ +get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; @@ -95,41 +97,29 @@ get_size_impl(const char *cmd, size_t ind) 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); - assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0), + assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); - return (ret); -} - -static size_t -get_small_size(size_t ind) -{ - - return (get_size_impl("arenas.bin.0.size", ind)); + return ret; } static size_t -get_large_size(size_t ind) -{ - - return (get_size_impl("arenas.lrun.0.size", ind)); +get_small_size(size_t ind) { + return get_size_impl("arenas.bin.0.size", ind); } static size_t -get_huge_size(size_t ind) -{ - - return (get_size_impl("arenas.hchunk.0.size", ind)); +get_large_size(size_t ind) { + return get_size_impl("arenas.lextent.0.size", ind); } -TEST_BEGIN(test_size) -{ - size_t small0, hugemax; +TEST_BEGIN(test_size) { + size_t small0, largemax; void *p; /* Get size classes. 
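Every step of the tcache test above is the same read-modify toggle: a bool written through newp flips thread.tcache.enabled, and the previous state comes back through oldp, which is what the e0/e1 assertion pairs verify. A compact version of one toggle cycle:

#include <stdbool.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    bool before, next = false;
    size_t sz = sizeof(bool);
    /* Write false through newp to disable; prior state returns via oldp. */
    if (mallctl("thread.tcache.enabled", (void *)&before, &sz,
        (void *)&next, sz) != 0) {
        return 1;
    }
    free(malloc(1));    /* this round trip bypasses the thread cache */
    next = true;        /* restore the cache for subsequent allocations */
    (void)mallctl("thread.tcache.enabled", (void *)&before, &sz,
        (void *)&next, sz);
    return 0;
}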
*/ small0 = get_small_size(0); - hugemax = get_huge_size(get_nhuge()-1); + largemax = get_large_size(get_nlarge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); @@ -139,60 +129,58 @@ TEST_BEGIN(test_size) "Unexpected xallocx() behavior"); /* Test largest supported size. */ - assert_zu_le(xallocx(p, hugemax, 0, 0), hugemax, + assert_zu_le(xallocx(p, largemax, 0, 0), largemax, "Unexpected xallocx() behavior"); /* Test size overflow. */ - assert_zu_le(xallocx(p, hugemax+1, 0, 0), hugemax, + assert_zu_le(xallocx(p, largemax+1, 0, 0), largemax, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), hugemax, + assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), largemax, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END -TEST_BEGIN(test_size_extra_overflow) -{ - size_t small0, hugemax; +TEST_BEGIN(test_size_extra_overflow) { + size_t small0, largemax; void *p; /* Get size classes. */ small0 = get_small_size(0); - hugemax = get_huge_size(get_nhuge()-1); + largemax = get_large_size(get_nlarge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); /* Test overflows that can be resolved by clamping extra. */ - assert_zu_le(xallocx(p, hugemax-1, 2, 0), hugemax, + assert_zu_le(xallocx(p, largemax-1, 2, 0), largemax, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, hugemax, 1, 0), hugemax, + assert_zu_le(xallocx(p, largemax, 1, 0), largemax, "Unexpected xallocx() behavior"); - /* Test overflow such that hugemax-size underflows. */ - assert_zu_le(xallocx(p, hugemax+1, 2, 0), hugemax, + /* Test overflow such that largemax-size underflows. */ + assert_zu_le(xallocx(p, largemax+1, 2, 0), largemax, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, hugemax+2, 3, 0), hugemax, + assert_zu_le(xallocx(p, largemax+2, 3, 0), largemax, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), hugemax, + assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), largemax, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), hugemax, + assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), largemax, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END -TEST_BEGIN(test_extra_small) -{ - size_t small0, small1, hugemax; +TEST_BEGIN(test_extra_small) { + size_t small0, small1, largemax; void *p; /* Get size classes. */ small0 = get_small_size(0); small1 = get_small_size(1); - hugemax = get_huge_size(get_nhuge()-1); + largemax = get_large_size(get_nlarge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); @@ -207,7 +195,7 @@ TEST_BEGIN(test_extra_small) "Unexpected xallocx() behavior"); /* Test size+extra overflow. */ - assert_zu_eq(xallocx(p, small0, hugemax - small0 + 1, 0), small0, + assert_zu_eq(xallocx(p, small0, largemax - small0 + 1, 0), small0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0, "Unexpected xallocx() behavior"); @@ -216,140 +204,77 @@ TEST_BEGIN(test_extra_small) } TEST_END -TEST_BEGIN(test_extra_large) -{ - size_t smallmax, large0, large1, large2, huge0, hugemax; +TEST_BEGIN(test_extra_large) { + int flags = MALLOCX_ARENA(arena_ind()); + size_t smallmax, large1, large2, large3, largemax; void *p; /* Get size classes. 
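get_size_impl() above is the two-step mallctl MIB pattern: translate the name once with mallctlnametomib(), then rewrite a single MIB component to index different size classes without repeating the string lookup. Standalone (assumes at least four large size classes, which any default jemalloc 5 build has):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    size_t mib[4];
    size_t miblen = sizeof(mib) / sizeof(size_t);
    if (mallctlnametomib("arenas.lextent.0.size", mib, &miblen) != 0) {
        return 1;
    }
    for (size_t i = 0; i < 4; i++) {
        size_t class_sz, oldlen = sizeof(size_t);
        mib[2] = i;    /* re-point the cached MIB at large class i */
        if (mallctlbymib(mib, miblen, (void *)&class_sz, &oldlen,
            NULL, 0) != 0) {
            return 1;
        }
        printf("large class %zu: %zu bytes\n", i, class_sz);
    }
    return 0;
}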
*/ smallmax = get_small_size(get_nsmall()-1); - large0 = get_large_size(0); large1 = get_large_size(1); large2 = get_large_size(2); - huge0 = get_huge_size(0); - hugemax = get_huge_size(get_nhuge()-1); - - p = mallocx(large2, 0); - assert_ptr_not_null(p, "Unexpected mallocx() error"); - - assert_zu_eq(xallocx(p, large2, 0, 0), large2, - "Unexpected xallocx() behavior"); - /* Test size decrease with zero extra. */ - assert_zu_eq(xallocx(p, large0, 0, 0), large0, - "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, smallmax, 0, 0), large0, - "Unexpected xallocx() behavior"); - - assert_zu_eq(xallocx(p, large2, 0, 0), large2, - "Unexpected xallocx() behavior"); - /* Test size decrease with non-zero extra. */ - assert_zu_eq(xallocx(p, large0, large2 - large0, 0), large2, - "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, large1, large2 - large1, 0), large2, - "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, large0, large1 - large0, 0), large1, - "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, 0), large0, - "Unexpected xallocx() behavior"); - - assert_zu_eq(xallocx(p, large0, 0, 0), large0, - "Unexpected xallocx() behavior"); - /* Test size increase with zero extra. */ - assert_zu_eq(xallocx(p, large2, 0, 0), large2, - "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, huge0, 0, 0), large2, - "Unexpected xallocx() behavior"); - - assert_zu_eq(xallocx(p, large0, 0, 0), large0, - "Unexpected xallocx() behavior"); - /* Test size increase with non-zero extra. */ - assert_zu_lt(xallocx(p, large0, huge0 - large0, 0), huge0, - "Unexpected xallocx() behavior"); - - assert_zu_eq(xallocx(p, large0, 0, 0), large0, - "Unexpected xallocx() behavior"); - /* Test size increase with non-zero extra. */ - assert_zu_eq(xallocx(p, large0, large2 - large0, 0), large2, - "Unexpected xallocx() behavior"); - - assert_zu_eq(xallocx(p, large2, 0, 0), large2, - "Unexpected xallocx() behavior"); - /* Test size+extra overflow. */ - assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, 0), huge0, - "Unexpected xallocx() behavior"); - - dallocx(p, 0); -} -TEST_END - -TEST_BEGIN(test_extra_huge) -{ - size_t largemax, huge0, huge1, huge2, hugemax; - void *p; - - /* Get size classes. */ + large3 = get_large_size(3); largemax = get_large_size(get_nlarge()-1); - huge0 = get_huge_size(0); - huge1 = get_huge_size(1); - huge2 = get_huge_size(2); - hugemax = get_huge_size(get_nhuge()-1); - p = mallocx(huge2, 0); + p = mallocx(large3, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); - assert_zu_eq(xallocx(p, huge2, 0, 0), huge2, + assert_zu_eq(xallocx(p, large3, 0, flags), large3, "Unexpected xallocx() behavior"); /* Test size decrease with zero extra. */ - assert_zu_ge(xallocx(p, huge0, 0, 0), huge0, + assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); - assert_zu_ge(xallocx(p, largemax, 0, 0), huge0, + assert_zu_ge(xallocx(p, smallmax, 0, flags), large1, "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, huge2, 0, 0), huge2, - "Unexpected xallocx() behavior"); + if (xallocx(p, large3, 0, flags) != large3) { + p = rallocx(p, large3, flags); + assert_ptr_not_null(p, "Unexpected rallocx() failure"); + } /* Test size decrease with non-zero extra. 
*/ - assert_zu_eq(xallocx(p, huge0, huge2 - huge0, 0), huge2, + assert_zu_eq(xallocx(p, large1, large3 - large1, flags), large3, "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, huge1, huge2 - huge1, 0), huge2, + assert_zu_eq(xallocx(p, large2, large3 - large2, flags), large3, "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, huge0, huge1 - huge0, 0), huge1, + assert_zu_ge(xallocx(p, large1, large2 - large1, flags), large2, "Unexpected xallocx() behavior"); - assert_zu_ge(xallocx(p, largemax, huge0 - largemax, 0), huge0, + assert_zu_ge(xallocx(p, smallmax, large1 - smallmax, flags), large1, "Unexpected xallocx() behavior"); - assert_zu_ge(xallocx(p, huge0, 0, 0), huge0, + assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); /* Test size increase with zero extra. */ - assert_zu_le(xallocx(p, huge2, 0, 0), huge2, + assert_zu_le(xallocx(p, large3, 0, flags), large3, "Unexpected xallocx() behavior"); - assert_zu_le(xallocx(p, hugemax+1, 0, 0), huge2, + assert_zu_le(xallocx(p, largemax+1, 0, flags), large3, "Unexpected xallocx() behavior"); - assert_zu_ge(xallocx(p, huge0, 0, 0), huge0, + assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ - assert_zu_le(xallocx(p, huge0, SIZE_T_MAX - huge0, 0), hugemax, + assert_zu_le(xallocx(p, large1, SIZE_T_MAX - large1, flags), largemax, "Unexpected xallocx() behavior"); - assert_zu_ge(xallocx(p, huge0, 0, 0), huge0, + assert_zu_ge(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ - assert_zu_le(xallocx(p, huge0, huge2 - huge0, 0), huge2, + assert_zu_le(xallocx(p, large1, large3 - large1, flags), large3, "Unexpected xallocx() behavior"); - assert_zu_eq(xallocx(p, huge2, 0, 0), huge2, - "Unexpected xallocx() behavior"); + if (xallocx(p, large3, 0, flags) != large3) { + p = rallocx(p, large3, flags); + assert_ptr_not_null(p, "Unexpected rallocx() failure"); + } /* Test size+extra overflow. */ - assert_zu_le(xallocx(p, huge2, hugemax - huge2 + 1, 0), hugemax, + assert_zu_le(xallocx(p, large3, largemax - large3 + 1, flags), largemax, "Unexpected xallocx() behavior"); - dallocx(p, 0); + dallocx(p, flags); } TEST_END static void -print_filled_extents(const void *p, uint8_t c, size_t len) -{ +print_filled_extents(const void *p, uint8_t c, size_t len) { const uint8_t *pc = (const uint8_t *)p; size_t i, range0; uint8_t c0; @@ -368,32 +293,33 @@ print_filled_extents(const void *p, uint8_t c, size_t len) } static bool -validate_fill(const void *p, uint8_t c, size_t offset, size_t len) -{ +validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { const uint8_t *pc = (const uint8_t *)p; bool err; size_t i; for (i = offset, err = false; i < offset+len; i++) { - if (pc[i] != c) + if (pc[i] != c) { err = true; + } } - if (err) + if (err) { print_filled_extents(p, c, offset + len); + } - return (err); + return err; } static void -test_zero(size_t szmin, size_t szmax) -{ +test_zero(size_t szmin, size_t szmax) { + int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO; size_t sz, nsz; void *p; -#define FILL_BYTE 0x7aU +#define FILL_BYTE 0x7aU sz = szmax; - p = mallocx(sz, MALLOCX_ZERO); + p = mallocx(sz, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu", sz); @@ -408,15 +334,19 @@ test_zero(size_t szmin, size_t szmax) /* Shrink in place so that we can expect growing in place to succeed. 
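The if (xallocx(...) != large3) { p = rallocx(...); } blocks this patch introduces are a fallback idiom: xallocx() never moves an object and returns the usable size it actually reached, so a moving rallocx() is attempted only when the in-place resize falls short. The tests can compare with != because large3 is an exact size class; for arbitrary targets, < is the safer test, as in this hypothetical helper (resize_prefer_in_place is not part of the test suite):

#include <jemalloc/jemalloc.h>

static void *
resize_prefer_in_place(void *p, size_t target, int flags) {
    /* xallocx() never moves p; it returns the usable size it reached. */
    if (xallocx(p, target, 0, flags) < target) {
        p = rallocx(p, target, flags);    /* moving resize as a last resort */
    }
    return p;
}

int main(void) {
    void *p = mallocx(4096, 0);
    if (p == NULL) return 1;
    p = resize_prefer_in_place(p, (size_t)1 << 20, 0);
    if (p != NULL) dallocx(p, 0);
    return 0;
}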
*/ sz = szmin; - assert_zu_eq(xallocx(p, sz, 0, MALLOCX_ZERO), sz, - "Unexpected xallocx() error"); + if (xallocx(p, sz, 0, flags) != sz) { + p = rallocx(p, sz, flags); + assert_ptr_not_null(p, "Unexpected rallocx() failure"); + } assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); for (sz = szmin; sz < szmax; sz = nsz) { - nsz = nallocx(sz+1, MALLOCX_ZERO); - assert_zu_eq(xallocx(p, sz+1, 0, MALLOCX_ZERO), nsz, - "Unexpected xallocx() failure"); + nsz = nallocx(sz+1, flags); + if (xallocx(p, sz+1, 0, flags) != nsz) { + p = rallocx(p, sz+1, flags); + assert_ptr_not_null(p, "Unexpected rallocx() failure"); + } assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); assert_false(validate_fill(p, 0x00, sz, nsz-sz), @@ -426,38 +356,23 @@ test_zero(size_t szmin, size_t szmax) "Memory not filled: nsz=%zu", nsz); } - dallocx(p, 0); + dallocx(p, flags); } -TEST_BEGIN(test_zero_large) -{ - size_t large0, largemax; +TEST_BEGIN(test_zero_large) { + size_t large0, large1; /* Get size classes. */ large0 = get_large_size(0); - largemax = get_large_size(get_nlarge()-1); - - test_zero(large0, largemax); -} -TEST_END - -TEST_BEGIN(test_zero_huge) -{ - size_t huge0, huge1; - - /* Get size classes. */ - huge0 = get_huge_size(0); - huge1 = get_huge_size(1); + large1 = get_large_size(1); - test_zero(huge1, huge0 * 2); + test_zero(large1, large0 * 2); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_same_size, test_extra_no_move, test_no_move_fail, @@ -465,7 +380,5 @@ main(void) test_size_extra_overflow, test_extra_small, test_extra_large, - test_extra_huge, - test_zero_large, - test_zero_huge)); + test_zero_large); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/xallocx.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/xallocx.sh new file mode 100644 index 0000000..0cc2187 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/integration/xallocx.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_fill}" = "x1" ] ; then + export MALLOC_CONF="junk:false" +fi diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/SFMT.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/SFMT.c similarity index 91% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/SFMT.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/SFMT.c index 80cabe0..c05e218 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/SFMT.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/SFMT.c @@ -33,7 +33,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -/** +/** * @file SFMT.c * @brief SIMD oriented Fast Mersenne Twister(SFMT) * @@ -45,7 +45,7 @@ * * The new BSD License is applied to this software, see LICENSE.txt */ -#define SFMT_C_ +#define SFMT_C_ #include "test/jemalloc_test.h" #include "test/SFMT-params.h" @@ -108,7 +108,7 @@ struct sfmt_s { /*-------------------------------------- FILE GLOBAL VARIABLES - internal state, index counter and flag + internal state, index counter and flag --------------------------------------*/ /** a parity check vector which certificate the period of 2^{MEXP} */ @@ -117,18 +117,18 @@ static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4}; /*---------------- STATIC FUNCTIONS ----------------*/ -JEMALLOC_INLINE_C int idxof(int i); +static inline int idxof(int i); #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) -JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift); -JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift); +static inline void rshift128(w128_t *out, w128_t const *in, int shift); +static inline void lshift128(w128_t *out, w128_t const *in, int shift); #endif -JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx); -JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size); -JEMALLOC_INLINE_C uint32_t func1(uint32_t x); -JEMALLOC_INLINE_C uint32_t func2(uint32_t x); +static inline void gen_rand_all(sfmt_t *ctx); +static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size); +static inline uint32_t func1(uint32_t x); +static inline uint32_t func2(uint32_t x); static void period_certification(sfmt_t *ctx); #if defined(BIG_ENDIAN64) && !defined(ONLY64) -JEMALLOC_INLINE_C void swap(w128_t *array, int size); +static inline void swap(w128_t *array, int size); #endif #if defined(HAVE_ALTIVEC) @@ -138,15 +138,15 @@ JEMALLOC_INLINE_C void swap(w128_t *array, int size); #endif /** - * This function simulate a 64-bit index of LITTLE ENDIAN + * This function simulate a 64-bit index of LITTLE ENDIAN * in BIG ENDIAN machine. 
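The idxof() pair above is SFMT's endianness shim: with ONLY64 defined, each 64-bit word stores its two 32-bit lanes swapped on a big-endian machine, and XOR-ing the index with 1 restores little-endian logical order. Illustrated in isolation (idxof_big_endian is a renamed stand-in for the ONLY64 variant):

#include <stdio.h>

static inline int
idxof_big_endian(int i) {
    return i ^ 1;    /* swap the two 32-bit lanes inside each 64-bit word */
}

int main(void) {
    for (int i = 0; i < 4; i++) {
        printf("logical %d -> physical %d\n", i, idxof_big_endian(i));
    }
    return 0;    /* prints 0->1, 1->0, 2->3, 3->2 */
}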
*/ #ifdef ONLY64 -JEMALLOC_INLINE_C int idxof(int i) { +static inline int idxof(int i) { return i ^ 1; } #else -JEMALLOC_INLINE_C int idxof(int i) { +static inline int idxof(int i) { return i; } #endif @@ -160,7 +160,7 @@ JEMALLOC_INLINE_C int idxof(int i) { */ #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) #ifdef ONLY64 -JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { +static inline void rshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); @@ -175,7 +175,7 @@ JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { out->u[3] = (uint32_t)oh; } #else -JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { +static inline void rshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); @@ -199,7 +199,7 @@ JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { * @param shift the shift value */ #ifdef ONLY64 -JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { +static inline void lshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); @@ -214,7 +214,7 @@ JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { out->u[3] = (uint32_t)oh; } #else -JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { +static inline void lshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); @@ -241,37 +241,37 @@ JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { */ #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) #ifdef ONLY64 -JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, +static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, w128_t *d) { w128_t x; w128_t y; lshift128(&x, a, SL2); rshift128(&y, c, SR2); - r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] + r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] ^ (d->u[0] << SL1); - r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] + r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] ^ (d->u[1] << SL1); - r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] + r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] ^ (d->u[2] << SL1); - r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] + r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] ^ (d->u[3] << SL1); } #else -JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, +static inline void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, w128_t *d) { w128_t x; w128_t y; lshift128(&x, a, SL2); rshift128(&y, c, SR2); - r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] + r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] ^ (d->u[0] << SL1); - r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] + r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] ^ (d->u[1] << SL1); - r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] + r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] ^ (d->u[2] << SL1); - r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] + r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] ^ (d->u[3] << SL1); } #endif @@ 
-282,7 +282,7 @@ JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, * This function fills the internal state array with pseudorandom * integers. */ -JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) { +static inline void gen_rand_all(sfmt_t *ctx) { int i; w128_t *r1, *r2; @@ -306,10 +306,10 @@ JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) { * This function fills the user-specified array with pseudorandom * integers. * - * @param array an 128-bit array to be filled by pseudorandom numbers. + * @param array an 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pseudorandom numbers to be generated. */ -JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { +static inline void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; w128_t *r1, *r2; @@ -343,7 +343,7 @@ JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { #endif #if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC) -JEMALLOC_INLINE_C void swap(w128_t *array, int size) { +static inline void swap(w128_t *array, int size) { int i; uint32_t x, y; @@ -476,7 +476,7 @@ uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) { * This function generates and returns 64-bit pseudorandom number. * init_gen_rand or init_by_array must be called before this function. * The function gen_rand64 should not be called after gen_rand32, - * unless an initialization is again executed. + * unless an initialization is again executed. * @return 64-bit pseudorandom number */ uint64_t gen_rand64(sfmt_t *ctx) { @@ -618,7 +618,7 @@ sfmt_t *init_gen_rand(uint32_t seed) { psfmt32[idxof(0)] = seed; for (i = 1; i < N32; i++) { - psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)] + psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)] ^ (psfmt32[idxof(i - 1)] >> 30)) + i; } @@ -668,7 +668,7 @@ sfmt_t *init_by_array(uint32_t *init_key, int key_length) { } else { count = N32; } - r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] + r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] ^ psfmt32[idxof(N32 - 1)]); psfmt32[idxof(mid)] += r; r += key_length; @@ -677,7 +677,7 @@ sfmt_t *init_by_array(uint32_t *init_key, int key_length) { count--; for (i = 1, j = 0; (j < count) && (j < key_length); j++) { - r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] + r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] ^ psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] += r; r += init_key[j] + i; @@ -686,7 +686,7 @@ sfmt_t *init_by_array(uint32_t *init_key, int key_length) { i = (i + 1) % N32; } for (; j < count; j++) { - r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] + r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] ^ psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] += r; r += i; @@ -695,7 +695,7 @@ sfmt_t *init_by_array(uint32_t *init_key, int key_length) { i = (i + 1) % N32; } for (j = 0; j < N32; j++) { - r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] + r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] + psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] ^= r; r -= i; diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/btalloc.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/btalloc.c new file mode 100644 index 0000000..d570952 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/btalloc.c @@ -0,0 +1,6 @@ +#include 
"test/jemalloc_test.h" + +void * +btalloc(size_t size, unsigned bits) { + return btalloc_0(size, bits); +} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/btalloc_0.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/btalloc_0.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/btalloc_0.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/btalloc_0.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/btalloc_1.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/btalloc_1.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/btalloc_1.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/btalloc_1.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/math.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/math.c similarity index 66% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/math.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/math.c index 887a363..1758c67 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/math.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/math.c @@ -1,2 +1,2 @@ -#define MATH_C_ +#define MATH_C_ #include "test/jemalloc_test.h" diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/mq.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/mq.c similarity index 93% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/mq.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/mq.c index 40b31c1..9b5f672 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/mq.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/mq.c @@ -5,9 +5,7 @@ * time is guaranteed. 
*/ void -mq_nanosleep(unsigned ns) -{ - +mq_nanosleep(unsigned ns) { assert(ns <= 1000*1000*1000); #ifdef _WIN32 diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/mtx.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/mtx.c similarity index 57% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/mtx.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/mtx.c index 73bd02f..a393c01 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/mtx.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/mtx.c @@ -1,38 +1,40 @@ #include "test/jemalloc_test.h" #ifndef _CRT_SPINCOUNT -#define _CRT_SPINCOUNT 4000 +#define _CRT_SPINCOUNT 4000 #endif bool -mtx_init(mtx_t *mtx) -{ - +mtx_init(mtx_t *mtx) { #ifdef _WIN32 - if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, _CRT_SPINCOUNT)) - return (true); + if (!InitializeCriticalSectionAndSpinCount(&mtx->lock, + _CRT_SPINCOUNT)) { + return true; + } +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + mtx->lock = OS_UNFAIR_LOCK_INIT; #elif (defined(JEMALLOC_OSSPIN)) mtx->lock = 0; #else pthread_mutexattr_t attr; - if (pthread_mutexattr_init(&attr) != 0) - return (true); + if (pthread_mutexattr_init(&attr) != 0) { + return true; + } pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT); if (pthread_mutex_init(&mtx->lock, &attr) != 0) { pthread_mutexattr_destroy(&attr); - return (true); + return true; } pthread_mutexattr_destroy(&attr); #endif - return (false); + return false; } void -mtx_fini(mtx_t *mtx) -{ - +mtx_fini(mtx_t *mtx) { #ifdef _WIN32 +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) #elif (defined(JEMALLOC_OSSPIN)) #else pthread_mutex_destroy(&mtx->lock); @@ -40,11 +42,11 @@ mtx_fini(mtx_t *mtx) } void -mtx_lock(mtx_t *mtx) -{ - +mtx_lock(mtx_t *mtx) { #ifdef _WIN32 EnterCriticalSection(&mtx->lock); +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock_lock(&mtx->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockLock(&mtx->lock); #else @@ -53,11 +55,11 @@ mtx_lock(mtx_t *mtx) } void -mtx_unlock(mtx_t *mtx) -{ - +mtx_unlock(mtx_t *mtx) { #ifdef _WIN32 LeaveCriticalSection(&mtx->lock); +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock_unlock(&mtx->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockUnlock(&mtx->lock); #else diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/test.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/test.c new file mode 100644 index 0000000..01a4d73 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/test.c @@ -0,0 +1,217 @@ +#include "test/jemalloc_test.h" + +/* Test status state. */ + +static unsigned test_count = 0; +static test_status_t test_counts[test_status_count] = {0, 0, 0}; +static test_status_t test_status = test_status_pass; +static const char * test_name = ""; + +/* Reentrancy testing helpers. 
*/ + +#define NUM_REENTRANT_ALLOCS 20 +typedef enum { + non_reentrant = 0, + libc_reentrant = 1, + arena_new_reentrant = 2 +} reentrancy_t; +static reentrancy_t reentrancy; + +static bool libc_hook_ran = false; +static bool arena_new_hook_ran = false; + +static const char * +reentrancy_t_str(reentrancy_t r) { + switch (r) { + case non_reentrant: + return "non-reentrant"; + case libc_reentrant: + return "libc-reentrant"; + case arena_new_reentrant: + return "arena_new-reentrant"; + default: + unreachable(); + } +} + +static void +do_hook(bool *hook_ran, void (**hook)()) { + *hook_ran = true; + *hook = NULL; + + size_t alloc_size = 1; + for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) { + free(malloc(alloc_size)); + alloc_size *= 2; + } +} + +static void +libc_reentrancy_hook() { + do_hook(&libc_hook_ran, &hooks_libc_hook); +} + +static void +arena_new_reentrancy_hook() { + do_hook(&arena_new_hook_ran, &hooks_arena_new_hook); +} + +/* Actual test infrastructure. */ +bool +test_is_reentrant() { + return reentrancy != non_reentrant; +} + +JEMALLOC_FORMAT_PRINTF(1, 2) +void +test_skip(const char *format, ...) { + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); + malloc_printf("\n"); + test_status = test_status_skip; +} + +JEMALLOC_FORMAT_PRINTF(1, 2) +void +test_fail(const char *format, ...) { + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); + malloc_printf("\n"); + test_status = test_status_fail; +} + +static const char * +test_status_string(test_status_t test_status) { + switch (test_status) { + case test_status_pass: return "pass"; + case test_status_skip: return "skip"; + case test_status_fail: return "fail"; + default: not_reached(); + } +} + +void +p_test_init(const char *name) { + test_count++; + test_status = test_status_pass; + test_name = name; +} + +void +p_test_fini(void) { + test_counts[test_status]++; + malloc_printf("%s (%s): %s\n", test_name, reentrancy_t_str(reentrancy), + test_status_string(test_status)); +} + +static test_status_t +p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) { + test_status_t ret; + + if (do_malloc_init) { + /* + * Make sure initialization occurs prior to running tests. + * Tests are special because they may use internal facilities + * prior to triggering initialization as a side effect of + * calling into the public API. + */ + if (nallocx(1, 0) == 0) { + malloc_printf("Initialization error"); + return test_status_fail; + } + } + + ret = test_status_pass; + for (; t != NULL; t = va_arg(ap, test_t *)) { + /* Non-reentrant run. */ + reentrancy = non_reentrant; + hooks_arena_new_hook = hooks_libc_hook = NULL; + t(); + if (test_status > ret) { + ret = test_status; + } + /* Reentrant run. */ + if (do_reentrant) { + reentrancy = libc_reentrant; + hooks_arena_new_hook = NULL; + hooks_libc_hook = &libc_reentrancy_hook; + t(); + if (test_status > ret) { + ret = test_status; + } + + reentrancy = arena_new_reentrant; + hooks_libc_hook = NULL; + hooks_arena_new_hook = &arena_new_reentrancy_hook; + t(); + if (test_status > ret) { + ret = test_status; + } + } + } + + malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n", + test_status_string(test_status_pass), + test_counts[test_status_pass], test_count, + test_status_string(test_status_skip), + test_counts[test_status_skip], test_count, + test_status_string(test_status_fail), + test_counts[test_status_fail], test_count); + + return ret; +} + +test_status_t +p_test(test_t *t, ...) 
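do_hook() above sweeps allocation sizes geometrically so that a single reentrant run touches many size classes, from one byte up through half a megabyte, while the allocator is being re-entered. A standalone rendering of just that sweep (reentrant_alloc_sweep is a hypothetical name):

#include <stdlib.h>

static void
reentrant_alloc_sweep(void) {
    size_t alloc_size = 1;
    for (int i = 0; i < 20; i++) {    /* NUM_REENTRANT_ALLOCS */
        free(malloc(alloc_size));
        alloc_size *= 2;              /* 1 byte up through 512 KiB */
    }
}

int main(void) {
    reentrant_alloc_sweep();
    return 0;
}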
{ + test_status_t ret; + va_list ap; + + ret = test_status_pass; + va_start(ap, t); + ret = p_test_impl(true, true, t, ap); + va_end(ap); + + return ret; +} + +test_status_t +p_test_no_reentrancy(test_t *t, ...) { + test_status_t ret; + va_list ap; + + ret = test_status_pass; + va_start(ap, t); + ret = p_test_impl(true, false, t, ap); + va_end(ap); + + return ret; +} + +test_status_t +p_test_no_malloc_init(test_t *t, ...) { + test_status_t ret; + va_list ap; + + ret = test_status_pass; + va_start(ap, t); + /* + * We also omit reentrancy from bootstrapping tests, since we don't + * (yet) care about general reentrancy during bootstrapping. + */ + ret = p_test_impl(false, false, t, ap); + va_end(ap); + + return ret; +} + +void +p_test_fail(const char *prefix, const char *message) { + malloc_cprintf(NULL, NULL, "%s%s\n", prefix, message); + test_status = test_status_fail; +} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/thd.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/thd.c similarity index 65% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/thd.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/thd.c index c9d0065..9a15eab 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/src/thd.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/thd.c @@ -2,18 +2,16 @@ #ifdef _WIN32 void -thd_create(thd_t *thd, void *(*proc)(void *), void *arg) -{ +thd_create(thd_t *thd, void *(*proc)(void *), void *arg) { LPTHREAD_START_ROUTINE routine = (LPTHREAD_START_ROUTINE)proc; *thd = CreateThread(NULL, 0, routine, arg, 0, NULL); - if (*thd == NULL) + if (*thd == NULL) { test_fail("Error in CreateThread()\n"); + } } void -thd_join(thd_t thd, void **ret) -{ - +thd_join(thd_t thd, void **ret) { if (WaitForSingleObject(thd, INFINITE) == WAIT_OBJECT_0 && ret) { DWORD exit_code; GetExitCodeThread(thd, (LPDWORD) &exit_code); @@ -23,17 +21,14 @@ thd_join(thd_t thd, void **ret) #else void -thd_create(thd_t *thd, void *(*proc)(void *), void *arg) -{ - - if (pthread_create(thd, NULL, proc, arg) != 0) +thd_create(thd_t *thd, void *(*proc)(void *), void *arg) { + if (pthread_create(thd, NULL, proc, arg) != 0) { test_fail("Error in pthread_create()\n"); + } } void -thd_join(thd_t thd, void **ret) -{ - +thd_join(thd_t thd, void **ret) { pthread_join(thd, ret); } #endif diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/timer.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/timer.c new file mode 100644 index 0000000..c451c63 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/src/timer.c @@ -0,0 +1,56 @@ +#include "test/jemalloc_test.h" + +void +timer_start(timedelta_t *timer) { + nstime_init(&timer->t0, 0); + nstime_update(&timer->t0); +} + +void +timer_stop(timedelta_t *timer) { + nstime_copy(&timer->t1, &timer->t0); + nstime_update(&timer->t1); +} + +uint64_t +timer_usec(const timedelta_t *timer) { + nstime_t delta; + + nstime_copy(&delta, &timer->t1); + nstime_subtract(&delta, &timer->t0); + return nstime_ns(&delta) / 1000; +} + +void +timer_ratio(timedelta_t *a, timedelta_t *b, char *buf, size_t buflen) { + uint64_t t0 = timer_usec(a); + uint64_t t1 = timer_usec(b); + uint64_t mult; + size_t i = 0; + size_t j, n; + + /* Whole. */ + n = malloc_snprintf(&buf[i], buflen-i, "%"FMTu64, t0 / t1); + i += n; + if (i >= buflen) { + return; + } + mult = 1; + for (j = 0; j < n; j++) { + mult *= 10; + } + + /* Decimal. 
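timer_ratio() above prints t0/t1 without floating point: the whole part first, then one decimal digit per iteration via (t0 * mult / t1) % 10, with mult starting at 10 raised to the number of whole-part digits. The same long-division trick in isolation (note that t0 * mult can overflow for large inputs; the harness's bounded buflen limits that in practice):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t t0 = 355, t1 = 113;    /* t0/t1 is roughly pi */
    int n = printf("%llu", (unsigned long long)(t0 / t1));    /* whole part */
    printf(".");
    uint64_t mult = 1;
    for (int j = 0; j < n; j++) {
        mult *= 10;                 /* 10^(whole-part digit count) */
    }
    for (int i = 0; i < 6; i++) {   /* six fraction digits */
        printf("%llu", (unsigned long long)((t0 * mult / t1) % 10));
        mult *= 10;
    }
    printf("\n");                   /* prints 3.141592 */
    return 0;
}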
*/ + n = malloc_snprintf(&buf[i], buflen-i, "."); + i += n; + + /* Fraction. */ + while (i < buflen-1) { + uint64_t round = (i+1 == buflen-1 && ((t0 * mult * 10 / t1) % 10 + >= 5)) ? 1 : 0; + n = malloc_snprintf(&buf[i], buflen-i, + "%"FMTu64, (t0 * mult / t1) % 10 + round); + i += n; + mult *= 10; + } +} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/stress/microbench.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/stress/microbench.c similarity index 77% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/stress/microbench.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/stress/microbench.c index ee39fea..988b793 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/stress/microbench.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/stress/microbench.c @@ -1,22 +1,23 @@ #include "test/jemalloc_test.h" -JEMALLOC_INLINE_C void -time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, void (*func)(void)) -{ +static inline void +time_func(timedelta_t *timer, uint64_t nwarmup, uint64_t niter, + void (*func)(void)) { uint64_t i; - for (i = 0; i < nwarmup; i++) + for (i = 0; i < nwarmup; i++) { func(); + } timer_start(timer); - for (i = 0; i < niter; i++) + for (i = 0; i < niter; i++) { func(); + } timer_stop(timer); } void compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a, - void (*func_a), const char *name_b, void (*func_b)) -{ + void (*func_a), const char *name_b, void (*func_b)) { timedelta_t timer_a, timer_b; char ratio_buf[6]; void *p; @@ -40,8 +41,7 @@ compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a, } static void -malloc_free(void) -{ +malloc_free(void) { /* The compiler can optimize away free(malloc(1))! 
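time_func()/compare_funcs() follow the usual microbenchmark shape: an untimed warmup loop, then a timed loop over the same callback; the NULL check in malloc_free() exists precisely so the compiler cannot delete the free(malloc(1)) pair. A POSIX clock_gettime() stand-in for the harness's timedelta_t:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static void
malloc_free_once(void) {
    void *p = malloc(1);
    if (p == NULL) {    /* the check defeats dead-code elimination */
        return;
    }
    free(p);
}

static uint64_t
time_func_ns(uint64_t nwarmup, uint64_t niter, void (*func)(void)) {
    struct timespec t0, t1;
    for (uint64_t i = 0; i < nwarmup; i++) {
        func();    /* warm caches and allocator state, untimed */
    }
    clock_gettime(CLOCK_MONOTONIC, &t0);
    for (uint64_t i = 0; i < niter; i++) {
        func();
    }
    clock_gettime(CLOCK_MONOTONIC, &t1);
    return (uint64_t)(t1.tv_sec - t0.tv_sec) * 1000000000ULL +
        (uint64_t)(t1.tv_nsec - t0.tv_nsec);
}

int main(void) {
    printf("%llu ns\n",
        (unsigned long long)time_func_ns(1000, 1000000, malloc_free_once));
    return 0;
}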
*/ void *p = malloc(1); if (p == NULL) { @@ -52,8 +52,7 @@ malloc_free(void) } static void -mallocx_free(void) -{ +mallocx_free(void) { void *p = mallocx(1, 0); if (p == NULL) { test_fail("Unexpected mallocx() failure"); @@ -62,17 +61,14 @@ mallocx_free(void) free(p); } -TEST_BEGIN(test_malloc_vs_mallocx) -{ - +TEST_BEGIN(test_malloc_vs_mallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "malloc", malloc_free, "mallocx", mallocx_free); } TEST_END static void -malloc_dallocx(void) -{ +malloc_dallocx(void) { void *p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); @@ -82,8 +78,7 @@ malloc_dallocx(void) } static void -malloc_sdallocx(void) -{ +malloc_sdallocx(void) { void *p = malloc(1); if (p == NULL) { test_fail("Unexpected malloc() failure"); @@ -92,25 +87,20 @@ malloc_sdallocx(void) sdallocx(p, 1, 0); } -TEST_BEGIN(test_free_vs_dallocx) -{ - +TEST_BEGIN(test_free_vs_dallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "free", malloc_free, "dallocx", malloc_dallocx); } TEST_END -TEST_BEGIN(test_dallocx_vs_sdallocx) -{ - +TEST_BEGIN(test_dallocx_vs_sdallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "dallocx", malloc_dallocx, "sdallocx", malloc_sdallocx); } TEST_END static void -malloc_mus_free(void) -{ +malloc_mus_free(void) { void *p; p = malloc(1); @@ -123,8 +113,7 @@ malloc_mus_free(void) } static void -malloc_sallocx_free(void) -{ +malloc_sallocx_free(void) { void *p; p = malloc(1); @@ -132,22 +121,20 @@ malloc_sallocx_free(void) test_fail("Unexpected malloc() failure"); return; } - if (sallocx(p, 0) < 1) + if (sallocx(p, 0) < 1) { test_fail("Unexpected sallocx() failure"); + } free(p); } -TEST_BEGIN(test_mus_vs_sallocx) -{ - +TEST_BEGIN(test_mus_vs_sallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "malloc_usable_size", malloc_mus_free, "sallocx", malloc_sallocx_free); } TEST_END static void -malloc_nallocx_free(void) -{ +malloc_nallocx_free(void) { void *p; p = malloc(1); @@ -155,27 +142,24 @@ malloc_nallocx_free(void) test_fail("Unexpected malloc() failure"); return; } - if (nallocx(1, 0) < 1) + if (nallocx(1, 0) < 1) { test_fail("Unexpected nallocx() failure"); + } free(p); } -TEST_BEGIN(test_sallocx_vs_nallocx) -{ - +TEST_BEGIN(test_sallocx_vs_nallocx) { compare_funcs(10*1000*1000, 100*1000*1000, "sallocx", malloc_sallocx_free, "nallocx", malloc_nallocx_free); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test_no_reentrancy( test_malloc_vs_mallocx, test_free_vs_dallocx, test_dallocx_vs_sdallocx, test_mus_vs_sallocx, - test_sallocx_vs_nallocx)); + test_sallocx_vs_nallocx); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/test.sh.in b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/test.sh.in new file mode 100644 index 0000000..39302ff --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/test.sh.in @@ -0,0 +1,80 @@ +#!/bin/sh + +case @abi@ in + macho) + export DYLD_FALLBACK_LIBRARY_PATH="@objroot@lib" + ;; + pecoff) + export PATH="${PATH}:@objroot@lib" + ;; + *) + ;; +esac + +# Make a copy of the @JEMALLOC_CPREFIX@MALLOC_CONF passed in to this script, so +# it can be repeatedly concatenated with per test settings. +export MALLOC_CONF_ALL=${@JEMALLOC_CPREFIX@MALLOC_CONF} +# Concatenate the individual test's MALLOC_CONF and MALLOC_CONF_ALL. 
+export_malloc_conf() { + if [ "x${MALLOC_CONF}" != "x" -a "x${MALLOC_CONF_ALL}" != "x" ] ; then + export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF},${MALLOC_CONF_ALL}" + else + export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF}${MALLOC_CONF_ALL}" + fi +} + +# Corresponds to test_status_t. +pass_code=0 +skip_code=1 +fail_code=2 + +pass_count=0 +skip_count=0 +fail_count=0 +for t in $@; do + if [ $pass_count -ne 0 -o $skip_count -ne 0 -o $fail_count != 0 ] ; then + echo + fi + echo "=== ${t} ===" + if [ -e "@srcroot@${t}.sh" ] ; then + # Source the shell script corresponding to the test in a subshell and + # execute the test. This allows the shell script to set MALLOC_CONF, which + # is then used to set @JEMALLOC_CPREFIX@MALLOC_CONF (thus allowing the + # per test shell script to ignore the @JEMALLOC_CPREFIX@ detail). + enable_fill=@enable_fill@ \ + enable_prof=@enable_prof@ \ + . @srcroot@${t}.sh && \ + export_malloc_conf && \ + $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@ + else + export MALLOC_CONF= && \ + export_malloc_conf && \ + $JEMALLOC_TEST_PREFIX ${t}@exe@ @abs_srcroot@ @abs_objroot@ + fi + result_code=$? + case ${result_code} in + ${pass_code}) + pass_count=$((pass_count+1)) + ;; + ${skip_code}) + skip_count=$((skip_count+1)) + ;; + ${fail_code}) + fail_count=$((fail_count+1)) + ;; + *) + echo "Test harness error: ${t} w/ MALLOC_CONF=\"${MALLOC_CONF}\"" 1>&2 + echo "Use prefix to debug, e.g. JEMALLOC_TEST_PREFIX=\"gdb --args\" sh test/test.sh ${t}" 1>&2 + exit 1 + esac +done + +total_count=`expr ${pass_count} + ${skip_count} + ${fail_count}` +echo +echo "Test suite summary: pass: ${pass_count}/${total_count}, skip: ${skip_count}/${total_count}, fail: ${fail_count}/${total_count}" + +if [ ${fail_count} -eq 0 ] ; then + exit 0 +else + exit 1 +fi diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/SFMT.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/SFMT.c similarity index 99% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/SFMT.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/SFMT.c index ba4be87..1fc8cf1 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/SFMT.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/SFMT.c @@ -35,10 +35,10 @@ */ #include "test/jemalloc_test.h" -#define BLOCK_SIZE 10000 -#define BLOCK_SIZE64 (BLOCK_SIZE / 2) -#define COUNT_1 1000 -#define COUNT_2 700 +#define BLOCK_SIZE 10000 +#define BLOCK_SIZE64 (BLOCK_SIZE / 2) +#define COUNT_1 1000 +#define COUNT_2 700 static const uint32_t init_gen_rand_32_expected[] = { 3440181298U, 1564997079U, 1510669302U, 2930277156U, 1452439940U, @@ -1449,8 +1449,7 @@ static const uint64_t init_by_array_64_expected[] = { KQU(15570163926716513029), KQU(13356980519185762498) }; -TEST_BEGIN(test_gen_rand_32) -{ +TEST_BEGIN(test_gen_rand_32) { uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); int i; @@ -1484,8 +1483,7 @@ TEST_BEGIN(test_gen_rand_32) } TEST_END -TEST_BEGIN(test_by_array_32) -{ +TEST_BEGIN(test_by_array_32) { uint32_t array32[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); uint32_t array32_2[BLOCK_SIZE] JEMALLOC_ATTR(aligned(16)); int i; @@ -1520,8 +1518,7 @@ TEST_BEGIN(test_by_array_32) } TEST_END -TEST_BEGIN(test_gen_rand_64) -{ +TEST_BEGIN(test_gen_rand_64) { uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); int i; @@ -1556,8 
+1553,7 @@ TEST_BEGIN(test_gen_rand_64) } TEST_END -TEST_BEGIN(test_by_array_64) -{ +TEST_BEGIN(test_by_array_64) { uint64_t array64[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); uint64_t array64_2[BLOCK_SIZE64] JEMALLOC_ATTR(aligned(16)); int i; @@ -1594,12 +1590,10 @@ TEST_BEGIN(test_by_array_64) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_gen_rand_32, test_by_array_32, test_gen_rand_64, - test_by_array_64)); + test_by_array_64); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/a0.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/a0.c new file mode 100644 index 0000000..a27ab3f --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/a0.c @@ -0,0 +1,16 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_a0) { + void *p; + + p = a0malloc(1); + assert_ptr_not_null(p, "Unexpected a0malloc() error"); + a0dalloc(p); +} +TEST_END + +int +main(void) { + return test_no_malloc_init( + test_a0); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/arena_reset.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/arena_reset.c new file mode 100644 index 0000000..f5fb24d --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/arena_reset.c @@ -0,0 +1,344 @@ +#ifndef ARENA_RESET_PROF_C_ +#include "test/jemalloc_test.h" +#endif + +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/rtree.h" + +#include "test/extent_hooks.h" + +static unsigned +get_nsizes_impl(const char *cmd) { + unsigned ret; + size_t z; + + z = sizeof(unsigned); + assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, + "Unexpected mallctl(\"%s\", ...) failure", cmd); + + return ret; +} + +static unsigned +get_nsmall(void) { + return get_nsizes_impl("arenas.nbins"); +} + +static unsigned +get_nlarge(void) { + return get_nsizes_impl("arenas.nlextents"); +} + +static size_t +get_size_impl(const char *cmd, size_t ind) { + size_t ret; + size_t z; + size_t mib[4]; + size_t miblen = 4; + + z = sizeof(size_t); + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = ind; + z = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); + + return ret; +} + +static size_t +get_small_size(size_t ind) { + return get_size_impl("arenas.bin.0.size", ind); +} + +static size_t +get_large_size(size_t ind) { + return get_size_impl("arenas.lextent.0.size", ind); +} + +/* Like ivsalloc(), but safe to call on discarded allocations. */ +static size_t +vsalloc(tsdn_t *tsdn, const void *ptr) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + extent_t *extent; + szind_t szind; + if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, false, &extent, &szind)) { + return 0; + } + + if (extent == NULL) { + return 0; + } + if (extent_state_get(extent) != extent_state_active) { + return 0; + } + + if (szind == NSIZES) { + return 0; + } + + return sz_index2size(szind); +} + +static unsigned +do_arena_create(extent_hooks_t *h) { + unsigned arena_ind; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, + (void *)(h != NULL ? &h : NULL), (h != NULL ? 
sizeof(h) : 0)), 0, + "Unexpected mallctl() failure"); + return arena_ind; +} + +static void +do_arena_reset_pre(unsigned arena_ind, void ***ptrs, unsigned *nptrs) { +#define NLARGE 32 + unsigned nsmall, nlarge, i; + size_t sz; + int flags; + tsdn_t *tsdn; + + flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + + nsmall = get_nsmall(); + nlarge = get_nlarge() > NLARGE ? NLARGE : get_nlarge(); + *nptrs = nsmall + nlarge; + *ptrs = (void **)malloc(*nptrs * sizeof(void *)); + assert_ptr_not_null(*ptrs, "Unexpected malloc() failure"); + + /* Allocate objects with a wide range of sizes. */ + for (i = 0; i < nsmall; i++) { + sz = get_small_size(i); + (*ptrs)[i] = mallocx(sz, flags); + assert_ptr_not_null((*ptrs)[i], + "Unexpected mallocx(%zu, %#x) failure", sz, flags); + } + for (i = 0; i < nlarge; i++) { + sz = get_large_size(i); + (*ptrs)[nsmall + i] = mallocx(sz, flags); + assert_ptr_not_null((*ptrs)[nsmall + i], + "Unexpected mallocx(%zu, %#x) failure", sz, flags); + } + + tsdn = tsdn_fetch(); + + /* Verify allocations. */ + for (i = 0; i < *nptrs; i++) { + assert_zu_gt(ivsalloc(tsdn, (*ptrs)[i]), 0, + "Allocation should have queryable size"); + } +} + +static void +do_arena_reset_post(void **ptrs, unsigned nptrs, unsigned arena_ind) { + tsdn_t *tsdn; + unsigned i; + + tsdn = tsdn_fetch(); + + if (have_background_thread) { + malloc_mutex_lock(tsdn, + &background_thread_info[arena_ind % ncpus].mtx); + } + /* Verify allocations no longer exist. */ + for (i = 0; i < nptrs; i++) { + assert_zu_eq(vsalloc(tsdn, ptrs[i]), 0, + "Allocation should no longer exist"); + } + if (have_background_thread) { + malloc_mutex_unlock(tsdn, + &background_thread_info[arena_ind % ncpus].mtx); + } + + free(ptrs); +} + +static void +do_arena_reset_destroy(const char *name, unsigned arena_ind) { + size_t mib[3]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib(name, mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +static void +do_arena_reset(unsigned arena_ind) { + do_arena_reset_destroy("arena.0.reset", arena_ind); +} + +static void +do_arena_destroy(unsigned arena_ind) { + do_arena_reset_destroy("arena.0.destroy", arena_ind); +} + +TEST_BEGIN(test_arena_reset) { + unsigned arena_ind; + void **ptrs; + unsigned nptrs; + + arena_ind = do_arena_create(NULL); + do_arena_reset_pre(arena_ind, &ptrs, &nptrs); + do_arena_reset(arena_ind); + do_arena_reset_post(ptrs, nptrs, arena_ind); +} +TEST_END + +static bool +arena_i_initialized(unsigned arena_ind, bool refresh) { + bool initialized; + size_t mib[3]; + size_t miblen, sz; + + if (refresh) { + uint64_t epoch = 1; + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, + sizeof(epoch)), 0, "Unexpected mallctl() failure"); + } + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + sz = sizeof(initialized); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL, + 0), 0, "Unexpected mallctlbymib() failure"); + + return initialized; +} + +TEST_BEGIN(test_arena_destroy_initial) { + assert_false(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false), + "Destroyed arena stats should not be initialized"); +} +TEST_END + +TEST_BEGIN(test_arena_destroy_hooks_default) { + unsigned arena_ind, arena_ind_another, arena_ind_prev; + void **ptrs; +
unsigned nptrs; + + arena_ind = do_arena_create(NULL); + do_arena_reset_pre(arena_ind, &ptrs, &nptrs); + + assert_false(arena_i_initialized(arena_ind, false), + "Arena stats should not be initialized"); + assert_true(arena_i_initialized(arena_ind, true), + "Arena stats should be initialized"); + + /* + * Create another arena before destroying one, to better verify arena + * index reuse. + */ + arena_ind_another = do_arena_create(NULL); + + do_arena_destroy(arena_ind); + + assert_false(arena_i_initialized(arena_ind, true), + "Arena stats should not be initialized"); + assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false), + "Destroyed arena stats should be initialized"); + + do_arena_reset_post(ptrs, nptrs, arena_ind); + + arena_ind_prev = arena_ind; + arena_ind = do_arena_create(NULL); + do_arena_reset_pre(arena_ind, &ptrs, &nptrs); + assert_u_eq(arena_ind, arena_ind_prev, + "Arena index should have been recycled"); + do_arena_destroy(arena_ind); + do_arena_reset_post(ptrs, nptrs, arena_ind); + + do_arena_destroy(arena_ind_another); +} +TEST_END + +/* + * Actually unmap extents, regardless of opt_retain, so that attempts to access + * a destroyed arena's memory will segfault. + */ +static bool +extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + TRACE_HOOK("%s(extent_hooks=%p, addr=%p, size=%zu, committed=%s, " + "arena_ind=%u)\n", __func__, extent_hooks, addr, size, committed ? + "true" : "false", arena_ind); + assert_ptr_eq(extent_hooks, &hooks, + "extent_hooks should be same as pointer used to set hooks"); + assert_ptr_eq(extent_hooks->dalloc, extent_dalloc_unmap, + "Wrong hook function"); + called_dalloc = true; + if (!try_dalloc) { + return true; + } + pages_unmap(addr, size); + did_dalloc = true; + return false; +} + +static extent_hooks_t hooks_orig; + +static extent_hooks_t hooks_unmap = { + extent_alloc_hook, + extent_dalloc_unmap, /* dalloc */ + extent_destroy_hook, + extent_commit_hook, + extent_decommit_hook, + extent_purge_lazy_hook, + extent_purge_forced_hook, + extent_split_hook, + extent_merge_hook +}; + +TEST_BEGIN(test_arena_destroy_hooks_unmap) { + unsigned arena_ind; + void **ptrs; + unsigned nptrs; + + extent_hooks_prep(); + try_decommit = false; + memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); + memcpy(&hooks, &hooks_unmap, sizeof(extent_hooks_t)); + + did_alloc = false; + arena_ind = do_arena_create(&hooks); + do_arena_reset_pre(arena_ind, &ptrs, &nptrs); + + assert_true(did_alloc, "Expected alloc"); + + assert_false(arena_i_initialized(arena_ind, false), + "Arena stats should not be initialized"); + assert_true(arena_i_initialized(arena_ind, true), + "Arena stats should be initialized"); + + did_dalloc = false; + do_arena_destroy(arena_ind); + assert_true(did_dalloc, "Expected dalloc"); + + assert_false(arena_i_initialized(arena_ind, true), + "Arena stats should not be initialized"); + assert_true(arena_i_initialized(MALLCTL_ARENAS_DESTROYED, false), + "Destroyed arena stats should be initialized"); + + do_arena_reset_post(ptrs, nptrs, arena_ind); + + memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); +} +TEST_END + +int +main(void) { + return test( + test_arena_reset, + test_arena_destroy_initial, + test_arena_destroy_hooks_default, + test_arena_destroy_hooks_unmap); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/arena_reset_prof.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/arena_reset_prof.c new file mode 100644 index 
0000000..38d8012 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/arena_reset_prof.c @@ -0,0 +1,4 @@ +#include "test/jemalloc_test.h" +#define ARENA_RESET_PROF_C_ + +#include "arena_reset.c" diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/arena_reset_prof.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/arena_reset_prof.sh new file mode 100644 index 0000000..041dc1c --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/arena_reset_prof.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +export MALLOC_CONF="prof:true,lg_prof_sample:0" diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/atomic.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/atomic.c new file mode 100644 index 0000000..572d8d2 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/atomic.c @@ -0,0 +1,229 @@ +#include "test/jemalloc_test.h" + +/* + * We *almost* have consistent short names (e.g. "u32" for uint32_t, "b" for + * bool, etc.). The one exception is that the short name for void * is "p" in + * some places and "ptr" in others. In the long run it would be nice to unify + * these, but in the short run we'll use this shim. + */ +#define assert_p_eq assert_ptr_eq + +/* + * t: the non-atomic type, like "uint32_t". + * ta: the short name for the type, like "u32". + * val[1,2,3]: Values of the given type. The CAS tests use val2 for expected, + * and val3 for desired. + */ + +#define DO_TESTS(t, ta, val1, val2, val3) do { \ + t val; \ + t expected; \ + bool success; \ + /* This (along with the load below) also tests ATOMIC_LOAD. */ \ + atomic_##ta##_t atom = ATOMIC_INIT(val1); \ + \ + /* ATOMIC_INIT and load. */ \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, "Load or init failed"); \ + \ + /* Store. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + atomic_store_##ta(&atom, val2, ATOMIC_RELAXED); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val2, val, "Store failed"); \ + \ + /* Exchange. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_exchange_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, "Exchange returned invalid value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val2, val, "Exchange stored invalid value"); \ + \ + /* \ + * Weak CAS. Spurious failures are allowed, so we loop a few \ + * times. \ + */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + success = false; \ + for (int i = 0; i < 10 && !success; i++) { \ + expected = val2; \ + success = atomic_compare_exchange_weak_##ta(&atom, \ + &expected, val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, expected, \ + "CAS should update expected"); \ + } \ + assert_b_eq(val1 == val2, success, \ + "Weak CAS did the wrong state update"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + if (success) { \ + assert_##ta##_eq(val3, val, \ + "Successful CAS should update atomic"); \ + } else { \ + assert_##ta##_eq(val1, val, \ + "Unsuccessful CAS should not update atomic"); \ + } \ + \ + /* Strong CAS.
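+	 * Unlike the weak form above, a strong compare-exchange fails	\ +	 * only when the observed value actually differs from the	\ +	 * expected value, so a single attempt suffices.		\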
*/ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + expected = val2; \ + success = atomic_compare_exchange_strong_##ta(&atom, &expected, \ + val3, ATOMIC_RELAXED, ATOMIC_RELAXED); \ + assert_b_eq(val1 == val2, success, \ + "Strong CAS did the wrong state update"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + if (success) { \ + assert_##ta##_eq(val3, val, \ + "Successful CAS should update atomic"); \ + } else { \ + assert_##ta##_eq(val1, val, \ + "Unsuccessful CAS should not update atomic"); \ + } \ + \ + \ +} while (0) + +#define DO_INTEGER_TESTS(t, ta, val1, val2) do { \ + atomic_##ta##_t atom; \ + t val; \ + \ + /* Fetch-add. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_fetch_add_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, \ + "Fetch-add should return previous value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1 + val2, val, \ + "Fetch-add should update atomic"); \ + \ + /* Fetch-sub. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_fetch_sub_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, \ + "Fetch-sub should return previous value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1 - val2, val, \ + "Fetch-sub should update atomic"); \ + \ + /* Fetch-and. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_fetch_and_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, \ + "Fetch-and should return previous value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1 & val2, val, \ + "Fetch-and should update atomic"); \ + \ + /* Fetch-or. */ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_fetch_or_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, \ + "Fetch-or should return previous value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1 | val2, val, \ + "Fetch-or should update atomic"); \ + \ + /* Fetch-xor. 
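+	 * (Worked example: with val1 = 5 and val2 = 3, the fetch	\ +	 * returns 5 and the atomic becomes 5 ^ 3 = 6.)			\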
*/ \ + atomic_store_##ta(&atom, val1, ATOMIC_RELAXED); \ + val = atomic_fetch_xor_##ta(&atom, val2, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1, val, \ + "Fetch-xor should return previous value"); \ + val = atomic_load_##ta(&atom, ATOMIC_RELAXED); \ + assert_##ta##_eq(val1 ^ val2, val, \ + "Fetch-xor should update atomic"); \ +} while (0) + +#define TEST_STRUCT(t, ta) \ +typedef struct { \ + t val1; \ + t val2; \ + t val3; \ +} ta##_test_t; + +#define TEST_CASES(t) { \ + {(t)-1, (t)-1, (t)-2}, \ + {(t)-1, (t) 0, (t)-2}, \ + {(t)-1, (t) 1, (t)-2}, \ + \ + {(t) 0, (t)-1, (t)-2}, \ + {(t) 0, (t) 0, (t)-2}, \ + {(t) 0, (t) 1, (t)-2}, \ + \ + {(t) 1, (t)-1, (t)-2}, \ + {(t) 1, (t) 0, (t)-2}, \ + {(t) 1, (t) 1, (t)-2}, \ + \ + {(t)0, (t)-(1 << 22), (t)-2}, \ + {(t)0, (t)(1 << 22), (t)-2}, \ + {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \ + {(t)(1 << 22), (t)(1 << 22), (t)-2} \ +} + +#define TEST_BODY(t, ta) do { \ + const ta##_test_t tests[] = TEST_CASES(t); \ + for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \ + ta##_test_t test = tests[i]; \ + DO_TESTS(t, ta, test.val1, test.val2, test.val3); \ + } \ +} while (0) + +#define INTEGER_TEST_BODY(t, ta) do { \ + const ta##_test_t tests[] = TEST_CASES(t); \ + for (unsigned i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { \ + ta##_test_t test = tests[i]; \ + DO_TESTS(t, ta, test.val1, test.val2, test.val3); \ + DO_INTEGER_TESTS(t, ta, test.val1, test.val2); \ + } \ +} while (0) + +TEST_STRUCT(uint64_t, u64); +TEST_BEGIN(test_atomic_u64) { +#if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) + test_skip("64-bit atomic operations not supported"); +#else + INTEGER_TEST_BODY(uint64_t, u64); +#endif +} +TEST_END + + +TEST_STRUCT(uint32_t, u32); +TEST_BEGIN(test_atomic_u32) { + INTEGER_TEST_BODY(uint32_t, u32); +} +TEST_END + +TEST_STRUCT(void *, p); +TEST_BEGIN(test_atomic_p) { + TEST_BODY(void *, p); +} +TEST_END + +TEST_STRUCT(size_t, zu); +TEST_BEGIN(test_atomic_zu) { + INTEGER_TEST_BODY(size_t, zu); +} +TEST_END + +TEST_STRUCT(ssize_t, zd); +TEST_BEGIN(test_atomic_zd) { + INTEGER_TEST_BODY(ssize_t, zd); +} +TEST_END + + +TEST_STRUCT(unsigned, u); +TEST_BEGIN(test_atomic_u) { + INTEGER_TEST_BODY(unsigned, u); +} +TEST_END + +int +main(void) { + return test( + test_atomic_u64, + test_atomic_u32, + test_atomic_p, + test_atomic_zu, + test_atomic_zd, + test_atomic_u); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/background_thread.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/background_thread.c new file mode 100644 index 0000000..f7bd37c --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/background_thread.c @@ -0,0 +1,119 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/util.h" + +static void +test_switch_background_thread_ctl(bool new_val) { + bool e0, e1; + size_t sz = sizeof(bool); + + e1 = new_val; + assert_d_eq(mallctl("background_thread", (void *)&e0, &sz, + &e1, sz), 0, "Unexpected mallctl() failure"); + assert_b_eq(e0, !e1, + "background_thread should be %d before.\n", !e1); + if (e1) { + assert_zu_gt(n_background_threads, 0, + "Number of background threads should be non zero.\n"); + } else { + assert_zu_eq(n_background_threads, 0, + "Number of background threads should be zero.\n"); + } +} + +static void +test_repeat_background_thread_ctl(bool before) { + bool e0, e1; + size_t sz = sizeof(bool); + + e1 = before; + assert_d_eq(mallctl("background_thread", (void *)&e0, &sz, + &e1, sz), 0, "Unexpected mallctl() failure"); + assert_b_eq(e0, 
before, + "background_thread should be %d.\n", before); + if (e1) { + assert_zu_gt(n_background_threads, 0, + "Number of background threads should be non zero.\n"); + } else { + assert_zu_eq(n_background_threads, 0, + "Number of background threads should be zero.\n"); + } +} + +TEST_BEGIN(test_background_thread_ctl) { + test_skip_if(!have_background_thread); + + bool e0, e1; + size_t sz = sizeof(bool); + + assert_d_eq(mallctl("opt.background_thread", (void *)&e0, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("background_thread", (void *)&e1, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + assert_b_eq(e0, e1, + "Default and opt.background_thread do not match.\n"); + if (e0) { + test_switch_background_thread_ctl(false); + } + assert_zu_eq(n_background_threads, 0, + "Number of background threads should be 0.\n"); + + for (unsigned i = 0; i < 4; i++) { + test_switch_background_thread_ctl(true); + test_repeat_background_thread_ctl(true); + test_repeat_background_thread_ctl(true); + + test_switch_background_thread_ctl(false); + test_repeat_background_thread_ctl(false); + test_repeat_background_thread_ctl(false); + } +} +TEST_END + +TEST_BEGIN(test_background_thread_running) { + test_skip_if(!have_background_thread); + test_skip_if(!config_stats); + +#if defined(JEMALLOC_BACKGROUND_THREAD) + tsd_t *tsd = tsd_fetch(); + background_thread_info_t *info = &background_thread_info[0]; + + test_repeat_background_thread_ctl(false); + test_switch_background_thread_ctl(true); + assert_b_eq(info->state, background_thread_started, + "Background thread did not start.\n"); + + nstime_t start, now; + nstime_init(&start, 0); + nstime_update(&start); + + bool ran = false; + while (true) { + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + if (info->tot_n_runs > 0) { + ran = true; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + if (ran) { + break; + } + + nstime_init(&now, 0); + nstime_update(&now); + nstime_subtract(&now, &start); + assert_u64_lt(nstime_sec(&now), 1000, + "Background threads did not run for 1000 seconds."); + sleep(1); + } + test_switch_background_thread_ctl(false); +#endif +} +TEST_END + +int +main(void) { + /* Background thread creation tests reentrancy naturally. */ + return test_no_reentrancy( + test_background_thread_ctl, + test_background_thread_running); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/background_thread_enable.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/background_thread_enable.c new file mode 100644 index 0000000..ff95e67 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/background_thread_enable.c @@ -0,0 +1,83 @@ +#include "test/jemalloc_test.h" + +const char *malloc_conf = "background_thread:false,narenas:1,max_background_threads:20"; + +TEST_BEGIN(test_deferred) { + test_skip_if(!have_background_thread); + + unsigned id; + size_t sz_u = sizeof(unsigned); + + /* + * 10 here is somewhat arbitrary, except insofar as we want to ensure + * that the number of background threads is smaller than the number of + * arenas. I'll ragequit long before we have to spin up 10 threads per + * cpu to handle background purging, so this is a conservative + * approximation.
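+ * (Illustration: with the max_background_threads:20 setting above + * and, say, ncpus == 4, the loop below creates 40 arenas while at + * most 20 background threads can ever run.)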
+ */ + for (unsigned i = 0; i < 10 * ncpus; i++) { + assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0, + "Failed to create arena"); + } + + bool enable = true; + size_t sz_b = sizeof(bool); + assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, + "Failed to enable background threads"); + enable = false; + assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, + "Failed to disable background threads"); +} +TEST_END + +TEST_BEGIN(test_max_background_threads) { + test_skip_if(!have_background_thread); + + size_t maxt; + size_t opt_maxt; + size_t sz_m = sizeof(maxt); + assert_d_eq(mallctl("opt.max_background_threads", + &opt_maxt, &sz_m, NULL, 0), 0, + "Failed to get opt.max_background_threads"); + assert_d_eq(mallctl("max_background_threads", &maxt, &sz_m, NULL, 0), 0, + "Failed to get max background threads"); + assert_zu_eq(20, maxt, "should be 20, as set in malloc_conf"); + assert_zu_eq(opt_maxt, maxt, + "max_background_threads and " + "opt.max_background_threads should match"); + assert_d_eq(mallctl("max_background_threads", NULL, NULL, &maxt, sz_m), + 0, "Failed to set max background threads"); + + unsigned id; + size_t sz_u = sizeof(unsigned); + + for (unsigned i = 0; i < 10 * ncpus; i++) { + assert_d_eq(mallctl("arenas.create", &id, &sz_u, NULL, 0), 0, + "Failed to create arena"); + } + + bool enable = true; + size_t sz_b = sizeof(bool); + assert_d_eq(mallctl("background_thread", NULL, NULL, &enable, sz_b), 0, + "Failed to enable background threads"); + assert_zu_eq(n_background_threads, maxt, + "Number of background threads should equal max_background_threads.\n"); + maxt = 10; + assert_d_eq(mallctl("max_background_threads", NULL, NULL, &maxt, sz_m), + 0, "Failed to set max background threads"); + assert_zu_eq(n_background_threads, maxt, + "Number of background threads should be 10.\n"); + maxt = 3; + assert_d_eq(mallctl("max_background_threads", NULL, NULL, &maxt, sz_m), + 0, "Failed to set max background threads"); + assert_zu_eq(n_background_threads, maxt, + "Number of background threads should be 3.\n"); +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_deferred, + test_max_background_threads); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/base.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/base.c new file mode 100644 index 0000000..6b792cf --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/base.c @@ -0,0 +1,234 @@ +#include "test/jemalloc_test.h" + +#include "test/extent_hooks.h" + +static extent_hooks_t hooks_null = { + extent_alloc_hook, + NULL, /* dalloc */ + NULL, /* destroy */ + NULL, /* commit */ + NULL, /* decommit */ + NULL, /* purge_lazy */ + NULL, /* purge_forced */ + NULL, /* split */ + NULL /* merge */ +}; + +static extent_hooks_t hooks_not_null = { + extent_alloc_hook, + extent_dalloc_hook, + extent_destroy_hook, + NULL, /* commit */ + extent_decommit_hook, + extent_purge_lazy_hook, + extent_purge_forced_hook, + NULL, /* split */ + NULL /* merge */ +}; + +TEST_BEGIN(test_base_hooks_default) { + base_t *base; + size_t allocated0, allocated1, resident, mapped, n_thp; + + tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); + base = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default); + + if (config_stats) { + base_stats_get(tsdn, base, &allocated0, &resident, &mapped, + &n_thp); + assert_zu_ge(allocated0, sizeof(base_t), + "Base header should count as allocated"); + if (opt_metadata_thp == metadata_thp_always) { + assert_zu_gt(n_thp, 0, + "Base should have 1 THP at
least."); + } + } + + assert_ptr_not_null(base_alloc(tsdn, base, 42, 1), + "Unexpected base_alloc() failure"); + + if (config_stats) { + base_stats_get(tsdn, base, &allocated1, &resident, &mapped, + &n_thp); + assert_zu_ge(allocated1 - allocated0, 42, + "At least 42 bytes were allocated by base_alloc()"); + } + + base_delete(tsdn, base); +} +TEST_END + +TEST_BEGIN(test_base_hooks_null) { + extent_hooks_t hooks_orig; + base_t *base; + size_t allocated0, allocated1, resident, mapped, n_thp; + + extent_hooks_prep(); + try_dalloc = false; + try_destroy = true; + try_decommit = false; + try_purge_lazy = false; + try_purge_forced = false; + memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); + memcpy(&hooks, &hooks_null, sizeof(extent_hooks_t)); + + tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); + base = base_new(tsdn, 0, &hooks); + assert_ptr_not_null(base, "Unexpected base_new() failure"); + + if (config_stats) { + base_stats_get(tsdn, base, &allocated0, &resident, &mapped, + &n_thp); + assert_zu_ge(allocated0, sizeof(base_t), + "Base header should count as allocated"); + if (opt_metadata_thp == metadata_thp_always) { + assert_zu_gt(n_thp, 0, + "Base should have 1 THP at least."); + } + } + + assert_ptr_not_null(base_alloc(tsdn, base, 42, 1), + "Unexpected base_alloc() failure"); + + if (config_stats) { + base_stats_get(tsdn, base, &allocated1, &resident, &mapped, + &n_thp); + assert_zu_ge(allocated1 - allocated0, 42, + "At least 42 bytes were allocated by base_alloc()"); + } + + base_delete(tsdn, base); + + memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); +} +TEST_END + +TEST_BEGIN(test_base_hooks_not_null) { + extent_hooks_t hooks_orig; + base_t *base; + void *p, *q, *r, *r_exp; + + extent_hooks_prep(); + try_dalloc = false; + try_destroy = true; + try_decommit = false; + try_purge_lazy = false; + try_purge_forced = false; + memcpy(&hooks_orig, &hooks, sizeof(extent_hooks_t)); + memcpy(&hooks, &hooks_not_null, sizeof(extent_hooks_t)); + + tsdn_t *tsdn = tsd_tsdn(tsd_fetch()); + did_alloc = false; + base = base_new(tsdn, 0, &hooks); + assert_ptr_not_null(base, "Unexpected base_new() failure"); + assert_true(did_alloc, "Expected alloc"); + + /* + * Check for tight packing at specified alignment under simple + * conditions. + */ + { + const size_t alignments[] = { + 1, + QUANTUM, + QUANTUM << 1, + CACHELINE, + CACHELINE << 1, + }; + unsigned i; + + for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) { + size_t alignment = alignments[i]; + size_t align_ceil = ALIGNMENT_CEILING(alignment, + QUANTUM); + p = base_alloc(tsdn, base, 1, alignment); + assert_ptr_not_null(p, + "Unexpected base_alloc() failure"); + assert_ptr_eq(p, + (void *)(ALIGNMENT_CEILING((uintptr_t)p, + alignment)), "Expected quantum alignment"); + q = base_alloc(tsdn, base, alignment, alignment); + assert_ptr_not_null(q, + "Unexpected base_alloc() failure"); + assert_ptr_eq((void *)((uintptr_t)p + align_ceil), q, + "Minimal allocation should take up %zu bytes", + align_ceil); + r = base_alloc(tsdn, base, 1, alignment); + assert_ptr_not_null(r, + "Unexpected base_alloc() failure"); + assert_ptr_eq((void *)((uintptr_t)q + align_ceil), r, + "Minimal allocation should take up %zu bytes", + align_ceil); + } + } + + /* + * Allocate an object that cannot fit in the first block, then verify + * that the first block's remaining space is considered for subsequent + * allocation. + */ + assert_zu_ge(extent_bsize_get(&base->blocks->extent), QUANTUM, + "Remainder insufficient for test"); + /* Use up all but one quantum of block. 
*/ + while (extent_bsize_get(&base->blocks->extent) > QUANTUM) { + p = base_alloc(tsdn, base, QUANTUM, QUANTUM); + assert_ptr_not_null(p, "Unexpected base_alloc() failure"); + } + r_exp = extent_addr_get(&base->blocks->extent); + assert_zu_eq(base->extent_sn_next, 1, "One extant block expected"); + q = base_alloc(tsdn, base, QUANTUM + 1, QUANTUM); + assert_ptr_not_null(q, "Unexpected base_alloc() failure"); + assert_ptr_ne(q, r_exp, "Expected allocation from new block"); + assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected"); + r = base_alloc(tsdn, base, QUANTUM, QUANTUM); + assert_ptr_not_null(r, "Unexpected base_alloc() failure"); + assert_ptr_eq(r, r_exp, "Expected allocation from first block"); + assert_zu_eq(base->extent_sn_next, 2, "Two extant blocks expected"); + + /* + * Check for proper alignment support when normal blocks are too small. + */ + { + const size_t alignments[] = { + HUGEPAGE, + HUGEPAGE << 1 + }; + unsigned i; + + for (i = 0; i < sizeof(alignments) / sizeof(size_t); i++) { + size_t alignment = alignments[i]; + p = base_alloc(tsdn, base, QUANTUM, alignment); + assert_ptr_not_null(p, + "Unexpected base_alloc() failure"); + assert_ptr_eq(p, + (void *)(ALIGNMENT_CEILING((uintptr_t)p, + alignment)), "Expected %zu-byte alignment", + alignment); + } + } + + called_dalloc = called_destroy = called_decommit = called_purge_lazy = + called_purge_forced = false; + base_delete(tsdn, base); + assert_true(called_dalloc, "Expected dalloc call"); + assert_true(!called_destroy, "Unexpected destroy call"); + assert_true(called_decommit, "Expected decommit call"); + assert_true(called_purge_lazy, "Expected purge_lazy call"); + assert_true(called_purge_forced, "Expected purge_forced call"); + + try_dalloc = true; + try_destroy = true; + try_decommit = true; + try_purge_lazy = true; + try_purge_forced = true; + memcpy(&hooks, &hooks_orig, sizeof(extent_hooks_t)); +} +TEST_END + +int +main(void) { + return test( + test_base_hooks_default, + test_base_hooks_null, + test_base_hooks_not_null); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/bit_util.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/bit_util.c new file mode 100644 index 0000000..42a9701 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/bit_util.c @@ -0,0 +1,57 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/bit_util.h" + +#define TEST_POW2_CEIL(t, suf, pri) do { \ + unsigned i, pow2; \ + t x; \ + \ + assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \ + \ + for (i = 0; i < sizeof(t) * 8; i++) { \ + assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \ + << i, "Unexpected result"); \ + } \ + \ + for (i = 2; i < sizeof(t) * 8; i++) { \ + assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \ + ((t)1) << i, "Unexpected result"); \ + } \ + \ + for (i = 0; i < sizeof(t) * 8 - 1; i++) { \ + assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \ + ((t)1) << (i+1), "Unexpected result"); \ + } \ + \ + for (pow2 = 1; pow2 < 25; pow2++) { \ + for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \ + x++) { \ + assert_##suf##_eq(pow2_ceil_##suf(x), \ + ((t)1) << pow2, \ + "Unexpected result, x=%"pri, x); \ + } \ + } \ +} while (0) + +TEST_BEGIN(test_pow2_ceil_u64) { + TEST_POW2_CEIL(uint64_t, u64, FMTu64); +} +TEST_END + +TEST_BEGIN(test_pow2_ceil_u32) { + TEST_POW2_CEIL(uint32_t, u32, FMTu32); +} +TEST_END + +TEST_BEGIN(test_pow2_ceil_zu) { + TEST_POW2_CEIL(size_t, zu, "zu"); +} +TEST_END + +int 
+main(void) { + return test( + test_pow2_ceil_u64, + test_pow2_ceil_u32, + test_pow2_ceil_zu); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/bitmap.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/bitmap.c new file mode 100644 index 0000000..cafb203 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/bitmap.c @@ -0,0 +1,431 @@ +#include "test/jemalloc_test.h" + +#define NBITS_TAB \ + NB( 1) \ + NB( 2) \ + NB( 3) \ + NB( 4) \ + NB( 5) \ + NB( 6) \ + NB( 7) \ + NB( 8) \ + NB( 9) \ + NB(10) \ + NB(11) \ + NB(12) \ + NB(13) \ + NB(14) \ + NB(15) \ + NB(16) \ + NB(17) \ + NB(18) \ + NB(19) \ + NB(20) \ + NB(21) \ + NB(22) \ + NB(23) \ + NB(24) \ + NB(25) \ + NB(26) \ + NB(27) \ + NB(28) \ + NB(29) \ + NB(30) \ + NB(31) \ + NB(32) \ + \ + NB(33) \ + NB(34) \ + NB(35) \ + NB(36) \ + NB(37) \ + NB(38) \ + NB(39) \ + NB(40) \ + NB(41) \ + NB(42) \ + NB(43) \ + NB(44) \ + NB(45) \ + NB(46) \ + NB(47) \ + NB(48) \ + NB(49) \ + NB(50) \ + NB(51) \ + NB(52) \ + NB(53) \ + NB(54) \ + NB(55) \ + NB(56) \ + NB(57) \ + NB(58) \ + NB(59) \ + NB(60) \ + NB(61) \ + NB(62) \ + NB(63) \ + NB(64) \ + NB(65) \ + \ + NB(126) \ + NB(127) \ + NB(128) \ + NB(129) \ + NB(130) \ + \ + NB(254) \ + NB(255) \ + NB(256) \ + NB(257) \ + NB(258) \ + \ + NB(510) \ + NB(511) \ + NB(512) \ + NB(513) \ + NB(514) \ + \ + NB(1024) \ + NB(2048) \ + NB(4096) \ + NB(8192) \ + NB(16384) \ + +static void +test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits) { + bitmap_info_t binfo_dyn; + bitmap_info_init(&binfo_dyn, nbits); + + assert_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn), + "Unexpected difference between static and dynamic initialization, " + "nbits=%zu", nbits); + assert_zu_eq(binfo->nbits, binfo_dyn.nbits, + "Unexpected difference between static and dynamic initialization, " + "nbits=%zu", nbits); +#ifdef BITMAP_USE_TREE + assert_u_eq(binfo->nlevels, binfo_dyn.nlevels, + "Unexpected difference between static and dynamic initialization, " + "nbits=%zu", nbits); + { + unsigned i; + + for (i = 0; i < binfo->nlevels; i++) { + assert_zu_eq(binfo->levels[i].group_offset, + binfo_dyn.levels[i].group_offset, + "Unexpected difference between static and dynamic " + "initialization, nbits=%zu, level=%u", nbits, i); + } + } +#else + assert_zu_eq(binfo->ngroups, binfo_dyn.ngroups, + "Unexpected difference between static and dynamic initialization"); +#endif +} + +TEST_BEGIN(test_bitmap_initializer) { +#define NB(nbits) { \ + if (nbits <= BITMAP_MAXBITS) { \ + bitmap_info_t binfo = \ + BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_initializer_body(&binfo, nbits); \ + } \ + } + NBITS_TAB +#undef NB +} +TEST_END + +static size_t +test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits, + size_t prev_size) { + size_t size = bitmap_size(binfo); + assert_zu_ge(size, (nbits >> 3), + "Bitmap size is smaller than expected"); + assert_zu_ge(size, prev_size, "Bitmap size is smaller than expected"); + return size; +} + +TEST_BEGIN(test_bitmap_size) { + size_t nbits, prev_size; + + prev_size = 0; + for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, nbits); + prev_size = test_bitmap_size_body(&binfo, nbits, prev_size); + } +#define NB(nbits) { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + prev_size = test_bitmap_size_body(&binfo, nbits, \ + prev_size); \ + } + prev_size = 0; + NBITS_TAB +#undef NB +} +TEST_END + +static void +test_bitmap_init_body(const bitmap_info_t *binfo, 
size_t nbits) { + size_t i; + bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); + assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); + + bitmap_init(bitmap, binfo, false); + for (i = 0; i < nbits; i++) { + assert_false(bitmap_get(bitmap, binfo, i), + "Bit should be unset"); + } + + bitmap_init(bitmap, binfo, true); + for (i = 0; i < nbits; i++) { + assert_true(bitmap_get(bitmap, binfo, i), "Bit should be set"); + } + + free(bitmap); +} + +TEST_BEGIN(test_bitmap_init) { + size_t nbits; + + for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, nbits); + test_bitmap_init_body(&binfo, nbits); + } +#define NB(nbits) { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_init_body(&binfo, nbits); \ + } + NBITS_TAB +#undef NB +} +TEST_END + +static void +test_bitmap_set_body(const bitmap_info_t *binfo, size_t nbits) { + size_t i; + bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); + assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); + bitmap_init(bitmap, binfo, false); + + for (i = 0; i < nbits; i++) { + bitmap_set(bitmap, binfo, i); + } + assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); + free(bitmap); +} + +TEST_BEGIN(test_bitmap_set) { + size_t nbits; + + for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, nbits); + test_bitmap_set_body(&binfo, nbits); + } +#define NB(nbits) { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_set_body(&binfo, nbits); \ + } + NBITS_TAB +#undef NB +} +TEST_END + +static void +test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits) { + size_t i; + bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); + assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); + bitmap_init(bitmap, binfo, false); + + for (i = 0; i < nbits; i++) { + bitmap_set(bitmap, binfo, i); + } + assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); + for (i = 0; i < nbits; i++) { + bitmap_unset(bitmap, binfo, i); + } + for (i = 0; i < nbits; i++) { + bitmap_set(bitmap, binfo, i); + } + assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); + free(bitmap); +} + +TEST_BEGIN(test_bitmap_unset) { + size_t nbits; + + for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, nbits); + test_bitmap_unset_body(&binfo, nbits); + } +#define NB(nbits) { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_unset_body(&binfo, nbits); \ + } + NBITS_TAB +#undef NB +} +TEST_END + +static void +test_bitmap_xfu_body(const bitmap_info_t *binfo, size_t nbits) { + bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo)); + assert_ptr_not_null(bitmap, "Unexpected malloc() failure"); + bitmap_init(bitmap, binfo, false); + + /* Iteratively set bits starting at the beginning. */ + for (size_t i = 0; i < nbits; i++) { + assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, + "First unset bit should be just after previous first unset " + "bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? 
i-1 : i), i, + "First unset bit should be just after previous first unset " + "bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, + "First unset bit should be just after previous first unset " + "bit"); + assert_zu_eq(bitmap_sfu(bitmap, binfo), i, + "First unset bit should be just after previous first unset " + "bit"); + } + assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); + + /* + * Iteratively unset bits starting at the end, and verify that + * bitmap_sfu() reaches the unset bits. + */ + for (size_t i = nbits - 1; i < nbits; i--) { /* (nbits..0] */ + bitmap_unset(bitmap, binfo, i); + assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, + "First unset bit should be the bit previously unset"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i, + "First unset bit should be the bit previously unset"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, + "First unset bit should be the bit previously unset"); + assert_zu_eq(bitmap_sfu(bitmap, binfo), i, + "First unset bit should be the bit previously unset"); + bitmap_unset(bitmap, binfo, i); + } + assert_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset"); + + /* + * Iteratively set bits starting at the beginning, and verify that + * bitmap_sfu() looks past them. + */ + for (size_t i = 1; i < nbits; i++) { + bitmap_set(bitmap, binfo, i - 1); + assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), i, + "First unset bit should be just after the bit previously " + "set"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, (i > 0) ? i-1 : i), i, + "First unset bit should be just after the bit previously " + "set"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, + "First unset bit should be just after the bit previously " + "set"); + assert_zu_eq(bitmap_sfu(bitmap, binfo), i, + "First unset bit should be just after the bit previously " + "set"); + bitmap_unset(bitmap, binfo, i); + } + assert_zu_eq(bitmap_ffu(bitmap, binfo, 0), nbits - 1, + "First unset bit should be the last bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, (nbits > 1) ? nbits-2 : nbits-1), + nbits - 1, "First unset bit should be the last bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits - 1), nbits - 1, + "First unset bit should be the last bit"); + assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits - 1, + "First unset bit should be the last bit"); + assert_true(bitmap_full(bitmap, binfo), "All bits should be set"); + + /* + * Bubble a "usu" pattern through the bitmap and verify that + * bitmap_ffu() finds the correct bit for all five min_bit cases. + */ + if (nbits >= 3) { + for (size_t i = 0; i < nbits-2; i++) { + bitmap_unset(bitmap, binfo, i); + bitmap_unset(bitmap, binfo, i+2); + if (i > 0) { + assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i, + "Unexpected first unset bit"); + } + assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, + "Unexpected first unset bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), i+2, + "Unexpected first unset bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i+2), i+2, + "Unexpected first unset bit"); + if (i + 3 < nbits) { + assert_zu_eq(bitmap_ffu(bitmap, binfo, i+3), + nbits, "Unexpected first unset bit"); + } + assert_zu_eq(bitmap_sfu(bitmap, binfo), i, + "Unexpected first unset bit"); + assert_zu_eq(bitmap_sfu(bitmap, binfo), i+2, + "Unexpected first unset bit"); + } + } + + /* + * Unset the last bit, bubble another unset bit through the bitmap, and + * verify that bitmap_ffu() finds the correct bit for all four min_bit + * cases.
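+ * (E.g. with nbits == 8 and i == 2, bits 2 and 7 are unset, so + * ffu(1) == 2, ffu(2) == 2, ffu(3) == 7, and ffu(7) == 7.)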
+ */ + if (nbits >= 3) { + bitmap_unset(bitmap, binfo, nbits-1); + for (size_t i = 0; i < nbits-1; i++) { + bitmap_unset(bitmap, binfo, i); + if (i > 0) { + assert_zu_eq(bitmap_ffu(bitmap, binfo, i-1), i, + "Unexpected first unset bit"); + } + assert_zu_eq(bitmap_ffu(bitmap, binfo, i), i, + "Unexpected first unset bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, i+1), nbits-1, + "Unexpected first unset bit"); + assert_zu_eq(bitmap_ffu(bitmap, binfo, nbits-1), + nbits-1, "Unexpected first unset bit"); + + assert_zu_eq(bitmap_sfu(bitmap, binfo), i, + "Unexpected first unset bit"); + } + assert_zu_eq(bitmap_sfu(bitmap, binfo), nbits-1, + "Unexpected first unset bit"); + } + + free(bitmap); +} + +TEST_BEGIN(test_bitmap_xfu) { + size_t nbits; + + for (nbits = 1; nbits <= BITMAP_MAXBITS; nbits++) { + bitmap_info_t binfo; + bitmap_info_init(&binfo, nbits); + test_bitmap_xfu_body(&binfo, nbits); + } +#define NB(nbits) { \ + bitmap_info_t binfo = BITMAP_INFO_INITIALIZER(nbits); \ + test_bitmap_xfu_body(&binfo, nbits); \ + } + NBITS_TAB +#undef NB +} +TEST_END + +int +main(void) { + return test( + test_bitmap_initializer, + test_bitmap_size, + test_bitmap_init, + test_bitmap_set, + test_bitmap_unset, + test_bitmap_xfu); +} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/ckh.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/ckh.c similarity index 91% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/ckh.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/ckh.c index b117595..707ea5f 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/ckh.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/ckh.c @@ -1,14 +1,13 @@ #include "test/jemalloc_test.h" -TEST_BEGIN(test_new_delete) -{ +TEST_BEGIN(test_new_delete) { tsd_t *tsd; ckh_t ckh; tsd = tsd_fetch(); - assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp), - "Unexpected ckh_new() error"); + assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, + ckh_string_keycomp), "Unexpected ckh_new() error"); ckh_delete(tsd, &ckh); assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash, @@ -17,8 +16,7 @@ TEST_BEGIN(test_new_delete) } TEST_END -TEST_BEGIN(test_count_insert_search_remove) -{ +TEST_BEGIN(test_count_insert_search_remove) { tsd_t *tsd; ckh_t ckh; const char *strs[] = { @@ -32,8 +30,8 @@ TEST_BEGIN(test_count_insert_search_remove) tsd = tsd_fetch(); - assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp), - "Unexpected ckh_new() error"); + assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, + ckh_string_keycomp), "Unexpected ckh_new() error"); assert_zu_eq(ckh_count(&ckh), 0, "ckh_count() should return %zu, but it returned %zu", ZU(0), ckh_count(&ckh)); @@ -105,9 +103,8 @@ TEST_BEGIN(test_count_insert_search_remove) } TEST_END -TEST_BEGIN(test_insert_iter_remove) -{ -#define NITEMS ZU(1000) +TEST_BEGIN(test_insert_iter_remove) { +#define NITEMS ZU(1000) tsd_t *tsd; ckh_t ckh; void **p[NITEMS]; @@ -174,10 +171,12 @@ TEST_BEGIN(test_insert_iter_remove) } } - for (j = 0; j < i + 1; j++) + for (j = 0; j < i + 1; j++) { assert_true(seen[j], "Item %zu not seen", j); - for (; j < NITEMS; j++) + } + for (; j < NITEMS; j++) { assert_false(seen[j], "Item %zu seen", j); + } } } @@ -204,11 +203,9 @@ TEST_BEGIN(test_insert_iter_remove) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_new_delete, test_count_insert_search_remove, - test_insert_iter_remove)); + 
test_insert_iter_remove); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/decay.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/decay.c new file mode 100644 index 0000000..f727bf9 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/decay.c @@ -0,0 +1,599 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/ticker.h" + +static nstime_monotonic_t *nstime_monotonic_orig; +static nstime_update_t *nstime_update_orig; + +static unsigned nupdates_mock; +static nstime_t time_mock; +static bool monotonic_mock; + +static bool +check_background_thread_enabled(void) { + bool enabled; + size_t sz = sizeof(bool); + int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL,0); + if (ret == ENOENT) { + return false; + } + assert_d_eq(ret, 0, "Unexpected mallctl error"); + return enabled; +} + +static bool +nstime_monotonic_mock(void) { + return monotonic_mock; +} + +static bool +nstime_update_mock(nstime_t *time) { + nupdates_mock++; + if (monotonic_mock) { + nstime_copy(time, &time_mock); + } + return !monotonic_mock; +} + +static unsigned +do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) { + unsigned arena_ind; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + + assert_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen), + 0, "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, + (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0, + "Unexpected mallctlbymib() failure"); + + assert_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen), + 0, "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, + (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0, + "Unexpected mallctlbymib() failure"); + + return arena_ind; +} + +static void +do_arena_destroy(unsigned arena_ind) { + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +void +do_epoch(void) { + uint64_t epoch = 1; + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); +} + +void +do_purge(unsigned arena_ind) { + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +void +do_decay(unsigned arena_ind) { + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +static uint64_t +get_arena_npurge_impl(const char *mibname, unsigned arena_ind) { + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib(mibname, mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[2] 
= (size_t)arena_ind; + uint64_t npurge = 0; + size_t sz = sizeof(npurge); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0), + config_stats ? 0 : ENOENT, "Unexpected mallctlbymib() failure"); + return npurge; +} + +static uint64_t +get_arena_dirty_npurge(unsigned arena_ind) { + do_epoch(); + return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind); +} + +static uint64_t +get_arena_muzzy_npurge(unsigned arena_ind) { + do_epoch(); + return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind); +} + +static uint64_t +get_arena_npurge(unsigned arena_ind) { + do_epoch(); + return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) + + get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind); +} + +static size_t +get_arena_pdirty(unsigned arena_ind) { + do_epoch(); + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[2] = (size_t)arena_ind; + size_t pdirty; + size_t sz = sizeof(pdirty); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); + return pdirty; +} + +static size_t +get_arena_pmuzzy(unsigned arena_ind) { + do_epoch(); + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[2] = (size_t)arena_ind; + size_t pmuzzy; + size_t sz = sizeof(pmuzzy); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); + return pmuzzy; +} + +static void * +do_mallocx(size_t size, int flags) { + void *p = mallocx(size, flags); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + return p; +} + +static void +generate_dirty(unsigned arena_ind, size_t size) { + int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + void *p = do_mallocx(size, flags); + dallocx(p, flags); +} + +TEST_BEGIN(test_decay_ticks) { + test_skip_if(check_background_thread_enabled()); + + ticker_t *decay_ticker; + unsigned tick0, tick1, arena_ind; + size_t sz, large0; + void *p; + + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); + + /* Set up a manually managed arena for test. */ + arena_ind = do_arena_create(0, 0); + + /* Migrate to the new arena, and get the ticker. */ + unsigned old_arena_ind; + size_t sz_arena_ind = sizeof(old_arena_ind); + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, + &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0, + "Unexpected mallctl() failure"); + decay_ticker = decay_ticker_get(tsd_fetch(), arena_ind); + assert_ptr_not_null(decay_ticker, + "Unexpected failure getting decay ticker"); + + /* + * Test the standard APIs using a large size class, since we can't + * control tcache interactions for small size classes (except by + * completely disabling tcache for the entire test program). + */ + + /* malloc(). */ + tick0 = ticker_read(decay_ticker); + p = malloc(large0); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()"); + /* free(). */ + tick0 = ticker_read(decay_ticker); + free(p); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()"); + + /* calloc(). 
*/ + tick0 = ticker_read(decay_ticker); + p = calloc(1, large0); + assert_ptr_not_null(p, "Unexpected calloc() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()"); + free(p); + + /* posix_memalign(). */ + tick0 = ticker_read(decay_ticker); + assert_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0, + "Unexpected posix_memalign() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during posix_memalign()"); + free(p); + + /* aligned_alloc(). */ + tick0 = ticker_read(decay_ticker); + p = aligned_alloc(sizeof(size_t), large0); + assert_ptr_not_null(p, "Unexpected aligned_alloc() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during aligned_alloc()"); + free(p); + + /* realloc(). */ + /* Allocate. */ + tick0 = ticker_read(decay_ticker); + p = realloc(NULL, large0); + assert_ptr_not_null(p, "Unexpected realloc() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); + /* Reallocate. */ + tick0 = ticker_read(decay_ticker); + p = realloc(p, large0); + assert_ptr_not_null(p, "Unexpected realloc() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); + /* Deallocate. */ + tick0 = ticker_read(decay_ticker); + realloc(p, 0); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); + + /* + * Test the *allocx() APIs using large and small size classes, with + * tcache explicitly disabled. + */ + { + unsigned i; + size_t allocx_sizes[2]; + allocx_sizes[0] = large0; + allocx_sizes[1] = 1; + + for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) { + sz = allocx_sizes[i]; + + /* mallocx(). */ + tick0 = ticker_read(decay_ticker); + p = mallocx(sz, MALLOCX_TCACHE_NONE); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during mallocx() (sz=%zu)", + sz); + /* rallocx(). */ + tick0 = ticker_read(decay_ticker); + p = rallocx(p, sz, MALLOCX_TCACHE_NONE); + assert_ptr_not_null(p, "Unexpected rallocx() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during rallocx() (sz=%zu)", + sz); + /* xallocx(). */ + tick0 = ticker_read(decay_ticker); + xallocx(p, sz, 0, MALLOCX_TCACHE_NONE); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during xallocx() (sz=%zu)", + sz); + /* dallocx(). */ + tick0 = ticker_read(decay_ticker); + dallocx(p, MALLOCX_TCACHE_NONE); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during dallocx() (sz=%zu)", + sz); + /* sdallocx(). */ + p = mallocx(sz, MALLOCX_TCACHE_NONE); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + tick0 = ticker_read(decay_ticker); + sdallocx(p, sz, MALLOCX_TCACHE_NONE); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during sdallocx() " + "(sz=%zu)", sz); + } + } + + /* + * Test tcache fill/flush interactions for large and small size classes, + * using an explicit tcache. 
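+ * A fill happens when mallocx() misses in the tcache and pulls a batch of
+ * regions from the arena; a flush hands cached regions back to it. Both
+ * paths touch the arena, so both are expected to advance its decay ticker,
+ * while flushing a region that was never cached (sz > tcache_max) is a
+ * no-op, which the if/else assertions below distinguish. In sketch, the
+ * explicit-tcache API exercised here is:
+ * unsigned ind; size_t len = sizeof(ind);
+ * mallctl("tcache.create", (void *)&ind, &len, NULL, 0);
+ * p = mallocx(sz, MALLOCX_TCACHE(ind)); dallocx(p, MALLOCX_TCACHE(ind));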
+ */ + unsigned tcache_ind, i; + size_t tcache_sizes[2]; + tcache_sizes[0] = large0; + tcache_sizes[1] = 1; + + size_t tcache_max, sz_tcache_max; + sz_tcache_max = sizeof(tcache_max); + assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, + &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure"); + + sz = sizeof(unsigned); + assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz, + NULL, 0), 0, "Unexpected mallctl failure"); + + for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) { + sz = tcache_sizes[i]; + + /* tcache fill. */ + tick0 = ticker_read(decay_ticker); + p = mallocx(sz, MALLOCX_TCACHE(tcache_ind)); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + tick1 = ticker_read(decay_ticker); + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during tcache fill " + "(sz=%zu)", sz); + /* tcache flush. */ + dallocx(p, MALLOCX_TCACHE(tcache_ind)); + tick0 = ticker_read(decay_ticker); + assert_d_eq(mallctl("tcache.flush", NULL, NULL, + (void *)&tcache_ind, sizeof(unsigned)), 0, + "Unexpected mallctl failure"); + tick1 = ticker_read(decay_ticker); + + /* Will only tick if it's in tcache. */ + if (sz <= tcache_max) { + assert_u32_ne(tick1, tick0, + "Expected ticker to tick during tcache " + "flush (sz=%zu)", sz); + } else { + assert_u32_eq(tick1, tick0, + "Unexpected ticker tick during tcache " + "flush (sz=%zu)", sz); + } + } +} +TEST_END + +static void +decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt, + uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) { +#define NINTERVALS 101 + nstime_t time, update_interval, decay_ms, deadline; + + nstime_init(&time, 0); + nstime_update(&time); + + nstime_init2(&decay_ms, dt, 0); + nstime_copy(&deadline, &time); + nstime_add(&deadline, &decay_ms); + + nstime_init2(&update_interval, dt, 0); + nstime_idivide(&update_interval, NINTERVALS); + + /* + * Keep q's slab from being deallocated during the looping below. If a + * cached slab were to repeatedly come and go during looping, it could + * prevent the decay backlog ever becoming empty. + */ + void *p = do_mallocx(1, flags); + uint64_t dirty_npurge1, muzzy_npurge1; + do { + for (unsigned i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; + i++) { + void *q = do_mallocx(1, flags); + dallocx(q, flags); + } + dirty_npurge1 = get_arena_dirty_npurge(arena_ind); + muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind); + + nstime_add(&time_mock, &update_interval); + nstime_update(&time); + } while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 == + dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) || + !terminate_asap)); + dallocx(p, flags); + + if (config_stats) { + assert_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 + + muzzy_npurge0, "Expected purging to occur"); + } +#undef NINTERVALS +} + +TEST_BEGIN(test_decay_ticker) { + test_skip_if(check_background_thread_enabled()); +#define NPS 2048 + ssize_t ddt = opt_dirty_decay_ms; + ssize_t mdt = opt_muzzy_decay_ms; + unsigned arena_ind = do_arena_create(ddt, mdt); + int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); + void *ps[NPS]; + size_t large; + + /* + * Allocate a bunch of large objects, pause the clock, deallocate every + * other object (to fragment virtual memory), restore the clock, then + * [md]allocx() in a tight loop while advancing time rapidly to verify + * the ticker triggers purging. 
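+ * Time is virtualized so the test is deterministic: nstime_monotonic and
+ * nstime_update are swapped for the mocks defined at the top of this file,
+ * after which "advancing the clock" is simply
+ * nstime_add(&time_mock, &update_interval);
+ * and the decay machinery observes the simulated time rather than the real
+ * clock.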
+ */ + + size_t tcache_max; + size_t sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); + large = nallocx(tcache_max + 1, flags); + + do_purge(arena_ind); + uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind); + uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind); + + for (unsigned i = 0; i < NPS; i++) { + ps[i] = do_mallocx(large, flags); + } + + nupdates_mock = 0; + nstime_init(&time_mock, 0); + nstime_update(&time_mock); + monotonic_mock = true; + + nstime_monotonic_orig = nstime_monotonic; + nstime_update_orig = nstime_update; + nstime_monotonic = nstime_monotonic_mock; + nstime_update = nstime_update_mock; + + for (unsigned i = 0; i < NPS; i += 2) { + dallocx(ps[i], flags); + unsigned nupdates0 = nupdates_mock; + do_decay(arena_ind); + assert_u_gt(nupdates_mock, nupdates0, + "Expected nstime_update() to be called"); + } + + decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0, + muzzy_npurge0, true); + decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0, + muzzy_npurge0, false); + + do_arena_destroy(arena_ind); + + nstime_monotonic = nstime_monotonic_orig; + nstime_update = nstime_update_orig; +#undef NPS +} +TEST_END + +TEST_BEGIN(test_decay_nonmonotonic) { + test_skip_if(check_background_thread_enabled()); +#define NPS (SMOOTHSTEP_NSTEPS + 1) + int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE); + void *ps[NPS]; + uint64_t npurge0 = 0; + uint64_t npurge1 = 0; + size_t sz, large0; + unsigned i, nupdates0; + + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl failure"); + do_epoch(); + sz = sizeof(uint64_t); + npurge0 = get_arena_npurge(0); + + nupdates_mock = 0; + nstime_init(&time_mock, 0); + nstime_update(&time_mock); + monotonic_mock = false; + + nstime_monotonic_orig = nstime_monotonic; + nstime_update_orig = nstime_update; + nstime_monotonic = nstime_monotonic_mock; + nstime_update = nstime_update_mock; + + for (i = 0; i < NPS; i++) { + ps[i] = mallocx(large0, flags); + assert_ptr_not_null(ps[i], "Unexpected mallocx() failure"); + } + + for (i = 0; i < NPS; i++) { + dallocx(ps[i], flags); + nupdates0 = nupdates_mock; + assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, + "Unexpected arena.0.decay failure"); + assert_u_gt(nupdates_mock, nupdates0, + "Expected nstime_update() to be called"); + } + + do_epoch(); + sz = sizeof(uint64_t); + npurge1 = get_arena_npurge(0); + + if (config_stats) { + assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred"); + } + + nstime_monotonic = nstime_monotonic_orig; + nstime_update = nstime_update_orig; +#undef NPS +} +TEST_END + +TEST_BEGIN(test_decay_now) { + test_skip_if(check_background_thread_enabled()); + + unsigned arena_ind = do_arena_create(0, 0); + assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages"); + assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages"); + size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2}; + /* Verify that dirty/muzzy pages never linger after deallocation. 
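+ * A decay time of zero means "purge immediately": the arena above was
+ * created with dirty_decay_ms = muzzy_decay_ms = 0, so pages should be
+ * returned to the OS as part of each deallocation. (The process-wide
+ * equivalent would be MALLOC_CONF="dirty_decay_ms:0,muzzy_decay_ms:0".)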
*/ + for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { + size_t size = sizes[i]; + generate_dirty(arena_ind, size); + assert_zu_eq(get_arena_pdirty(arena_ind), 0, + "Unexpected dirty pages"); + assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, + "Unexpected muzzy pages"); + } + do_arena_destroy(arena_ind); +} +TEST_END + +TEST_BEGIN(test_decay_never) { + test_skip_if(check_background_thread_enabled()); + + unsigned arena_ind = do_arena_create(-1, -1); + int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; + assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages"); + assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages"); + size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2}; + void *ptrs[sizeof(sizes)/sizeof(size_t)]; + for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { + ptrs[i] = do_mallocx(sizes[i], flags); + } + /* Verify that each deallocation generates additional dirty pages. */ + size_t pdirty_prev = get_arena_pdirty(arena_ind); + size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind); + assert_zu_eq(pdirty_prev, 0, "Unexpected dirty pages"); + assert_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages"); + for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) { + dallocx(ptrs[i], flags); + size_t pdirty = get_arena_pdirty(arena_ind); + size_t pmuzzy = get_arena_pmuzzy(arena_ind); + assert_zu_gt(pdirty, pdirty_prev, + "Expected dirty pages to increase."); + assert_zu_eq(pmuzzy, 0, "Unexpected muzzy pages"); + pdirty_prev = pdirty; + } + do_arena_destroy(arena_ind); +} +TEST_END + +int +main(void) { + return test( + test_decay_ticks, + test_decay_ticker, + test_decay_nonmonotonic, + test_decay_now, + test_decay_never); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/decay.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/decay.sh new file mode 100644 index 0000000..45aeccf --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/decay.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +export MALLOC_CONF="dirty_decay_ms:1000,muzzy_decay_ms:1000,lg_tcache_max:0" diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/div.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/div.c new file mode 100644 index 0000000..b47f10b --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/div.c @@ -0,0 +1,29 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/div.h" + +TEST_BEGIN(test_div_exhaustive) { + for (size_t divisor = 2; divisor < 1000 * 1000; ++divisor) { + div_info_t div_info; + div_init(&div_info, divisor); + size_t max = 1000 * divisor; + if (max < 1000 * 1000) { + max = 1000 * 1000; + } + for (size_t dividend = 0; dividend < max; + dividend += divisor) { + size_t quotient = div_compute( + &div_info, dividend); + assert_zu_eq(dividend, quotient * divisor, + "With divisor = %zu, dividend = %zu, " + "got quotient %zu", divisor, dividend, quotient); + } + } +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_div_exhaustive); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/emitter.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/emitter.c new file mode 100644 index 0000000..535c7cf --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/emitter.c @@ -0,0 +1,413 @@ +#include "test/jemalloc_test.h" +#include "jemalloc/internal/emitter.h" + +/* + * This is so useful for debugging and feature work, we'll leave printing
+ * functionality committed but disabled by default. + */ +/* Print the text as it will appear. */ +static bool print_raw = false; +/* Print the text escaped, so it can be copied back into the test case. */ +static bool print_escaped = false; + +typedef struct buf_descriptor_s buf_descriptor_t; +struct buf_descriptor_s { + char *buf; + size_t len; + bool mid_quote; +}; + +/* + * Forwards all writes to the passed-in buf_v (which should be cast from a + * buf_descriptor_t *). + */ +static void +forwarding_cb(void *buf_descriptor_v, const char *str) { + buf_descriptor_t *buf_descriptor = (buf_descriptor_t *)buf_descriptor_v; + + if (print_raw) { + malloc_printf("%s", str); + } + if (print_escaped) { + const char *it = str; + while (*it != '\0') { + if (!buf_descriptor->mid_quote) { + malloc_printf("\""); + buf_descriptor->mid_quote = true; + } + switch (*it) { + case '\\': + malloc_printf("\\"); + break; + case '\"': + malloc_printf("\\\""); + break; + case '\t': + malloc_printf("\\t"); + break; + case '\n': + malloc_printf("\\n\"\n"); + buf_descriptor->mid_quote = false; + break; + default: + malloc_printf("%c", *it); + } + it++; + } + } + + size_t written = malloc_snprintf(buf_descriptor->buf, + buf_descriptor->len, "%s", str); + assert_zu_eq(written, strlen(str), "Buffer overflow!"); + buf_descriptor->buf += written; + buf_descriptor->len -= written; + assert_zu_gt(buf_descriptor->len, 0, "Buffer out of space!"); +} + +static void +assert_emit_output(void (*emit_fn)(emitter_t *), + const char *expected_json_output, const char *expected_table_output) { + emitter_t emitter; + char buf[MALLOC_PRINTF_BUFSIZE]; + buf_descriptor_t buf_descriptor; + + buf_descriptor.buf = buf; + buf_descriptor.len = MALLOC_PRINTF_BUFSIZE; + buf_descriptor.mid_quote = false; + + emitter_init(&emitter, emitter_output_json, &forwarding_cb, + &buf_descriptor); + (*emit_fn)(&emitter); + assert_str_eq(expected_json_output, buf, "json output failure"); + + buf_descriptor.buf = buf; + buf_descriptor.len = MALLOC_PRINTF_BUFSIZE; + buf_descriptor.mid_quote = false; + + emitter_init(&emitter, emitter_output_table, &forwarding_cb, + &buf_descriptor); + (*emit_fn)(&emitter); + assert_str_eq(expected_table_output, buf, "table output failure"); +} + +static void +emit_dict(emitter_t *emitter) { + bool b_false = false; + bool b_true = true; + int i_123 = 123; + const char *str = "a string"; + + emitter_begin(emitter); + emitter_dict_begin(emitter, "foo", "This is the foo table:"); + emitter_kv(emitter, "abc", "ABC", emitter_type_bool, &b_false); + emitter_kv(emitter, "def", "DEF", emitter_type_bool, &b_true); + emitter_kv_note(emitter, "ghi", "GHI", emitter_type_int, &i_123, + "note_key1", emitter_type_string, &str); + emitter_kv_note(emitter, "jkl", "JKL", emitter_type_string, &str, + "note_key2", emitter_type_bool, &b_false); + emitter_dict_end(emitter); + emitter_end(emitter); +} +static const char *dict_json = +"{\n" +"\t\"foo\": {\n" +"\t\t\"abc\": false,\n" +"\t\t\"def\": true,\n" +"\t\t\"ghi\": 123,\n" +"\t\t\"jkl\": \"a string\"\n" +"\t}\n" +"}\n"; +static const char *dict_table = +"This is the foo table:\n" +" ABC: false\n" +" DEF: true\n" +" GHI: 123 (note_key1: \"a string\")\n" +" JKL: \"a string\" (note_key2: false)\n"; + +TEST_BEGIN(test_dict) { + assert_emit_output(&emit_dict, dict_json, dict_table); +} +TEST_END + +static void +emit_table_printf(emitter_t *emitter) { + emitter_begin(emitter); + emitter_table_printf(emitter, "Table note 1\n"); + emitter_table_printf(emitter, "Table note 2 %s\n", + "with format 
string"); + emitter_end(emitter); +} + +static const char *table_printf_json = +"{\n" +"}\n"; + +static const char *table_printf_table = +"Table note 1\n" +"Table note 2 with format string\n"; + +TEST_BEGIN(test_table_printf) { + assert_emit_output(&emit_table_printf, table_printf_json, + table_printf_table); +} +TEST_END + +static void emit_nested_dict(emitter_t *emitter) { + int val = 123; + emitter_begin(emitter); + emitter_dict_begin(emitter, "json1", "Dict 1"); + emitter_dict_begin(emitter, "json2", "Dict 2"); + emitter_kv(emitter, "primitive", "A primitive", emitter_type_int, &val); + emitter_dict_end(emitter); /* Close 2 */ + emitter_dict_begin(emitter, "json3", "Dict 3"); + emitter_dict_end(emitter); /* Close 3 */ + emitter_dict_end(emitter); /* Close 1 */ + emitter_dict_begin(emitter, "json4", "Dict 4"); + emitter_kv(emitter, "primitive", "Another primitive", + emitter_type_int, &val); + emitter_dict_end(emitter); /* Close 4 */ + emitter_end(emitter); +} + +static const char *nested_dict_json = +"{\n" +"\t\"json1\": {\n" +"\t\t\"json2\": {\n" +"\t\t\t\"primitive\": 123\n" +"\t\t},\n" +"\t\t\"json3\": {\n" +"\t\t}\n" +"\t},\n" +"\t\"json4\": {\n" +"\t\t\"primitive\": 123\n" +"\t}\n" +"}\n"; + +static const char *nested_dict_table = +"Dict 1\n" +" Dict 2\n" +" A primitive: 123\n" +" Dict 3\n" +"Dict 4\n" +" Another primitive: 123\n"; + +TEST_BEGIN(test_nested_dict) { + assert_emit_output(&emit_nested_dict, nested_dict_json, + nested_dict_table); +} +TEST_END + +static void +emit_types(emitter_t *emitter) { + bool b = false; + int i = -123; + unsigned u = 123; + ssize_t zd = -456; + size_t zu = 456; + const char *str = "string"; + uint32_t u32 = 789; + uint64_t u64 = 10000000000ULL; + + emitter_begin(emitter); + emitter_kv(emitter, "k1", "K1", emitter_type_bool, &b); + emitter_kv(emitter, "k2", "K2", emitter_type_int, &i); + emitter_kv(emitter, "k3", "K3", emitter_type_unsigned, &u); + emitter_kv(emitter, "k4", "K4", emitter_type_ssize, &zd); + emitter_kv(emitter, "k5", "K5", emitter_type_size, &zu); + emitter_kv(emitter, "k6", "K6", emitter_type_string, &str); + emitter_kv(emitter, "k7", "K7", emitter_type_uint32, &u32); + emitter_kv(emitter, "k8", "K8", emitter_type_uint64, &u64); + /* + * We don't test the title type, since it's only used for tables. It's + * tested in the emitter_table_row tests. 
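+ * (The same emit function drives both output modes: emitter_init() selects
+ * emitter_output_json or emitter_output_table, and each emitter_kv() call
+ * is rendered per-mode, as the pair of expected strings below shows.)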
+ */ + emitter_end(emitter); +} + +static const char *types_json = +"{\n" +"\t\"k1\": false,\n" +"\t\"k2\": -123,\n" +"\t\"k3\": 123,\n" +"\t\"k4\": -456,\n" +"\t\"k5\": 456,\n" +"\t\"k6\": \"string\",\n" +"\t\"k7\": 789,\n" +"\t\"k8\": 10000000000\n" +"}\n"; + +static const char *types_table = +"K1: false\n" +"K2: -123\n" +"K3: 123\n" +"K4: -456\n" +"K5: 456\n" +"K6: \"string\"\n" +"K7: 789\n" +"K8: 10000000000\n"; + +TEST_BEGIN(test_types) { + assert_emit_output(&emit_types, types_json, types_table); +} +TEST_END + +static void +emit_modal(emitter_t *emitter) { + int val = 123; + emitter_begin(emitter); + emitter_dict_begin(emitter, "j0", "T0"); + emitter_json_dict_begin(emitter, "j1"); + emitter_kv(emitter, "i1", "I1", emitter_type_int, &val); + emitter_json_kv(emitter, "i2", emitter_type_int, &val); + emitter_table_kv(emitter, "I3", emitter_type_int, &val); + emitter_table_dict_begin(emitter, "T1"); + emitter_kv(emitter, "i4", "I4", emitter_type_int, &val); + emitter_json_dict_end(emitter); /* Close j1 */ + emitter_kv(emitter, "i5", "I5", emitter_type_int, &val); + emitter_table_dict_end(emitter); /* Close T1 */ + emitter_kv(emitter, "i6", "I6", emitter_type_int, &val); + emitter_dict_end(emitter); /* Close j0 / T0 */ + emitter_end(emitter); +} + +const char *modal_json = +"{\n" +"\t\"j0\": {\n" +"\t\t\"j1\": {\n" +"\t\t\t\"i1\": 123,\n" +"\t\t\t\"i2\": 123,\n" +"\t\t\t\"i4\": 123\n" +"\t\t},\n" +"\t\t\"i5\": 123,\n" +"\t\t\"i6\": 123\n" +"\t}\n" +"}\n"; + +const char *modal_table = +"T0\n" +" I1: 123\n" +" I3: 123\n" +" T1\n" +" I4: 123\n" +" I5: 123\n" +" I6: 123\n"; + +TEST_BEGIN(test_modal) { + assert_emit_output(&emit_modal, modal_json, modal_table); +} +TEST_END + +static void +emit_json_arr(emitter_t *emitter) { + int ival = 123; + + emitter_begin(emitter); + emitter_json_dict_begin(emitter, "dict"); + emitter_json_arr_begin(emitter, "arr"); + emitter_json_arr_obj_begin(emitter); + emitter_json_kv(emitter, "foo", emitter_type_int, &ival); + emitter_json_arr_obj_end(emitter); /* Close arr[0] */ + /* arr[1] and arr[2] are primitives. */ + emitter_json_arr_value(emitter, emitter_type_int, &ival); + emitter_json_arr_value(emitter, emitter_type_int, &ival); + emitter_json_arr_obj_begin(emitter); + emitter_json_kv(emitter, "bar", emitter_type_int, &ival); + emitter_json_kv(emitter, "baz", emitter_type_int, &ival); + emitter_json_arr_obj_end(emitter); /* Close arr[3]. */ + emitter_json_arr_end(emitter); /* Close arr. */ + emitter_json_dict_end(emitter); /* Close dict. 
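+ * Arrays are a JSON-only construct in this emitter: in table mode the
+ * emitter_json_* calls above produce no output, which is why the expected
+ * table string below is empty.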
*/ + emitter_end(emitter); +} + +static const char *json_arr_json = +"{\n" +"\t\"dict\": {\n" +"\t\t\"arr\": [\n" +"\t\t\t{\n" +"\t\t\t\t\"foo\": 123\n" +"\t\t\t},\n" +"\t\t\t123,\n" +"\t\t\t123,\n" +"\t\t\t{\n" +"\t\t\t\t\"bar\": 123,\n" +"\t\t\t\t\"baz\": 123\n" +"\t\t\t}\n" +"\t\t]\n" +"\t}\n" +"}\n"; + +static const char *json_arr_table = ""; + +TEST_BEGIN(test_json_arr) { + assert_emit_output(&emit_json_arr, json_arr_json, json_arr_table); +} +TEST_END + +static void +emit_table_row(emitter_t *emitter) { + emitter_begin(emitter); + emitter_row_t row; + emitter_col_t abc = {emitter_justify_left, 10, emitter_type_title}; + abc.str_val = "ABC title"; + emitter_col_t def = {emitter_justify_right, 15, emitter_type_title}; + def.str_val = "DEF title"; + emitter_col_t ghi = {emitter_justify_right, 5, emitter_type_title}; + ghi.str_val = "GHI"; + + emitter_row_init(&row); + emitter_col_init(&abc, &row); + emitter_col_init(&def, &row); + emitter_col_init(&ghi, &row); + + emitter_table_row(emitter, &row); + + abc.type = emitter_type_int; + def.type = emitter_type_bool; + ghi.type = emitter_type_int; + + abc.int_val = 123; + def.bool_val = true; + ghi.int_val = 456; + emitter_table_row(emitter, &row); + + abc.int_val = 789; + def.bool_val = false; + ghi.int_val = 1011; + emitter_table_row(emitter, &row); + + abc.type = emitter_type_string; + abc.str_val = "a string"; + def.bool_val = false; + ghi.type = emitter_type_title; + ghi.str_val = "ghi"; + emitter_table_row(emitter, &row); + + emitter_end(emitter); +} + +static const char *table_row_json = +"{\n" +"}\n"; + +static const char *table_row_table = +"ABC title DEF title GHI\n" +"123 true 456\n" +"789 false 1011\n" +"\"a string\" false ghi\n"; + +TEST_BEGIN(test_table_row) { + assert_emit_output(&emit_table_row, table_row_json, table_row_table); +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_dict, + test_table_printf, + test_nested_dict, + test_types, + test_modal, + test_json_arr, + test_table_row); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/extent_quantize.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/extent_quantize.c new file mode 100644 index 0000000..0ca7a75 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/extent_quantize.c @@ -0,0 +1,141 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_small_extent_size) { + unsigned nbins, i; + size_t sz, extent_size; + size_t mib[4]; + size_t miblen = sizeof(mib) / sizeof(size_t); + + /* + * Iterate over all small size classes, get their extent sizes, and + * verify that the quantized size is the same as the extent size. 
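+ * Quantization maps an arbitrary size to the nearest size that can back an
+ * extent, rounding down (floor) or up (ceil), so for any sz:
+ * extent_size_quantize_floor(sz) <= sz <= extent_size_quantize_ceil(sz)
+ * with equality on both sides whenever sz is itself a usable extent size,
+ * as it is for every size class visited here and in the large-class test
+ * below.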
+ */ + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); + + assert_d_eq(mallctlnametomib("arenas.bin.0.slab_size", mib, &miblen), 0, + "Unexpected mallctlnametomib failure"); + for (i = 0; i < nbins; i++) { + mib[2] = i; + sz = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size, &sz, + NULL, 0), 0, "Unexpected mallctlbymib failure"); + assert_zu_eq(extent_size, + extent_size_quantize_floor(extent_size), + "Small extent quantization should be a no-op " + "(extent_size=%zu)", extent_size); + assert_zu_eq(extent_size, + extent_size_quantize_ceil(extent_size), + "Small extent quantization should be a no-op " + "(extent_size=%zu)", extent_size); + } +} +TEST_END + +TEST_BEGIN(test_large_extent_size) { + bool cache_oblivious; + unsigned nlextents, i; + size_t sz, extent_size_prev, ceil_prev; + size_t mib[4]; + size_t miblen = sizeof(mib) / sizeof(size_t); + + /* + * Iterate over all large size classes, get their extent sizes, and + * verify that the quantized size is the same as the extent size. + */ + + sz = sizeof(bool); + assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious, + &sz, NULL, 0), 0, "Unexpected mallctl failure"); + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, + 0), 0, "Unexpected mallctl failure"); + + assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, + "Unexpected mallctlnametomib failure"); + for (i = 0; i < nlextents; i++) { + size_t lextent_size, extent_size, floor, ceil; + + mib[2] = i; + sz = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&lextent_size, + &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); + extent_size = cache_oblivious ? 
lextent_size + PAGE : + lextent_size; + floor = extent_size_quantize_floor(extent_size); + ceil = extent_size_quantize_ceil(extent_size); + + assert_zu_eq(extent_size, floor, + "Extent quantization should be a no-op for precise size " + "(lextent_size=%zu, extent_size=%zu)", lextent_size, + extent_size); + assert_zu_eq(extent_size, ceil, + "Extent quantization should be a no-op for precise size " + "(lextent_size=%zu, extent_size=%zu)", lextent_size, + extent_size); + + if (i > 0) { + assert_zu_eq(extent_size_prev, + extent_size_quantize_floor(extent_size - PAGE), + "Floor should be a precise size"); + if (extent_size_prev < ceil_prev) { + assert_zu_eq(ceil_prev, extent_size, + "Ceiling should be a precise size " + "(extent_size_prev=%zu, ceil_prev=%zu, " + "extent_size=%zu)", extent_size_prev, + ceil_prev, extent_size); + } + } + if (i + 1 < nlextents) { + extent_size_prev = floor; + ceil_prev = extent_size_quantize_ceil(extent_size + + PAGE); + } + } +} +TEST_END + +TEST_BEGIN(test_monotonic) { +#define SZ_MAX ZU(4 * 1024 * 1024) + unsigned i; + size_t floor_prev, ceil_prev; + + floor_prev = 0; + ceil_prev = 0; + for (i = 1; i <= SZ_MAX >> LG_PAGE; i++) { + size_t extent_size, floor, ceil; + + extent_size = i << LG_PAGE; + floor = extent_size_quantize_floor(extent_size); + ceil = extent_size_quantize_ceil(extent_size); + + assert_zu_le(floor, extent_size, + "Floor should be <= (floor=%zu, extent_size=%zu, ceil=%zu)", + floor, extent_size, ceil); + assert_zu_ge(ceil, extent_size, + "Ceiling should be >= (floor=%zu, extent_size=%zu, " + "ceil=%zu)", floor, extent_size, ceil); + + assert_zu_le(floor_prev, floor, "Floor should be monotonic " + "(floor_prev=%zu, floor=%zu, extent_size=%zu, ceil=%zu)", + floor_prev, floor, extent_size, ceil); + assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic " + "(floor=%zu, extent_size=%zu, ceil_prev=%zu, ceil=%zu)", + floor, extent_size, ceil_prev, ceil); + + floor_prev = floor; + ceil_prev = ceil; + } +} +TEST_END + +int +main(void) { + return test( + test_small_extent_size, + test_large_extent_size, + test_monotonic); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/fork.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/fork.c new file mode 100644 index 0000000..b169075 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/fork.c @@ -0,0 +1,141 @@ +#include "test/jemalloc_test.h" + +#ifndef _WIN32 +#include <sys/wait.h> +#endif + +#ifndef _WIN32 +static void +wait_for_child_exit(int pid) { + int status; + while (true) { + if (waitpid(pid, &status, 0) == -1) { + test_fail("Unexpected waitpid() failure."); + } + if (WIFSIGNALED(status)) { + test_fail("Unexpected child termination due to " + "signal %d", WTERMSIG(status)); + break; + } + if (WIFEXITED(status)) { + if (WEXITSTATUS(status) != 0) { + test_fail("Unexpected child exit value %d", + WEXITSTATUS(status)); + } + break; + } + } +} +#endif + +TEST_BEGIN(test_fork) { +#ifndef _WIN32 + void *p; + pid_t pid; + + /* Set up a manually managed arena for test. */ + unsigned arena_ind; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + + /* Migrate to the new arena.
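+ * (A combined read+write of thread.arena switches this thread to the
+ * arena created above and reports the previous index through the old
+ * pointer, so the malloc()/free() calls below exercise the freshly
+ * created arena across the fork().)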
*/ + unsigned old_arena_ind; + sz = sizeof(old_arena_ind); + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, + (void *)&arena_ind, sizeof(arena_ind)), 0, + "Unexpected mallctl() failure"); + + p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + + pid = fork(); + + free(p); + + p = malloc(64); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + free(p); + + if (pid == -1) { + /* Error. */ + test_fail("Unexpected fork() failure"); + } else if (pid == 0) { + /* Child. */ + _exit(0); + } else { + wait_for_child_exit(pid); + } +#else + test_skip("fork(2) is irrelevant to Windows"); +#endif +} +TEST_END + +#ifndef _WIN32 +static void * +do_fork_thd(void *arg) { + malloc(1); + int pid = fork(); + if (pid == -1) { + /* Error. */ + test_fail("Unexpected fork() failure"); + } else if (pid == 0) { + /* Child. */ + char *args[] = {"true", NULL}; + execvp(args[0], args); + test_fail("Exec failed"); + } else { + /* Parent */ + wait_for_child_exit(pid); + } + return NULL; +} +#endif + +#ifndef _WIN32 +static void +do_test_fork_multithreaded() { + thd_t child; + thd_create(&child, do_fork_thd, NULL); + do_fork_thd(NULL); + thd_join(child, NULL); +} +#endif + +TEST_BEGIN(test_fork_multithreaded) { +#ifndef _WIN32 + /* + * We've seen bugs involving hanging on arenas_lock (though the same + * class of bugs can happen on any mutex). The bugs are intermittent + * though, so we want to run the test multiple times. Since we hold the + * arenas lock only early in the process lifetime, we can't just run + * this test in a loop (since, after all the arenas are initialized, we + * won't acquire arenas_lock any further). We therefore repeat the test + * with multiple processes. + */ + for (int i = 0; i < 100; i++) { + int pid = fork(); + if (pid == -1) { + /* Error. */ + test_fail("Unexpected fork() failure,"); + } else if (pid == 0) { + /* Child. 
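+ * Run one multithreaded fork scenario, then leave via _exit(2) so the
+ * child skips atexit(3) handlers and stdio flushing inherited from the
+ * parent.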
*/ + do_test_fork_multithreaded(); + _exit(0); + } else { + wait_for_child_exit(pid); + } + } +#else + test_skip("fork(2) is irrelevant to Windows"); +#endif +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_fork, + test_fork_multithreaded); +} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/hash.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/hash.c similarity index 74% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/hash.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/hash.c index 77a8ced..7cc034f 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/hash.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/hash.c @@ -28,6 +28,7 @@ */ #include "test/jemalloc_test.h" +#include "jemalloc/internal/hash.h" typedef enum { hash_variant_x86_32, @@ -35,43 +36,39 @@ typedef enum { hash_variant_x64_128 } hash_variant_t; -static size_t -hash_variant_bits(hash_variant_t variant) -{ - +static int +hash_variant_bits(hash_variant_t variant) { switch (variant) { - case hash_variant_x86_32: return (32); - case hash_variant_x86_128: return (128); - case hash_variant_x64_128: return (128); + case hash_variant_x86_32: return 32; + case hash_variant_x86_128: return 128; + case hash_variant_x64_128: return 128; default: not_reached(); } } static const char * -hash_variant_string(hash_variant_t variant) -{ - +hash_variant_string(hash_variant_t variant) { switch (variant) { - case hash_variant_x86_32: return ("hash_x86_32"); - case hash_variant_x86_128: return ("hash_x86_128"); - case hash_variant_x64_128: return ("hash_x64_128"); + case hash_variant_x86_32: return "hash_x86_32"; + case hash_variant_x86_128: return "hash_x86_128"; + case hash_variant_x64_128: return "hash_x64_128"; default: not_reached(); } } +#define KEY_SIZE 256 static void -hash_variant_verify(hash_variant_t variant) -{ - const size_t hashbytes = hash_variant_bits(variant) / 8; - uint8_t key[256]; - VARIABLE_ARRAY(uint8_t, hashes, hashbytes * 256); +hash_variant_verify_key(hash_variant_t variant, uint8_t *key) { + const int hashbytes = hash_variant_bits(variant) / 8; + const int hashes_size = hashbytes * 256; + VARIABLE_ARRAY(uint8_t, hashes, hashes_size); VARIABLE_ARRAY(uint8_t, final, hashbytes); unsigned i; uint32_t computed, expected; - memset(key, 0, sizeof(key)); - memset(hashes, 0, sizeof(hashes)); - memset(final, 0, sizeof(final)); + memset(key, 0, KEY_SIZE); + memset(hashes, 0, hashes_size); + memset(final, 0, hashbytes); /* * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the @@ -102,17 +99,17 @@ hash_variant_verify(hash_variant_t variant) /* Hash the result array. 
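+ * Folding the 256 concatenated per-key hashes into a single hash yields a
+ * 32-bit digest that is compared below against the variant's known-good
+ * verification constant (the scheme used by SMHasher's verification test).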
*/ switch (variant) { case hash_variant_x86_32: { - uint32_t out = hash_x86_32(hashes, hashbytes*256, 0); + uint32_t out = hash_x86_32(hashes, hashes_size, 0); memcpy(final, &out, sizeof(out)); break; } case hash_variant_x86_128: { uint64_t out[2]; - hash_x86_128(hashes, hashbytes*256, 0, out); + hash_x86_128(hashes, hashes_size, 0, out); memcpy(final, out, sizeof(out)); break; } case hash_variant_x64_128: { uint64_t out[2]; - hash_x64_128(hashes, hashbytes*256, 0, out); + hash_x64_128(hashes, hashes_size, 0, out); memcpy(final, out, sizeof(out)); break; } default: not_reached(); @@ -139,33 +136,38 @@ hash_variant_verify(hash_variant_t variant) hash_variant_string(variant), expected, computed); } -TEST_BEGIN(test_hash_x86_32) -{ +static void +hash_variant_verify(hash_variant_t variant) { +#define MAX_ALIGN 16 + uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)]; + unsigned i; + + for (i = 0; i < MAX_ALIGN; i++) { + hash_variant_verify_key(variant, &key[i]); + } +#undef MAX_ALIGN +} +#undef KEY_SIZE +TEST_BEGIN(test_hash_x86_32) { hash_variant_verify(hash_variant_x86_32); } TEST_END -TEST_BEGIN(test_hash_x86_128) -{ - +TEST_BEGIN(test_hash_x86_128) { hash_variant_verify(hash_variant_x86_128); } TEST_END -TEST_BEGIN(test_hash_x64_128) -{ - +TEST_BEGIN(test_hash_x64_128) { hash_variant_verify(hash_variant_x64_128); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_hash_x86_32, test_hash_x86_128, - test_hash_x64_128)); + test_hash_x64_128); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/hooks.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/hooks.c new file mode 100644 index 0000000..b70172e --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/hooks.c @@ -0,0 +1,38 @@ +#include "test/jemalloc_test.h" + +static bool hook_called = false; + +static void +hook() { + hook_called = true; +} + +static int +func_to_hook(int arg1, int arg2) { + return arg1 + arg2; +} + +#define func_to_hook JEMALLOC_HOOK(func_to_hook, hooks_libc_hook) + +TEST_BEGIN(unhooked_call) { + hooks_libc_hook = NULL; + hook_called = false; + assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value."); + assert_false(hook_called, "Nulling out hook didn't take."); +} +TEST_END + +TEST_BEGIN(hooked_call) { + hooks_libc_hook = &hook; + hook_called = false; + assert_d_eq(3, func_to_hook(1, 2), "Hooking changed return value."); + assert_true(hook_called, "Hook should have executed."); +} +TEST_END + +int +main(void) { + return test( + unhooked_call, + hooked_call); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk.c new file mode 100644 index 0000000..243ced4 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk.c @@ -0,0 +1,141 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/util.h" + +static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig; +static large_dalloc_junk_t *large_dalloc_junk_orig; +static large_dalloc_maybe_junk_t *large_dalloc_maybe_junk_orig; +static void *watch_for_junking; +static bool saw_junking; + +static void +watch_junking(void *p) { + watch_for_junking = p; + saw_junking = false; +} + +static void +arena_dalloc_junk_small_intercept(void *ptr, const bin_info_t *bin_info) { + size_t i; + + arena_dalloc_junk_small_orig(ptr, bin_info); + for (i = 0; i < bin_info->reg_size; i++) { + assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK, + "Missing 
junk fill for byte %zu/%zu of deallocated region", + i, bin_info->reg_size); + } + if (ptr == watch_for_junking) { + saw_junking = true; + } +} + +static void +large_dalloc_junk_intercept(void *ptr, size_t usize) { + size_t i; + + large_dalloc_junk_orig(ptr, usize); + for (i = 0; i < usize; i++) { + assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK, + "Missing junk fill for byte %zu/%zu of deallocated region", + i, usize); + } + if (ptr == watch_for_junking) { + saw_junking = true; + } +} + +static void +large_dalloc_maybe_junk_intercept(void *ptr, size_t usize) { + large_dalloc_maybe_junk_orig(ptr, usize); + if (ptr == watch_for_junking) { + saw_junking = true; + } +} + +static void +test_junk(size_t sz_min, size_t sz_max) { + uint8_t *s; + size_t sz_prev, sz, i; + + if (opt_junk_free) { + arena_dalloc_junk_small_orig = arena_dalloc_junk_small; + arena_dalloc_junk_small = arena_dalloc_junk_small_intercept; + large_dalloc_junk_orig = large_dalloc_junk; + large_dalloc_junk = large_dalloc_junk_intercept; + large_dalloc_maybe_junk_orig = large_dalloc_maybe_junk; + large_dalloc_maybe_junk = large_dalloc_maybe_junk_intercept; + } + + sz_prev = 0; + s = (uint8_t *)mallocx(sz_min, 0); + assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); + + for (sz = sallocx(s, 0); sz <= sz_max; + sz_prev = sz, sz = sallocx(s, 0)) { + if (sz_prev > 0) { + assert_u_eq(s[0], 'a', + "Previously allocated byte %zu/%zu is corrupted", + ZU(0), sz_prev); + assert_u_eq(s[sz_prev-1], 'a', + "Previously allocated byte %zu/%zu is corrupted", + sz_prev-1, sz_prev); + } + + for (i = sz_prev; i < sz; i++) { + if (opt_junk_alloc) { + assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK, + "Newly allocated byte %zu/%zu isn't " + "junk-filled", i, sz); + } + s[i] = 'a'; + } + + if (xallocx(s, sz+1, 0, 0) == sz) { + uint8_t *t; + watch_junking(s); + t = (uint8_t *)rallocx(s, sz+1, 0); + assert_ptr_not_null((void *)t, + "Unexpected rallocx() failure"); + assert_zu_ge(sallocx(t, 0), sz+1, + "Unexpectedly small rallocx() result"); + if (!background_thread_enabled()) { + assert_ptr_ne(s, t, + "Unexpected in-place rallocx()"); + assert_true(!opt_junk_free || saw_junking, + "Expected region of size %zu to be " + "junk-filled", sz); + } + s = t; + } + } + + watch_junking(s); + dallocx(s, 0); + assert_true(!opt_junk_free || saw_junking, + "Expected region of size %zu to be junk-filled", sz); + + if (opt_junk_free) { + arena_dalloc_junk_small = arena_dalloc_junk_small_orig; + large_dalloc_junk = large_dalloc_junk_orig; + large_dalloc_maybe_junk = large_dalloc_maybe_junk_orig; + } +} + +TEST_BEGIN(test_junk_small) { + test_skip_if(!config_fill); + test_junk(1, SMALL_MAXCLASS-1); +} +TEST_END + +TEST_BEGIN(test_junk_large) { + test_skip_if(!config_fill); + test_junk(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1))); +} +TEST_END + +int +main(void) { + return test( + test_junk_small, + test_junk_large); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk.sh new file mode 100644 index 0000000..97cd8ca --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_fill}" = "x1" ] ; then + export MALLOC_CONF="abort:false,zero:false,junk:true" +fi diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_alloc.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_alloc.c new file mode 100644 index 0000000..a442a0c --- /dev/null +++ 
b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_alloc.c @@ -0,0 +1 @@ +#include "junk.c" diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_alloc.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_alloc.sh new file mode 100644 index 0000000..e1008c2 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_alloc.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_fill}" = "x1" ] ; then + export MALLOC_CONF="abort:false,zero:false,junk:alloc" +fi diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_free.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_free.c new file mode 100644 index 0000000..a442a0c --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_free.c @@ -0,0 +1 @@ +#include "junk.c" diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_free.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_free.sh new file mode 100644 index 0000000..402196c --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/junk_free.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_fill}" = "x1" ] ; then + export MALLOC_CONF="abort:false,zero:false,junk:free" +fi diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/lg_chunk.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/lg_chunk.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/lg_chunk.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/lg_chunk.c diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/log.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/log.c new file mode 100644 index 0000000..a52bd73 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/log.c @@ -0,0 +1,193 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/log.h" + +static void +expect_no_logging(const char *names) { + log_var_t log_l1 = LOG_VAR_INIT("l1"); + log_var_t log_l2 = LOG_VAR_INIT("l2"); + log_var_t log_l2_a = LOG_VAR_INIT("l2.a"); + + strcpy(log_var_names, names); + + int count = 0; + + for (int i = 0; i < 10; i++) { + log_do_begin(log_l1) + count++; + log_do_end(log_l1) + + log_do_begin(log_l2) + count++; + log_do_end(log_l2) + + log_do_begin(log_l2_a) + count++; + log_do_end(log_l2_a) + } + assert_d_eq(count, 0, "Disabled logging not ignored!"); +} + +TEST_BEGIN(test_log_disabled) { + test_skip_if(!config_log); + atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); + expect_no_logging(""); + expect_no_logging("abc"); + expect_no_logging("a.b.c"); + expect_no_logging("l12"); + expect_no_logging("l123|a456|b789"); + expect_no_logging("|||"); +} +TEST_END + +TEST_BEGIN(test_log_enabled_direct) { + test_skip_if(!config_log); + atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); + log_var_t log_l1 = LOG_VAR_INIT("l1"); + log_var_t log_l1_a = LOG_VAR_INIT("l1.a"); + log_var_t log_l2 = LOG_VAR_INIT("l2"); + + int count; + + count = 0; + strcpy(log_var_names, "l1"); + for (int i = 0; i < 10; i++) { + log_do_begin(log_l1) + count++; + log_do_end(log_l1) + } + assert_d_eq(count, 10, "Mis-logged!"); + + count = 0; + strcpy(log_var_names, "l1.a"); + for (int i = 0; i < 10; i++) { + log_do_begin(log_l1_a) + count++; + log_do_end(log_l1_a) + } + assert_d_eq(count, 10, "Mis-logged!"); + + count = 0; + strcpy(log_var_names, 
"l1.a|abc|l2|def"); + for (int i = 0; i < 10; i++) { + log_do_begin(log_l1_a) + count++; + log_do_end(log_l1_a) + + log_do_begin(log_l2) + count++; + log_do_end(log_l2) + } + assert_d_eq(count, 20, "Mis-logged!"); +} +TEST_END + +TEST_BEGIN(test_log_enabled_indirect) { + test_skip_if(!config_log); + atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); + strcpy(log_var_names, "l0|l1|abc|l2.b|def"); + + /* On. */ + log_var_t log_l1 = LOG_VAR_INIT("l1"); + /* Off. */ + log_var_t log_l1a = LOG_VAR_INIT("l1a"); + /* On. */ + log_var_t log_l1_a = LOG_VAR_INIT("l1.a"); + /* Off. */ + log_var_t log_l2_a = LOG_VAR_INIT("l2.a"); + /* On. */ + log_var_t log_l2_b_a = LOG_VAR_INIT("l2.b.a"); + /* On. */ + log_var_t log_l2_b_b = LOG_VAR_INIT("l2.b.b"); + + /* 4 are on total, so should sum to 40. */ + int count = 0; + for (int i = 0; i < 10; i++) { + log_do_begin(log_l1) + count++; + log_do_end(log_l1) + + log_do_begin(log_l1a) + count++; + log_do_end(log_l1a) + + log_do_begin(log_l1_a) + count++; + log_do_end(log_l1_a) + + log_do_begin(log_l2_a) + count++; + log_do_end(log_l2_a) + + log_do_begin(log_l2_b_a) + count++; + log_do_end(log_l2_b_a) + + log_do_begin(log_l2_b_b) + count++; + log_do_end(log_l2_b_b) + } + + assert_d_eq(count, 40, "Mis-logged!"); +} +TEST_END + +TEST_BEGIN(test_log_enabled_global) { + test_skip_if(!config_log); + atomic_store_b(&log_init_done, true, ATOMIC_RELAXED); + strcpy(log_var_names, "abc|.|def"); + + log_var_t log_l1 = LOG_VAR_INIT("l1"); + log_var_t log_l2_a_a = LOG_VAR_INIT("l2.a.a"); + + int count = 0; + for (int i = 0; i < 10; i++) { + log_do_begin(log_l1) + count++; + log_do_end(log_l1) + + log_do_begin(log_l2_a_a) + count++; + log_do_end(log_l2_a_a) + } + assert_d_eq(count, 20, "Mis-logged!"); +} +TEST_END + +TEST_BEGIN(test_logs_if_no_init) { + test_skip_if(!config_log); + atomic_store_b(&log_init_done, false, ATOMIC_RELAXED); + + log_var_t l = LOG_VAR_INIT("definitely.not.enabled"); + + int count = 0; + for (int i = 0; i < 10; i++) { + log_do_begin(l) + count++; + log_do_end(l) + } + assert_d_eq(count, 0, "Logging shouldn't happen if not initialized."); +} +TEST_END + +/* + * This really just checks to make sure that this usage compiles; we don't have + * any test code to run. 
+ */ +TEST_BEGIN(test_log_only_format_string) { + if (false) { + LOG("log_str", "No arguments follow this format string."); + } +} +TEST_END + +int +main(void) { + return test( + test_log_disabled, + test_log_enabled_direct, + test_log_enabled_indirect, + test_log_enabled_global, + test_logs_if_no_init, + test_log_only_format_string); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/mallctl.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/mallctl.c new file mode 100644 index 0000000..1ecbab0 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/mallctl.c @@ -0,0 +1,805 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/util.h" + +TEST_BEGIN(test_mallctl_errors) { + uint64_t epoch; + size_t sz; + + assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT, + "mallctl() should return ENOENT for non-existent names"); + + assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")), + EPERM, "mallctl() should return EPERM on attempt to write " + "read-only value"); + + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, + sizeof(epoch)-1), EINVAL, + "mallctl() should return EINVAL for input size mismatch"); + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, + sizeof(epoch)+1), EINVAL, + "mallctl() should return EINVAL for input size mismatch"); + + sz = sizeof(epoch)-1; + assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL, + "mallctl() should return EINVAL for output size mismatch"); + sz = sizeof(epoch)+1; + assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL, + "mallctl() should return EINVAL for output size mismatch"); +} +TEST_END + +TEST_BEGIN(test_mallctlnametomib_errors) { + size_t mib[1]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT, + "mallctlnametomib() should return ENOENT for non-existent names"); +} +TEST_END + +TEST_BEGIN(test_mallctlbymib_errors) { + uint64_t epoch; + size_t sz; + size_t mib[1]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("version", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0", + strlen("0.0.0")), EPERM, "mallctl() should return EPERM on " + "attempt to write read-only value"); + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch, + sizeof(epoch)-1), EINVAL, + "mallctlbymib() should return EINVAL for input size mismatch"); + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch, + sizeof(epoch)+1), EINVAL, + "mallctlbymib() should return EINVAL for input size mismatch"); + + sz = sizeof(epoch)-1; + assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0), + EINVAL, + "mallctlbymib() should return EINVAL for output size mismatch"); + sz = sizeof(epoch)+1; + assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0), + EINVAL, + "mallctlbymib() should return EINVAL for output size mismatch"); +} +TEST_END + +TEST_BEGIN(test_mallctl_read_write) { + uint64_t old_epoch, new_epoch; + size_t sz = sizeof(old_epoch); + + /* Blind. */ + assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); + + /* Read. 
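+ * mallctl(name, oldp, oldlenp, newp, newlen) reads the current value into
+ * *oldp when oldp is non-NULL and installs the value at newp when newp is
+ * non-NULL; the four calls in this test cover each combination of the two.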
*/ + assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); + + /* Write. */ + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch, + sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); + + /* Read+write. */ + assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, + (void *)&new_epoch, sizeof(new_epoch)), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); +} +TEST_END + +TEST_BEGIN(test_mallctlnametomib_short_mib) { + size_t mib[4]; + size_t miblen; + + miblen = 3; + mib[3] = 42; + assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + assert_zu_eq(miblen, 3, "Unexpected mib output length"); + assert_zu_eq(mib[3], 42, + "mallctlnametomib() wrote past the end of the input mib"); +} +TEST_END + +TEST_BEGIN(test_mallctl_config) { +#define TEST_MALLCTL_CONFIG(config, t) do { \ + t oldval; \ + size_t sz = sizeof(oldval); \ + assert_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \ + NULL, 0), 0, "Unexpected mallctl() failure"); \ + assert_b_eq(oldval, config_##config, "Incorrect config value"); \ + assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ +} while (0) + + TEST_MALLCTL_CONFIG(cache_oblivious, bool); + TEST_MALLCTL_CONFIG(debug, bool); + TEST_MALLCTL_CONFIG(fill, bool); + TEST_MALLCTL_CONFIG(lazy_lock, bool); + TEST_MALLCTL_CONFIG(malloc_conf, const char *); + TEST_MALLCTL_CONFIG(prof, bool); + TEST_MALLCTL_CONFIG(prof_libgcc, bool); + TEST_MALLCTL_CONFIG(prof_libunwind, bool); + TEST_MALLCTL_CONFIG(stats, bool); + TEST_MALLCTL_CONFIG(utrace, bool); + TEST_MALLCTL_CONFIG(xmalloc, bool); + +#undef TEST_MALLCTL_CONFIG +} +TEST_END + +TEST_BEGIN(test_mallctl_opt) { + bool config_always = true; + +#define TEST_MALLCTL_OPT(t, opt, config) do { \ + t oldval; \ + size_t sz = sizeof(oldval); \ + int expected = config_##config ? 
0 : ENOENT; \ + int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL, \ + 0); \ + assert_d_eq(result, expected, \ + "Unexpected mallctl() result for opt."#opt); \ + assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ +} while (0) + + TEST_MALLCTL_OPT(bool, abort, always); + TEST_MALLCTL_OPT(bool, abort_conf, always); + TEST_MALLCTL_OPT(const char *, metadata_thp, always); + TEST_MALLCTL_OPT(bool, retain, always); + TEST_MALLCTL_OPT(const char *, dss, always); + TEST_MALLCTL_OPT(unsigned, narenas, always); + TEST_MALLCTL_OPT(const char *, percpu_arena, always); + TEST_MALLCTL_OPT(bool, background_thread, always); + TEST_MALLCTL_OPT(ssize_t, dirty_decay_ms, always); + TEST_MALLCTL_OPT(ssize_t, muzzy_decay_ms, always); + TEST_MALLCTL_OPT(bool, stats_print, always); + TEST_MALLCTL_OPT(const char *, junk, fill); + TEST_MALLCTL_OPT(bool, zero, fill); + TEST_MALLCTL_OPT(bool, utrace, utrace); + TEST_MALLCTL_OPT(bool, xmalloc, xmalloc); + TEST_MALLCTL_OPT(bool, tcache, always); + TEST_MALLCTL_OPT(size_t, lg_extent_max_active_fit, always); + TEST_MALLCTL_OPT(size_t, lg_tcache_max, always); + TEST_MALLCTL_OPT(const char *, thp, always); + TEST_MALLCTL_OPT(bool, prof, prof); + TEST_MALLCTL_OPT(const char *, prof_prefix, prof); + TEST_MALLCTL_OPT(bool, prof_active, prof); + TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof); + TEST_MALLCTL_OPT(bool, prof_accum, prof); + TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof); + TEST_MALLCTL_OPT(bool, prof_gdump, prof); + TEST_MALLCTL_OPT(bool, prof_final, prof); + TEST_MALLCTL_OPT(bool, prof_leak, prof); + +#undef TEST_MALLCTL_OPT +} +TEST_END + +TEST_BEGIN(test_manpage_example) { + unsigned nbins, i; + size_t mib[4]; + size_t len, miblen; + + len = sizeof(nbins); + assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0, + "Unexpected mallctl() failure"); + + miblen = 4; + assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + for (i = 0; i < nbins; i++) { + size_t bin_size; + + mib[2] = i; + len = sizeof(bin_size); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&bin_size, &len, + NULL, 0), 0, "Unexpected mallctlbymib() failure"); + /* Do something with bin_size... */ + } +} +TEST_END + +TEST_BEGIN(test_tcache_none) { + test_skip_if(!opt_tcache); + + /* Allocate p and q. */ + void *p0 = mallocx(42, 0); + assert_ptr_not_null(p0, "Unexpected mallocx() failure"); + void *q = mallocx(42, 0); + assert_ptr_not_null(q, "Unexpected mallocx() failure"); + + /* Deallocate p and q, but bypass the tcache for q. */ + dallocx(p0, 0); + dallocx(q, MALLOCX_TCACHE_NONE); + + /* Make sure that tcache-based allocation returns p, not q. */ + void *p1 = mallocx(42, 0); + assert_ptr_not_null(p1, "Unexpected mallocx() failure"); + assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region"); + + /* Clean up. */ + dallocx(p1, MALLOCX_TCACHE_NONE); +} +TEST_END + +TEST_BEGIN(test_tcache) { +#define NTCACHES 10 + unsigned tis[NTCACHES]; + void *ps[NTCACHES]; + void *qs[NTCACHES]; + unsigned i; + size_t sz, psz, qsz; + + psz = 42; + qsz = nallocx(psz, 0) + 1; + + /* Create tcaches. */ + for (i = 0; i < NTCACHES; i++) { + sz = sizeof(unsigned); + assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL, + 0), 0, "Unexpected mallctl() failure, i=%u", i); + } + + /* Exercise tcache ID recycling. 
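+ * Destroying a tcache releases its ID for reuse, so the destroy-all /
+ * re-create-all sequence below should recycle the just-freed IDs rather
+ * than grow the ID space.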
*/ + for (i = 0; i < NTCACHES; i++) { + assert_d_eq(mallctl("tcache.destroy", NULL, NULL, + (void *)&tis[i], sizeof(unsigned)), 0, + "Unexpected mallctl() failure, i=%u", i); + } + for (i = 0; i < NTCACHES; i++) { + sz = sizeof(unsigned); + assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL, + 0), 0, "Unexpected mallctl() failure, i=%u", i); + } + + /* Flush empty tcaches. */ + for (i = 0; i < NTCACHES; i++) { + assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i], + sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", + i); + } + + /* Cache some allocations. */ + for (i = 0; i < NTCACHES; i++) { + ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", + i); + dallocx(ps[i], MALLOCX_TCACHE(tis[i])); + + qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u", + i); + dallocx(qs[i], MALLOCX_TCACHE(tis[i])); + } + + /* Verify that tcaches allocate cached regions. */ + for (i = 0; i < NTCACHES; i++) { + void *p0 = ps[i]; + ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", + i); + assert_ptr_eq(ps[i], p0, + "Expected mallocx() to allocate cached region, i=%u", i); + } + + /* Verify that reallocation uses cached regions. */ + for (i = 0; i < NTCACHES; i++) { + void *q0 = qs[i]; + qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i])); + assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u", + i); + assert_ptr_eq(qs[i], q0, + "Expected rallocx() to allocate cached region, i=%u", i); + /* Avoid undefined behavior in case of test failure. */ + if (qs[i] == NULL) { + qs[i] = ps[i]; + } + } + for (i = 0; i < NTCACHES; i++) { + dallocx(qs[i], MALLOCX_TCACHE(tis[i])); + } + + /* Flush some non-empty tcaches. */ + for (i = 0; i < NTCACHES/2; i++) { + assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i], + sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", + i); + } + + /* Destroy tcaches. 
*/ + for (i = 0; i < NTCACHES; i++) { + assert_d_eq(mallctl("tcache.destroy", NULL, NULL, + (void *)&tis[i], sizeof(unsigned)), 0, + "Unexpected mallctl() failure, i=%u", i); + } +} +TEST_END + +TEST_BEGIN(test_thread_arena) { + unsigned old_arena_ind, new_arena_ind, narenas; + + const char *opa; + size_t sz = sizeof(opa); + assert_d_eq(mallctl("opt.percpu_arena", (void *)&opa, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect"); + + if (strcmp(opa, "disabled") == 0) { + new_arena_ind = narenas - 1; + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, + (void *)&new_arena_ind, sizeof(unsigned)), 0, + "Unexpected mallctl() failure"); + new_arena_ind = 0; + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, + (void *)&new_arena_ind, sizeof(unsigned)), 0, + "Unexpected mallctl() failure"); + } else { + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + new_arena_ind = percpu_arena_ind_limit(opt_percpu_arena) - 1; + if (old_arena_ind != new_arena_ind) { + assert_d_eq(mallctl("thread.arena", + (void *)&old_arena_ind, &sz, (void *)&new_arena_ind, + sizeof(unsigned)), EPERM, "thread.arena ctl " + "should not be allowed with percpu arena"); + } + } +} +TEST_END + +TEST_BEGIN(test_arena_i_initialized) { + unsigned narenas, i; + size_t sz; + size_t mib[3]; + size_t miblen = sizeof(mib) / sizeof(size_t); + bool initialized; + + sz = sizeof(narenas); + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + + assert_d_eq(mallctlnametomib("arena.0.initialized", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + for (i = 0; i < narenas; i++) { + mib[1] = i; + sz = sizeof(initialized); + assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, + 0), 0, "Unexpected mallctl() failure"); + } + + mib[1] = MALLCTL_ARENAS_ALL; + sz = sizeof(initialized); + assert_d_eq(mallctlbymib(mib, miblen, &initialized, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_true(initialized, + "Merged arena statistics should always be initialized"); + + /* Equivalent to the above but using mallctl() directly. */ + sz = sizeof(initialized); + assert_d_eq(mallctl( + "arena." 
STRINGIFY(MALLCTL_ARENAS_ALL) ".initialized", + (void *)&initialized, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_true(initialized, + "Merged arena statistics should always be initialized"); +} +TEST_END + +TEST_BEGIN(test_arena_i_dirty_decay_ms) { + ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms; + size_t sz = sizeof(ssize_t); + + assert_d_eq(mallctl("arena.0.dirty_decay_ms", + (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + + dirty_decay_ms = -2; + assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL, + (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + dirty_decay_ms = 0x7fffffff; + assert_d_eq(mallctl("arena.0.dirty_decay_ms", NULL, NULL, + (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, + "Unexpected mallctl() failure"); + + for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1; + dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms, + dirty_decay_ms++) { + ssize_t old_dirty_decay_ms; + + assert_d_eq(mallctl("arena.0.dirty_decay_ms", + (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms, + sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); + assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms, + "Unexpected old arena.0.dirty_decay_ms"); + } +} +TEST_END + +TEST_BEGIN(test_arena_i_muzzy_decay_ms) { + ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms; + size_t sz = sizeof(ssize_t); + + assert_d_eq(mallctl("arena.0.muzzy_decay_ms", + (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + + muzzy_decay_ms = -2; + assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL, + (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + muzzy_decay_ms = 0x7fffffff; + assert_d_eq(mallctl("arena.0.muzzy_decay_ms", NULL, NULL, + (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, + "Unexpected mallctl() failure"); + + for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1; + muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms, + muzzy_decay_ms++) { + ssize_t old_muzzy_decay_ms; + + assert_d_eq(mallctl("arena.0.muzzy_decay_ms", + (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms, + sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); + assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms, + "Unexpected old arena.0.muzzy_decay_ms"); + } +} +TEST_END + +TEST_BEGIN(test_arena_i_purge) { + unsigned narenas; + size_t sz = sizeof(unsigned); + size_t mib[3]; + size_t miblen = 3; + + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = narenas; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); + + mib[1] = MALLCTL_ARENAS_ALL; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} +TEST_END + +TEST_BEGIN(test_arena_i_decay) { + unsigned narenas; + size_t sz = sizeof(unsigned); + size_t mib[3]; + size_t miblen = 3; + + assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0, + "Unexpected 
mallctlnametomib() failure"); + mib[1] = narenas; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); + + mib[1] = MALLCTL_ARENAS_ALL; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} +TEST_END + +TEST_BEGIN(test_arena_i_dss) { + const char *dss_prec_old, *dss_prec_new; + size_t sz = sizeof(dss_prec_old); + size_t mib[3]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, + "Unexpected mallctlnametomib() error"); + + dss_prec_new = "disabled"; + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, + (void *)&dss_prec_new, sizeof(dss_prec_new)), 0, + "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected default for dss precedence"); + + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz, + (void *)&dss_prec_old, sizeof(dss_prec_old)), 0, + "Unexpected mallctl() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, + 0), 0, "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected value for dss precedence"); + + mib[1] = narenas_total_get(); + dss_prec_new = "disabled"; + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, + (void *)&dss_prec_new, sizeof(dss_prec_new)), 0, + "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected default for dss precedence"); + + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz, + (void *)&dss_prec_old, sizeof(dss_prec_new)), 0, + "Unexpected mallctl() failure"); + + assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, + 0), 0, "Unexpected mallctl() failure"); + assert_str_ne(dss_prec_old, "primary", + "Unexpected value for dss precedence"); +} +TEST_END + +TEST_BEGIN(test_arena_i_retain_grow_limit) { + size_t old_limit, new_limit, default_limit; + size_t mib[3]; + size_t miblen; + + bool retain_enabled; + size_t sz = sizeof(retain_enabled); + assert_d_eq(mallctl("opt.retain", &retain_enabled, &sz, NULL, 0), + 0, "Unexpected mallctl() failure"); + test_skip_if(!retain_enabled); + + sz = sizeof(default_limit); + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen), + 0, "Unexpected mallctlnametomib() error"); + + assert_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(default_limit, sz_pind2sz(EXTENT_GROW_MAX_PIND), + "Unexpected default for retain_grow_limit"); + + new_limit = PAGE - 1; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit, + sizeof(new_limit)), EFAULT, "Unexpected mallctl() success"); + + new_limit = PAGE + 1; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit, + sizeof(new_limit)), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(old_limit, PAGE, + "Unexpected value for retain_grow_limit"); + + /* Expect grow less than psize class 10. */ + new_limit = sz_pind2sz(10) - 1; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &new_limit, + sizeof(new_limit)), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctlbymib(mib, miblen, &old_limit, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_zu_eq(old_limit, sz_pind2sz(9), + "Unexpected value for retain_grow_limit"); + + /* Restore to default. 
*/ + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, &default_limit, + sizeof(default_limit)), 0, "Unexpected mallctl() failure"); +} +TEST_END + +TEST_BEGIN(test_arenas_dirty_decay_ms) { + ssize_t dirty_decay_ms, orig_dirty_decay_ms, prev_dirty_decay_ms; + size_t sz = sizeof(ssize_t); + + assert_d_eq(mallctl("arenas.dirty_decay_ms", + (void *)&orig_dirty_decay_ms, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + + dirty_decay_ms = -2; + assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL, + (void *)&dirty_decay_ms, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + dirty_decay_ms = 0x7fffffff; + assert_d_eq(mallctl("arenas.dirty_decay_ms", NULL, NULL, + (void *)&dirty_decay_ms, sizeof(ssize_t)), 0, + "Expected mallctl() failure"); + + for (prev_dirty_decay_ms = dirty_decay_ms, dirty_decay_ms = -1; + dirty_decay_ms < 20; prev_dirty_decay_ms = dirty_decay_ms, + dirty_decay_ms++) { + ssize_t old_dirty_decay_ms; + + assert_d_eq(mallctl("arenas.dirty_decay_ms", + (void *)&old_dirty_decay_ms, &sz, (void *)&dirty_decay_ms, + sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); + assert_zd_eq(old_dirty_decay_ms, prev_dirty_decay_ms, + "Unexpected old arenas.dirty_decay_ms"); + } +} +TEST_END + +TEST_BEGIN(test_arenas_muzzy_decay_ms) { + ssize_t muzzy_decay_ms, orig_muzzy_decay_ms, prev_muzzy_decay_ms; + size_t sz = sizeof(ssize_t); + + assert_d_eq(mallctl("arenas.muzzy_decay_ms", + (void *)&orig_muzzy_decay_ms, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + + muzzy_decay_ms = -2; + assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL, + (void *)&muzzy_decay_ms, sizeof(ssize_t)), EFAULT, + "Unexpected mallctl() success"); + + muzzy_decay_ms = 0x7fffffff; + assert_d_eq(mallctl("arenas.muzzy_decay_ms", NULL, NULL, + (void *)&muzzy_decay_ms, sizeof(ssize_t)), 0, + "Expected mallctl() failure"); + + for (prev_muzzy_decay_ms = muzzy_decay_ms, muzzy_decay_ms = -1; + muzzy_decay_ms < 20; prev_muzzy_decay_ms = muzzy_decay_ms, + muzzy_decay_ms++) { + ssize_t old_muzzy_decay_ms; + + assert_d_eq(mallctl("arenas.muzzy_decay_ms", + (void *)&old_muzzy_decay_ms, &sz, (void *)&muzzy_decay_ms, + sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); + assert_zd_eq(old_muzzy_decay_ms, prev_muzzy_decay_ms, + "Unexpected old arenas.muzzy_decay_ms"); + } +} +TEST_END + +TEST_BEGIN(test_arenas_constants) { +#define TEST_ARENAS_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \ + 0), 0, "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM); + TEST_ARENAS_CONSTANT(size_t, page, PAGE); + TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS); + TEST_ARENAS_CONSTANT(unsigned, nlextents, NSIZES - NBINS); + +#undef TEST_ARENAS_CONSTANT +} +TEST_END + +TEST_BEGIN(test_arenas_bin_constants) { +#define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \ + NULL, 0), 0, "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_BIN_CONSTANT(size_t, size, bin_infos[0].reg_size); + TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, bin_infos[0].nregs); + TEST_ARENAS_BIN_CONSTANT(size_t, slab_size, + bin_infos[0].slab_size); + +#undef TEST_ARENAS_BIN_CONSTANT +} +TEST_END + +TEST_BEGIN(test_arenas_lextent_constants) { +#define TEST_ARENAS_LEXTENT_CONSTANT(t, 
name, expected) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("arenas.lextent.0."#name, (void *)&name, \ + &sz, NULL, 0), 0, "Unexpected mallctl() failure"); \ + assert_zu_eq(name, expected, "Incorrect "#name" size"); \ +} while (0) + + TEST_ARENAS_LEXTENT_CONSTANT(size_t, size, LARGE_MINCLASS); + +#undef TEST_ARENAS_LEXTENT_CONSTANT +} +TEST_END + +TEST_BEGIN(test_arenas_create) { + unsigned narenas_before, arena, narenas_after; + size_t sz = sizeof(unsigned); + + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz, + NULL, 0), 0, "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL, + 0), 0, "Unexpected mallctl() failure"); + + assert_u_eq(narenas_before+1, narenas_after, + "Unexpected number of arenas before versus after extension"); + assert_u_eq(arena, narenas_after-1, "Unexpected arena index"); +} +TEST_END + +TEST_BEGIN(test_arenas_lookup) { + unsigned arena, arena1; + void *ptr; + size_t sz = sizeof(unsigned); + + assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0, + "Unexpected mallctl() failure"); + ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE); + assert_ptr_not_null(ptr, "Unexpected mallocx() failure"); + assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)), + 0, "Unexpected mallctl() failure"); + assert_u_eq(arena, arena1, "Unexpected arena index"); + dallocx(ptr, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas) { +#define TEST_STATS_ARENAS(t, name) do { \ + t name; \ + size_t sz = sizeof(t); \ + assert_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \ + NULL, 0), 0, "Unexpected mallctl() failure"); \ +} while (0) + + TEST_STATS_ARENAS(unsigned, nthreads); + TEST_STATS_ARENAS(const char *, dss); + TEST_STATS_ARENAS(ssize_t, dirty_decay_ms); + TEST_STATS_ARENAS(ssize_t, muzzy_decay_ms); + TEST_STATS_ARENAS(size_t, pactive); + TEST_STATS_ARENAS(size_t, pdirty); + +#undef TEST_STATS_ARENAS +} +TEST_END + +int +main(void) { + return test( + test_mallctl_errors, + test_mallctlnametomib_errors, + test_mallctlbymib_errors, + test_mallctl_read_write, + test_mallctlnametomib_short_mib, + test_mallctl_config, + test_mallctl_opt, + test_manpage_example, + test_tcache_none, + test_tcache, + test_thread_arena, + test_arena_i_initialized, + test_arena_i_dirty_decay_ms, + test_arena_i_muzzy_decay_ms, + test_arena_i_purge, + test_arena_i_decay, + test_arena_i_dss, + test_arena_i_retain_grow_limit, + test_arenas_dirty_decay_ms, + test_arenas_muzzy_decay_ms, + test_arenas_constants, + test_arenas_bin_constants, + test_arenas_lextent_constants, + test_arenas_create, + test_arenas_lookup, + test_stats_arenas); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/malloc_io.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/malloc_io.c new file mode 100644 index 0000000..79ba7fc --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/malloc_io.c @@ -0,0 +1,258 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_malloc_strtoumax_no_endptr) { + int err; + + set_errno(0); + assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result"); + err = get_errno(); + assert_d_eq(err, 0, "Unexpected failure"); +} +TEST_END + +TEST_BEGIN(test_malloc_strtoumax) { + struct test_s { + const char *input; + const char *expected_remainder; + int base; + int expected_errno; + const char 
*expected_errno_name; + uintmax_t expected_x; + }; +#define ERR(e) e, #e +#define KUMAX(x) ((uintmax_t)x##ULL) +#define KSMAX(x) ((uintmax_t)(intmax_t)x##LL) + struct test_s tests[] = { + {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX}, + {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, + {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX}, + + {"", "", 0, ERR(EINVAL), UINTMAX_MAX}, + {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX}, + {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX}, + {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX}, + + {"42", "", 0, ERR(0), KUMAX(42)}, + {"+42", "", 0, ERR(0), KUMAX(42)}, + {"-42", "", 0, ERR(0), KSMAX(-42)}, + {"042", "", 0, ERR(0), KUMAX(042)}, + {"+042", "", 0, ERR(0), KUMAX(042)}, + {"-042", "", 0, ERR(0), KSMAX(-042)}, + {"0x42", "", 0, ERR(0), KUMAX(0x42)}, + {"+0x42", "", 0, ERR(0), KUMAX(0x42)}, + {"-0x42", "", 0, ERR(0), KSMAX(-0x42)}, + + {"0", "", 0, ERR(0), KUMAX(0)}, + {"1", "", 0, ERR(0), KUMAX(1)}, + + {"42", "", 0, ERR(0), KUMAX(42)}, + {" 42", "", 0, ERR(0), KUMAX(42)}, + {"42 ", " ", 0, ERR(0), KUMAX(42)}, + {"0x", "x", 0, ERR(0), KUMAX(0)}, + {"42x", "x", 0, ERR(0), KUMAX(42)}, + + {"07", "", 0, ERR(0), KUMAX(7)}, + {"010", "", 0, ERR(0), KUMAX(8)}, + {"08", "8", 0, ERR(0), KUMAX(0)}, + {"0_", "_", 0, ERR(0), KUMAX(0)}, + + {"0x", "x", 0, ERR(0), KUMAX(0)}, + {"0X", "X", 0, ERR(0), KUMAX(0)}, + {"0xg", "xg", 0, ERR(0), KUMAX(0)}, + {"0XA", "", 0, ERR(0), KUMAX(10)}, + + {"010", "", 10, ERR(0), KUMAX(10)}, + {"0x3", "x3", 10, ERR(0), KUMAX(0)}, + + {"12", "2", 2, ERR(0), KUMAX(1)}, + {"78", "8", 8, ERR(0), KUMAX(7)}, + {"9a", "a", 10, ERR(0), KUMAX(9)}, + {"9A", "A", 10, ERR(0), KUMAX(9)}, + {"fg", "g", 16, ERR(0), KUMAX(15)}, + {"FG", "G", 16, ERR(0), KUMAX(15)}, + {"0xfg", "g", 16, ERR(0), KUMAX(15)}, + {"0XFG", "G", 16, ERR(0), KUMAX(15)}, + {"z_", "_", 36, ERR(0), KUMAX(35)}, + {"Z_", "_", 36, ERR(0), KUMAX(35)} + }; +#undef ERR +#undef KUMAX +#undef KSMAX + unsigned i; + + for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) { + struct test_s *test = &tests[i]; + int err; + uintmax_t result; + char *remainder; + + set_errno(0); + result = malloc_strtoumax(test->input, &remainder, test->base); + err = get_errno(); + assert_d_eq(err, test->expected_errno, + "Expected errno %s for \"%s\", base %d", + test->expected_errno_name, test->input, test->base); + assert_str_eq(remainder, test->expected_remainder, + "Unexpected remainder for \"%s\", base %d", + test->input, test->base); + if (err == 0) { + assert_ju_eq(result, test->expected_x, + "Unexpected result for \"%s\", base %d", + test->input, test->base); + } + } +} +TEST_END + +TEST_BEGIN(test_malloc_snprintf_truncated) { +#define BUFLEN 15 + char buf[BUFLEN]; + size_t result; + size_t len; +#define TEST(expected_str_untruncated, ...) 
do { \ + result = malloc_snprintf(buf, len, __VA_ARGS__); \ + assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \ + "Unexpected string inequality (\"%s\" vs \"%s\")", \ + buf, expected_str_untruncated); \ + assert_zu_eq(result, strlen(expected_str_untruncated), \ + "Unexpected result"); \ +} while (0) + + for (len = 1; len < BUFLEN; len++) { + TEST("012346789", "012346789"); + TEST("a0123b", "a%sb", "0123"); + TEST("a01234567", "a%s%s", "0123", "4567"); + TEST("a0123 ", "a%-6s", "0123"); + TEST("a 0123", "a%6s", "0123"); + TEST("a 012", "a%6.3s", "0123"); + TEST("a 012", "a%*.*s", 6, 3, "0123"); + TEST("a 123b", "a% db", 123); + TEST("a123b", "a%-db", 123); + TEST("a-123b", "a%-db", -123); + TEST("a+123b", "a%+db", 123); + } +#undef BUFLEN +#undef TEST +} +TEST_END + +TEST_BEGIN(test_malloc_snprintf) { +#define BUFLEN 128 + char buf[BUFLEN]; + size_t result; +#define TEST(expected_str, ...) do { \ + result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \ + assert_str_eq(buf, expected_str, "Unexpected output"); \ + assert_zu_eq(result, strlen(expected_str), "Unexpected result");\ +} while (0) + + TEST("hello", "hello"); + + TEST("50%, 100%", "50%%, %d%%", 100); + + TEST("a0123b", "a%sb", "0123"); + + TEST("a 0123b", "a%5sb", "0123"); + TEST("a 0123b", "a%*sb", 5, "0123"); + + TEST("a0123 b", "a%-5sb", "0123"); + TEST("a0123b", "a%*sb", -1, "0123"); + TEST("a0123 b", "a%*sb", -5, "0123"); + TEST("a0123 b", "a%-*sb", -5, "0123"); + + TEST("a012b", "a%.3sb", "0123"); + TEST("a012b", "a%.*sb", 3, "0123"); + TEST("a0123b", "a%.*sb", -3, "0123"); + + TEST("a 012b", "a%5.3sb", "0123"); + TEST("a 012b", "a%5.*sb", 3, "0123"); + TEST("a 012b", "a%*.3sb", 5, "0123"); + TEST("a 012b", "a%*.*sb", 5, 3, "0123"); + TEST("a 0123b", "a%*.*sb", 5, -3, "0123"); + + TEST("_abcd_", "_%x_", 0xabcd); + TEST("_0xabcd_", "_%#x_", 0xabcd); + TEST("_1234_", "_%o_", 01234); + TEST("_01234_", "_%#o_", 01234); + TEST("_1234_", "_%u_", 1234); + + TEST("_1234_", "_%d_", 1234); + TEST("_ 1234_", "_% d_", 1234); + TEST("_+1234_", "_%+d_", 1234); + TEST("_-1234_", "_%d_", -1234); + TEST("_-1234_", "_% d_", -1234); + TEST("_-1234_", "_%+d_", -1234); + + TEST("_-1234_", "_%d_", -1234); + TEST("_1234_", "_%d_", 1234); + TEST("_-1234_", "_%i_", -1234); + TEST("_1234_", "_%i_", 1234); + TEST("_01234_", "_%#o_", 01234); + TEST("_1234_", "_%u_", 1234); + TEST("_0x1234abc_", "_%#x_", 0x1234abc); + TEST("_0X1234ABC_", "_%#X_", 0x1234abc); + TEST("_c_", "_%c_", 'c'); + TEST("_string_", "_%s_", "string"); + TEST("_0x42_", "_%p_", ((void *)0x42)); + + TEST("_-1234_", "_%ld_", ((long)-1234)); + TEST("_1234_", "_%ld_", ((long)1234)); + TEST("_-1234_", "_%li_", ((long)-1234)); + TEST("_1234_", "_%li_", ((long)1234)); + TEST("_01234_", "_%#lo_", ((long)01234)); + TEST("_1234_", "_%lu_", ((long)1234)); + TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc)); + TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC)); + + TEST("_-1234_", "_%lld_", ((long long)-1234)); + TEST("_1234_", "_%lld_", ((long long)1234)); + TEST("_-1234_", "_%lli_", ((long long)-1234)); + TEST("_1234_", "_%lli_", ((long long)1234)); + TEST("_01234_", "_%#llo_", ((long long)01234)); + TEST("_1234_", "_%llu_", ((long long)1234)); + TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc)); + TEST("_0X1234ABC_", "_%#llX_", ((long long)0x1234ABC)); + + TEST("_-1234_", "_%qd_", ((long long)-1234)); + TEST("_1234_", "_%qd_", ((long long)1234)); + TEST("_-1234_", "_%qi_", ((long long)-1234)); + TEST("_1234_", "_%qi_", ((long long)1234)); + TEST("_01234_", "_%#qo_", ((long 
long)01234)); + TEST("_1234_", "_%qu_", ((long long)1234)); + TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc)); + TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC)); + + TEST("_-1234_", "_%jd_", ((intmax_t)-1234)); + TEST("_1234_", "_%jd_", ((intmax_t)1234)); + TEST("_-1234_", "_%ji_", ((intmax_t)-1234)); + TEST("_1234_", "_%ji_", ((intmax_t)1234)); + TEST("_01234_", "_%#jo_", ((intmax_t)01234)); + TEST("_1234_", "_%ju_", ((intmax_t)1234)); + TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc)); + TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC)); + + TEST("_1234_", "_%td_", ((ptrdiff_t)1234)); + TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234)); + TEST("_1234_", "_%ti_", ((ptrdiff_t)1234)); + TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234)); + + TEST("_-1234_", "_%zd_", ((ssize_t)-1234)); + TEST("_1234_", "_%zd_", ((ssize_t)1234)); + TEST("_-1234_", "_%zi_", ((ssize_t)-1234)); + TEST("_1234_", "_%zi_", ((ssize_t)1234)); + TEST("_01234_", "_%#zo_", ((ssize_t)01234)); + TEST("_1234_", "_%zu_", ((ssize_t)1234)); + TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc)); + TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC)); +#undef BUFLEN +} +TEST_END + +int +main(void) { + return test( + test_malloc_strtoumax_no_endptr, + test_malloc_strtoumax, + test_malloc_snprintf_truncated, + test_malloc_snprintf); +} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/math.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/math.c similarity index 97% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/math.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/math.c index ebec77a..09ef20c 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/math.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/math.c @@ -1,39 +1,42 @@ #include "test/jemalloc_test.h" -#define MAX_REL_ERR 1.0e-9 -#define MAX_ABS_ERR 1.0e-9 +#define MAX_REL_ERR 1.0e-9 +#define MAX_ABS_ERR 1.0e-9 #include +#ifdef __PGI +#undef INFINITY +#endif + #ifndef INFINITY -#define INFINITY (DBL_MAX + DBL_MAX) +#define INFINITY (DBL_MAX + DBL_MAX) #endif static bool -double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) -{ +double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) { double rel_err; - if (fabs(a - b) < max_abs_err) - return (true); + if (fabs(a - b) < max_abs_err) { + return true; + } rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a); return (rel_err < max_rel_err); } static uint64_t -factorial(unsigned x) -{ +factorial(unsigned x) { uint64_t ret = 1; unsigned i; - for (i = 2; i <= x; i++) + for (i = 2; i <= x; i++) { ret *= (uint64_t)i; + } - return (ret); + return ret; } -TEST_BEGIN(test_ln_gamma_factorial) -{ +TEST_BEGIN(test_ln_gamma_factorial) { unsigned x; /* exp(ln_gamma(x)) == (x-1)! for integer x. 
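For example, ln_gamma(4.0) == ln(6.0), since 3! == 6.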
*/ @@ -184,8 +187,7 @@ static const double ln_gamma_misc_expected[] = { 359.13420536957539753 }; -TEST_BEGIN(test_ln_gamma_misc) -{ +TEST_BEGIN(test_ln_gamma_misc) { unsigned i; for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) { @@ -235,8 +237,7 @@ static const double pt_norm_expected[] = { 1.88079360815125041, 2.05374891063182208, 2.32634787404084076 }; -TEST_BEGIN(test_pt_norm) -{ +TEST_BEGIN(test_pt_norm) { unsigned i; for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) { @@ -285,8 +286,7 @@ static const double pt_chi2_expected[] = { 1046.4872561869577, 1063.5717461999654, 1107.0741966053859 }; -TEST_BEGIN(test_pt_chi2) -{ +TEST_BEGIN(test_pt_chi2) { unsigned i, j; unsigned e = 0; @@ -347,8 +347,7 @@ static const double pt_gamma_expected[] = { 4.7230515633946677, 5.6417477865306020, 8.4059469148854635 }; -TEST_BEGIN(test_pt_gamma_shape) -{ +TEST_BEGIN(test_pt_gamma_shape) { unsigned i, j; unsigned e = 0; @@ -367,8 +366,7 @@ TEST_BEGIN(test_pt_gamma_shape) } TEST_END -TEST_BEGIN(test_pt_gamma_scale) -{ +TEST_BEGIN(test_pt_gamma_scale) { double shape = 1.0; double ln_gamma_shape = ln_gamma(shape); @@ -381,14 +379,12 @@ TEST_BEGIN(test_pt_gamma_scale) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_ln_gamma_factorial, test_ln_gamma_misc, test_pt_norm, test_pt_chi2, test_pt_gamma_shape, - test_pt_gamma_scale)); + test_pt_gamma_scale); } diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/mq.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/mq.c similarity index 82% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/mq.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/mq.c index bde2a48..57a4d54 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/mq.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/mq.c @@ -1,7 +1,7 @@ #include "test/jemalloc_test.h" -#define NSENDERS 3 -#define NMSGS 100000 +#define NSENDERS 3 +#define NMSGS 100000 typedef struct mq_msg_s mq_msg_t; struct mq_msg_s { @@ -9,8 +9,7 @@ struct mq_msg_s { }; mq_gen(static, mq_, mq_t, mq_msg_t, link) -TEST_BEGIN(test_mq_basic) -{ +TEST_BEGIN(test_mq_basic) { mq_t mq; mq_msg_t msg; @@ -31,8 +30,7 @@ TEST_BEGIN(test_mq_basic) TEST_END static void * -thd_receiver_start(void *arg) -{ +thd_receiver_start(void *arg) { mq_t *mq = (mq_t *)arg; unsigned i; @@ -41,12 +39,11 @@ thd_receiver_start(void *arg) assert_ptr_not_null(msg, "mq_get() should never return NULL"); dallocx(msg, 0); } - return (NULL); + return NULL; } static void * -thd_sender_start(void *arg) -{ +thd_sender_start(void *arg) { mq_t *mq = (mq_t *)arg; unsigned i; @@ -58,11 +55,10 @@ thd_sender_start(void *arg) msg = (mq_msg_t *)p; mq_put(mq, msg); } - return (NULL); + return NULL; } -TEST_BEGIN(test_mq_threaded) -{ +TEST_BEGIN(test_mq_threaded) { mq_t mq; thd_t receiver; thd_t senders[NSENDERS]; @@ -71,23 +67,23 @@ TEST_BEGIN(test_mq_threaded) assert_false(mq_init(&mq), "Unexpected mq_init() failure"); thd_create(&receiver, thd_receiver_start, (void *)&mq); - for (i = 0; i < NSENDERS; i++) + for (i = 0; i < NSENDERS; i++) { thd_create(&senders[i], thd_sender_start, (void *)&mq); + } thd_join(receiver, NULL); - for (i = 0; i < NSENDERS; i++) + for (i = 0; i < NSENDERS; i++) { thd_join(senders[i], NULL); + } mq_fini(&mq); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_mq_basic, - test_mq_threaded)); + test_mq_threaded); } diff --git 
a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/mtx.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/mtx.c similarity index 74% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/mtx.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/mtx.c index 96ff694..424587b 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/mtx.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/mtx.c @@ -1,10 +1,9 @@ #include "test/jemalloc_test.h" -#define NTHREADS 2 -#define NINCRS 2000000 +#define NTHREADS 2 +#define NINCRS 2000000 -TEST_BEGIN(test_mtx_basic) -{ +TEST_BEGIN(test_mtx_basic) { mtx_t mtx; assert_false(mtx_init(&mtx), "Unexpected mtx_init() failure"); @@ -20,8 +19,7 @@ typedef struct { } thd_start_arg_t; static void * -thd_start(void *varg) -{ +thd_start(void *varg) { thd_start_arg_t *arg = (thd_start_arg_t *)varg; unsigned i; @@ -30,31 +28,30 @@ thd_start(void *varg) arg->x++; mtx_unlock(&arg->mtx); } - return (NULL); + return NULL; } -TEST_BEGIN(test_mtx_race) -{ +TEST_BEGIN(test_mtx_race) { thd_start_arg_t arg; thd_t thds[NTHREADS]; unsigned i; assert_false(mtx_init(&arg.mtx), "Unexpected mtx_init() failure"); arg.x = 0; - for (i = 0; i < NTHREADS; i++) + for (i = 0; i < NTHREADS; i++) { thd_create(&thds[i], thd_start, (void *)&arg); - for (i = 0; i < NTHREADS; i++) + } + for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); + } assert_u_eq(arg.x, NTHREADS * NINCRS, "Race-related counter corruption"); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_mtx_basic, - test_mtx_race)); + test_mtx_race); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/nstime.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/nstime.c new file mode 100644 index 0000000..f313780 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/nstime.c @@ -0,0 +1,249 @@ +#include "test/jemalloc_test.h" + +#define BILLION UINT64_C(1000000000) + +TEST_BEGIN(test_nstime_init) { + nstime_t nst; + + nstime_init(&nst, 42000000043); + assert_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read"); + assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read"); + assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read"); +} +TEST_END + +TEST_BEGIN(test_nstime_init2) { + nstime_t nst; + + nstime_init2(&nst, 42, 43); + assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read"); + assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read"); +} +TEST_END + +TEST_BEGIN(test_nstime_copy) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_init(&nstb, 0); + nstime_copy(&nstb, &nsta); + assert_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied"); + assert_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied"); +} +TEST_END + +TEST_BEGIN(test_nstime_compare) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal"); + assert_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal"); + + nstime_init2(&nstb, 42, 42); + assert_d_eq(nstime_compare(&nsta, &nstb), 1, + "nsta should be greater than nstb"); + assert_d_eq(nstime_compare(&nstb, &nsta), -1, + "nstb should be less than nsta"); + + nstime_init2(&nstb, 42, 44); + assert_d_eq(nstime_compare(&nsta, &nstb), -1, + "nsta should be less than nstb"); + assert_d_eq(nstime_compare(&nstb, &nsta), 1, + "nstb should be greater than nsta"); 
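+ /* Once the sec fields differ, comparison is decided by sec regardless of nsec. */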
+ + nstime_init2(&nstb, 41, BILLION - 1); + assert_d_eq(nstime_compare(&nsta, &nstb), 1, + "nsta should be greater than nstb"); + assert_d_eq(nstime_compare(&nstb, &nsta), -1, + "nstb should be less than nsta"); + + nstime_init2(&nstb, 43, 0); + assert_d_eq(nstime_compare(&nsta, &nstb), -1, + "nsta should be less than nstb"); + assert_d_eq(nstime_compare(&nstb, &nsta), 1, + "nstb should be greater than nsta"); +} +TEST_END + +TEST_BEGIN(test_nstime_add) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_add(&nsta, &nstb); + nstime_init2(&nstb, 84, 86); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect addition result"); + + nstime_init2(&nsta, 42, BILLION - 1); + nstime_copy(&nstb, &nsta); + nstime_add(&nsta, &nstb); + nstime_init2(&nstb, 85, BILLION - 2); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect addition result"); +} +TEST_END + +TEST_BEGIN(test_nstime_iadd) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, BILLION - 1); + nstime_iadd(&nsta, 1); + nstime_init2(&nstb, 43, 0); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect addition result"); + + nstime_init2(&nsta, 42, 1); + nstime_iadd(&nsta, BILLION + 1); + nstime_init2(&nstb, 43, 2); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect addition result"); +} +TEST_END + +TEST_BEGIN(test_nstime_subtract) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_subtract(&nsta, &nstb); + nstime_init(&nstb, 0); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect subtraction result"); + + nstime_init2(&nsta, 42, 43); + nstime_init2(&nstb, 41, 44); + nstime_subtract(&nsta, &nstb); + nstime_init2(&nstb, 0, BILLION - 1); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect subtraction result"); +} +TEST_END + +TEST_BEGIN(test_nstime_isubtract) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_isubtract(&nsta, 42*BILLION + 43); + nstime_init(&nstb, 0); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect subtraction result"); + + nstime_init2(&nsta, 42, 43); + nstime_isubtract(&nsta, 41*BILLION + 44); + nstime_init2(&nstb, 0, BILLION - 1); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect subtraction result"); +} +TEST_END + +TEST_BEGIN(test_nstime_imultiply) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_imultiply(&nsta, 10); + nstime_init2(&nstb, 420, 430); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect multiplication result"); + + nstime_init2(&nsta, 42, 666666666); + nstime_imultiply(&nsta, 3); + nstime_init2(&nstb, 127, 999999998); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect multiplication result"); +} +TEST_END + +TEST_BEGIN(test_nstime_idivide) { + nstime_t nsta, nstb; + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_imultiply(&nsta, 10); + nstime_idivide(&nsta, 10); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect division result"); + + nstime_init2(&nsta, 42, 666666666); + nstime_copy(&nstb, &nsta); + nstime_imultiply(&nsta, 3); + nstime_idivide(&nsta, 3); + assert_d_eq(nstime_compare(&nsta, &nstb), 0, + "Incorrect division result"); +} +TEST_END + +TEST_BEGIN(test_nstime_divide) { + nstime_t nsta, nstb, nstc; + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_imultiply(&nsta, 10); + assert_u64_eq(nstime_divide(&nsta, &nstb), 10, + "Incorrect division result"); + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_imultiply(&nsta, 10); + 
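/* nstime_divide truncates: (10 * t) + 1ns still divides by t to 10. */ +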
nstime_init(&nstc, 1); + nstime_add(&nsta, &nstc); + assert_u64_eq(nstime_divide(&nsta, &nstb), 10, + "Incorrect division result"); + + nstime_init2(&nsta, 42, 43); + nstime_copy(&nstb, &nsta); + nstime_imultiply(&nsta, 10); + nstime_init(&nstc, 1); + nstime_subtract(&nsta, &nstc); + assert_u64_eq(nstime_divide(&nsta, &nstb), 9, + "Incorrect division result"); +} +TEST_END + +TEST_BEGIN(test_nstime_monotonic) { + nstime_monotonic(); +} +TEST_END + +TEST_BEGIN(test_nstime_update) { + nstime_t nst; + + nstime_init(&nst, 0); + + assert_false(nstime_update(&nst), "Basic time update failed."); + + /* Only Rip Van Winkle sleeps this long. */ + { + nstime_t addend; + nstime_init2(&addend, 631152000, 0); + nstime_add(&nst, &addend); + } + { + nstime_t nst0; + nstime_copy(&nst0, &nst); + assert_true(nstime_update(&nst), + "Update should detect time roll-back."); + assert_d_eq(nstime_compare(&nst, &nst0), 0, + "Time should not have been modified"); + } +} +TEST_END + +int +main(void) { + return test( + test_nstime_init, + test_nstime_init2, + test_nstime_copy, + test_nstime_compare, + test_nstime_add, + test_nstime_iadd, + test_nstime_subtract, + test_nstime_isubtract, + test_nstime_imultiply, + test_nstime_idivide, + test_nstime_divide, + test_nstime_monotonic, + test_nstime_update); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/pack.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/pack.c new file mode 100644 index 0000000..fc188b0 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/pack.c @@ -0,0 +1,166 @@ +#include "test/jemalloc_test.h" + +/* + * Size class that is a divisor of the page size, ideally 4+ regions per run. + */ +#if LG_PAGE <= 14 +#define SZ (ZU(1) << (LG_PAGE - 2)) +#else +#define SZ ZU(4096) +#endif + +/* + * Number of slabs to consume at high water mark. Should be at least 2 so that + * if mmap()ed memory grows downward, downward growth of mmap()ed memory is + * tested. 
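+ * The fill loop in test_pack() allocates nregs_per_run regions in each of + * these NSLABS slabs before any are freed.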
+ */ +#define NSLABS 8 + +static unsigned +binind_compute(void) { + size_t sz; + unsigned nbins, i; + + sz = sizeof(nbins); + assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, + "Unexpected mallctl failure"); + + for (i = 0; i < nbins; i++) { + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + size_t size; + + assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, + &miblen), 0, "Unexpected mallctlnametomb failure"); + mib[2] = (size_t)i; + + sz = sizeof(size); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL, + 0), 0, "Unexpected mallctlbymib failure"); + if (size == SZ) { + return i; + } + } + + test_fail("Unable to compute nregs_per_run"); + return 0; +} + +static size_t +nregs_per_run_compute(void) { + uint32_t nregs; + size_t sz; + unsigned binind = binind_compute(); + size_t mib[4]; + size_t miblen = sizeof(mib)/sizeof(size_t); + + assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, + "Unexpected mallctlnametomb failure"); + mib[2] = (size_t)binind; + sz = sizeof(nregs); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL, + 0), 0, "Unexpected mallctlbymib failure"); + return nregs; +} + +static unsigned +arenas_create_mallctl(void) { + unsigned arena_ind; + size_t sz; + + sz = sizeof(arena_ind); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Error in arenas.create"); + + return arena_ind; +} + +static void +arena_reset_mallctl(unsigned arena_ind) { + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + + assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +TEST_BEGIN(test_pack) { + bool prof_enabled; + size_t sz = sizeof(prof_enabled); + if (mallctl("opt.prof", (void *)&prof_enabled, &sz, NULL, 0) == 0) { + test_skip_if(prof_enabled); + } + + unsigned arena_ind = arenas_create_mallctl(); + size_t nregs_per_run = nregs_per_run_compute(); + size_t nregs = nregs_per_run * NSLABS; + VARIABLE_ARRAY(void *, ptrs, nregs); + size_t i, j, offset; + + /* Fill matrix. */ + for (i = offset = 0; i < NSLABS; i++) { + for (j = 0; j < nregs_per_run; j++) { + void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | + MALLOCX_TCACHE_NONE); + assert_ptr_not_null(p, + "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |" + " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu", + SZ, arena_ind, i, j); + ptrs[(i * nregs_per_run) + j] = p; + } + } + + /* + * Free all but one region of each run, but rotate which region is + * preserved, so that subsequent allocations exercise the within-run + * layout policy. + */ + offset = 0; + for (i = offset = 0; + i < NSLABS; + i++, offset = (offset + 1) % nregs_per_run) { + for (j = 0; j < nregs_per_run; j++) { + void *p = ptrs[(i * nregs_per_run) + j]; + if (offset == j) { + continue; + } + dallocx(p, MALLOCX_ARENA(arena_ind) | + MALLOCX_TCACHE_NONE); + } + } + + /* + * Logically refill matrix, skipping preserved regions and verifying + * that the matrix is unmodified. + */ + offset = 0; + for (i = offset = 0; + i < NSLABS; + i++, offset = (offset + 1) % nregs_per_run) { + for (j = 0; j < nregs_per_run; j++) { + void *p; + + if (offset == j) { + continue; + } + p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | + MALLOCX_TCACHE_NONE); + assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j], + "Unexpected refill discrepancy, run=%zu, reg=%zu\n", + i, j); + } + } + + /* Clean up. 
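arena.&lt;i&gt;.reset discards all of the arena's allocations, including the preserved regions.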
*/ + arena_reset_mallctl(arena_ind); +} +TEST_END + +int +main(void) { + return test( + test_pack); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/pack.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/pack.sh new file mode 100644 index 0000000..6f45148 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/pack.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +# Immediately purge to minimize fragmentation. +export MALLOC_CONF="dirty_decay_ms:0,muzzy_decay_ms:0" diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/pages.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/pages.c new file mode 100644 index 0000000..ee729ee --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/pages.c @@ -0,0 +1,29 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_pages_huge) { + size_t alloc_size; + bool commit; + void *pages, *hugepage; + + alloc_size = HUGEPAGE * 2 - PAGE; + commit = true; + pages = pages_map(NULL, alloc_size, PAGE, &commit); + assert_ptr_not_null(pages, "Unexpected pages_map() error"); + + if (init_system_thp_mode == thp_mode_default) { + hugepage = (void *)(ALIGNMENT_CEILING((uintptr_t)pages, HUGEPAGE)); + assert_b_ne(pages_huge(hugepage, HUGEPAGE), have_madvise_huge, + "Unexpected pages_huge() result"); + assert_false(pages_nohuge(hugepage, HUGEPAGE), + "Unexpected pages_nohuge() result"); + } + + pages_unmap(pages, alloc_size); +} +TEST_END + +int +main(void) { + return test( + test_pages_huge); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/ph.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/ph.c new file mode 100644 index 0000000..88bf56f --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/ph.c @@ -0,0 +1,318 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/ph.h" + +typedef struct node_s node_t; + +struct node_s { +#define NODE_MAGIC 0x9823af7e + uint32_t magic; + phn(node_t) link; + uint64_t key; +}; + +static int +node_cmp(const node_t *a, const node_t *b) { + int ret; + + ret = (a->key > b->key) - (a->key < b->key); + if (ret == 0) { + /* + * Duplicates are not allowed in the heap, so force an + * arbitrary ordering for non-identical items with equal keys. 
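+ * Comparing the node addresses gives a stable, if arbitrary, total order.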
+ */ + ret = (((uintptr_t)a) > ((uintptr_t)b)) + - (((uintptr_t)a) < ((uintptr_t)b)); + } + return ret; +} + +static int +node_cmp_magic(const node_t *a, const node_t *b) { + + assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); + assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); + + return node_cmp(a, b); +} + +typedef ph(node_t) heap_t; +ph_gen(static, heap_, heap_t, node_t, link, node_cmp_magic); + +static void +node_print(const node_t *node, unsigned depth) { + unsigned i; + node_t *leftmost_child, *sibling; + + for (i = 0; i < depth; i++) { + malloc_printf("\t"); + } + malloc_printf("%2"FMTu64"\n", node->key); + + leftmost_child = phn_lchild_get(node_t, link, node); + if (leftmost_child == NULL) { + return; + } + node_print(leftmost_child, depth + 1); + + for (sibling = phn_next_get(node_t, link, leftmost_child); sibling != + NULL; sibling = phn_next_get(node_t, link, sibling)) { + node_print(sibling, depth + 1); + } +} + +static void +heap_print(const heap_t *heap) { + node_t *auxelm; + + malloc_printf("vvv heap %p vvv\n", heap); + if (heap->ph_root == NULL) { + goto label_return; + } + + node_print(heap->ph_root, 0); + + for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL; + auxelm = phn_next_get(node_t, link, auxelm)) { + assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, + link, auxelm)), auxelm, + "auxelm's prev doesn't link to auxelm"); + node_print(auxelm, 0); + } + +label_return: + malloc_printf("^^^ heap %p ^^^\n", heap); +} + +static unsigned +node_validate(const node_t *node, const node_t *parent) { + unsigned nnodes = 1; + node_t *leftmost_child, *sibling; + + if (parent != NULL) { + assert_d_ge(node_cmp_magic(node, parent), 0, + "Child is less than parent"); + } + + leftmost_child = phn_lchild_get(node_t, link, node); + if (leftmost_child == NULL) { + return nnodes; + } + assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child), + (void *)node, "Leftmost child does not link to node"); + nnodes += node_validate(leftmost_child, node); + + for (sibling = phn_next_get(node_t, link, leftmost_child); sibling != + NULL; sibling = phn_next_get(node_t, link, sibling)) { + assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, + link, sibling)), sibling, + "sibling's prev doesn't link to sibling"); + nnodes += node_validate(sibling, node); + } + return nnodes; +} + +static unsigned +heap_validate(const heap_t *heap) { + unsigned nnodes = 0; + node_t *auxelm; + + if (heap->ph_root == NULL) { + goto label_return; + } + + nnodes += node_validate(heap->ph_root, NULL); + + for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL; + auxelm = phn_next_get(node_t, link, auxelm)) { + assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, + link, auxelm)), auxelm, + "auxelm's prev doesn't link to auxelm"); + nnodes += node_validate(auxelm, NULL); + } + +label_return: + if (false) { + heap_print(heap); + } + return nnodes; +} + +TEST_BEGIN(test_ph_empty) { + heap_t heap; + + heap_new(&heap); + assert_true(heap_empty(&heap), "Heap should be empty"); + assert_ptr_null(heap_first(&heap), "Unexpected node"); + assert_ptr_null(heap_any(&heap), "Unexpected node"); +} +TEST_END + +static void +node_remove(heap_t *heap, node_t *node) { + heap_remove(heap, node); + + node->magic = 0; +} + +static node_t * +node_remove_first(heap_t *heap) { + node_t *node = heap_remove_first(heap); + node->magic = 0; + return node; +} + +static node_t * +node_remove_any(heap_t *heap) { + node_t *node = heap_remove_any(heap); + node->magic = 0; + return 
node; +} + +TEST_BEGIN(test_ph_random) { +#define NNODES 25 +#define NBAGS 250 +#define SEED 42 + sfmt_t *sfmt; + uint64_t bag[NNODES]; + heap_t heap; + node_t nodes[NNODES]; + unsigned i, j, k; + + sfmt = init_gen_rand(SEED); + for (i = 0; i < NBAGS; i++) { + switch (i) { + case 0: + /* Insert in order. */ + for (j = 0; j < NNODES; j++) { + bag[j] = j; + } + break; + case 1: + /* Insert in reverse order. */ + for (j = 0; j < NNODES; j++) { + bag[j] = NNODES - j - 1; + } + break; + default: + for (j = 0; j < NNODES; j++) { + bag[j] = gen_rand64_range(sfmt, NNODES); + } + } + + for (j = 1; j <= NNODES; j++) { + /* Initialize heap and nodes. */ + heap_new(&heap); + assert_u_eq(heap_validate(&heap), 0, + "Incorrect node count"); + for (k = 0; k < j; k++) { + nodes[k].magic = NODE_MAGIC; + nodes[k].key = bag[k]; + } + + /* Insert nodes. */ + for (k = 0; k < j; k++) { + heap_insert(&heap, &nodes[k]); + if (i % 13 == 12) { + assert_ptr_not_null(heap_any(&heap), + "Heap should not be empty"); + /* Trigger merging. */ + assert_ptr_not_null(heap_first(&heap), + "Heap should not be empty"); + } + assert_u_eq(heap_validate(&heap), k + 1, + "Incorrect node count"); + } + + assert_false(heap_empty(&heap), + "Heap should not be empty"); + + /* Remove nodes. */ + switch (i % 6) { + case 0: + for (k = 0; k < j; k++) { + assert_u_eq(heap_validate(&heap), j - k, + "Incorrect node count"); + node_remove(&heap, &nodes[k]); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + } + break; + case 1: + for (k = j; k > 0; k--) { + node_remove(&heap, &nodes[k-1]); + assert_u_eq(heap_validate(&heap), k - 1, + "Incorrect node count"); + } + break; + case 2: { + node_t *prev = NULL; + for (k = 0; k < j; k++) { + node_t *node = node_remove_first(&heap); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + if (prev != NULL) { + assert_d_ge(node_cmp(node, + prev), 0, + "Bad removal order"); + } + prev = node; + } + break; + } case 3: { + node_t *prev = NULL; + for (k = 0; k < j; k++) { + node_t *node = heap_first(&heap); + assert_u_eq(heap_validate(&heap), j - k, + "Incorrect node count"); + if (prev != NULL) { + assert_d_ge(node_cmp(node, + prev), 0, + "Bad removal order"); + } + node_remove(&heap, node); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + prev = node; + } + break; + } case 4: { + for (k = 0; k < j; k++) { + node_remove_any(&heap); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + } + break; + } case 5: { + for (k = 0; k < j; k++) { + node_t *node = heap_any(&heap); + assert_u_eq(heap_validate(&heap), j - k, + "Incorrect node count"); + node_remove(&heap, node); + assert_u_eq(heap_validate(&heap), j - k + - 1, "Incorrect node count"); + } + break; + } default: + not_reached(); + } + + assert_ptr_null(heap_first(&heap), + "Heap should be empty"); + assert_ptr_null(heap_any(&heap), + "Heap should be empty"); + assert_true(heap_empty(&heap), "Heap should be empty"); + } + } + fini_gen_rand(sfmt); +#undef NNODES +#undef SEED +} +TEST_END + +int +main(void) { + return test( + test_ph_empty, + test_ph_random); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prng.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prng.c new file mode 100644 index 0000000..b5795c2 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prng.c @@ -0,0 +1,237 @@ +#include "test/jemalloc_test.h" + +static void +test_prng_lg_range_u32(bool atomic) { + atomic_u32_t 
sa, sb; + uint32_t ra, rb; + unsigned lg_range; + + atomic_store_u32(&sa, 42, ATOMIC_RELAXED); + ra = prng_lg_range_u32(&sa, 32, atomic); + atomic_store_u32(&sa, 42, ATOMIC_RELAXED); + rb = prng_lg_range_u32(&sa, 32, atomic); + assert_u32_eq(ra, rb, + "Repeated generation should produce repeated results"); + + atomic_store_u32(&sb, 42, ATOMIC_RELAXED); + rb = prng_lg_range_u32(&sb, 32, atomic); + assert_u32_eq(ra, rb, + "Equivalent generation should produce equivalent results"); + + atomic_store_u32(&sa, 42, ATOMIC_RELAXED); + ra = prng_lg_range_u32(&sa, 32, atomic); + rb = prng_lg_range_u32(&sa, 32, atomic); + assert_u32_ne(ra, rb, + "Full-width results must not immediately repeat"); + + atomic_store_u32(&sa, 42, ATOMIC_RELAXED); + ra = prng_lg_range_u32(&sa, 32, atomic); + for (lg_range = 31; lg_range > 0; lg_range--) { + atomic_store_u32(&sb, 42, ATOMIC_RELAXED); + rb = prng_lg_range_u32(&sb, lg_range, atomic); + assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)), + 0, "High order bits should be 0, lg_range=%u", lg_range); + assert_u32_eq(rb, (ra >> (32 - lg_range)), + "Expected high order bits of full-width result, " + "lg_range=%u", lg_range); + } +} + +static void +test_prng_lg_range_u64(void) { + uint64_t sa, sb, ra, rb; + unsigned lg_range; + + sa = 42; + ra = prng_lg_range_u64(&sa, 64); + sa = 42; + rb = prng_lg_range_u64(&sa, 64); + assert_u64_eq(ra, rb, + "Repeated generation should produce repeated results"); + + sb = 42; + rb = prng_lg_range_u64(&sb, 64); + assert_u64_eq(ra, rb, + "Equivalent generation should produce equivalent results"); + + sa = 42; + ra = prng_lg_range_u64(&sa, 64); + rb = prng_lg_range_u64(&sa, 64); + assert_u64_ne(ra, rb, + "Full-width results must not immediately repeat"); + + sa = 42; + ra = prng_lg_range_u64(&sa, 64); + for (lg_range = 63; lg_range > 0; lg_range--) { + sb = 42; + rb = prng_lg_range_u64(&sb, lg_range); + assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)), + 0, "High order bits should be 0, lg_range=%u", lg_range); + assert_u64_eq(rb, (ra >> (64 - lg_range)), + "Expected high order bits of full-width result, " + "lg_range=%u", lg_range); + } +} + +static void +test_prng_lg_range_zu(bool atomic) { + atomic_zu_t sa, sb; + size_t ra, rb; + unsigned lg_range; + + atomic_store_zu(&sa, 42, ATOMIC_RELAXED); + ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + atomic_store_zu(&sa, 42, ATOMIC_RELAXED); + rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + assert_zu_eq(ra, rb, + "Repeated generation should produce repeated results"); + + atomic_store_zu(&sb, 42, ATOMIC_RELAXED); + rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + assert_zu_eq(ra, rb, + "Equivalent generation should produce equivalent results"); + + atomic_store_zu(&sa, 42, ATOMIC_RELAXED); + ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + assert_zu_ne(ra, rb, + "Full-width results must not immediately repeat"); + + atomic_store_zu(&sa, 42, ATOMIC_RELAXED); + ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); + for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0; + lg_range--) { + atomic_store_zu(&sb, 42, ATOMIC_RELAXED); + rb = prng_lg_range_zu(&sb, lg_range, atomic); + assert_zu_eq((rb & (SIZE_T_MAX << lg_range)), + 0, "High order bits should be 0, lg_range=%u", lg_range); + assert_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - + lg_range)), "Expected high order bits of full-width " + "result, 
lg_range=%u", lg_range); + } +} + +TEST_BEGIN(test_prng_lg_range_u32_nonatomic) { + test_prng_lg_range_u32(false); +} +TEST_END + +TEST_BEGIN(test_prng_lg_range_u32_atomic) { + test_prng_lg_range_u32(true); +} +TEST_END + +TEST_BEGIN(test_prng_lg_range_u64_nonatomic) { + test_prng_lg_range_u64(); +} +TEST_END + +TEST_BEGIN(test_prng_lg_range_zu_nonatomic) { + test_prng_lg_range_zu(false); +} +TEST_END + +TEST_BEGIN(test_prng_lg_range_zu_atomic) { + test_prng_lg_range_zu(true); +} +TEST_END + +static void +test_prng_range_u32(bool atomic) { + uint32_t range; +#define MAX_RANGE 10000000 +#define RANGE_STEP 97 +#define NREPS 10 + + for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { + atomic_u32_t s; + unsigned rep; + + atomic_store_u32(&s, range, ATOMIC_RELAXED); + for (rep = 0; rep < NREPS; rep++) { + uint32_t r = prng_range_u32(&s, range, atomic); + + assert_u32_lt(r, range, "Out of range"); + } + } +} + +static void +test_prng_range_u64(void) { + uint64_t range; +#define MAX_RANGE 10000000 +#define RANGE_STEP 97 +#define NREPS 10 + + for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { + uint64_t s; + unsigned rep; + + s = range; + for (rep = 0; rep < NREPS; rep++) { + uint64_t r = prng_range_u64(&s, range); + + assert_u64_lt(r, range, "Out of range"); + } + } +} + +static void +test_prng_range_zu(bool atomic) { + size_t range; +#define MAX_RANGE 10000000 +#define RANGE_STEP 97 +#define NREPS 10 + + for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { + atomic_zu_t s; + unsigned rep; + + atomic_store_zu(&s, range, ATOMIC_RELAXED); + for (rep = 0; rep < NREPS; rep++) { + size_t r = prng_range_zu(&s, range, atomic); + + assert_zu_lt(r, range, "Out of range"); + } + } +} + +TEST_BEGIN(test_prng_range_u32_nonatomic) { + test_prng_range_u32(false); +} +TEST_END + +TEST_BEGIN(test_prng_range_u32_atomic) { + test_prng_range_u32(true); +} +TEST_END + +TEST_BEGIN(test_prng_range_u64_nonatomic) { + test_prng_range_u64(); +} +TEST_END + +TEST_BEGIN(test_prng_range_zu_nonatomic) { + test_prng_range_zu(false); +} +TEST_END + +TEST_BEGIN(test_prng_range_zu_atomic) { + test_prng_range_zu(true); +} +TEST_END + +int +main(void) { + return test( + test_prng_lg_range_u32_nonatomic, + test_prng_lg_range_u32_atomic, + test_prng_lg_range_u64_nonatomic, + test_prng_lg_range_zu_nonatomic, + test_prng_lg_range_zu_atomic, + test_prng_range_u32_nonatomic, + test_prng_range_u32_atomic, + test_prng_range_u64_nonatomic, + test_prng_range_zu_nonatomic, + test_prng_range_zu_atomic); +} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_accum.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_accum.c similarity index 68% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_accum.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_accum.c index fd229e0..2522006 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_accum.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_accum.c @@ -1,36 +1,27 @@ #include "test/jemalloc_test.h" -#define NTHREADS 4 -#define NALLOCS_PER_THREAD 50 -#define DUMP_INTERVAL 1 -#define BT_COUNT_CHECK_INTERVAL 5 - -#ifdef JEMALLOC_PROF -const char *malloc_conf = - "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0"; -#endif +#define NTHREADS 4 +#define NALLOCS_PER_THREAD 50 +#define DUMP_INTERVAL 1 +#define BT_COUNT_CHECK_INTERVAL 5 static int -prof_dump_open_intercept(bool propagate_err, const char *filename) -{ 
+prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); - return (fd); + return fd; } static void * -alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) -{ - - return (btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration)); +alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) { + return btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration); } static void * -thd_start(void *varg) -{ +thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; size_t bt_count_prev, bt_count; unsigned i_prev, i; @@ -55,11 +46,10 @@ thd_start(void *varg) } } - return (NULL); + return NULL; } -TEST_BEGIN(test_idump) -{ +TEST_BEGIN(test_idump) { bool active; thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; @@ -68,8 +58,9 @@ TEST_BEGIN(test_idump) test_skip_if(!config_prof); active = true; - assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), - 0, "Unexpected mallctl failure while activating profiling"); + assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, + sizeof(active)), 0, + "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; @@ -77,15 +68,14 @@ TEST_BEGIN(test_idump) thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } - for (i = 0; i < NTHREADS; i++) + for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); + } } TEST_END int -main(void) -{ - - return (test( - test_idump)); +main(void) { + return test_no_reentrancy( + test_idump); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_accum.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_accum.sh new file mode 100644 index 0000000..b3e13fc --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_accum.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0" +fi diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_active.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_active.c similarity index 78% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_active.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_active.c index 8149095..850a24a 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_active.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_active.c @@ -1,18 +1,12 @@ #include "test/jemalloc_test.h" -#ifdef JEMALLOC_PROF -const char *malloc_conf = - "prof:true,prof_thread_active_init:false,lg_prof_sample:0"; -#endif - static void -mallctl_bool_get(const char *name, bool expected, const char *func, int line) -{ +mallctl_bool_get(const char *name, bool expected, const char *func, int line) { bool old; size_t sz; sz = sizeof(old); - assert_d_eq(mallctl(name, &old, &sz, NULL, 0), 0, + assert_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0, "%s():%d: Unexpected mallctl failure reading %s", func, line, name); assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line, name); @@ -20,13 +14,13 @@ mallctl_bool_get(const char *name, bool expected, const char *func, int line) static void mallctl_bool_set(const char *name, bool old_expected, bool val_new, - const char *func, int line) -{ + const char *func, int line) { bool old; size_t sz; sz = sizeof(old); - 
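
/*
 * The (void *) casts added throughout these hunks follow mallctl()'s
 * read-and-write calling convention: oldp/oldlenp receive the previous
 * value, newp/newlen supply the replacement, and either side may be NULL.
 * A hedged sketch of the same round-trip these tests perform;
 * toggle_prof_active() is illustrative, not part of the patch.
 */
#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static bool
toggle_prof_active(bool enable) {
	bool old;
	size_t sz = sizeof(old);

	/* Read the previous setting and install the new one in a single call. */
	if (mallctl("prof.active", (void *)&old, &sz, (void *)&enable,
	    sizeof(enable)) != 0) {
		return false;	/* e.g. ENOENT or EPERM; the tests assert success. */
	}
	return old;
}
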
assert_d_eq(mallctl(name, &old, &sz, &val_new, sizeof(val_new)), 0, + assert_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new, + sizeof(val_new)), 0, "%s():%d: Unexpected mallctl failure reading/writing %s", func, line, name); assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func, @@ -35,50 +29,41 @@ mallctl_bool_set(const char *name, bool old_expected, bool val_new, static void mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func, - int line) -{ - + int line) { mallctl_bool_get("prof.active", prof_active_old_expected, func, line); } -#define mallctl_prof_active_get(a) \ +#define mallctl_prof_active_get(a) \ mallctl_prof_active_get_impl(a, __func__, __LINE__) static void mallctl_prof_active_set_impl(bool prof_active_old_expected, - bool prof_active_new, const char *func, int line) -{ - + bool prof_active_new, const char *func, int line) { mallctl_bool_set("prof.active", prof_active_old_expected, prof_active_new, func, line); } -#define mallctl_prof_active_set(a, b) \ +#define mallctl_prof_active_set(a, b) \ mallctl_prof_active_set_impl(a, b, __func__, __LINE__) static void mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected, - const char *func, int line) -{ - + const char *func, int line) { mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected, func, line); } -#define mallctl_thread_prof_active_get(a) \ +#define mallctl_thread_prof_active_get(a) \ mallctl_thread_prof_active_get_impl(a, __func__, __LINE__) static void mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected, - bool thread_prof_active_new, const char *func, int line) -{ - + bool thread_prof_active_new, const char *func, int line) { mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected, thread_prof_active_new, func, line); } -#define mallctl_thread_prof_active_set(a, b) \ +#define mallctl_thread_prof_active_set(a, b) \ mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__) static void -prof_sampling_probe_impl(bool expect_sample, const char *func, int line) -{ +prof_sampling_probe_impl(bool expect_sample, const char *func, int line) { void *p; size_t expected_backtraces = expect_sample ? 
1 : 0; @@ -90,12 +75,10 @@ prof_sampling_probe_impl(bool expect_sample, const char *func, int line) "%s():%d: Unexpected backtrace count", func, line); dallocx(p, 0); } -#define prof_sampling_probe(a) \ +#define prof_sampling_probe(a) \ prof_sampling_probe_impl(a, __func__, __LINE__) -TEST_BEGIN(test_prof_active) -{ - +TEST_BEGIN(test_prof_active) { test_skip_if(!config_prof); mallctl_prof_active_get(true); @@ -128,9 +111,7 @@ TEST_BEGIN(test_prof_active) TEST_END int -main(void) -{ - - return (test( - test_prof_active)); +main(void) { + return test_no_reentrancy( + test_prof_active); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_active.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_active.sh new file mode 100644 index 0000000..0167cb1 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_active.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="prof:true,prof_thread_active_init:false,lg_prof_sample:0" +fi diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_gdump.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_gdump.c similarity index 68% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_gdump.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_gdump.c index a0e6ee9..fcb434c 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_gdump.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_gdump.c @@ -1,14 +1,9 @@ #include "test/jemalloc_test.h" -#ifdef JEMALLOC_PROF -const char *malloc_conf = "prof:true,prof_active:false,prof_gdump:true"; -#endif - static bool did_prof_dump_open; static int -prof_dump_open_intercept(bool propagate_err, const char *filename) -{ +prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; did_prof_dump_open = true; @@ -16,11 +11,10 @@ prof_dump_open_intercept(bool propagate_err, const char *filename) fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); - return (fd); + return fd; } -TEST_BEGIN(test_gdump) -{ +TEST_BEGIN(test_gdump) { bool active, gdump, gdump_old; void *p, *q, *r, *s; size_t sz; @@ -28,40 +22,41 @@ TEST_BEGIN(test_gdump) test_skip_if(!config_prof); active = true; - assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), - 0, "Unexpected mallctl failure while activating profiling"); + assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, + sizeof(active)), 0, + "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; did_prof_dump_open = false; - p = mallocx(chunksize, 0); + p = mallocx((1U << LG_LARGE_MINCLASS), 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); did_prof_dump_open = false; - q = mallocx(chunksize, 0); + q = mallocx((1U << LG_LARGE_MINCLASS), 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); gdump = false; sz = sizeof(gdump_old); - assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump, - sizeof(gdump)), 0, + assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz, + (void *)&gdump, sizeof(gdump)), 0, "Unexpected mallctl failure while disabling prof.gdump"); assert(gdump_old); did_prof_dump_open = false; - r = mallocx(chunksize, 0); + r = mallocx((1U << LG_LARGE_MINCLASS), 
0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); assert_false(did_prof_dump_open, "Unexpected profile dump"); gdump = true; sz = sizeof(gdump_old); - assert_d_eq(mallctl("prof.gdump", &gdump_old, &sz, &gdump, - sizeof(gdump)), 0, + assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz, + (void *)&gdump, sizeof(gdump)), 0, "Unexpected mallctl failure while enabling prof.gdump"); assert(!gdump_old); did_prof_dump_open = false; - s = mallocx(chunksize, 0); + s = mallocx((1U << LG_LARGE_MINCLASS), 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); @@ -73,9 +68,7 @@ TEST_BEGIN(test_gdump) TEST_END int -main(void) -{ - - return (test( - test_gdump)); +main(void) { + return test_no_reentrancy( + test_gdump); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_gdump.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_gdump.sh new file mode 100644 index 0000000..3f600d2 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_gdump.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="prof:true,prof_active:false,prof_gdump:true" +fi + diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_idump.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_idump.c similarity index 60% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_idump.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_idump.c index bdea53e..1cc6c98 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_idump.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_idump.c @@ -1,16 +1,9 @@ #include "test/jemalloc_test.h" -#ifdef JEMALLOC_PROF -const char *malloc_conf = - "prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0," - "lg_prof_interval:0"; -#endif - static bool did_prof_dump_open; static int -prof_dump_open_intercept(bool propagate_err, const char *filename) -{ +prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; did_prof_dump_open = true; @@ -18,19 +11,19 @@ prof_dump_open_intercept(bool propagate_err, const char *filename) fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); - return (fd); + return fd; } -TEST_BEGIN(test_idump) -{ +TEST_BEGIN(test_idump) { bool active; void *p; test_skip_if(!config_prof); active = true; - assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), - 0, "Unexpected mallctl failure while activating profiling"); + assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, + sizeof(active)), 0, + "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; @@ -43,9 +36,7 @@ TEST_BEGIN(test_idump) TEST_END int -main(void) -{ - - return (test( - test_idump)); +main(void) { + return test( + test_idump); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_idump.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_idump.sh new file mode 100644 index 0000000..4dc599a --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_idump.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +export MALLOC_CONF="tcache:false" +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="${MALLOC_CONF},prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,lg_prof_interval:0" 
+fi + + diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_reset.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_reset.c similarity index 85% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_reset.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_reset.c index 69983e5..7cce42d 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_reset.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_reset.c @@ -1,52 +1,42 @@ #include "test/jemalloc_test.h" -#ifdef JEMALLOC_PROF -const char *malloc_conf = - "prof:true,prof_active:false,lg_prof_sample:0"; -#endif - static int -prof_dump_open_intercept(bool propagate_err, const char *filename) -{ +prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); - return (fd); + return fd; } static void -set_prof_active(bool active) -{ - - assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)), - 0, "Unexpected mallctl failure"); +set_prof_active(bool active) { + assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, + sizeof(active)), 0, "Unexpected mallctl failure"); } static size_t -get_lg_prof_sample(void) -{ +get_lg_prof_sample(void) { size_t lg_prof_sample; size_t sz = sizeof(size_t); - assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0, + assert_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz, + NULL, 0), 0, "Unexpected mallctl failure while reading profiling sample rate"); - return (lg_prof_sample); + return lg_prof_sample; } static void -do_prof_reset(size_t lg_prof_sample) -{ +do_prof_reset(size_t lg_prof_sample) { assert_d_eq(mallctl("prof.reset", NULL, NULL, - &lg_prof_sample, sizeof(size_t)), 0, + (void *)&lg_prof_sample, sizeof(size_t)), 0, "Unexpected mallctl failure while resetting profile data"); assert_zu_eq(lg_prof_sample, get_lg_prof_sample(), "Expected profile sample rate change"); } -TEST_BEGIN(test_prof_reset_basic) -{ +TEST_BEGIN(test_prof_reset_basic) { size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next; size_t sz; unsigned i; @@ -54,8 +44,8 @@ TEST_BEGIN(test_prof_reset_basic) test_skip_if(!config_prof); sz = sizeof(size_t); - assert_d_eq(mallctl("opt.lg_prof_sample", &lg_prof_sample_orig, &sz, - NULL, 0), 0, + assert_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig, + &sz, NULL, 0), 0, "Unexpected mallctl failure while reading profiling sample rate"); assert_zu_eq(lg_prof_sample_orig, 0, "Unexpected profiling sample rate"); @@ -94,17 +84,15 @@ TEST_END bool prof_dump_header_intercepted = false; prof_cnt_t cnt_all_copy = {0, 0, 0, 0}; static bool -prof_dump_header_intercept(bool propagate_err, const prof_cnt_t *cnt_all) -{ - +prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err, + const prof_cnt_t *cnt_all) { prof_dump_header_intercepted = true; memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t)); - return (false); + return false; } -TEST_BEGIN(test_prof_reset_cleanup) -{ +TEST_BEGIN(test_prof_reset_cleanup) { void *p; prof_dump_header_t *prof_dump_header_orig; @@ -142,14 +130,13 @@ TEST_BEGIN(test_prof_reset_cleanup) } TEST_END -#define NTHREADS 4 -#define NALLOCS_PER_THREAD (1U << 13) -#define OBJ_RING_BUF_COUNT 1531 -#define RESET_INTERVAL (1U << 10) -#define DUMP_INTERVAL 3677 +#define NTHREADS 4 +#define NALLOCS_PER_THREAD (1U << 13) +#define OBJ_RING_BUF_COUNT 1531 +#define 
RESET_INTERVAL (1U << 10) +#define DUMP_INTERVAL 3677 static void * -thd_start(void *varg) -{ +thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; unsigned i; void *objs[OBJ_RING_BUF_COUNT]; @@ -189,11 +176,10 @@ thd_start(void *varg) } } - return (NULL); + return NULL; } -TEST_BEGIN(test_prof_reset) -{ +TEST_BEGIN(test_prof_reset) { size_t lg_prof_sample_orig; thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; @@ -216,8 +202,9 @@ TEST_BEGIN(test_prof_reset) thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } - for (i = 0; i < NTHREADS; i++) + for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); + } assert_zu_eq(prof_bt_count(), bt_count, "Unexpected bactrace count change"); @@ -236,9 +223,8 @@ TEST_END #undef DUMP_INTERVAL /* Test sampling at the same allocation site across resets. */ -#define NITER 10 -TEST_BEGIN(test_xallocx) -{ +#define NITER 10 +TEST_BEGIN(test_xallocx) { size_t lg_prof_sample_orig; unsigned i; void *ptrs[NITER]; @@ -288,15 +274,13 @@ TEST_END #undef NITER int -main(void) -{ - +main(void) { /* Intercept dumping prior to running any tests. */ prof_dump_open = prof_dump_open_intercept; - return (test( + return test_no_reentrancy( test_prof_reset_basic, test_prof_reset_cleanup, test_prof_reset, - test_xallocx)); + test_xallocx); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_reset.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_reset.sh new file mode 100644 index 0000000..43c516a --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_reset.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="prof:true,prof_active:false,lg_prof_sample:0" +fi diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_tctx.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_tctx.c new file mode 100644 index 0000000..ff3b2b0 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_tctx.c @@ -0,0 +1,46 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_prof_realloc) { + tsdn_t *tsdn; + int flags; + void *p, *q; + prof_tctx_t *tctx_p, *tctx_q; + uint64_t curobjs_0, curobjs_1, curobjs_2, curobjs_3; + + test_skip_if(!config_prof); + + tsdn = tsdn_fetch(); + flags = MALLOCX_TCACHE_NONE; + + prof_cnt_all(&curobjs_0, NULL, NULL, NULL); + p = mallocx(1024, flags); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + tctx_p = prof_tctx_get(tsdn, p, NULL); + assert_ptr_ne(tctx_p, (prof_tctx_t *)(uintptr_t)1U, + "Expected valid tctx"); + prof_cnt_all(&curobjs_1, NULL, NULL, NULL); + assert_u64_eq(curobjs_0 + 1, curobjs_1, + "Allocation should have increased sample size"); + + q = rallocx(p, 2048, flags); + assert_ptr_ne(p, q, "Expected move"); + assert_ptr_not_null(p, "Unexpected rmallocx() failure"); + tctx_q = prof_tctx_get(tsdn, q, NULL); + assert_ptr_ne(tctx_q, (prof_tctx_t *)(uintptr_t)1U, + "Expected valid tctx"); + prof_cnt_all(&curobjs_2, NULL, NULL, NULL); + assert_u64_eq(curobjs_1, curobjs_2, + "Reallocation should not have changed sample size"); + + dallocx(q, flags); + prof_cnt_all(&curobjs_3, NULL, NULL, NULL); + assert_u64_eq(curobjs_0, curobjs_3, + "Sample size should have returned to base level"); +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_prof_realloc); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_tctx.sh 
b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_tctx.sh new file mode 100644 index 0000000..8fcc7d8 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_tctx.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="prof:true,lg_prof_sample:0" +fi diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_thread_name.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_thread_name.c similarity index 68% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_thread_name.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_thread_name.c index f501158..c9c2a2b 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/prof_thread_name.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_thread_name.c @@ -1,42 +1,35 @@ #include "test/jemalloc_test.h" -#ifdef JEMALLOC_PROF -const char *malloc_conf = "prof:true,prof_active:false"; -#endif - static void mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func, - int line) -{ + int line) { const char *thread_name_old; size_t sz; sz = sizeof(thread_name_old); - assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, NULL, 0), - 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name", + assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz, + NULL, 0), 0, + "%s():%d: Unexpected mallctl failure reading thread.prof.name", func, line); assert_str_eq(thread_name_old, thread_name_expected, "%s():%d: Unexpected thread.prof.name value", func, line); } -#define mallctl_thread_name_get(a) \ +#define mallctl_thread_name_get(a) \ mallctl_thread_name_get_impl(a, __func__, __LINE__) static void mallctl_thread_name_set_impl(const char *thread_name, const char *func, - int line) -{ - - assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, - sizeof(thread_name)), 0, + int line) { + assert_d_eq(mallctl("thread.prof.name", NULL, NULL, + (void *)&thread_name, sizeof(thread_name)), 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name", func, line); mallctl_thread_name_get_impl(thread_name, func, line); } -#define mallctl_thread_name_set(a) \ +#define mallctl_thread_name_set(a) \ mallctl_thread_name_set_impl(a, __func__, __LINE__) -TEST_BEGIN(test_prof_thread_name_validation) -{ +TEST_BEGIN(test_prof_thread_name_validation) { const char *thread_name; test_skip_if(!config_prof); @@ -46,15 +39,15 @@ TEST_BEGIN(test_prof_thread_name_validation) /* NULL input shouldn't be allowed. */ thread_name = NULL; - assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, - sizeof(thread_name)), EFAULT, + assert_d_eq(mallctl("thread.prof.name", NULL, NULL, + (void *)&thread_name, sizeof(thread_name)), EFAULT, "Unexpected mallctl result writing \"%s\" to thread.prof.name", thread_name); /* '\n' shouldn't be allowed. 
*/ thread_name = "hi\nthere"; - assert_d_eq(mallctl("thread.prof.name", NULL, NULL, &thread_name, - sizeof(thread_name)), EFAULT, + assert_d_eq(mallctl("thread.prof.name", NULL, NULL, + (void *)&thread_name, sizeof(thread_name)), EFAULT, "Unexpected mallctl result writing \"%s\" to thread.prof.name", thread_name); @@ -64,8 +57,9 @@ TEST_BEGIN(test_prof_thread_name_validation) size_t sz; sz = sizeof(thread_name_old); - assert_d_eq(mallctl("thread.prof.name", &thread_name_old, &sz, - &thread_name, sizeof(thread_name)), EPERM, + assert_d_eq(mallctl("thread.prof.name", + (void *)&thread_name_old, &sz, (void *)&thread_name, + sizeof(thread_name)), EPERM, "Unexpected mallctl result writing \"%s\" to " "thread.prof.name", thread_name); } @@ -74,11 +68,10 @@ TEST_BEGIN(test_prof_thread_name_validation) } TEST_END -#define NTHREADS 4 -#define NRESET 25 +#define NTHREADS 4 +#define NRESET 25 static void * -thd_start(void *varg) -{ +thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; char thread_name[16] = ""; unsigned i; @@ -97,11 +90,10 @@ thd_start(void *varg) mallctl_thread_name_set(thread_name); mallctl_thread_name_set(""); - return (NULL); + return NULL; } -TEST_BEGIN(test_prof_thread_name_threaded) -{ +TEST_BEGIN(test_prof_thread_name_threaded) { thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; @@ -112,18 +104,17 @@ TEST_BEGIN(test_prof_thread_name_threaded) thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } - for (i = 0; i < NTHREADS; i++) + for (i = 0; i < NTHREADS; i++) { thd_join(thds[i], NULL); + } } TEST_END #undef NTHREADS #undef NRESET int -main(void) -{ - - return (test( +main(void) { + return test( test_prof_thread_name_validation, - test_prof_thread_name_threaded)); + test_prof_thread_name_threaded); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_thread_name.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_thread_name.sh new file mode 100644 index 0000000..298c105 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/prof_thread_name.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_prof}" = "x1" ] ; then + export MALLOC_CONF="prof:true,prof_active:false" +fi diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/ql.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/ql.c similarity index 88% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/ql.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/ql.c index 05fad45..b76c24c 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/ql.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/ql.c @@ -1,7 +1,9 @@ #include "test/jemalloc_test.h" +#include "jemalloc/internal/ql.h" + /* Number of ring entries, in [2..26]. 
*/ -#define NENTRIES 9 +#define NENTRIES 9 typedef struct list_s list_t; typedef ql_head(list_t) list_head_t; @@ -12,8 +14,7 @@ struct list_s { }; static void -test_empty_list(list_head_t *head) -{ +test_empty_list(list_head_t *head) { list_t *t; unsigned i; @@ -34,8 +35,7 @@ test_empty_list(list_head_t *head) assert_u_eq(i, 0, "Unexpected element for empty list"); } -TEST_BEGIN(test_ql_empty) -{ +TEST_BEGIN(test_ql_empty) { list_head_t head; ql_new(&head); @@ -44,8 +44,7 @@ TEST_BEGIN(test_ql_empty) TEST_END static void -init_entries(list_t *entries, unsigned nentries) -{ +init_entries(list_t *entries, unsigned nentries) { unsigned i; for (i = 0; i < nentries; i++) { @@ -55,8 +54,7 @@ init_entries(list_t *entries, unsigned nentries) } static void -test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) -{ +test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) { list_t *t; unsigned i; @@ -91,31 +89,31 @@ test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) } } -TEST_BEGIN(test_ql_tail_insert) -{ +TEST_BEGIN(test_ql_tail_insert) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); - for (i = 0; i < NENTRIES; i++) + for (i = 0; i < NENTRIES; i++) { ql_tail_insert(&head, &entries[i], link); + } test_entries_list(&head, entries, NENTRIES); } TEST_END -TEST_BEGIN(test_ql_tail_remove) -{ +TEST_BEGIN(test_ql_tail_remove) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); - for (i = 0; i < NENTRIES; i++) + for (i = 0; i < NENTRIES; i++) { ql_tail_insert(&head, &entries[i], link); + } for (i = 0; i < NENTRIES; i++) { test_entries_list(&head, entries, NENTRIES-i); @@ -125,31 +123,31 @@ TEST_BEGIN(test_ql_tail_remove) } TEST_END -TEST_BEGIN(test_ql_head_insert) -{ +TEST_BEGIN(test_ql_head_insert) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); - for (i = 0; i < NENTRIES; i++) + for (i = 0; i < NENTRIES; i++) { ql_head_insert(&head, &entries[NENTRIES-i-1], link); + } test_entries_list(&head, entries, NENTRIES); } TEST_END -TEST_BEGIN(test_ql_head_remove) -{ +TEST_BEGIN(test_ql_head_remove) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); - for (i = 0; i < NENTRIES; i++) + for (i = 0; i < NENTRIES; i++) { ql_head_insert(&head, &entries[NENTRIES-i-1], link); + } for (i = 0; i < NENTRIES; i++) { test_entries_list(&head, &entries[i], NENTRIES-i); @@ -159,8 +157,7 @@ TEST_BEGIN(test_ql_head_remove) } TEST_END -TEST_BEGIN(test_ql_insert) -{ +TEST_BEGIN(test_ql_insert) { list_head_t head; list_t entries[8]; list_t *a, *b, *c, *d, *e, *f, *g, *h; @@ -196,14 +193,12 @@ TEST_BEGIN(test_ql_insert) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_ql_empty, test_ql_tail_insert, test_ql_tail_remove, test_ql_head_insert, test_ql_head_remove, - test_ql_insert)); + test_ql_insert); } diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/qr.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/qr.c similarity index 82% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/qr.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/qr.c index a2a2d90..271a109 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/qr.c +++ 
b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/qr.c @@ -1,9 +1,11 @@ #include "test/jemalloc_test.h" +#include "jemalloc/internal/qr.h" + /* Number of ring entries, in [2..26]. */ -#define NENTRIES 9 +#define NENTRIES 9 /* Split index, in [1..NENTRIES). */ -#define SPLIT_INDEX 5 +#define SPLIT_INDEX 5 typedef struct ring_s ring_t; @@ -13,8 +15,7 @@ struct ring_s { }; static void -init_entries(ring_t *entries) -{ +init_entries(ring_t *entries) { unsigned i; for (i = 0; i < NENTRIES; i++) { @@ -24,8 +25,7 @@ init_entries(ring_t *entries) } static void -test_independent_entries(ring_t *entries) -{ +test_independent_entries(ring_t *entries) { ring_t *t; unsigned i, j; @@ -61,8 +61,7 @@ test_independent_entries(ring_t *entries) } } -TEST_BEGIN(test_qr_one) -{ +TEST_BEGIN(test_qr_one) { ring_t entries[NENTRIES]; init_entries(entries); @@ -71,8 +70,7 @@ TEST_BEGIN(test_qr_one) TEST_END static void -test_entries_ring(ring_t *entries) -{ +test_entries_ring(ring_t *entries) { ring_t *t; unsigned i, j; @@ -104,27 +102,27 @@ test_entries_ring(ring_t *entries) } } -TEST_BEGIN(test_qr_after_insert) -{ +TEST_BEGIN(test_qr_after_insert) { ring_t entries[NENTRIES]; unsigned i; init_entries(entries); - for (i = 1; i < NENTRIES; i++) + for (i = 1; i < NENTRIES; i++) { qr_after_insert(&entries[i - 1], &entries[i], link); + } test_entries_ring(entries); } TEST_END -TEST_BEGIN(test_qr_remove) -{ +TEST_BEGIN(test_qr_remove) { ring_t entries[NENTRIES]; ring_t *t; unsigned i, j; init_entries(entries); - for (i = 1; i < NENTRIES; i++) + for (i = 1; i < NENTRIES; i++) { qr_after_insert(&entries[i - 1], &entries[i], link); + } for (i = 0; i < NENTRIES; i++) { j = 0; @@ -145,15 +143,15 @@ TEST_BEGIN(test_qr_remove) } TEST_END -TEST_BEGIN(test_qr_before_insert) -{ +TEST_BEGIN(test_qr_before_insert) { ring_t entries[NENTRIES]; ring_t *t; unsigned i, j; init_entries(entries); - for (i = 1; i < NENTRIES; i++) + for (i = 1; i < NENTRIES; i++) { qr_before_insert(&entries[i - 1], &entries[i], link); + } for (i = 0; i < NENTRIES; i++) { j = 0; qr_foreach(t, &entries[i], link) { @@ -184,8 +182,7 @@ TEST_BEGIN(test_qr_before_insert) TEST_END static void -test_split_entries(ring_t *entries) -{ +test_split_entries(ring_t *entries) { ring_t *t; unsigned i, j; @@ -206,43 +203,41 @@ test_split_entries(ring_t *entries) } } -TEST_BEGIN(test_qr_meld_split) -{ +TEST_BEGIN(test_qr_meld_split) { ring_t entries[NENTRIES]; unsigned i; init_entries(entries); - for (i = 1; i < NENTRIES; i++) + for (i = 1; i < NENTRIES; i++) { qr_after_insert(&entries[i - 1], &entries[i], link); + } - qr_split(&entries[0], &entries[SPLIT_INDEX], link); + qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_split_entries(entries); - qr_meld(&entries[0], &entries[SPLIT_INDEX], link); + qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_entries_ring(entries); - qr_meld(&entries[0], &entries[SPLIT_INDEX], link); + qr_meld(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_split_entries(entries); - qr_split(&entries[0], &entries[SPLIT_INDEX], link); + qr_split(&entries[0], &entries[SPLIT_INDEX], ring_t, link); test_entries_ring(entries); - qr_split(&entries[0], &entries[0], link); + qr_split(&entries[0], &entries[0], ring_t, link); test_entries_ring(entries); - qr_meld(&entries[0], &entries[0], link); + qr_meld(&entries[0], &entries[0], ring_t, link); test_entries_ring(entries); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_qr_one, test_qr_after_insert, test_qr_remove, 
test_qr_before_insert, - test_qr_meld_split)); + test_qr_meld_split); } diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/quarantine.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/quarantine.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/quarantine.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/quarantine.c diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/rb.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/rb.c similarity index 71% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/rb.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/rb.c index b38eb0e..65c0492 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/rb.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/rb.c @@ -1,27 +1,29 @@ #include "test/jemalloc_test.h" -#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \ - a_type *rbp_bh_t; \ - for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \ - rbp_bh_t != &(a_rbt)->rbt_nil; \ - rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \ - if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \ - (r_height)++; \ +#include "jemalloc/internal/rb.h" + +#define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \ + a_type *rbp_bh_t; \ + for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; rbp_bh_t != \ + NULL; rbp_bh_t = rbtn_left_get(a_type, a_field, \ + rbp_bh_t)) { \ + if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \ + (r_height)++; \ + } \ } \ - } \ } while (0) typedef struct node_s node_t; struct node_s { -#define NODE_MAGIC 0x9823af7e +#define NODE_MAGIC 0x9823af7e uint32_t magic; rb_node(node_t) link; uint64_t key; }; static int -node_cmp(node_t *a, node_t *b) { +node_cmp(const node_t *a, const node_t *b) { int ret; assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); @@ -36,14 +38,13 @@ node_cmp(node_t *a, node_t *b) { ret = (((uintptr_t)a) > ((uintptr_t)b)) - (((uintptr_t)a) < ((uintptr_t)b)); } - return (ret); + return ret; } typedef rb_tree(node_t) tree_t; rb_gen(static, tree_, tree_t, node_t, link, node_cmp); -TEST_BEGIN(test_rb_empty) -{ +TEST_BEGIN(test_rb_empty) { tree_t tree; node_t key; @@ -68,47 +69,56 @@ TEST_BEGIN(test_rb_empty) TEST_END static unsigned -tree_recurse(node_t *node, unsigned black_height, unsigned black_depth, - node_t *nil) -{ +tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) { unsigned ret = 0; - node_t *left_node = rbtn_left_get(node_t, link, node); - node_t *right_node = rbtn_right_get(node_t, link, node); + node_t *left_node; + node_t *right_node; - if (!rbtn_red_get(node_t, link, node)) + if (node == NULL) { + return ret; + } + + left_node = rbtn_left_get(node_t, link, node); + right_node = rbtn_right_get(node_t, link, node); + + if (!rbtn_red_get(node_t, link, node)) { black_depth++; + } /* Red nodes must be interleaved with black nodes. */ if (rbtn_red_get(node_t, link, node)) { - assert_false(rbtn_red_get(node_t, link, left_node), - "Node should be black"); - assert_false(rbtn_red_get(node_t, link, right_node), - "Node should be black"); + if (left_node != NULL) { + assert_false(rbtn_red_get(node_t, link, left_node), + "Node should be black"); + } + if (right_node != NULL) { + assert_false(rbtn_red_get(node_t, link, right_node), + "Node should be black"); + } } - if (node == nil) - return (ret); /* Self. 
*/ assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); /* Left subtree. */ - if (left_node != nil) - ret += tree_recurse(left_node, black_height, black_depth, nil); - else + if (left_node != NULL) { + ret += tree_recurse(left_node, black_height, black_depth); + } else { ret += (black_depth != black_height); + } /* Right subtree. */ - if (right_node != nil) - ret += tree_recurse(right_node, black_height, black_depth, nil); - else + if (right_node != NULL) { + ret += tree_recurse(right_node, black_height, black_depth); + } else { ret += (black_depth != black_height); + } - return (ret); + return ret; } static node_t * -tree_iterate_cb(tree_t *tree, node_t *node, void *data) -{ +tree_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *i = (unsigned *)data; node_t *search_node; @@ -131,34 +141,31 @@ tree_iterate_cb(tree_t *tree, node_t *node, void *data) (*i)++; - return (NULL); + return NULL; } static unsigned -tree_iterate(tree_t *tree) -{ +tree_iterate(tree_t *tree) { unsigned i; i = 0; tree_iter(tree, NULL, tree_iterate_cb, (void *)&i); - return (i); + return i; } static unsigned -tree_iterate_reverse(tree_t *tree) -{ +tree_iterate_reverse(tree_t *tree) { unsigned i; i = 0; tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i); - return (i); + return i; } static void -node_remove(tree_t *tree, node_t *node, unsigned nnodes) -{ +node_remove(tree_t *tree, node_t *node, unsigned nnodes) { node_t *search_node; unsigned black_height, imbalances; @@ -181,8 +188,7 @@ node_remove(tree_t *tree, node_t *node, unsigned nnodes) node->magic = 0; rbtn_black_height(node_t, link, tree, black_height); - imbalances = tree_recurse(tree->rbt_root, black_height, 0, - &(tree->rbt_nil)); + imbalances = tree_recurse(tree->rbt_root, black_height, 0); assert_u_eq(imbalances, 0, "Tree is unbalanced"); assert_u_eq(tree_iterate(tree), nnodes-1, "Unexpected node iteration count"); @@ -191,32 +197,37 @@ node_remove(tree_t *tree, node_t *node, unsigned nnodes) } static node_t * -remove_iterate_cb(tree_t *tree, node_t *node, void *data) -{ +remove_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; node_t *ret = tree_next(tree, node); node_remove(tree, node, *nnodes); - return (ret); + return ret; } static node_t * -remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) -{ +remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; node_t *ret = tree_prev(tree, node); node_remove(tree, node, *nnodes); - return (ret); + return ret; +} + +static void +destroy_cb(node_t *node, void *data) { + unsigned *nnodes = (unsigned *)data; + + assert_u_gt(*nnodes, 0, "Destruction removed too many nodes"); + (*nnodes)--; } -TEST_BEGIN(test_rb_random) -{ -#define NNODES 25 -#define NBAGS 250 -#define SEED 42 +TEST_BEGIN(test_rb_random) { +#define NNODES 25 +#define NBAGS 250 +#define SEED 42 sfmt_t *sfmt; uint64_t bag[NNODES]; tree_t tree; @@ -228,23 +239,25 @@ TEST_BEGIN(test_rb_random) switch (i) { case 0: /* Insert in order. */ - for (j = 0; j < NNODES; j++) + for (j = 0; j < NNODES; j++) { bag[j] = j; + } break; case 1: /* Insert in reverse order. */ - for (j = 0; j < NNODES; j++) + for (j = 0; j < NNODES; j++) { bag[j] = NNODES - j - 1; + } break; default: - for (j = 0; j < NNODES; j++) + for (j = 0; j < NNODES; j++) { bag[j] = gen_rand64_range(sfmt, NNODES); + } } for (j = 1; j <= NNODES; j++) { /* Initialize tree and nodes. 
*/ tree_new(&tree); - tree.rbt_nil.magic = 0; for (k = 0; k < j; k++) { nodes[k].magic = NODE_MAGIC; nodes[k].key = bag[k]; @@ -257,7 +270,7 @@ TEST_BEGIN(test_rb_random) rbtn_black_height(node_t, link, &tree, black_height); imbalances = tree_recurse(tree.rbt_root, - black_height, 0, &(tree.rbt_nil)); + black_height, 0); assert_u_eq(imbalances, 0, "Tree is unbalanced"); @@ -278,14 +291,16 @@ TEST_BEGIN(test_rb_random) } /* Remove nodes. */ - switch (i % 4) { + switch (i % 5) { case 0: - for (k = 0; k < j; k++) + for (k = 0; k < j; k++) { node_remove(&tree, &nodes[k], j - k); + } break; case 1: - for (k = j; k > 0; k--) + for (k = j; k > 0; k--) { node_remove(&tree, &nodes[k-1], k); + } break; case 2: { node_t *start; @@ -314,6 +329,12 @@ TEST_BEGIN(test_rb_random) assert_u_eq(nnodes, 0, "Removal terminated early"); break; + } case 4: { + unsigned nnodes = j; + tree_destroy(&tree, destroy_cb, &nnodes); + assert_u_eq(nnodes, 0, + "Destruction terminated early"); + break; } default: not_reached(); } @@ -327,10 +348,8 @@ TEST_BEGIN(test_rb_random) TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_rb_empty, - test_rb_random)); + test_rb_random); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/retained.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/retained.c new file mode 100644 index 0000000..d51a598 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/retained.c @@ -0,0 +1,181 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/spin.h" + +static unsigned arena_ind; +static size_t sz; +static size_t esz; +#define NEPOCHS 8 +#define PER_THD_NALLOCS 1 +static atomic_u_t epoch; +static atomic_u_t nfinished; + +static unsigned +do_arena_create(extent_hooks_t *h) { + unsigned arena_ind; + size_t sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, + (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0, + "Unexpected mallctl() failure"); + return arena_ind; +} + +static void +do_arena_destroy(unsigned arena_ind) { + size_t mib[3]; + size_t miblen; + + miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0, + "Unexpected mallctlnametomib() failure"); + mib[1] = (size_t)arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, + "Unexpected mallctlbymib() failure"); +} + +static void +do_refresh(void) { + uint64_t epoch = 1; + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, + sizeof(epoch)), 0, "Unexpected mallctl() failure"); +} + +static size_t +do_get_size_impl(const char *cmd, unsigned arena_ind) { + size_t mib[4]; + size_t miblen = sizeof(mib) / sizeof(size_t); + size_t z = sizeof(size_t); + + assert_d_eq(mallctlnametomib(cmd, mib, &miblen), + 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); + mib[2] = arena_ind; + size_t size; + assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0), + 0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd); + + return size; +} + +static size_t +do_get_active(unsigned arena_ind) { + return do_get_size_impl("stats.arenas.0.pactive", arena_ind) * PAGE; +} + +static size_t +do_get_mapped(unsigned arena_ind) { + return do_get_size_impl("stats.arenas.0.mapped", arena_ind); +} + +static void * +thd_start(void *arg) { + for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) { + /* Busy-wait for next epoch. 
*/ + unsigned cur_epoch; + spin_t spinner = SPIN_INITIALIZER; + while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) != + next_epoch) { + spin_adaptive(&spinner); + } + assert_u_eq(cur_epoch, next_epoch, "Unexpected epoch"); + + /* + * Allocate. The main thread will reset the arena, so there's + * no need to deallocate. + */ + for (unsigned i = 0; i < PER_THD_NALLOCS; i++) { + void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) | + MALLOCX_TCACHE_NONE + ); + assert_ptr_not_null(p, + "Unexpected mallocx() failure\n"); + } + + /* Let the main thread know we've finished this iteration. */ + atomic_fetch_add_u(&nfinished, 1, ATOMIC_RELEASE); + } + + return NULL; +} + +TEST_BEGIN(test_retained) { + test_skip_if(!config_stats); + + arena_ind = do_arena_create(NULL); + sz = nallocx(HUGEPAGE, 0); + esz = sz + sz_large_pad; + + atomic_store_u(&epoch, 0, ATOMIC_RELAXED); + + unsigned nthreads = ncpus * 2; + VARIABLE_ARRAY(thd_t, threads, nthreads); + for (unsigned i = 0; i < nthreads; i++) { + thd_create(&threads[i], thd_start, NULL); + } + + for (unsigned e = 1; e < NEPOCHS; e++) { + atomic_store_u(&nfinished, 0, ATOMIC_RELEASE); + atomic_store_u(&epoch, e, ATOMIC_RELEASE); + + /* Wait for threads to finish allocating. */ + spin_t spinner = SPIN_INITIALIZER; + while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) { + spin_adaptive(&spinner); + } + + /* + * Assert that retained is no more than the sum of size classes + * that should have been used to satisfy the worker threads' + * requests, discounting per growth fragmentation. + */ + do_refresh(); + + size_t allocated = esz * nthreads * PER_THD_NALLOCS; + size_t active = do_get_active(arena_ind); + assert_zu_le(allocated, active, "Unexpected active memory"); + size_t mapped = do_get_mapped(arena_ind); + assert_zu_le(active, mapped, "Unexpected mapped memory"); + + arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false); + size_t usable = 0; + size_t fragmented = 0; + for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind < + arena->extent_grow_next; pind++) { + size_t psz = sz_pind2sz(pind); + size_t psz_fragmented = psz % esz; + size_t psz_usable = psz - psz_fragmented; + /* + * Only consider size classes that wouldn't be skipped. + */ + if (psz_usable > 0) { + assert_zu_lt(usable, allocated, + "Excessive retained memory " + "(%#zx[+%#zx] > %#zx)", usable, psz_usable, + allocated); + fragmented += psz_fragmented; + usable += psz_usable; + } + } + + /* + * Clean up arena. Destroying and recreating the arena + * is simpler that specifying extent hooks that deallocate + * (rather than retaining) during reset. + */ + do_arena_destroy(arena_ind); + assert_u_eq(do_arena_create(NULL), arena_ind, + "Unexpected arena index"); + } + + for (unsigned i = 0; i < nthreads; i++) { + thd_join(threads[i], NULL); + } + + do_arena_destroy(arena_ind); +} +TEST_END + +int +main(void) { + return test( + test_retained); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/rtree.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/rtree.c new file mode 100644 index 0000000..908100f --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/rtree.c @@ -0,0 +1,227 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/rtree.h" + +rtree_node_alloc_t *rtree_node_alloc_orig; +rtree_node_dalloc_t *rtree_node_dalloc_orig; +rtree_leaf_alloc_t *rtree_leaf_alloc_orig; +rtree_leaf_dalloc_t *rtree_leaf_dalloc_orig; + +/* Potentially too large to safely place on the stack. 
*/ +rtree_t test_rtree; + +static rtree_node_elm_t * +rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + rtree_node_elm_t *node; + + if (rtree != &test_rtree) { + return rtree_node_alloc_orig(tsdn, rtree, nelms); + } + + malloc_mutex_unlock(tsdn, &rtree->init_lock); + node = (rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t)); + assert_ptr_not_null(node, "Unexpected calloc() failure"); + malloc_mutex_lock(tsdn, &rtree->init_lock); + + return node; +} + +static void +rtree_node_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, + rtree_node_elm_t *node) { + if (rtree != &test_rtree) { + rtree_node_dalloc_orig(tsdn, rtree, node); + return; + } + + free(node); +} + +static rtree_leaf_elm_t * +rtree_leaf_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + rtree_leaf_elm_t *leaf; + + if (rtree != &test_rtree) { + return rtree_leaf_alloc_orig(tsdn, rtree, nelms); + } + + malloc_mutex_unlock(tsdn, &rtree->init_lock); + leaf = (rtree_leaf_elm_t *)calloc(nelms, sizeof(rtree_leaf_elm_t)); + assert_ptr_not_null(leaf, "Unexpected calloc() failure"); + malloc_mutex_lock(tsdn, &rtree->init_lock); + + return leaf; +} + +static void +rtree_leaf_dalloc_intercept(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *leaf) { + if (rtree != &test_rtree) { + rtree_leaf_dalloc_orig(tsdn, rtree, leaf); + return; + } + + free(leaf); +} + +TEST_BEGIN(test_rtree_read_empty) { + tsdn_t *tsdn; + + tsdn = tsdn_fetch(); + + rtree_t *rtree = &test_rtree; + rtree_ctx_t rtree_ctx; + rtree_ctx_data_init(&rtree_ctx); + assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); + assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, + false), "rtree_extent_read() should return NULL for empty tree"); + rtree_delete(tsdn, rtree); +} +TEST_END + +#undef NTHREADS +#undef NITERS +#undef SEED + +TEST_BEGIN(test_rtree_extrema) { + extent_t extent_a, extent_b; + extent_init(&extent_a, NULL, NULL, LARGE_MINCLASS, false, + sz_size2index(LARGE_MINCLASS), 0, extent_state_active, false, + false, true); + extent_init(&extent_b, NULL, NULL, 0, false, NSIZES, 0, + extent_state_active, false, false, true); + + tsdn_t *tsdn = tsdn_fetch(); + + rtree_t *rtree = &test_rtree; + rtree_ctx_t rtree_ctx; + rtree_ctx_data_init(&rtree_ctx); + assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); + + assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &extent_a, + extent_szind_get(&extent_a), extent_slab_get(&extent_a)), + "Unexpected rtree_write() failure"); + rtree_szind_slab_update(tsdn, rtree, &rtree_ctx, PAGE, + extent_szind_get(&extent_a), extent_slab_get(&extent_a)); + assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, true), + &extent_a, + "rtree_extent_read() should return previously set value"); + + assert_false(rtree_write(tsdn, rtree, &rtree_ctx, ~((uintptr_t)0), + &extent_b, extent_szind_get_maybe_invalid(&extent_b), + extent_slab_get(&extent_b)), "Unexpected rtree_write() failure"); + assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, + ~((uintptr_t)0), true), &extent_b, + "rtree_extent_read() should return previously set value"); + + rtree_delete(tsdn, rtree); +} +TEST_END + +TEST_BEGIN(test_rtree_bits) { + tsdn_t *tsdn = tsdn_fetch(); + + uintptr_t keys[] = {PAGE, PAGE + 1, + PAGE + (((uintptr_t)1) << LG_PAGE) - 1}; + + extent_t extent; + extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0, + extent_state_active, false, false, true); + + rtree_t *rtree = &test_rtree; + rtree_ctx_t rtree_ctx; + rtree_ctx_data_init(&rtree_ctx); + 
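
/*
 * The *_intercept functions above show a reusable test pattern: capture the
 * library's hook pointer, install a wrapper, and delegate to the saved
 * original for any rtree the test does not own. (They also release
 * rtree->init_lock around calloc(), presumably so a reentrant allocation
 * cannot deadlock on rtree initialization.) A generic sketch with
 * illustrative names -- saved_alloc and counting_alloc are not part of the
 * patch:
 */
#include <stddef.h>

typedef void *(node_alloc_t)(size_t nelms);

static node_alloc_t *saved_alloc;		/* Original hook, captured once. */
static unsigned long intercepted_allocs;	/* Test-side bookkeeping. */

static void *
counting_alloc(size_t nelms) {
	intercepted_allocs++;
	return saved_alloc(nelms);	/* Fall through to the real allocator. */
}

/*
 * Installation mirrors what this patch's main() does with the rtree hooks:
 *	saved_alloc = lib_node_alloc;
 *	lib_node_alloc = counting_alloc;
 */
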
assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); + + for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) { + assert_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i], + &extent, NSIZES, false), + "Unexpected rtree_write() failure"); + for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { + assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, + keys[j], true), &extent, + "rtree_extent_read() should return previously set " + "value and ignore insignificant key bits; i=%u, " + "j=%u, set key=%#"FMTxPTR", get key=%#"FMTxPTR, i, + j, keys[i], keys[j]); + } + assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, + (((uintptr_t)2) << LG_PAGE), false), + "Only leftmost rtree leaf should be set; i=%u", i); + rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]); + } + + rtree_delete(tsdn, rtree); +} +TEST_END + +TEST_BEGIN(test_rtree_random) { +#define NSET 16 +#define SEED 42 + sfmt_t *sfmt = init_gen_rand(SEED); + tsdn_t *tsdn = tsdn_fetch(); + uintptr_t keys[NSET]; + rtree_t *rtree = &test_rtree; + rtree_ctx_t rtree_ctx; + rtree_ctx_data_init(&rtree_ctx); + + extent_t extent; + extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0, + extent_state_active, false, false, true); + + assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); + + for (unsigned i = 0; i < NSET; i++) { + keys[i] = (uintptr_t)gen_rand64(sfmt); + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, + &rtree_ctx, keys[i], false, true); + assert_ptr_not_null(elm, + "Unexpected rtree_leaf_elm_lookup() failure"); + rtree_leaf_elm_write(tsdn, rtree, elm, &extent, NSIZES, false); + assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, + keys[i], true), &extent, + "rtree_extent_read() should return previously set value"); + } + for (unsigned i = 0; i < NSET; i++) { + assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx, + keys[i], true), &extent, + "rtree_extent_read() should return previously set value, " + "i=%u", i); + } + + for (unsigned i = 0; i < NSET; i++) { + rtree_clear(tsdn, rtree, &rtree_ctx, keys[i]); + assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, + keys[i], true), + "rtree_extent_read() should return previously set value"); + } + for (unsigned i = 0; i < NSET; i++) { + assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, + keys[i], true), + "rtree_extent_read() should return previously set value"); + } + + rtree_delete(tsdn, rtree); + fini_gen_rand(sfmt); +#undef NSET +#undef SEED +} +TEST_END + +int +main(void) { + rtree_node_alloc_orig = rtree_node_alloc; + rtree_node_alloc = rtree_node_alloc_intercept; + rtree_node_dalloc_orig = rtree_node_dalloc; + rtree_node_dalloc = rtree_node_dalloc_intercept; + rtree_leaf_alloc_orig = rtree_leaf_alloc; + rtree_leaf_alloc = rtree_leaf_alloc_intercept; + rtree_leaf_dalloc_orig = rtree_leaf_dalloc; + rtree_leaf_dalloc = rtree_leaf_dalloc_intercept; + + return test( + test_rtree_read_empty, + test_rtree_extrema, + test_rtree_bits, + test_rtree_random); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/size_classes.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/size_classes.c new file mode 100644 index 0000000..bcff560 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/size_classes.c @@ -0,0 +1,183 @@ +#include "test/jemalloc_test.h" + +static size_t +get_max_size_class(void) { + unsigned nlextents; + size_t mib[4]; + size_t sz, miblen, max_size_class; + + sz = sizeof(unsigned); + 
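
/*
 * Pattern worth noting in do_get_size_impl() earlier and in
 * get_max_size_class() here: translate a ctl name to a MIB once with
 * mallctlnametomib(), patch the wildcard index component, then query via
 * mallctlbymib(). A hedged sketch; nth_lextent_size() is illustrative and
 * its error handling is simplified relative to the assert_* macros.
 */
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static size_t
nth_lextent_size(unsigned n) {
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t size, sz = sizeof(size);

	if (mallctlnametomib("arenas.lextent.0.size", mib, &miblen) != 0) {
		return 0;
	}
	mib[2] = (size_t)n;	/* Replace the "0" component with the real index. */
	if (mallctlbymib(mib, miblen, (void *)&size, &sz, NULL, 0) != 0) {
		return 0;
	}
	return size;
}
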
assert_d_eq(mallctl("arenas.nlextents", (void *)&nlextents, &sz, NULL, + 0), 0, "Unexpected mallctl() error"); + + miblen = sizeof(mib) / sizeof(size_t); + assert_d_eq(mallctlnametomib("arenas.lextent.0.size", mib, &miblen), 0, + "Unexpected mallctlnametomib() error"); + mib[2] = nlextents - 1; + + sz = sizeof(size_t); + assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, + NULL, 0), 0, "Unexpected mallctlbymib() error"); + + return max_size_class; +} + +TEST_BEGIN(test_size_classes) { + size_t size_class, max_size_class; + szind_t index, max_index; + + max_size_class = get_max_size_class(); + max_index = sz_size2index(max_size_class); + + for (index = 0, size_class = sz_index2size(index); index < max_index || + size_class < max_size_class; index++, size_class = + sz_index2size(index)) { + assert_true(index < max_index, + "Loop conditionals should be equivalent; index=%u, " + "size_class=%zu (%#zx)", index, size_class, size_class); + assert_true(size_class < max_size_class, + "Loop conditionals should be equivalent; index=%u, " + "size_class=%zu (%#zx)", index, size_class, size_class); + + assert_u_eq(index, sz_size2index(size_class), + "sz_size2index() does not reverse sz_index2size(): index=%u" + " --> size_class=%zu --> index=%u --> size_class=%zu", + index, size_class, sz_size2index(size_class), + sz_index2size(sz_size2index(size_class))); + assert_zu_eq(size_class, + sz_index2size(sz_size2index(size_class)), + "sz_index2size() does not reverse sz_size2index(): index=%u" + " --> size_class=%zu --> index=%u --> size_class=%zu", + index, size_class, sz_size2index(size_class), + sz_index2size(sz_size2index(size_class))); + + assert_u_eq(index+1, sz_size2index(size_class+1), + "Next size_class does not round up properly"); + + assert_zu_eq(size_class, (index > 0) ? 
+ sz_s2u(sz_index2size(index-1)+1) : sz_s2u(1), + "sz_s2u() does not round up to size class"); + assert_zu_eq(size_class, sz_s2u(size_class-1), + "sz_s2u() does not round up to size class"); + assert_zu_eq(size_class, sz_s2u(size_class), + "sz_s2u() does not compute same size class"); + assert_zu_eq(sz_s2u(size_class+1), sz_index2size(index+1), + "sz_s2u() does not round up to next size class"); + } + + assert_u_eq(index, sz_size2index(sz_index2size(index)), + "sz_size2index() does not reverse sz_index2size()"); + assert_zu_eq(max_size_class, sz_index2size( + sz_size2index(max_size_class)), + "sz_index2size() does not reverse sz_size2index()"); + + assert_zu_eq(size_class, sz_s2u(sz_index2size(index-1)+1), + "sz_s2u() does not round up to size class"); + assert_zu_eq(size_class, sz_s2u(size_class-1), + "sz_s2u() does not round up to size class"); + assert_zu_eq(size_class, sz_s2u(size_class), + "sz_s2u() does not compute same size class"); +} +TEST_END + +TEST_BEGIN(test_psize_classes) { + size_t size_class, max_psz; + pszind_t pind, max_pind; + + max_psz = get_max_size_class() + PAGE; + max_pind = sz_psz2ind(max_psz); + + for (pind = 0, size_class = sz_pind2sz(pind); + pind < max_pind || size_class < max_psz; + pind++, size_class = sz_pind2sz(pind)) { + assert_true(pind < max_pind, + "Loop conditionals should be equivalent; pind=%u, " + "size_class=%zu (%#zx)", pind, size_class, size_class); + assert_true(size_class < max_psz, + "Loop conditionals should be equivalent; pind=%u, " + "size_class=%zu (%#zx)", pind, size_class, size_class); + + assert_u_eq(pind, sz_psz2ind(size_class), + "sz_psz2ind() does not reverse sz_pind2sz(): pind=%u -->" + " size_class=%zu --> pind=%u --> size_class=%zu", pind, + size_class, sz_psz2ind(size_class), + sz_pind2sz(sz_psz2ind(size_class))); + assert_zu_eq(size_class, sz_pind2sz(sz_psz2ind(size_class)), + "sz_pind2sz() does not reverse sz_psz2ind(): pind=%u -->" + " size_class=%zu --> pind=%u --> size_class=%zu", pind, + size_class, sz_psz2ind(size_class), + sz_pind2sz(sz_psz2ind(size_class))); + + assert_u_eq(pind+1, sz_psz2ind(size_class+1), + "Next size_class does not round up properly"); + + assert_zu_eq(size_class, (pind > 0) ? 
+ sz_psz2u(sz_pind2sz(pind-1)+1) : sz_psz2u(1), + "sz_psz2u() does not round up to size class"); + assert_zu_eq(size_class, sz_psz2u(size_class-1), + "sz_psz2u() does not round up to size class"); + assert_zu_eq(size_class, sz_psz2u(size_class), + "sz_psz2u() does not compute same size class"); + assert_zu_eq(sz_psz2u(size_class+1), sz_pind2sz(pind+1), + "sz_psz2u() does not round up to next size class"); + } + + assert_u_eq(pind, sz_psz2ind(sz_pind2sz(pind)), + "sz_psz2ind() does not reverse sz_pind2sz()"); + assert_zu_eq(max_psz, sz_pind2sz(sz_psz2ind(max_psz)), + "sz_pind2sz() does not reverse sz_psz2ind()"); + + assert_zu_eq(size_class, sz_psz2u(sz_pind2sz(pind-1)+1), + "sz_psz2u() does not round up to size class"); + assert_zu_eq(size_class, sz_psz2u(size_class-1), + "sz_psz2u() does not round up to size class"); + assert_zu_eq(size_class, sz_psz2u(size_class), + "sz_psz2u() does not compute same size class"); +} +TEST_END + +TEST_BEGIN(test_overflow) { + size_t max_size_class, max_psz; + + max_size_class = get_max_size_class(); + max_psz = max_size_class + PAGE; + + assert_u_eq(sz_size2index(max_size_class+1), NSIZES, + "sz_size2index() should return NSIZES on overflow"); + assert_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), NSIZES, + "sz_size2index() should return NSIZES on overflow"); + assert_u_eq(sz_size2index(SIZE_T_MAX), NSIZES, + "sz_size2index() should return NSIZES on overflow"); + + assert_zu_eq(sz_s2u(max_size_class+1), 0, + "sz_s2u() should return 0 for unsupported size"); + assert_zu_eq(sz_s2u(ZU(PTRDIFF_MAX)+1), 0, + "sz_s2u() should return 0 for unsupported size"); + assert_zu_eq(sz_s2u(SIZE_T_MAX), 0, + "sz_s2u() should return 0 on overflow"); + + assert_u_eq(sz_psz2ind(max_size_class+1), NPSIZES, + "sz_psz2ind() should return NPSIZES on overflow"); + assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES, + "sz_psz2ind() should return NPSIZES on overflow"); + assert_u_eq(sz_psz2ind(SIZE_T_MAX), NPSIZES, + "sz_psz2ind() should return NPSIZES on overflow"); + + assert_zu_eq(sz_psz2u(max_size_class+1), max_psz, + "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported" + " size"); + assert_zu_eq(sz_psz2u(ZU(PTRDIFF_MAX)+1), max_psz, + "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported " + "size"); + assert_zu_eq(sz_psz2u(SIZE_T_MAX), max_psz, + "sz_psz2u() should return (LARGE_MAXCLASS + PAGE) on overflow"); +} +TEST_END + +int +main(void) { + return test( + test_size_classes, + test_psize_classes, + test_overflow); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/slab.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/slab.c new file mode 100644 index 0000000..7e662ae --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/slab.c @@ -0,0 +1,32 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_arena_slab_regind) { + szind_t binind; + + for (binind = 0; binind < NBINS; binind++) { + size_t regind; + extent_t slab; + const bin_info_t *bin_info = &bin_infos[binind]; + extent_init(&slab, NULL, mallocx(bin_info->slab_size, + MALLOCX_LG_ALIGN(LG_PAGE)), bin_info->slab_size, true, + binind, 0, extent_state_active, false, true, true); + assert_ptr_not_null(extent_addr_get(&slab), + "Unexpected malloc() failure"); + for (regind = 0; regind < bin_info->nregs; regind++) { + void *reg = (void *)((uintptr_t)extent_addr_get(&slab) + + (bin_info->reg_size * regind)); + assert_zu_eq(arena_slab_regind(&slab, binind, reg), + regind, + "Incorrect region index computed for size %zu", + 
bin_info->reg_size); + } + free(extent_addr_get(&slab)); + } +} +TEST_END + +int +main(void) { + return test( + test_arena_slab_regind); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/smoothstep.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/smoothstep.c new file mode 100644 index 0000000..7c5dbb7 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/smoothstep.c @@ -0,0 +1,102 @@ +#include "test/jemalloc_test.h" + +static const uint64_t smoothstep_tab[] = { +#define STEP(step, h, x, y) \ + h, + SMOOTHSTEP +#undef STEP +}; + +TEST_BEGIN(test_smoothstep_integral) { + uint64_t sum, min, max; + unsigned i; + + /* + * The integral of smoothstep in the [0..1] range equals 1/2. Verify + * that the fixed point representation's integral is no more than + * rounding error distant from 1/2. Regarding rounding, each table + * element is rounded down to the nearest fixed point value, so the + * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps. + */ + sum = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + sum += smoothstep_tab[i]; + } + + max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1); + min = max - SMOOTHSTEP_NSTEPS; + + assert_u64_ge(sum, min, + "Integral too small, even accounting for truncation"); + assert_u64_le(sum, max, "Integral exceeds 1/2"); + if (false) { + malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n", + max - sum, SMOOTHSTEP_NSTEPS); + } +} +TEST_END + +TEST_BEGIN(test_smoothstep_monotonic) { + uint64_t prev_h; + unsigned i; + + /* + * The smoothstep function is monotonic in [0..1], i.e. its slope is + * non-negative. In practice we want to parametrize table generation + * such that piecewise slope is greater than zero, but do not require + * that here. + */ + prev_h = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + uint64_t h = smoothstep_tab[i]; + assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i); + prev_h = h; + } + assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1], + (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1"); +} +TEST_END + +TEST_BEGIN(test_smoothstep_slope) { + uint64_t prev_h, prev_delta; + unsigned i; + + /* + * The smoothstep slope strictly increases until x=0.5, and then + * strictly decreases until x=1.0. Verify the slightly weaker + * requirement of monotonicity, so that inadequate table precision does + * not cause false test failures. 
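+ *
+ * As a concrete check of this reasoning (assuming the table was
+ * generated with the default "smoother" variant, h(x) = 6x^5 - 15x^4 +
+ * 10x^3): the derivative h'(x) = 30x^2(1-x)^2 is symmetric about
+ * x = 0.5 and peaks there, so the per-step deltas h[i] - h[i-1] should
+ * rise on [0, 0.5] and fall on [0.5, 1], which is what the two loops
+ * below verify.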
+ */ + prev_h = 0; + prev_delta = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) { + uint64_t h = smoothstep_tab[i]; + uint64_t delta = h - prev_h; + assert_u64_ge(delta, prev_delta, + "Slope must monotonically increase in 0.0 <= x <= 0.5, " + "i=%u", i); + prev_h = h; + prev_delta = delta; + } + + prev_h = KQU(1) << SMOOTHSTEP_BFP; + prev_delta = 0; + for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) { + uint64_t h = smoothstep_tab[i]; + uint64_t delta = prev_h - h; + assert_u64_ge(delta, prev_delta, + "Slope must monotonically decrease in 0.5 <= x <= 1.0, " + "i=%u", i); + prev_h = h; + prev_delta = delta; + } +} +TEST_END + +int +main(void) { + return test( + test_smoothstep_integral, + test_smoothstep_monotonic, + test_smoothstep_slope); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/spin.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/spin.c new file mode 100644 index 0000000..b965f74 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/spin.c @@ -0,0 +1,18 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/spin.h" + +TEST_BEGIN(test_spin) { + spin_t spinner = SPIN_INITIALIZER; + + for (unsigned i = 0; i < 100; i++) { + spin_adaptive(&spinner); + } +} +TEST_END + +int +main(void) { + return test( + test_spin); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/stats.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/stats.c new file mode 100644 index 0000000..231010e --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/stats.c @@ -0,0 +1,368 @@ +#include "test/jemalloc_test.h" + +TEST_BEGIN(test_stats_summary) { + size_t sz, allocated, active, resident, mapped; + int expected = config_stats ? 0 : ENOENT; + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL, + 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0), + expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_le(allocated, active, + "allocated should be no larger than active"); + assert_zu_lt(active, resident, + "active should be less than resident"); + assert_zu_lt(active, mapped, + "active should be less than mapped"); + } +} +TEST_END + +TEST_BEGIN(test_stats_large) { + void *p; + uint64_t epoch; + size_t allocated; + uint64_t nmalloc, ndalloc, nrequests; + size_t sz; + int expected = config_stats ? 
0 : ENOENT; + + p = mallocx(SMALL_MAXCLASS+1, MALLOCX_ARENA(0)); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.large.allocated", + (void *)&allocated, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.nrequests", + (void *)&nrequests, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_gt(allocated, 0, + "allocated should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_le(nmalloc, nrequests, + "nmalloc should be no larger than nrequests"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_summary) { + void *little, *large; + uint64_t epoch; + size_t sz; + int expected = config_stats ? 0 : ENOENT; + size_t mapped; + uint64_t dirty_npurge, dirty_nmadvise, dirty_purged; + uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged; + + little = mallocx(SMALL_MAXCLASS, MALLOCX_ARENA(0)); + assert_ptr_not_null(little, "Unexpected mallocx() failure"); + large = mallocx((1U << LG_LARGE_MINCLASS), MALLOCX_ARENA(0)); + assert_ptr_not_null(large, "Unexpected mallocx() failure"); + + dallocx(little, 0); + dallocx(large, 0); + + assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), + opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result"); + assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, + "Unexpected mallctl() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL, + 0), expected, "Unexpected mallctl() result"); + + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.dirty_npurge", + (void *)&dirty_npurge, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.dirty_nmadvise", + (void *)&dirty_nmadvise, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.dirty_purged", + (void *)&dirty_purged, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.muzzy_npurge", + (void *)&muzzy_npurge, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.muzzy_nmadvise", + (void *)&muzzy_nmadvise, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.muzzy_purged", + (void *)&muzzy_purged, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + if (config_stats) { + if (!background_thread_enabled()) { + assert_u64_gt(dirty_npurge + muzzy_npurge, 0, + "At least one purge should have occurred"); + } + assert_u64_le(dirty_nmadvise, dirty_purged, + "dirty_nmadvise should be no greater than dirty_purged"); + assert_u64_le(muzzy_nmadvise, muzzy_purged, + "muzzy_nmadvise should be no greater than muzzy_purged"); + } +} +TEST_END + +void * +thd_start(void *arg) { + return NULL; +} + +static void +no_lazy_lock(void) { + thd_t thd; + + thd_create(&thd,
thd_start, NULL); + thd_join(thd, NULL); +} + +TEST_BEGIN(test_stats_arenas_small) { + void *p; + size_t sz, allocated; + uint64_t epoch, nmalloc, ndalloc, nrequests; + int expected = config_stats ? 0 : ENOENT; + + no_lazy_lock(); /* Lazy locking would dodge tcache testing. */ + + p = mallocx(SMALL_MAXCLASS, MALLOCX_ARENA(0)); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), + opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result"); + + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.small.allocated", + (void *)&allocated, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.small.nrequests", + (void *)&nrequests, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_gt(allocated, 0, + "allocated should be greater than zero"); + assert_u64_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_gt(nrequests, 0, + "nrequests should be greater than zero"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_large) { + void *p; + size_t sz, allocated; + uint64_t epoch, nmalloc, ndalloc; + int expected = config_stats ? 0 : ENOENT; + + p = mallocx((1U << LG_LARGE_MINCLASS), MALLOCX_ARENA(0)); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); + + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.large.allocated", + (void *)&allocated, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc, + &sz, NULL, 0), expected, "Unexpected mallctl() result"); + + if (config_stats) { + assert_zu_gt(allocated, 0, + "allocated should be greater than zero"); + assert_u64_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + } + + dallocx(p, 0); +} +TEST_END + +static void +gen_mallctl_str(char *cmd, char *name, unsigned arena_ind) { + sprintf(cmd, "stats.arenas.%u.bins.0.%s", arena_ind, name); +} + +TEST_BEGIN(test_stats_arenas_bins) { + void *p; + size_t sz, curslabs, curregs; + uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes; + uint64_t nslabs, nreslabs; + int expected = config_stats ? 0 : ENOENT; + + /* Make sure allocation below isn't satisfied by tcache. */ + assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), + opt_tcache ?
0 : EFAULT, "Unexpected mallctl() result"); + + unsigned arena_ind, old_arena_ind; + sz = sizeof(unsigned); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0), + 0, "Arena creation failure"); + sz = sizeof(arena_ind); + assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz, + (void *)&arena_ind, sizeof(arena_ind)), 0, + "Unexpected mallctl() failure"); + + p = malloc(bin_infos[0].reg_size); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + + assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), + opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result"); + + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); + + char cmd[128]; + sz = sizeof(uint64_t); + gen_mallctl_str(cmd, "nmalloc", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nmalloc, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "ndalloc", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&ndalloc, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "nrequests", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nrequests, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + sz = sizeof(size_t); + gen_mallctl_str(cmd, "curregs", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&curregs, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + sz = sizeof(uint64_t); + gen_mallctl_str(cmd, "nfills", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "nflushes", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + gen_mallctl_str(cmd, "nslabs", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nslabs, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + gen_mallctl_str(cmd, "nreslabs", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&nreslabs, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + sz = sizeof(size_t); + gen_mallctl_str(cmd, "curslabs", arena_ind); + assert_d_eq(mallctl(cmd, (void *)&curslabs, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + if (config_stats) { + assert_u64_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_gt(nrequests, 0, + "nrequests should be greater than zero"); + assert_zu_gt(curregs, 0, + "curregs should be greater than zero"); + if (opt_tcache) { + assert_u64_gt(nfills, 0, + "At least one fill should have occurred"); + assert_u64_gt(nflushes, 0, + "At least one flush should have occurred"); + } + assert_u64_gt(nslabs, 0, + "At least one slab should have been allocated"); + assert_zu_gt(curslabs, 0, + "At least one slab should be currently allocated"); + } + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_stats_arenas_lextents) { + void *p; + uint64_t epoch, nmalloc, ndalloc; + size_t curlextents, sz, hsize; + int expected = config_stats ?
0 : ENOENT; + + sz = sizeof(size_t); + assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&hsize, &sz, NULL, + 0), 0, "Unexpected mallctl() failure"); + + p = mallocx(hsize, MALLOCX_ARENA(0)); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + + assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), + 0, "Unexpected mallctl() failure"); + + sz = sizeof(uint64_t); + assert_d_eq(mallctl("stats.arenas.0.lextents.0.nmalloc", + (void *)&nmalloc, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + assert_d_eq(mallctl("stats.arenas.0.lextents.0.ndalloc", + (void *)&ndalloc, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + sz = sizeof(size_t); + assert_d_eq(mallctl("stats.arenas.0.lextents.0.curlextents", + (void *)&curlextents, &sz, NULL, 0), expected, + "Unexpected mallctl() result"); + + if (config_stats) { + assert_u64_gt(nmalloc, 0, + "nmalloc should be greater than zero"); + assert_u64_ge(nmalloc, ndalloc, + "nmalloc should be at least as large as ndalloc"); + assert_u64_gt(curlextents, 0, + "At least one extent should be currently allocated"); + } + + dallocx(p, 0); +} +TEST_END + +int +main(void) { + return test_no_reentrancy( + test_stats_summary, + test_stats_large, + test_stats_arenas_summary, + test_stats_arenas_small, + test_stats_arenas_large, + test_stats_arenas_bins, + test_stats_arenas_lextents); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/stats_print.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/stats_print.c new file mode 100644 index 0000000..014d002 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/stats_print.c @@ -0,0 +1,999 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/util.h" + +typedef enum { + TOKEN_TYPE_NONE, + TOKEN_TYPE_ERROR, + TOKEN_TYPE_EOI, + TOKEN_TYPE_NULL, + TOKEN_TYPE_FALSE, + TOKEN_TYPE_TRUE, + TOKEN_TYPE_LBRACKET, + TOKEN_TYPE_RBRACKET, + TOKEN_TYPE_LBRACE, + TOKEN_TYPE_RBRACE, + TOKEN_TYPE_COLON, + TOKEN_TYPE_COMMA, + TOKEN_TYPE_STRING, + TOKEN_TYPE_NUMBER +} token_type_t; + +typedef struct parser_s parser_t; +typedef struct { + parser_t *parser; + token_type_t token_type; + size_t pos; + size_t len; + size_t line; + size_t col; +} token_t; + +struct parser_s { + bool verbose; + char *buf; /* '\0'-terminated. */ + size_t len; /* Number of characters preceding '\0' in buf. 
*/ + size_t pos; + size_t line; + size_t col; + token_t token; +}; + +static void +token_init(token_t *token, parser_t *parser, token_type_t token_type, + size_t pos, size_t len, size_t line, size_t col) { + token->parser = parser; + token->token_type = token_type; + token->pos = pos; + token->len = len; + token->line = line; + token->col = col; +} + +static void +token_error(token_t *token) { + if (!token->parser->verbose) { + return; + } + switch (token->token_type) { + case TOKEN_TYPE_NONE: + not_reached(); + case TOKEN_TYPE_ERROR: + malloc_printf("%zu:%zu: Unexpected character in token: ", + token->line, token->col); + break; + default: + malloc_printf("%zu:%zu: Unexpected token: ", token->line, + token->col); + break; + } + UNUSED ssize_t err = malloc_write_fd(STDERR_FILENO, + &token->parser->buf[token->pos], token->len); + malloc_printf("\n"); +} + +static void +parser_init(parser_t *parser, bool verbose) { + parser->verbose = verbose; + parser->buf = NULL; + parser->len = 0; + parser->pos = 0; + parser->line = 1; + parser->col = 0; +} + +static void +parser_fini(parser_t *parser) { + if (parser->buf != NULL) { + dallocx(parser->buf, MALLOCX_TCACHE_NONE); + } +} + +static bool +parser_append(parser_t *parser, const char *str) { + size_t len = strlen(str); + char *buf = (parser->buf == NULL) ? mallocx(len + 1, + MALLOCX_TCACHE_NONE) : rallocx(parser->buf, parser->len + len + 1, + MALLOCX_TCACHE_NONE); + if (buf == NULL) { + return true; + } + memcpy(&buf[parser->len], str, len + 1); + parser->buf = buf; + parser->len += len; + return false; +} + +static bool +parser_tokenize(parser_t *parser) { + enum { + STATE_START, + STATE_EOI, + STATE_N, STATE_NU, STATE_NUL, STATE_NULL, + STATE_F, STATE_FA, STATE_FAL, STATE_FALS, STATE_FALSE, + STATE_T, STATE_TR, STATE_TRU, STATE_TRUE, + STATE_LBRACKET, + STATE_RBRACKET, + STATE_LBRACE, + STATE_RBRACE, + STATE_COLON, + STATE_COMMA, + STATE_CHARS, + STATE_CHAR_ESCAPE, + STATE_CHAR_U, STATE_CHAR_UD, STATE_CHAR_UDD, STATE_CHAR_UDDD, + STATE_STRING, + STATE_MINUS, + STATE_LEADING_ZERO, + STATE_DIGITS, + STATE_DECIMAL, + STATE_FRAC_DIGITS, + STATE_EXP, + STATE_EXP_SIGN, + STATE_EXP_DIGITS, + STATE_ACCEPT + } state = STATE_START; + size_t token_pos JEMALLOC_CC_SILENCE_INIT(0); + size_t token_line JEMALLOC_CC_SILENCE_INIT(1); + size_t token_col JEMALLOC_CC_SILENCE_INIT(0); + + assert_zu_le(parser->pos, parser->len, + "Position is past end of buffer"); + + while (state != STATE_ACCEPT) { + char c = parser->buf[parser->pos]; + + switch (state) { + case STATE_START: + token_pos = parser->pos; + token_line = parser->line; + token_col = parser->col; + switch (c) { + case ' ': case '\b': case '\n': case '\r': case '\t': + break; + case '\0': + state = STATE_EOI; + break; + case 'n': + state = STATE_N; + break; + case 'f': + state = STATE_F; + break; + case 't': + state = STATE_T; + break; + case '[': + state = STATE_LBRACKET; + break; + case ']': + state = STATE_RBRACKET; + break; + case '{': + state = STATE_LBRACE; + break; + case '}': + state = STATE_RBRACE; + break; + case ':': + state = STATE_COLON; + break; + case ',': + state = STATE_COMMA; + break; + case '"': + state = STATE_CHARS; + break; + case '-': + state = STATE_MINUS; + break; + case '0': + state = STATE_LEADING_ZERO; + break; + case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + state = STATE_DIGITS; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return 
true; + } + break; + case STATE_EOI: + token_init(&parser->token, parser, + TOKEN_TYPE_EOI, token_pos, parser->pos - + token_pos, token_line, token_col); + state = STATE_ACCEPT; + break; + case STATE_N: + switch (c) { + case 'u': + state = STATE_NU; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_NU: + switch (c) { + case 'l': + state = STATE_NUL; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_NUL: + switch (c) { + case 'l': + state = STATE_NULL; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_NULL: + switch (c) { + case ' ': case '\b': case '\n': case '\r': case '\t': + case '\0': + case '[': case ']': case '{': case '}': case ':': + case ',': + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + token_init(&parser->token, parser, TOKEN_TYPE_NULL, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_F: + switch (c) { + case 'a': + state = STATE_FA; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_FA: + switch (c) { + case 'l': + state = STATE_FAL; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_FAL: + switch (c) { + case 's': + state = STATE_FALS; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_FALS: + switch (c) { + case 'e': + state = STATE_FALSE; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_FALSE: + switch (c) { + case ' ': case '\b': case '\n': case '\r': case '\t': + case '\0': + case '[': case ']': case '{': case '}': case ':': + case ',': + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + token_init(&parser->token, parser, + TOKEN_TYPE_FALSE, token_pos, parser->pos - + token_pos, token_line, token_col); + state = STATE_ACCEPT; + break; + case STATE_T: + switch (c) { + case 'r': + state = STATE_TR; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_TR: + switch (c) { + case 'u': + state = STATE_TRU; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_TRU: + switch (c) { + case 'e': + state = STATE_TRUE; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_TRUE: + switch (c) { + case 
' ': case '\b': case '\n': case '\r': case '\t': + case '\0': + case '[': case ']': case '{': case '}': case ':': + case ',': + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + token_init(&parser->token, parser, TOKEN_TYPE_TRUE, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_LBRACKET: + token_init(&parser->token, parser, TOKEN_TYPE_LBRACKET, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_RBRACKET: + token_init(&parser->token, parser, TOKEN_TYPE_RBRACKET, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_LBRACE: + token_init(&parser->token, parser, TOKEN_TYPE_LBRACE, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_RBRACE: + token_init(&parser->token, parser, TOKEN_TYPE_RBRACE, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_COLON: + token_init(&parser->token, parser, TOKEN_TYPE_COLON, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_COMMA: + token_init(&parser->token, parser, TOKEN_TYPE_COMMA, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_CHARS: + switch (c) { + case '\\': + state = STATE_CHAR_ESCAPE; + break; + case '"': + state = STATE_STRING; + break; + case 0x00: case 0x01: case 0x02: case 0x03: case 0x04: + case 0x05: case 0x06: case 0x07: case 0x08: case 0x09: + case 0x0a: case 0x0b: case 0x0c: case 0x0d: case 0x0e: + case 0x0f: case 0x10: case 0x11: case 0x12: case 0x13: + case 0x14: case 0x15: case 0x16: case 0x17: case 0x18: + case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: + case 0x1e: case 0x1f: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + default: + break; + } + break; + case STATE_CHAR_ESCAPE: + switch (c) { + case '"': case '\\': case '/': case 'b': case 'n': + case 'r': case 't': + state = STATE_CHARS; + break; + case 'u': + state = STATE_CHAR_U; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_CHAR_U: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + state = STATE_CHAR_UD; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_CHAR_UD: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + state = STATE_CHAR_UDD; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_CHAR_UDD: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': 
case '7': case '8': case '9': + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + state = STATE_CHAR_UDDD; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_CHAR_UDDD: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + state = STATE_CHARS; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_STRING: + token_init(&parser->token, parser, TOKEN_TYPE_STRING, + token_pos, parser->pos - token_pos, token_line, + token_col); + state = STATE_ACCEPT; + break; + case STATE_MINUS: + switch (c) { + case '0': + state = STATE_LEADING_ZERO; + break; + case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + state = STATE_DIGITS; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_LEADING_ZERO: + switch (c) { + case '.': + state = STATE_DECIMAL; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_NUMBER, token_pos, parser->pos - + token_pos, token_line, token_col); + state = STATE_ACCEPT; + break; + } + break; + case STATE_DIGITS: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + break; + case '.': + state = STATE_DECIMAL; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_NUMBER, token_pos, parser->pos - + token_pos, token_line, token_col); + state = STATE_ACCEPT; + break; + } + break; + case STATE_DECIMAL: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + state = STATE_FRAC_DIGITS; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_FRAC_DIGITS: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + break; + case 'e': case 'E': + state = STATE_EXP; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_NUMBER, token_pos, parser->pos - + token_pos, token_line, token_col); + state = STATE_ACCEPT; + break; + } + break; + case STATE_EXP: + switch (c) { + case '-': case '+': + state = STATE_EXP_SIGN; + break; + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + state = STATE_EXP_DIGITS; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_EXP_SIGN: + switch (c) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + state = STATE_EXP_DIGITS; + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_ERROR, token_pos, parser->pos + 1 + - token_pos, token_line, token_col); + return true; + } + break; + case STATE_EXP_DIGITS: + switch (c) { + case '0': case '1': case '2': case '3': case 
'4': + case '5': case '6': case '7': case '8': case '9': + break; + default: + token_init(&parser->token, parser, + TOKEN_TYPE_NUMBER, token_pos, parser->pos - + token_pos, token_line, token_col); + state = STATE_ACCEPT; + break; + } + break; + default: + not_reached(); + } + + if (state != STATE_ACCEPT) { + if (c == '\n') { + parser->line++; + parser->col = 0; + } else { + parser->col++; + } + parser->pos++; + } + } + return false; +} + +static bool parser_parse_array(parser_t *parser); +static bool parser_parse_object(parser_t *parser); + +static bool +parser_parse_value(parser_t *parser) { + switch (parser->token.token_type) { + case TOKEN_TYPE_NULL: + case TOKEN_TYPE_FALSE: + case TOKEN_TYPE_TRUE: + case TOKEN_TYPE_STRING: + case TOKEN_TYPE_NUMBER: + return false; + case TOKEN_TYPE_LBRACE: + return parser_parse_object(parser); + case TOKEN_TYPE_LBRACKET: + return parser_parse_array(parser); + default: + return true; + } + not_reached(); +} + +static bool +parser_parse_pair(parser_t *parser) { + assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING, + "Pair should start with string"); + if (parser_tokenize(parser)) { + return true; + } + switch (parser->token.token_type) { + case TOKEN_TYPE_COLON: + if (parser_tokenize(parser)) { + return true; + } + return parser_parse_value(parser); + default: + return true; + } +} + +static bool +parser_parse_values(parser_t *parser) { + if (parser_parse_value(parser)) { + return true; + } + + while (true) { + if (parser_tokenize(parser)) { + return true; + } + switch (parser->token.token_type) { + case TOKEN_TYPE_COMMA: + if (parser_tokenize(parser)) { + return true; + } + if (parser_parse_value(parser)) { + return true; + } + break; + case TOKEN_TYPE_RBRACKET: + return false; + default: + return true; + } + } +} + +static bool +parser_parse_array(parser_t *parser) { + assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACKET, + "Array should start with ["); + if (parser_tokenize(parser)) { + return true; + } + switch (parser->token.token_type) { + case TOKEN_TYPE_RBRACKET: + return false; + default: + return parser_parse_values(parser); + } + not_reached(); +} + +static bool +parser_parse_pairs(parser_t *parser) { + assert_d_eq(parser->token.token_type, TOKEN_TYPE_STRING, + "Object should start with string"); + if (parser_parse_pair(parser)) { + return true; + } + + while (true) { + if (parser_tokenize(parser)) { + return true; + } + switch (parser->token.token_type) { + case TOKEN_TYPE_COMMA: + if (parser_tokenize(parser)) { + return true; + } + switch (parser->token.token_type) { + case TOKEN_TYPE_STRING: + if (parser_parse_pair(parser)) { + return true; + } + break; + default: + return true; + } + break; + case TOKEN_TYPE_RBRACE: + return false; + default: + return true; + } + } +} + +static bool +parser_parse_object(parser_t *parser) { + assert_d_eq(parser->token.token_type, TOKEN_TYPE_LBRACE, + "Object should start with {"); + if (parser_tokenize(parser)) { + return true; + } + switch (parser->token.token_type) { + case TOKEN_TYPE_STRING: + return parser_parse_pairs(parser); + case TOKEN_TYPE_RBRACE: + return false; + default: + return true; + } + not_reached(); +} + +static bool +parser_parse(parser_t *parser) { + if (parser_tokenize(parser)) { + goto label_error; + } + if (parser_parse_value(parser)) { + goto label_error; + } + + if (parser_tokenize(parser)) { + goto label_error; + } + switch (parser->token.token_type) { + case TOKEN_TYPE_EOI: + return false; + default: + goto label_error; + } + not_reached(); + +label_error: + 
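/*
+	 * Every parser_* helper in this harness returns false on success and
+	 * true on failure; report the offending token here before propagating
+	 * the error to the caller.
+	 */
+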
token_error(&parser->token); + return true; +} + +TEST_BEGIN(test_json_parser) { + size_t i; + const char *invalid_inputs[] = { + /* Tokenizer error case tests. */ + "{ \"string\": X }", + "{ \"string\": nXll }", + "{ \"string\": nuXl }", + "{ \"string\": nulX }", + "{ \"string\": nullX }", + "{ \"string\": fXlse }", + "{ \"string\": faXse }", + "{ \"string\": falXe }", + "{ \"string\": falsX }", + "{ \"string\": falseX }", + "{ \"string\": tXue }", + "{ \"string\": trXe }", + "{ \"string\": truX }", + "{ \"string\": trueX }", + "{ \"string\": \"\n\" }", + "{ \"string\": \"\\z\" }", + "{ \"string\": \"\\uX000\" }", + "{ \"string\": \"\\u0X00\" }", + "{ \"string\": \"\\u00X0\" }", + "{ \"string\": \"\\u000X\" }", + "{ \"string\": -X }", + "{ \"string\": 0.X }", + "{ \"string\": 0.0eX }", + "{ \"string\": 0.0e+X }", + + /* Parser error test cases. */ + "{\"string\": }", + "{\"string\" }", + "{\"string\": [ 0 }", + "{\"string\": {\"a\":0, 1 } }", + "{\"string\": {\"a\":0: } }", + "{", + "{}{", + }; + const char *valid_inputs[] = { + /* Token tests. */ + "null", + "false", + "true", + "{}", + "{\"a\": 0}", + "[]", + "[0, 1]", + "0", + "1", + "10", + "-10", + "10.23", + "10.23e4", + "10.23e-4", + "10.23e+4", + "10.23E4", + "10.23E-4", + "10.23E+4", + "-10.23", + "-10.23e4", + "-10.23e-4", + "-10.23e+4", + "-10.23E4", + "-10.23E-4", + "-10.23E+4", + "\"value\"", + "\" \\\" \\/ \\b \\n \\r \\t \\u0abc \\u1DEF \"", + + /* Parser test with various nesting. */ + "{\"a\":null, \"b\":[1,[{\"c\":2},3]], \"d\":{\"e\":true}}", + }; + + for (i = 0; i < sizeof(invalid_inputs)/sizeof(const char *); i++) { + const char *input = invalid_inputs[i]; + parser_t parser; + parser_init(&parser, false); + assert_false(parser_append(&parser, input), + "Unexpected input appending failure"); + assert_true(parser_parse(&parser), + "Unexpected parse success for input: %s", input); + parser_fini(&parser); + } + + for (i = 0; i < sizeof(valid_inputs)/sizeof(const char *); i++) { + const char *input = valid_inputs[i]; + parser_t parser; + parser_init(&parser, true); + assert_false(parser_append(&parser, input), + "Unexpected input appending failure"); + assert_false(parser_parse(&parser), + "Unexpected parse error for input: %s", input); + parser_fini(&parser); + } +} +TEST_END + +void +write_cb(void *opaque, const char *str) { + parser_t *parser = (parser_t *)opaque; + if (parser_append(parser, str)) { + test_fail("Unexpected input appending failure"); + } +} + +TEST_BEGIN(test_stats_print_json) { + const char *opts[] = { + "J", + "Jg", + "Jm", + "Jd", + "Jmd", + "Jgd", + "Jgm", + "Jgmd", + "Ja", + "Jb", + "Jl", + "Jx", + "Jbl", + "Jal", + "Jab", + "Jabl", + "Jax", + "Jbx", + "Jlx", + "Jablx", + "Jgmdablx", + }; + unsigned arena_ind, i; + + for (i = 0; i < 3; i++) { + unsigned j; + + switch (i) { + case 0: + break; + case 1: { + size_t sz = sizeof(arena_ind); + assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, + &sz, NULL, 0), 0, "Unexpected mallctl failure"); + break; + } case 2: { + size_t mib[3]; + size_t miblen = sizeof(mib)/sizeof(size_t); + assert_d_eq(mallctlnametomib("arena.0.destroy", + mib, &miblen), 0, + "Unexpected mallctlnametomib failure"); + mib[1] = arena_ind; + assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, + 0), 0, "Unexpected mallctlbymib failure"); + break; + } default: + not_reached(); + } + + for (j = 0; j < sizeof(opts)/sizeof(const char *); j++) { + parser_t parser; + + parser_init(&parser, true); + malloc_stats_print(write_cb, (void *)&parser, opts[j]); + 
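/*
+			 * Each string in opts is "J" (emit JSON) plus optional
+			 * filter characters; per the opt.stats_print
+			 * documentation, 'g', 'm', 'd', 'a', 'b', 'l' and 'x'
+			 * omit the general, merged-arena, destroyed-arena,
+			 * per-arena, bin, large-extent and mutex sections
+			 * respectively, so every combination must still parse
+			 * as valid JSON.
+			 */
+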
assert_false(parser_parse(&parser), + "Unexpected parse error, opts=\"%s\"", opts[j]); + parser_fini(&parser); + } + } +} +TEST_END + +int +main(void) { + return test( + test_json_parser, + test_stats_print_json); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/ticker.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/ticker.c new file mode 100644 index 0000000..e5790a3 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/ticker.c @@ -0,0 +1,73 @@ +#include "test/jemalloc_test.h" + +#include "jemalloc/internal/ticker.h" + +TEST_BEGIN(test_ticker_tick) { +#define NREPS 2 +#define NTICKS 3 + ticker_t ticker; + int32_t i, j; + + ticker_init(&ticker, NTICKS); + for (i = 0; i < NREPS; i++) { + for (j = 0; j < NTICKS; j++) { + assert_u_eq(ticker_read(&ticker), NTICKS - j, + "Unexpected ticker value (i=%d, j=%d)", i, j); + assert_false(ticker_tick(&ticker), + "Unexpected ticker fire (i=%d, j=%d)", i, j); + } + assert_u32_eq(ticker_read(&ticker), 0, + "Expected ticker depletion"); + assert_true(ticker_tick(&ticker), + "Expected ticker fire (i=%d)", i); + assert_u32_eq(ticker_read(&ticker), NTICKS, + "Expected ticker reset"); + } +#undef NTICKS +} +TEST_END + +TEST_BEGIN(test_ticker_ticks) { +#define NTICKS 3 + ticker_t ticker; + + ticker_init(&ticker, NTICKS); + + assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); + assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire"); + assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value"); + assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire"); + assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); + + assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire"); + assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); +#undef NTICKS +} +TEST_END + +TEST_BEGIN(test_ticker_copy) { +#define NTICKS 3 + ticker_t ta, tb; + + ticker_init(&ta, NTICKS); + ticker_copy(&tb, &ta); + assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); + assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire"); + assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); + + ticker_tick(&ta); + ticker_copy(&tb, &ta); + assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value"); + assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire"); + assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); +#undef NTICKS +} +TEST_END + +int +main(void) { + return test( + test_ticker_tick, + test_ticker_ticks, + test_ticker_copy); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/tsd.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/tsd.c new file mode 100644 index 0000000..6c47913 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/tsd.c @@ -0,0 +1,139 @@ +#include "test/jemalloc_test.h" + +static int data_cleanup_count; + +void +data_cleanup(int *data) { + if (data_cleanup_count == 0) { + assert_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT, + "Argument passed into cleanup function should match tsd " + "value"); + } + ++data_cleanup_count; + + /* + * Allocate during cleanup for two rounds, in order to assure that + * jemalloc's internal tsd reinitialization happens. 
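+	 * (On pthreads platforms this leans on the POSIX rule that TSD
+	 * destructors are re-run, up to PTHREAD_DESTRUCTOR_ITERATIONS times,
+	 * when a destructor stores a new non-NULL value.)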
+ */ + bool reincarnate = false; + switch (*data) { + case MALLOC_TSD_TEST_DATA_INIT: + *data = 1; + reincarnate = true; + break; + case 1: + *data = 2; + reincarnate = true; + break; + case 2: + return; + default: + not_reached(); + } + + if (reincarnate) { + void *p = mallocx(1, 0); + assert_ptr_not_null(p, "Unexpected mallocx() failure"); + dallocx(p, 0); + } +} + +static void * +thd_start(void *arg) { + int d = (int)(uintptr_t)arg; + void *p; + + tsd_t *tsd = tsd_fetch(); + assert_x_eq(tsd_test_data_get(tsd), MALLOC_TSD_TEST_DATA_INIT, + "Initial tsd get should return initialization value"); + + p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + + tsd_test_data_set(tsd, d); + assert_x_eq(tsd_test_data_get(tsd), d, + "After tsd set, tsd get should return value that was set"); + + d = 0; + assert_x_eq(tsd_test_data_get(tsd), (int)(uintptr_t)arg, + "Resetting local data should have no effect on tsd"); + + tsd_test_callback_set(tsd, &data_cleanup); + + free(p); + return NULL; +} + +TEST_BEGIN(test_tsd_main_thread) { + thd_start((void *)(uintptr_t)0xa5f3e329); +} +TEST_END + +TEST_BEGIN(test_tsd_sub_thread) { + thd_t thd; + + data_cleanup_count = 0; + thd_create(&thd, thd_start, (void *)MALLOC_TSD_TEST_DATA_INIT); + thd_join(thd, NULL); + /* + * We reincarnate twice in the data cleanup, so it should execute at + * least 3 times. + */ + assert_x_ge(data_cleanup_count, 3, + "Cleanup function should have executed multiple times."); +} +TEST_END + +static void * +thd_start_reincarnated(void *arg) { + tsd_t *tsd = tsd_fetch(); + assert(tsd); + + void *p = malloc(1); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + + /* Manually trigger reincarnation. */ + assert_ptr_not_null(tsd_arena_get(tsd), + "Should have tsd arena set."); + tsd_cleanup((void *)tsd); + assert_ptr_null(*tsd_arenap_get_unsafe(tsd), + "TSD arena should have been cleared."); + assert_u_eq(tsd->state, tsd_state_purgatory, + "TSD state should be purgatory\n"); + + free(p); + assert_u_eq(tsd->state, tsd_state_reincarnated, + "TSD state should be reincarnated\n"); + p = mallocx(1, MALLOCX_TCACHE_NONE); + assert_ptr_not_null(p, "Unexpected malloc() failure"); + assert_ptr_null(*tsd_arenap_get_unsafe(tsd), + "Should not have tsd arena set after reincarnation."); + + free(p); + tsd_cleanup((void *)tsd); + assert_ptr_null(*tsd_arenap_get_unsafe(tsd), + "TSD arena should have been cleared after 2nd cleanup."); + + return NULL; +} + +TEST_BEGIN(test_tsd_reincarnation) { + thd_t thd; + thd_create(&thd, thd_start_reincarnated, NULL); + thd_join(thd, NULL); +} +TEST_END + +int +main(void) { + /* Ensure tsd bootstrapped.
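nallocx() forces allocator
+	 * (and thus tsd) initialization without performing an allocation; a
+	 * return of 0 here indicates an initialization failure.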
*/ + if (nallocx(1, 0) == 0) { + malloc_printf("Initialization error"); + return test_status_fail; + } + + return test_no_reentrancy( + test_tsd_main_thread, + test_tsd_sub_thread, + test_tsd_reincarnation); +} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/util.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/util.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/util.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/util.c diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/witness.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/witness.c new file mode 100644 index 0000000..5986da4 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/witness.c @@ -0,0 +1,280 @@ +#include "test/jemalloc_test.h" + +static witness_lock_error_t *witness_lock_error_orig; +static witness_owner_error_t *witness_owner_error_orig; +static witness_not_owner_error_t *witness_not_owner_error_orig; +static witness_depth_error_t *witness_depth_error_orig; + +static bool saw_lock_error; +static bool saw_owner_error; +static bool saw_not_owner_error; +static bool saw_depth_error; + +static void +witness_lock_error_intercept(const witness_list_t *witnesses, + const witness_t *witness) { + saw_lock_error = true; +} + +static void +witness_owner_error_intercept(const witness_t *witness) { + saw_owner_error = true; +} + +static void +witness_not_owner_error_intercept(const witness_t *witness) { + saw_not_owner_error = true; +} + +static void +witness_depth_error_intercept(const witness_list_t *witnesses, + witness_rank_t rank_inclusive, unsigned depth) { + saw_depth_error = true; +} + +static int +witness_comp(const witness_t *a, void *oa, const witness_t *b, void *ob) { + assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank"); + + assert(oa == (void *)a); + assert(ob == (void *)b); + + return strcmp(a->name, b->name); +} + +static int +witness_comp_reverse(const witness_t *a, void *oa, const witness_t *b, + void *ob) { + assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank"); + + assert(oa == (void *)a); + assert(ob == (void *)b); + + return -strcmp(a->name, b->name); +} + +TEST_BEGIN(test_witness) { + witness_t a, b; + witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + + test_skip_if(!config_debug); + + witness_assert_lockless(&witness_tsdn); + witness_assert_depth(&witness_tsdn, 0); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0); + + witness_init(&a, "a", 1, NULL, NULL); + witness_assert_not_owner(&witness_tsdn, &a); + witness_lock(&witness_tsdn, &a); + witness_assert_owner(&witness_tsdn, &a); + witness_assert_depth(&witness_tsdn, 1); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 0); + + witness_init(&b, "b", 2, NULL, NULL); + witness_assert_not_owner(&witness_tsdn, &b); + witness_lock(&witness_tsdn, &b); + witness_assert_owner(&witness_tsdn, &b); + witness_assert_depth(&witness_tsdn, 2); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 2); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)2U, 1); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0); + + witness_unlock(&witness_tsdn, &a); + witness_assert_depth(&witness_tsdn, 1); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 1); + witness_assert_depth_to_rank(&witness_tsdn, 
(witness_rank_t)2U, 1); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)3U, 0); + witness_unlock(&witness_tsdn, &b); + + witness_assert_lockless(&witness_tsdn); + witness_assert_depth(&witness_tsdn, 0); + witness_assert_depth_to_rank(&witness_tsdn, (witness_rank_t)1U, 0); +} +TEST_END + +TEST_BEGIN(test_witness_comp) { + witness_t a, b, c, d; + witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + + test_skip_if(!config_debug); + + witness_assert_lockless(&witness_tsdn); + + witness_init(&a, "a", 1, witness_comp, &a); + witness_assert_not_owner(&witness_tsdn, &a); + witness_lock(&witness_tsdn, &a); + witness_assert_owner(&witness_tsdn, &a); + witness_assert_depth(&witness_tsdn, 1); + + witness_init(&b, "b", 1, witness_comp, &b); + witness_assert_not_owner(&witness_tsdn, &b); + witness_lock(&witness_tsdn, &b); + witness_assert_owner(&witness_tsdn, &b); + witness_assert_depth(&witness_tsdn, 2); + witness_unlock(&witness_tsdn, &b); + witness_assert_depth(&witness_tsdn, 1); + + witness_lock_error_orig = witness_lock_error; + witness_lock_error = witness_lock_error_intercept; + saw_lock_error = false; + + witness_init(&c, "c", 1, witness_comp_reverse, &c); + witness_assert_not_owner(&witness_tsdn, &c); + assert_false(saw_lock_error, "Unexpected witness lock error"); + witness_lock(&witness_tsdn, &c); + assert_true(saw_lock_error, "Expected witness lock error"); + witness_unlock(&witness_tsdn, &c); + witness_assert_depth(&witness_tsdn, 1); + + saw_lock_error = false; + + witness_init(&d, "d", 1, NULL, NULL); + witness_assert_not_owner(&witness_tsdn, &d); + assert_false(saw_lock_error, "Unexpected witness lock error"); + witness_lock(&witness_tsdn, &d); + assert_true(saw_lock_error, "Expected witness lock error"); + witness_unlock(&witness_tsdn, &d); + witness_assert_depth(&witness_tsdn, 1); + + witness_unlock(&witness_tsdn, &a); + + witness_assert_lockless(&witness_tsdn); + + witness_lock_error = witness_lock_error_orig; +} +TEST_END + +TEST_BEGIN(test_witness_reversal) { + witness_t a, b; + witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + + test_skip_if(!config_debug); + + witness_lock_error_orig = witness_lock_error; + witness_lock_error = witness_lock_error_intercept; + saw_lock_error = false; + + witness_assert_lockless(&witness_tsdn); + + witness_init(&a, "a", 1, NULL, NULL); + witness_init(&b, "b", 2, NULL, NULL); + + witness_lock(&witness_tsdn, &b); + witness_assert_depth(&witness_tsdn, 1); + assert_false(saw_lock_error, "Unexpected witness lock error"); + witness_lock(&witness_tsdn, &a); + assert_true(saw_lock_error, "Expected witness lock error"); + + witness_unlock(&witness_tsdn, &a); + witness_assert_depth(&witness_tsdn, 1); + witness_unlock(&witness_tsdn, &b); + + witness_assert_lockless(&witness_tsdn); + + witness_lock_error = witness_lock_error_orig; +} +TEST_END + +TEST_BEGIN(test_witness_recursive) { + witness_t a; + witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + + test_skip_if(!config_debug); + + witness_not_owner_error_orig = witness_not_owner_error; + witness_not_owner_error = witness_not_owner_error_intercept; + saw_not_owner_error = false; + + witness_lock_error_orig = witness_lock_error; + witness_lock_error = witness_lock_error_intercept; + saw_lock_error = false; + + witness_assert_lockless(&witness_tsdn); + + witness_init(&a, "a", 1, NULL, NULL); + + witness_lock(&witness_tsdn, &a); + assert_false(saw_lock_error, "Unexpected witness lock error"); + assert_false(saw_not_owner_error, "Unexpected witness not owner error"); + 
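/*
+	 * Acquiring a witness that is already held should trip both the
+	 * not-owner check and, because the rank equals that of the most
+	 * recently acquired witness with no comparison function supplied,
+	 * the lock-order check.
+	 */
+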
witness_lock(&witness_tsdn, &a); + assert_true(saw_lock_error, "Expected witness lock error"); + assert_true(saw_not_owner_error, "Expected witness not owner error"); + + witness_unlock(&witness_tsdn, &a); + + witness_assert_lockless(&witness_tsdn); + + witness_not_owner_error = witness_not_owner_error_orig; + witness_lock_error = witness_lock_error_orig; + +} +TEST_END + +TEST_BEGIN(test_witness_unlock_not_owned) { + witness_t a; + witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + + test_skip_if(!config_debug); + + witness_owner_error_orig = witness_owner_error; + witness_owner_error = witness_owner_error_intercept; + saw_owner_error = false; + + witness_assert_lockless(&witness_tsdn); + + witness_init(&a, "a", 1, NULL, NULL); + + assert_false(saw_owner_error, "Unexpected owner error"); + witness_unlock(&witness_tsdn, &a); + assert_true(saw_owner_error, "Expected owner error"); + + witness_assert_lockless(&witness_tsdn); + + witness_owner_error = witness_owner_error_orig; +} +TEST_END + +TEST_BEGIN(test_witness_depth) { + witness_t a; + witness_tsdn_t witness_tsdn = { WITNESS_TSD_INITIALIZER }; + + test_skip_if(!config_debug); + + witness_depth_error_orig = witness_depth_error; + witness_depth_error = witness_depth_error_intercept; + saw_depth_error = false; + + witness_assert_lockless(&witness_tsdn); + witness_assert_depth(&witness_tsdn, 0); + + witness_init(&a, "a", 1, NULL, NULL); + + assert_false(saw_depth_error, "Unexpected depth error"); + witness_assert_lockless(&witness_tsdn); + witness_assert_depth(&witness_tsdn, 0); + + witness_lock(&witness_tsdn, &a); + witness_assert_lockless(&witness_tsdn); + witness_assert_depth(&witness_tsdn, 0); + assert_true(saw_depth_error, "Expected depth error"); + + witness_unlock(&witness_tsdn, &a); + + witness_assert_lockless(&witness_tsdn); + witness_assert_depth(&witness_tsdn, 0); + + witness_depth_error = witness_depth_error_orig; +} +TEST_END + +int +main(void) { + return test( + test_witness, + test_witness_comp, + test_witness_reversal, + test_witness_recursive, + test_witness_unlock_not_owned, + test_witness_depth); +} diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/zero.c b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/zero.c similarity index 56% rename from redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/zero.c rename to redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/zero.c index 93afc2b..553692b 100644 --- a/redis-android/src/main/jni/redis-4.0.11/deps/jemalloc/test/unit/zero.c +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/zero.c @@ -1,78 +1,59 @@ #include "test/jemalloc_test.h" -#ifdef JEMALLOC_FILL -const char *malloc_conf = - "abort:false,junk:false,zero:true,redzone:false,quarantine:0"; -#endif - static void -test_zero(size_t sz_min, size_t sz_max) -{ - char *s; +test_zero(size_t sz_min, size_t sz_max) { + uint8_t *s; size_t sz_prev, sz, i; +#define MAGIC ((uint8_t)0x61) sz_prev = 0; - s = (char *)mallocx(sz_min, 0); + s = (uint8_t *)mallocx(sz_min, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); for (sz = sallocx(s, 0); sz <= sz_max; sz_prev = sz, sz = sallocx(s, 0)) { if (sz_prev > 0) { - assert_c_eq(s[0], 'a', + assert_u_eq(s[0], MAGIC, "Previously allocated byte %zu/%zu is corrupted", ZU(0), sz_prev); - assert_c_eq(s[sz_prev-1], 'a', + assert_u_eq(s[sz_prev-1], MAGIC, "Previously allocated byte %zu/%zu is corrupted", sz_prev-1, sz_prev); } for (i = sz_prev; i < sz; i++) { - assert_c_eq(s[i], 0x0, +
assert_u_eq(s[i], 0x0, "Newly allocated byte %zu/%zu isn't zero-filled", i, sz); - s[i] = 'a'; + s[i] = MAGIC; } if (xallocx(s, sz+1, 0, 0) == sz) { - s = (char *)rallocx(s, sz+1, 0); + s = (uint8_t *)rallocx(s, sz+1, 0); assert_ptr_not_null((void *)s, "Unexpected rallocx() failure"); } } dallocx(s, 0); +#undef MAGIC } -TEST_BEGIN(test_zero_small) -{ - +TEST_BEGIN(test_zero_small) { test_skip_if(!config_fill); test_zero(1, SMALL_MAXCLASS-1); } TEST_END -TEST_BEGIN(test_zero_large) -{ - +TEST_BEGIN(test_zero_large) { test_skip_if(!config_fill); - test_zero(SMALL_MAXCLASS+1, large_maxclass); -} -TEST_END - -TEST_BEGIN(test_zero_huge) -{ - - test_skip_if(!config_fill); - test_zero(large_maxclass+1, chunksize*2); + test_zero(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1))); } TEST_END int -main(void) -{ - - return (test( +main(void) { + return test( test_zero_small, - test_zero_large, - test_zero_huge)); + test_zero_large); } diff --git a/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/zero.sh b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/zero.sh new file mode 100644 index 0000000..b4540b2 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/deps/jemalloc/test/unit/zero.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "x${enable_fill}" = "x1" ] ; then + export MALLOC_CONF="abort:false,junk:false,zero:true" +fi diff --git a/redis-android/src/main/jni/redis-4.0.11/deps/update-jemalloc.sh b/redis-android/src/main/jni/redis-5.0.0/deps/update-jemalloc.sh similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/deps/update-jemalloc.sh rename to redis-android/src/main/jni/redis-5.0.0/deps/update-jemalloc.sh diff --git a/redis-android/src/main/jni/redis-4.0.11/redis.conf b/redis-android/src/main/jni/redis-5.0.0/redis.conf similarity index 82% rename from redis-android/src/main/jni/redis-4.0.11/redis.conf rename to redis-android/src/main/jni/redis-5.0.0/redis.conf index 43562c5..93ab9a4 100644 --- a/redis-android/src/main/jni/redis-4.0.11/redis.conf +++ b/redis-android/src/main/jni/redis-5.0.0/redis.conf @@ -59,7 +59,7 @@ # internet, binding to all the interfaces is dangerous and will expose the # instance to everybody on the internet. So by default we uncomment the # following bind directive, that will force Redis to listen only into -# the IPv4 lookback interface address (this means Redis will be able to +# the IPv4 loopback interface address (this means Redis will be able to # accept connections only from clients running into the same computer it # is running). # @@ -264,57 +264,64 @@ dir ./ ################################# REPLICATION ################################# -# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# Master-Replica replication. Use replicaof to make a Redis instance a copy of # another Redis server. A few things to understand ASAP about Redis replication. # +# +------------------+ +---------------+ +# | Master | ---> | Replica | +# | (receive writes) | | (exact copy) | +# +------------------+ +---------------+ +# # 1) Redis replication is asynchronous, but you can configure a master to # stop accepting writes if it appears to be not connected with at least -# a given number of slaves. -# 2) Redis slaves are able to perform a partial resynchronization with the +# a given number of replicas. +# 2) Redis replicas are able to perform a partial resynchronization with the # master if the replication link is lost for a relatively small amount of # time. 
You may want to configure the replication backlog size (see the next # sections of this file) with a sensible value depending on your needs. # 3) Replication is automatic and does not need user intervention. After a -# network partition slaves automatically try to reconnect to masters +# network partition replicas automatically try to reconnect to masters # and resynchronize with them. # -# slaveof <masterip> <masterport> +# replicaof <masterip> <masterport> # If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before +# directive below) it is possible to tell the replica to authenticate before # starting the replication synchronization process, otherwise the master will -# refuse the slave request. +# refuse the replica request. # # masterauth <master-password> -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: +# When a replica loses its connection with the master, or when the replication +# is still in progress, the replica can act in two different ways: # -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will # still reply to client requests, possibly with out of date data, or the # data set may just be empty if this is the first synchronization. # -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# 2) if replica-serve-stale-data is set to 'no' the replica will reply with # an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. +# but to INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, +# SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, +# COMMAND, POST, HOST: and LATENCY. # -slave-serve-stale-data yes +replica-serve-stale-data yes -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but +# You can configure a replica instance to accept writes or not. Writing against +# a replica instance may be useful to store some ephemeral data (because data +# written on a replica will be easily deleted after resync with the master) but # may also cause problems if clients are writing to it because of a # misconfiguration. # -# Since Redis 2.6 by default slaves are read-only. +# Since Redis 2.6 by default replicas are read-only. # -# Note: read only slaves are not designed to be exposed to untrusted clients +# Note: read only replicas are not designed to be exposed to untrusted clients # on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands +# Still a read only replica exports by default all the administrative commands # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve -# security of read only slaves using 'rename-command' to shadow all the +# security of read only replicas using 'rename-command' to shadow all the # administrative / dangerous commands. -slave-read-only yes +replica-read-only yes # Replication SYNC strategy: disk or socket.
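In practice, with replica-read-only yes a write sent to a replica fails with a -READONLY error. A hedged hiredis sketch (the address, key, and value are placeholders) of how a client might detect it is talking to a read-only replica:

```c
#include <stdio.h>
#include <string.h>
#include <hiredis/hiredis.h>

int main(void) {
    /* Placeholder address: assume a read-only replica listens here. */
    redisContext *c = redisConnect("127.0.0.1", 6380);
    if (c == NULL || c->err) return 1;

    redisReply *r = redisCommand(c, "SET k v");
    if (r && r->type == REDIS_REPLY_ERROR &&
        strncmp(r->str, "READONLY", 8) == 0) {
        /* The instance refuses writes; route them to the master. */
        printf("write refused by replica: %s\n", r->str);
    }
    if (r) freeReplyObject(r);
    redisFree(c);
    return 0;
}
```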
# @@ -322,25 +329,25 @@ slave-read-only yes # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY # ------------------------------------------------------- # -# New slaves and reconnecting slaves that are not able to continue the replication +# New replicas and reconnecting replicas that are not able to continue the replication # process just receiving differences, need to do what is called a "full -# synchronization". An RDB file is transmitted from the master to the slaves. +# synchronization". An RDB file is transmitted from the master to the replicas. # The transmission can happen in two different ways: # # 1) Disk-backed: The Redis master creates a new process that writes the RDB # file on disk. Later the file is transferred by the parent -# process to the slaves incrementally. +# process to the replicas incrementally. # 2) Diskless: The Redis master creates a new process that directly writes the -# RDB file to slave sockets, without touching the disk at all. +# RDB file to replica sockets, without touching the disk at all. # -# With disk-backed replication, while the RDB file is generated, more slaves +# With disk-backed replication, while the RDB file is generated, more replicas # can be queued and served with the RDB file as soon as the current child producing # the RDB file finishes its work. With diskless replication instead once -# the transfer starts, new slaves arriving will be queued and a new transfer +# the transfer starts, new replicas arriving will be queued and a new transfer # will start when the current one terminates. # # When diskless replication is used, the master waits a configurable amount of -# time (in seconds) before starting the transfer in the hope that multiple slaves +# time (in seconds) before starting the transfer in the hope that multiple replicas # will arrive and the transfer can be parallelized. # # With slow disks and fast (large bandwidth) networks, diskless replication @@ -349,140 +356,140 @@ repl-diskless-sync no # When diskless replication is enabled, it is possible to configure the delay # the server waits in order to spawn the child that transfers the RDB via socket -# to the slaves. +# to the replicas. # # This is important since once the transfer starts, it is not possible to serve -# new slaves arriving, that will be queued for the next RDB transfer, so the server -# waits a delay in order to let more slaves arrive. +# new replicas arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more replicas arrive. # # The delay is specified in seconds, and by default is 5 seconds. To disable # it entirely just set it to 0 seconds and the transfer will start ASAP. repl-diskless-sync-delay 5 -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 +# Replicas send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_replica_period option. The default value is 10 # seconds. # -# repl-ping-slave-period 10 +# repl-ping-replica-period 10 # The following option sets the replication timeout for: # -# 1) Bulk transfer I/O during SYNC, from the point of view of slave. -# 2) Master timeout from the point of view of slaves (data, pings). -# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). 
+# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). # # It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. # # repl-timeout 60 -# Disable TCP_NODELAY on the slave socket after SYNC? +# Disable TCP_NODELAY on the replica socket after SYNC? # # If you select "yes" Redis will use a smaller number of TCP packets and -# less bandwidth to send data to slaves. But this can add a delay for -# the data to appear on the slave side, up to 40 milliseconds with +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with # Linux kernels using a default configuration. # -# If you select "no" the delay for data to appear on the slave side will +# If you select "no" the delay for data to appear on the replica side will # be reduced but more bandwidth will be used for replication. # # By default we optimize for low latency, but in very high traffic conditions -# or when the master and slaves are many hops away, turning this to "yes" may +# or when the master and replicas are many hops away, turning this to "yes" may # be a good idea. repl-disable-tcp-nodelay no # Set the replication backlog size. The backlog is a buffer that accumulates -# slave data when slaves are disconnected for some time, so that when a slave +# replica data when replicas are disconnected for some time, so that when a replica # wants to reconnect again, often a full resync is not needed, but a partial -# resync is enough, just passing the portion of data the slave missed while +# resync is enough, just passing the portion of data the replica missed while # disconnected. # -# The bigger the replication backlog, the longer the time the slave can be +# The bigger the replication backlog, the longer the time the replica can be # disconnected and later be able to perform a partial resynchronization. # -# The backlog is only allocated once there is at least a slave connected. +# The backlog is only allocated once there is at least a replica connected. # # repl-backlog-size 1mb -# After a master has no longer connected slaves for some time, the backlog +# After a master has no longer connected replicas for some time, the backlog # will be freed. The following option configures the amount of seconds that -# need to elapse, starting from the time the last slave disconnected, for +# need to elapse, starting from the time the last replica disconnected, for # the backlog buffer to be freed. # -# Note that slaves never free the backlog for timeout, since they may be +# Note that replicas never free the backlog for timeout, since they may be # promoted to masters later, and should be able to correctly "partially -# resynchronize" with the slaves: hence they should always accumulate backlog. +# resynchronize" with the replicas: hence they should always accumulate backlog. # # A value of 0 means to never release the backlog. # # repl-backlog-ttl 3600 -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a +# The replica priority is an integer number published by Redis in the INFO output. 
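Stepping back to the replication backlog covered above: it is conceptually a ring buffer addressed by master replication offset, and a partial resync is possible exactly while the requested offset is still covered by it. A simplified sketch under that assumption — Redis's real bookkeeping differs in the details:

```c
#include <stddef.h>

/* Simplified replication backlog: a ring buffer plus the offset range
 * it still covers. */
typedef struct {
    char buf[1 << 20];   /* stands in for repl-backlog-size */
    long long end;       /* master replication offset after the last byte */
    long long histlen;   /* bytes still held, at most sizeof(buf) */
} backlog_t;

static void backlog_feed(backlog_t *b, const char *p, size_t len) {
    for (size_t i = 0; i < len; i++)
        b->buf[(size_t)((b->end + (long long)i) % (long long)sizeof(b->buf))] = p[i];
    b->end += (long long)len;
    b->histlen = b->end < (long long)sizeof(b->buf)
                     ? b->end : (long long)sizeof(b->buf);
}

/* A replica resuming from 'off' gets a partial resync iff the backlog
 * still covers that offset; otherwise it falls back to a full sync. */
static int backlog_can_psync(const backlog_t *b, long long off) {
    return off >= b->end - b->histlen && off <= b->end;
}
```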
+# It is used by Redis Sentinel in order to select a replica to promote into a # master if the master is no longer working correctly. # -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel will # pick the one with priority 10, that is the lowest. # -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by # Redis Sentinel for promotion. # # By default the priority is 100. -slave-priority 100 +replica-priority 100 # It is possible for a master to stop accepting writes if there are less than -# N slaves connected, having a lag less or equal than M seconds. +# N replicas connected, having a lag less or equal than M seconds. # -# The N slaves need to be in "online" state. +# The N replicas need to be in "online" state. # # The lag in seconds, that must be <= the specified value, is calculated from -# the last ping received from the slave, that is usually sent every second. +# the last ping received from the replica, that is usually sent every second. # # This option does not GUARANTEE that N replicas will accept the write, but -# will limit the window of exposure for lost writes in case not enough slaves +# will limit the window of exposure for lost writes in case not enough replicas # are available, to the specified number of seconds. # -# For example to require at least 3 slaves with a lag <= 10 seconds use: +# For example to require at least 3 replicas with a lag <= 10 seconds use: # -# min-slaves-to-write 3 -# min-slaves-max-lag 10 +# min-replicas-to-write 3 +# min-replicas-max-lag 10 # # Setting one or the other to 0 disables the feature. # -# By default min-slaves-to-write is set to 0 (feature disabled) and -# min-slaves-max-lag is set to 10. +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. # A Redis master is able to list the address and port of the attached -# slaves in different ways. For example the "INFO replication" section +# replicas in different ways. For example the "INFO replication" section # offers this information, which is used, among other tools, by -# Redis Sentinel in order to discover slave instances. +# Redis Sentinel in order to discover replica instances. # Another place where this info is available is in the output of the # "ROLE" command of a master. # -# The listed IP and address normally reported by a slave is obtained +# The listed IP and address normally reported by a replica is obtained # in the following way: # # IP: The address is auto detected by checking the peer address -# of the socket used by the slave to connect with the master. +# of the socket used by the replica to connect with the master. # -# Port: The port is communicated by the slave during the replication -# handshake, and is normally the port that the slave is using to -# list for connections. +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. 
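The min-replicas-to-write / min-replicas-max-lag pair described a little earlier boils down to counting replicas whose last ACK is recent enough and refusing the write when too few qualify. A small illustrative sketch, with hypothetical types (not Redis's internal structures):

```c
#include <time.h>

typedef struct { int online; time_t last_ack; } replica_t;  /* hypothetical */

/* Allow the write only if at least min_to_write online replicas acked
 * within the last max_lag seconds; min_to_write == 0 disables the gate. */
static int write_allowed(const replica_t *r, int n,
                         int min_to_write, int max_lag, time_t now) {
    if (min_to_write == 0) return 1;
    int good = 0;
    for (int i = 0; i < n; i++)
        if (r[i].online && now - r[i].last_ack <= max_lag) good++;
    return good >= min_to_write;
}
```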
# # However when port forwarding or Network Address Translation (NAT) is -# used, the slave may be actually reachable via different IP and port -# pairs. The following two options can be used by a slave in order to +# used, the replica may be actually reachable via different IP and port +# pairs. The following two options can be used by a replica in order to # report to its master a specific set of IP and port, so that both INFO # and ROLE will report those values. # # There is no need to use both the options if you need to override just # the port or the IP address. # -# slave-announce-ip 5.5.5.5 -# slave-announce-port 1234 +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 ################################## SECURITY ################################### @@ -516,7 +523,7 @@ slave-priority 100 # rename-command CONFIG "" # # Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. +# AOF file or transmitted to replicas may cause problems. ################################### CLIENTS #################################### @@ -545,15 +552,15 @@ slave-priority 100 # This option is usually useful when using Redis as an LRU or LFU cache, or to # set a hard memory limit for an instance (using the 'noeviction' policy). # -# WARNING: If you have slaves attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the slaves are subtracted +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted # from the used memory count, so that network problems / resyncs will # not trigger a loop where keys are evicted, and in turn the output -# buffer of slaves is full with DELs of keys evicted triggering the deletion +# buffer of replicas is full with DELs of keys evicted triggering the deletion # of more keys, and so forth until the database is completely emptied. # -# In short... if you have slaves attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for slave +# In short... if you have replicas attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for replica # output buffers (but this is not needed if the policy is 'noeviction'). # # maxmemory @@ -600,6 +607,26 @@ slave-priority 100 # # maxmemory-samples 5 +# Starting from Redis 5, by default a replica will ignore its maxmemory setting +# (unless it is promoted to master after a failover or manually). It means +# that the eviction of keys will be just handled by the master, sending the +# DEL commands to the replica as keys evict in the master side. +# +# This behavior ensures that masters and replicas stay consistent, and is usually +# what you want, however if your replica is writable, or you want the replica to have +# a different memory setting, and you are sure all the writes performed to the +# replica are idempotent, then you may change this default (but be sure to understand +# what you are doing). +# +# Note that since the replica by default does not evict, it may end using more +# memory than the one set via maxmemory (there are certain buffers that may +# be larger on the replica, or data structures may sometimes take more memory and so +# forth). 
So make sure you monitor your replicas and make sure they have enough +# memory to never hit a real out-of-memory condition before the master hits +# the configured maxmemory setting. +# +# replica-ignore-maxmemory yes + ############################# LAZY FREEING #################################### # Redis has two primitives to delete keys. One is called DEL and is a blocking @@ -635,9 +662,9 @@ slave-priority 100 # or SORT with STORE option may delete existing keys. The SET command # itself removes any old content of the specified key in order to replace # it with the specified string. -# 4) During replication, when a slave performs a full resynchronization with +# 4) During replication, when a replica performs a full resynchronization with # its master, the content of the whole database is removed in order to -# load the RDB file just transfered. +# load the RDB file just transferred. # # In all the above cases the default is to delete objects in a blocking way, # like if DEL was called. However you can configure each case specifically @@ -647,7 +674,7 @@ slave-priority 100 lazyfree-lazy-eviction no lazyfree-lazy-expire no lazyfree-lazy-server-del no -slave-lazy-flush no +replica-lazy-flush no ############################## APPEND ONLY MODE ############################### @@ -776,10 +803,7 @@ aof-load-truncated yes # When loading Redis recognizes that the AOF file starts with the "REDIS" # string and loads the prefixed RDB file, and continues loading the AOF # tail. -# -# This is currently turned off by default in order to avoid the surprise -# of a format change, but will at some point be used as the default. -aof-use-rdb-preamble no +aof-use-rdb-preamble yes ################################ LUA SCRIPTING ############################### @@ -827,42 +851,42 @@ lua-time-limit 5000 # # cluster-node-timeout 15000 -# A slave of a failing master will avoid to start a failover if its data +# A replica of a failing master will avoid to start a failover if its data # looks too old. # -# There is no simple way for a slave to actually have an exact measure of +# There is no simple way for a replica to actually have an exact measure of # its "data age", so the following two checks are performed: # -# 1) If there are multiple slaves able to failover, they exchange messages -# in order to try to give an advantage to the slave with the best +# 1) If there are multiple replicas able to failover, they exchange messages +# in order to try to give an advantage to the replica with the best # replication offset (more data from the master processed). -# Slaves will try to get their rank by offset, and apply to the start +# Replicas will try to get their rank by offset, and apply to the start # of the failover a delay proportional to their rank. # -# 2) Every single slave computes the time of the last interaction with +# 2) Every single replica computes the time of the last interaction with # its master. This can be the last ping or command received (if the master # is still in the "connected" state), or the time that elapsed since the # disconnection with the master (if the replication link is currently down). -# If the last interaction is too old, the slave will not try to failover +# If the last interaction is too old, the replica will not try to failover # at all. # -# The point "2" can be tuned by user. Specifically a slave will not perform +# The point "2" can be tuned by user. 
Specifically a replica will not perform # the failover if, since the last interaction with the master, the time # elapsed is greater than: # -# (node-timeout * slave-validity-factor) + repl-ping-slave-period +# (node-timeout * replica-validity-factor) + repl-ping-replica-period # -# So for example if node-timeout is 30 seconds, and the slave-validity-factor -# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the -# slave will not try to failover if it was not able to talk with the master +# So for example if node-timeout is 30 seconds, and the replica-validity-factor +# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the +# replica will not try to failover if it was not able to talk with the master # for longer than 310 seconds. # -# A large slave-validity-factor may allow slaves with too old data to failover +# A large replica-validity-factor may allow replicas with too old data to failover # a master, while a too small value may prevent the cluster from being able to -# elect a slave at all. +# elect a replica at all. # -# For maximum availability, it is possible to set the slave-validity-factor -# to a value of 0, which means, that slaves will always try to failover the +# For maximum availability, it is possible to set the replica-validity-factor +# to a value of 0, which means, that replicas will always try to failover the # master regardless of the last time they interacted with the master. # (However they'll always try to apply a delay proportional to their # offset rank). @@ -870,22 +894,22 @@ lua-time-limit 5000 # Zero is the only value able to guarantee that when all the partitions heal # the cluster will always be able to continue. # -# cluster-slave-validity-factor 10 +# cluster-replica-validity-factor 10 -# Cluster slaves are able to migrate to orphaned masters, that are masters -# that are left without working slaves. This improves the cluster ability +# Cluster replicas are able to migrate to orphaned masters, that are masters +# that are left without working replicas. This improves the cluster ability # to resist to failures as otherwise an orphaned master can't be failed over -# in case of failure if it has no working slaves. +# in case of failure if it has no working replicas. # -# Slaves migrate to orphaned masters only if there are still at least a -# given number of other working slaves for their old master. This number -# is the "migration barrier". A migration barrier of 1 means that a slave -# will migrate only if there is at least 1 other working slave for its master -# and so forth. It usually reflects the number of slaves you want for every +# Replicas migrate to orphaned masters only if there are still at least a +# given number of other working replicas for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a replica +# will migrate only if there is at least 1 other working replica for its master +# and so forth. It usually reflects the number of replicas you want for every # master in your cluster. # -# Default is 1 (slaves migrate only if their masters remain with at least -# one slave). To disable migration just set it to a very large value. +# Default is 1 (replicas migrate only if their masters remain with at least +# one replica). To disable migration just set it to a very large value. # A value of 0 can be set but is useful only for debugging and dangerous # in production. 
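The 310-second figure quoted above follows directly from the formula; as a worked check:

```c
#include <stdio.h>

/* (node-timeout * replica-validity-factor) + repl-ping-replica-period */
int main(void) {
    int node_timeout = 30;     /* seconds */
    int validity_factor = 10;
    int ping_period = 10;      /* seconds */
    printf("%d\n", node_timeout * validity_factor + ping_period); /* 310 */
    return 0;
}
```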
# @@ -904,7 +928,7 @@ lua-time-limit 5000 # # cluster-require-full-coverage yes -# This option, when set to yes, prevents slaves from trying to failover its +# This option, when set to yes, prevents replicas from trying to fail over their # master during master failures. However the master can still perform a # manual failover, if forced to do so. # @@ -912,7 +936,7 @@ lua-time-limit 5000 # data center operations, where we want one side to never be promoted if not # in the case of a total DC failure. # -# cluster-slave-no-failover no +# cluster-replica-no-failover no # In order to setup your cluster make sure to read the documentation # available at http://redis.io web site. @@ -1107,6 +1131,17 @@ zset-max-ziplist-value 64 # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. hll-sparse-max-bytes 3000 +# Streams macro node max size / items. The stream data structure is a radix +# tree of big nodes that encode multiple items inside. Using this configuration +# it is possible to configure how big a single node can be in bytes, and the +# maximum number of items it may contain before switching to a new node when +# appending new stream entries. If any of the following settings are set to +# zero, the limit is ignored, so for instance it is possible to set just a +# max entries limit by setting max-bytes to 0 and max-entries to the desired +# value. +stream-node-max-bytes 4096 +stream-node-max-entries 100 + # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in # order to help rehashing the main Redis hash table (the one mapping top-level # keys to values). The hash table implementation Redis uses (see dict.c) @@ -1135,7 +1170,7 @@ activerehashing yes # The limit can be set differently for the three different classes of clients: # # normal -> normal clients including MONITOR clients -# slave -> slave clients +# replica -> replica clients # pubsub -> clients subscribed to at least one pubsub channel or pattern # # The syntax of every client-output-buffer-limit directive is the following: @@ -1156,12 +1191,12 @@ activerehashing yes # asynchronous clients may create a scenario where data is requested faster # than it can read. # -# Instead there is a default limit for pubsub and slave clients, since -# subscribers and slaves receive data in a push fashion. +# Instead there is a default limit for pubsub and replica clients, since +# subscribers and replicas receive data in a push fashion. # # Both the hard or the soft limit can be disabled by setting them to zero. client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit replica 256mb 64mb 60 client-output-buffer-limit pubsub 32mb 8mb 60 # Client query buffers accumulate new commands. They are limited to a fixed @@ -1195,12 +1230,34 @@ client-output-buffer-limit pubsub 32mb 8mb 60 # 100 only in environments where very low latency is required. hz 10
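Backing up to the new stream-node-max-bytes / stream-node-max-entries settings above: they bound when a stream's radix tree stops appending to the current macro node and starts a new one, with 0 disabling either bound. A schematic sketch of that rule, not the actual t_stream.c logic:

```c
#include <stddef.h>

/* A limit of 0 is ignored; otherwise reaching either bound means the
 * next entry goes into a freshly started macro node. */
static int node_is_full(size_t node_bytes, long long node_entries,
                        size_t max_bytes, long long max_entries) {
    if (max_bytes != 0 && node_bytes >= max_bytes) return 1;
    if (max_entries != 0 && node_entries >= max_entries) return 1;
    return 0;
}
```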
+# Normally it is useful to have an HZ value which is proportional to the +# number of clients connected. This is useful, for instance, to avoid +# processing too many clients for each background task invocation and the +# latency spikes that would cause. +# +# Since the default HZ value is conservatively set to 10, Redis +# offers, and enables by default, the ability to use an adaptive HZ value +# which will temporarily rise when there are many connected clients. +# +# When dynamic HZ is enabled, the actual configured HZ will be used +# as a baseline, but multiples of the configured HZ value will be actually +# used as needed once more clients are connected. In this way an idle +# instance will use very little CPU time while a busy instance will be +# more responsive. +dynamic-hz yes + # When a child rewrites the AOF file, if the following option is enabled # the file will be fsync-ed every 32 MB of data generated. This is useful # in order to commit the file to the disk more incrementally and avoid # big latency spikes. aof-rewrite-incremental-fsync yes +# When Redis saves an RDB file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +rdb-save-incremental-fsync yes + # Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good # idea to start with the default settings and only change them after investigating # how to improve the performances and how the keys LFU change over time, which @@ -1310,8 +1367,12 @@ aof-rewrite-incremental-fsync yes # active-defrag-threshold-upper 100 # Minimal effort for defrag in CPU percentage -# active-defrag-cycle-min 25 +# active-defrag-cycle-min 5 # Maximal effort for defrag in CPU percentage # active-defrag-cycle-max 75 +# Maximum number of set/hash/zset/list fields that will be processed from +# the main dictionary scan +# active-defrag-max-scan-fields 1000 + diff --git a/redis-android/src/main/jni/redis-4.0.11/runtest b/redis-android/src/main/jni/redis-5.0.0/runtest similarity index 86% rename from redis-android/src/main/jni/redis-4.0.11/runtest rename to redis-android/src/main/jni/redis-5.0.0/runtest index d8451df..ade1bd0 100755 --- a/redis-android/src/main/jni/redis-4.0.11/runtest +++ b/redis-android/src/main/jni/redis-5.0.0/runtest @@ -11,4 +11,4 @@ then echo "You need tcl 8.5 or newer in order to run the Redis test" exit 1 fi -$TCLSH tests/test_helper.tcl $* +$TCLSH tests/test_helper.tcl "${@}" diff --git a/redis-android/src/main/jni/redis-4.0.11/runtest-cluster b/redis-android/src/main/jni/redis-5.0.0/runtest-cluster similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/runtest-cluster rename to redis-android/src/main/jni/redis-5.0.0/runtest-cluster diff --git a/redis-android/src/main/jni/redis-4.0.11/runtest-sentinel b/redis-android/src/main/jni/redis-5.0.0/runtest-sentinel similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/runtest-sentinel rename to redis-android/src/main/jni/redis-5.0.0/runtest-sentinel diff --git a/redis-android/src/main/jni/redis-4.0.11/sentinel.conf b/redis-android/src/main/jni/redis-5.0.0/sentinel.conf similarity index 70% rename from redis-android/src/main/jni/redis-4.0.11/sentinel.conf rename to redis-android/src/main/jni/redis-5.0.0/sentinel.conf index 38b0972..bc9a705 100644 --- a/redis-android/src/main/jni/redis-4.0.11/sentinel.conf +++ b/redis-android/src/main/jni/redis-5.0.0/sentinel.conf @@ -20,6 +20,21 @@ # The port that this sentinel instance will run on port 26379 +# By default Redis Sentinel does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis-sentinel.pid when +# daemonized. +daemonize no + +# When running daemonized, Redis Sentinel writes a pid file in +# /var/run/redis-sentinel.pid by default. You can specify a custom pid file +# location here.
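The dynamic-HZ behavior introduced above can be pictured as doubling the configured baseline while the per-tick client load stays too high, up to a cap. A sketch of such a policy; the constants are assumptions for illustration, not necessarily Redis's own:

```c
#define MAX_CLIENTS_PER_TICK 200   /* assumed threshold */
#define HZ_CAP 500                 /* assumed upper bound */

/* Treat the configured hz as a baseline and raise it in multiples while
 * too many clients would have to be handled per tick. */
static int effective_hz(int configured_hz, long num_clients) {
    int hz = configured_hz;
    while (num_clients / hz > MAX_CLIENTS_PER_TICK) {
        hz *= 2;
        if (hz > HZ_CAP) { hz = HZ_CAP; break; }
    }
    return hz;
}
```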
+pidfile /var/run/redis-sentinel.pid + +# Specify the log file name. Also the empty string can be used to force +# Sentinel to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile "" + # sentinel announce-ip # sentinel announce-port # @@ -58,11 +73,11 @@ dir /tmp # be elected by the majority of the known Sentinels in order to # start a failover, so no failover can be performed in minority. # -# Slaves are auto-discovered, so you don't need to specify slaves in +# Replicas are auto-discovered, so you don't need to specify replicas in # any way. Sentinel itself will rewrite this configuration file adding -# the slaves using additional configuration options. +# the replicas using additional configuration options. # Also note that the configuration file is rewritten when a -# slave is promoted to master. +# replica is promoted to master. # # Note: master name should not include special characters or spaces. # The valid charset is A-z 0-9 and the three characters ".-_". @@ -70,11 +85,11 @@ sentinel monitor mymaster 127.0.0.1 6379 2 # sentinel auth-pass # -# Set the password to use to authenticate with the master and slaves. +# Set the password to use to authenticate with the master and replicas. # Useful if there is a password set in the Redis instances to monitor. # -# Note that the master password is also used for slaves, so it is not -# possible to set a different password in masters and slaves instances +# Note that the master password is also used for replicas, so it is not +# possible to set a different password in masters and replicas instances # if you want to be able to monitor these instances with Sentinel. # # However you can have Redis instances without the authentication enabled @@ -89,7 +104,7 @@ sentinel monitor mymaster 127.0.0.1 6379 2 # sentinel down-after-milliseconds # -# Number of milliseconds the master (or any attached slave or sentinel) should +# Number of milliseconds the master (or any attached replica or sentinel) should # be unreachable (as in, not acceptable reply to PING, continuously, for the # specified period) in order to consider it in S_DOWN state (Subjectively # Down). @@ -97,11 +112,11 @@ sentinel monitor mymaster 127.0.0.1 6379 2 # Default is 30 seconds. sentinel down-after-milliseconds mymaster 30000 -# sentinel parallel-syncs +# sentinel parallel-syncs # -# How many slaves we can reconfigure to point to the new slave simultaneously -# during the failover. Use a low number if you use the slaves to serve query -# to avoid that all the slaves will be unreachable at about the same +# How many replicas we can reconfigure to point to the new replica simultaneously +# during the failover. Use a low number if you use the replicas to serve query +# to avoid that all the replicas will be unreachable at about the same # time while performing the synchronization with the master. sentinel parallel-syncs mymaster 1 @@ -113,18 +128,18 @@ sentinel parallel-syncs mymaster 1 # already tried against the same master by a given Sentinel, is two # times the failover timeout. # -# - The time needed for a slave replicating to a wrong master according +# - The time needed for a replica replicating to a wrong master according # to a Sentinel current configuration, to be forced to replicate # with the right master, is exactly the failover timeout (counting since # the moment a Sentinel detected the misconfiguration). 
# # - The time needed to cancel a failover that is already in progress but did not produce any configuration change (SLAVEOF NO ONE yet not -# acknowledged by the promoted slave). +# acknowledged by the promoted replica). # -# - The maximum time a failover in progress waits for all the slaves to be -# reconfigured as slaves of the new master. However even after this time -# the slaves will be reconfigured by the Sentinels anyway, but not with +# - The maximum time a failover in progress waits for all the replicas to be +# reconfigured as replicas of the new master. However even after this time +# the replicas will be reconfigured by the Sentinels anyway, but not with # the exact parallel-syncs progression as specified. # # Default is 3 minutes. @@ -185,7 +200,7 @@ sentinel failover-timeout mymaster 180000 # is either "leader" or "observer" # # The arguments from-ip, from-port, to-ip, to-port are used to communicate -# the old address of the master and the new address of the elected slave +# the old address of the master and the new address of the elected replica # (now a master). # # This script should be resistant to multiple invocations. @@ -203,3 +218,27 @@ sentinel failover-timeout mymaster 180000 sentinel deny-scripts-reconfig yes +# REDIS COMMANDS RENAMING +# +# Sometimes the Redis server has certain commands that are needed for Sentinel +# to work correctly renamed to unguessable strings. This is often the case +# of CONFIG and SLAVEOF in the context of providers that provide Redis as +# a service, and don't want the customers to reconfigure the instances outside +# of the administration console. +# +# In such a case it is possible to tell Sentinel to use different command names +# instead of the normal ones. For example if the master "mymaster", and the +# associated replicas, have "CONFIG" all renamed to "GUESSME", I could use: +# +# SENTINEL rename-command mymaster CONFIG GUESSME +# +# After such configuration is set, every time Sentinel would use CONFIG it will +# use GUESSME instead. Note that there is no actual need to respect the command +# case, so writing "config guessme" is the same in the example above. +# +# SENTINEL SET can also be used in order to perform this configuration at runtime. +# +# In order to set a command back to its original name (undo the renaming), it +# is possible to just rename a command to itself: +# +# SENTINEL rename-command mymaster CONFIG CONFIG diff --git a/redis-android/src/main/jni/redis-4.0.11/src/.gitignore b/redis-android/src/main/jni/redis-5.0.0/src/.gitignore similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/.gitignore rename to redis-android/src/main/jni/redis-5.0.0/src/.gitignore diff --git a/redis-android/src/main/jni/redis-4.0.11/src/Makefile b/redis-android/src/main/jni/redis-5.0.0/src/Makefile similarity index 96% rename from redis-android/src/main/jni/redis-4.0.11/src/Makefile rename to redis-android/src/main/jni/redis-5.0.0/src/Makefile index 86e0b3f..773d3b2 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/Makefile +++ b/redis-android/src/main/jni/redis-5.0.0/src/Makefile @@ -39,7 +39,7 @@ endif endif # To get ARM stack traces if Redis crashes we need a special C flag.
-ifneq (,$(findstring armv,$(uname_M))) +ifneq (,$(filter aarch64 armv,$(uname_M))) CFLAGS+=-funwind-tables endif @@ -100,7 +100,7 @@ ifeq ($(uname_S),FreeBSD) else # All the other OSes (notably Linux) FINAL_LDFLAGS+= -rdynamic - FINAL_LIBS+=-ldl -pthread + FINAL_LIBS+=-ldl -pthread -lrt endif endif endif @@ -122,7 +122,7 @@ endif ifeq ($(MALLOC),jemalloc) DEPENDENCY_TARGETS+= jemalloc FINAL_CFLAGS+= -DUSE_JEMALLOC -I../deps/jemalloc/include - FINAL_LIBS+= ../deps/jemalloc/lib/libjemalloc.a + FINAL_LIBS := ../deps/jemalloc/lib/libjemalloc.a $(FINAL_LIBS) endif REDIS_CC=$(QUIET_CC)$(CC) $(FINAL_CFLAGS) @@ -144,9 +144,9 @@ endif REDIS_SERVER_NAME=redis-server REDIS_SENTINEL_NAME=redis-sentinel -REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o +REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o lolwut.o lolwut5.o REDIS_CLI_NAME=redis-cli -REDIS_CLI_OBJ=anet.o adlist.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o +REDIS_CLI_OBJ=anet.o adlist.o dict.o redis-cli.o zmalloc.o release.o anet.o ae.o crc64.o siphash.o crc16.o REDIS_BENCHMARK_NAME=redis-benchmark REDIS_BENCHMARK_OBJ=ae.o anet.o redis-benchmark.o adlist.o zmalloc.o redis-benchmark.o REDIS_CHECK_RDB_NAME=redis-check-rdb diff --git a/redis-android/src/main/jni/redis-4.0.11/src/adlist.c b/redis-android/src/main/jni/redis-5.0.0/src/adlist.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/adlist.c rename to redis-android/src/main/jni/redis-5.0.0/src/adlist.c diff --git a/redis-android/src/main/jni/redis-4.0.11/src/adlist.h b/redis-android/src/main/jni/redis-5.0.0/src/adlist.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/adlist.h rename to redis-android/src/main/jni/redis-5.0.0/src/adlist.h diff --git a/redis-android/src/main/jni/redis-4.0.11/src/ae.c b/redis-android/src/main/jni/redis-5.0.0/src/ae.c similarity index 98% rename from redis-android/src/main/jni/redis-4.0.11/src/ae.c rename to redis-android/src/main/jni/redis-5.0.0/src/ae.c index 8fdb94d..8e246bb 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/ae.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/ae.c @@ -39,15 +39,12 @@ #include #include #include -#ifdef __ANDROID__ - #include - #include -#endif #include "ae.h" #include "zmalloc.h" #include "config.h" + /* Include the best multiplexing layer supported by this system. 
* The following should be ordered by performances, descending. */ #ifdef HAVE_EVPORT @@ -437,7 +434,7 @@ int aeProcessEvents(aeEventLoop *eventLoop, int flags) * before replying to a client. */ int invert = fe->mask & AE_BARRIER; - /* Note the "fe->mask & mask & ..." code: maybe an already + /* Note the "fe->mask & mask & ..." code: maybe an already * processed event removed an element that fired and we still * didn't processed, so we check if the event is still valid. * @@ -489,7 +486,7 @@ int aeWait(int fd, int mask, long long milliseconds) { if ((retval = poll(&pfd, 1, milliseconds))== 1) { if (pfd.revents & POLLIN) retmask |= AE_READABLE; if (pfd.revents & POLLOUT) retmask |= AE_WRITABLE; - if (pfd.revents & POLLERR) retmask |= AE_WRITABLE; + if (pfd.revents & POLLERR) retmask |= AE_WRITABLE; if (pfd.revents & POLLHUP) retmask |= AE_WRITABLE; return retmask; } else { diff --git a/redis-android/src/main/jni/redis-4.0.11/src/ae.h b/redis-android/src/main/jni/redis-5.0.0/src/ae.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/ae.h rename to redis-android/src/main/jni/redis-5.0.0/src/ae.h diff --git a/redis-android/src/main/jni/redis-4.0.11/src/ae_epoll.c b/redis-android/src/main/jni/redis-5.0.0/src/ae_epoll.c similarity index 98% rename from redis-android/src/main/jni/redis-4.0.11/src/ae_epoll.c rename to redis-android/src/main/jni/redis-5.0.0/src/ae_epoll.c index 410aac7..6ee45ea 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/ae_epoll.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/ae_epoll.c @@ -30,6 +30,8 @@ #include +#include // for close +#include "ae.h" typedef struct aeApiState { int epfd; diff --git a/redis-android/src/main/jni/redis-4.0.11/src/ae_evport.c b/redis-android/src/main/jni/redis-5.0.0/src/ae_evport.c similarity index 99% rename from redis-android/src/main/jni/redis-4.0.11/src/ae_evport.c rename to redis-android/src/main/jni/redis-5.0.0/src/ae_evport.c index 5c317be..b3cf42a 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/ae_evport.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/ae_evport.c @@ -37,6 +37,8 @@ #include #include +#include "zmalloc.h" +#include "ae.h" static int evport_debug = 0; diff --git a/redis-android/src/main/jni/redis-4.0.11/src/ae_kqueue.c b/redis-android/src/main/jni/redis-5.0.0/src/ae_kqueue.c similarity index 99% rename from redis-android/src/main/jni/redis-4.0.11/src/ae_kqueue.c rename to redis-android/src/main/jni/redis-5.0.0/src/ae_kqueue.c index 6796f4c..c85b193 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/ae_kqueue.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/ae_kqueue.c @@ -32,6 +32,7 @@ #include #include #include +#include "ae.h" typedef struct aeApiState { int kqfd; diff --git a/redis-android/src/main/jni/redis-4.0.11/src/ae_select.c b/redis-android/src/main/jni/redis-5.0.0/src/ae_select.c similarity index 99% rename from redis-android/src/main/jni/redis-4.0.11/src/ae_select.c rename to redis-android/src/main/jni/redis-5.0.0/src/ae_select.c index c039a8e..f18c916 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/ae_select.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/ae_select.c @@ -31,6 +31,7 @@ #include #include +#include "ae.h" typedef struct aeApiState { fd_set rfds, wfds; diff --git a/redis-android/src/main/jni/redis-4.0.11/src/anet.c b/redis-android/src/main/jni/redis-5.0.0/src/anet.c similarity index 99% rename from redis-android/src/main/jni/redis-4.0.11/src/anet.c rename to redis-android/src/main/jni/redis-5.0.0/src/anet.c index dc88eb7..2981fca 
100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/anet.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/anet.c @@ -237,7 +237,7 @@ int anetResolveIP(char *err, char *host, char *ipbuf, size_t ipbuf_len) { static int anetSetReuseAddr(char *err, int fd) { int yes = 1; - /* Make sure connection-intensive things like the redis benckmark + /* Make sure connection-intensive things like the redis benchmark * will be able to close/open sockets a zillion of times */ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) == -1) { anetSetError(err, "setsockopt SO_REUSEADDR: %s", strerror(errno)); diff --git a/redis-android/src/main/jni/redis-4.0.11/src/anet.h b/redis-android/src/main/jni/redis-5.0.0/src/anet.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/anet.h rename to redis-android/src/main/jni/redis-5.0.0/src/anet.h diff --git a/redis-android/src/main/jni/redis-4.0.11/src/aof.c b/redis-android/src/main/jni/redis-5.0.0/src/aof.c similarity index 89% rename from redis-android/src/main/jni/redis-4.0.11/src/aof.c rename to redis-android/src/main/jni/redis-5.0.0/src/aof.c index 262c780..fddb636 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/aof.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/aof.c @@ -30,9 +30,13 @@ #include "server.h" #include "bio.h" #include "rio.h" +#include "wait3.h" +#include +#include #include #include +#include #include #include #include @@ -228,7 +232,7 @@ static void killAppendOnlyChild(void) { void stopAppendOnly(void) { serverAssert(server.aof_state != AOF_OFF); flushAppendOnlyFile(1); - aof_fsync(server.aof_fd); + redis_fsync(server.aof_fd); close(server.aof_fd); server.aof_fd = -1; @@ -261,7 +265,7 @@ int startAppendOnly(void) { serverLog(LL_WARNING,"AOF was enabled but there is already a child process saving an RDB file on disk. An AOF background was scheduled to start when possible."); } else { /* If there is a pending AOF rewrite, we need to switch it off and - * start a new one: the old one cannot be reused becuase it is not + * start a new one: the old one cannot be reused because it is not * accumulating the AOF buffer. */ if (server.aof_child_pid != -1) { serverLog(LL_WARNING,"AOF was enabled but there is already an AOF rewriting in background. Stopping background AOF and starting a rewrite now."); @@ -476,10 +480,10 @@ void flushAppendOnlyFile(int force) { /* Perform the fsync if needed. */ if (server.aof_fsync == AOF_FSYNC_ALWAYS) { - /* aof_fsync is defined as fdatasync() for Linux in order to avoid + /* redis_fsync is defined as fdatasync() for Linux in order to avoid * flushing metadata. */ latencyStartMonitor(latency); - aof_fsync(server.aof_fd); /* Let's try to get this data on the disk */ + redis_fsync(server.aof_fd); /* Let's try to get this data on the disk */ latencyEndMonitor(latency); latencyAddSampleIfNeeded("aof-fsync-always",latency); server.aof_last_fsync = server.unixtime; @@ -645,7 +649,7 @@ struct client *createFakeClient(void) { c->obuf_soft_limit_reached_time = 0; c->watched_keys = listCreate(); c->peerid = NULL; - listSetFreeMethod(c->reply,decrRefCountVoid); + listSetFreeMethod(c->reply,freeClientReplyValue); listSetDupMethod(c->reply,dupClientReplyValue); initClientMultiState(c); return c; @@ -677,13 +681,14 @@ int loadAppendOnlyFile(char *filename) { int old_aof_state = server.aof_state; long loops = 0; off_t valid_up_to = 0; /* Offset of latest well-formed command loaded. */ + off_t valid_before_multi = 0; /* Offset before MULTI command loaded. 
*/ if (fp == NULL) { serverLog(LL_WARNING,"Fatal error: can't open the append log file for reading: %s",strerror(errno)); exit(1); } - /* Handle a zero-length AOF file as a special case. An emtpy AOF file + /* Handle a zero-length AOF file as a special case. An empty AOF file * is a valid AOF because an empty server with AOF enabled will create * a zero length file at startup, that will remain like that if no write * operation is received. */ @@ -759,7 +764,7 @@ int loadAppendOnlyFile(char *filename) { } if (buf[0] != '$') goto fmterr; len = strtol(buf+1,NULL,10); - argsds = sdsnewlen(NULL,len); + argsds = sdsnewlen(SDS_NOINIT,len); if (len && fread(argsds,len,1,fp) == 0) { sdsfree(argsds); fakeClient->argc = j; /* Free up to j-1. */ @@ -777,16 +782,28 @@ int loadAppendOnlyFile(char *filename) { /* Command lookup */ cmd = lookupCommand(argv[0]->ptr); if (!cmd) { - serverLog(LL_WARNING,"Unknown command '%s' reading the append only file", (char*)argv[0]->ptr); + serverLog(LL_WARNING, + "Unknown command '%s' reading the append only file", + (char*)argv[0]->ptr); exit(1); } + if (cmd == server.multiCommand) valid_before_multi = valid_up_to; + /* Run the command in the context of a fake client */ fakeClient->cmd = cmd; - cmd->proc(fakeClient); + if (fakeClient->flags & CLIENT_MULTI && + fakeClient->cmd->proc != execCommand) + { + queueMultiCommand(fakeClient); + } else { + cmd->proc(fakeClient); + } /* The fake client should not have a reply */ - serverAssert(fakeClient->bufpos == 0 && listLength(fakeClient->reply) == 0); + serverAssert(fakeClient->bufpos == 0 && + listLength(fakeClient->reply) == 0); + /* The fake client should never get blocked */ serverAssert((fakeClient->flags & CLIENT_BLOCKED) == 0); @@ -798,8 +815,15 @@ int loadAppendOnlyFile(char *filename) { } /* This point can only be reached when EOF is reached without errors. - * If the client is in the middle of a MULTI/EXEC, log error and quit. */ - if (fakeClient->flags & CLIENT_MULTI) goto uxeof; + * If the client is in the middle of a MULTI/EXEC, handle it as it was + * a short read, even if technically the protocol is correct: we want + * to remove the unprocessed tail and continue. */ + if (fakeClient->flags & CLIENT_MULTI) { + serverLog(LL_WARNING, + "Revert incomplete MULTI/EXEC transaction in AOF file"); + valid_up_to = valid_before_multi; + goto uxeof; + } loaded_ok: /* DB loaded, cleanup and return C_OK to the caller. */ fclose(fp); @@ -1074,6 +1098,140 @@ int rewriteHashObject(rio *r, robj *key, robj *o) { return 1; } +/* Helper for rewriteStreamObject() that generates a bulk string into the + * AOF representing the ID 'id'. */ +int rioWriteBulkStreamID(rio *r,streamID *id) { + int retval; + + sds replyid = sdscatfmt(sdsempty(),"%U-%U",id->ms,id->seq); + if ((retval = rioWriteBulkString(r,replyid,sdslen(replyid))) == 0) return 0; + sdsfree(replyid); + return retval; +} + +/* Helper for rewriteStreamObject(): emit the XCLAIM needed in order to + * add the message described by 'nack' having the id 'rawid', into the pending + * list of the specified consumer. All this in the context of the specified + * key and group. */ +int rioWriteStreamPendingEntry(rio *r, robj *key, const char *groupname, size_t groupname_len, streamConsumer *consumer, unsigned char *rawid, streamNACK *nack) { + /* XCLAIM 0 TIME + RETRYCOUNT JUSTID FORCE. 
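The valid_before_multi bookkeeping introduced in loadAppendOnlyFile() above lets the loader treat a half-written MULTI ... EXEC tail like any short read: rewind valid_up_to to just before the MULTI and cut the file there. A sketch of the truncation step, using a hypothetical helper rather than Redis's actual recovery path:

```c
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical helper: truncate the AOF at the last offset known to end
 * a complete command sequence (valid_up_to), discarding the unprocessed
 * MULTI tail the loader detected. */
static int truncate_aof(const char *path, off_t valid_up_to) {
    int fd = open(path, O_WRONLY);
    if (fd == -1) return -1;
    int rc = ftruncate(fd, valid_up_to);
    close(fd);
    return rc;
}
```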
*/ + streamID id; + streamDecodeID(rawid,&id); + if (rioWriteBulkCount(r,'*',12) == 0) return 0; + if (rioWriteBulkString(r,"XCLAIM",6) == 0) return 0; + if (rioWriteBulkObject(r,key) == 0) return 0; + if (rioWriteBulkString(r,groupname,groupname_len) == 0) return 0; + if (rioWriteBulkString(r,consumer->name,sdslen(consumer->name)) == 0) return 0; + if (rioWriteBulkString(r,"0",1) == 0) return 0; + if (rioWriteBulkStreamID(r,&id) == 0) return 0; + if (rioWriteBulkString(r,"TIME",4) == 0) return 0; + if (rioWriteBulkLongLong(r,nack->delivery_time) == 0) return 0; + if (rioWriteBulkString(r,"RETRYCOUNT",10) == 0) return 0; + if (rioWriteBulkLongLong(r,nack->delivery_count) == 0) return 0; + if (rioWriteBulkString(r,"JUSTID",6) == 0) return 0; + if (rioWriteBulkString(r,"FORCE",5) == 0) return 0; + return 1; +} + +/* Emit the commands needed to rebuild a stream object. + * The function returns 0 on error, 1 on success. */ +int rewriteStreamObject(rio *r, robj *key, robj *o) { + stream *s = o->ptr; + streamIterator si; + streamIteratorStart(&si,s,NULL,NULL,0); + streamID id; + int64_t numfields; + + if (s->length) { + /* Reconstruct the stream data using XADD commands. */ + while(streamIteratorGetID(&si,&id,&numfields)) { + /* Emit a two elements array for each item. The first is + * the ID, the second is an array of field-value pairs. */ + + /* Emit the XADD ...fields... command. */ + if (rioWriteBulkCount(r,'*',3+numfields*2) == 0) return 0; + if (rioWriteBulkString(r,"XADD",4) == 0) return 0; + if (rioWriteBulkObject(r,key) == 0) return 0; + if (rioWriteBulkStreamID(r,&id) == 0) return 0; + while(numfields--) { + unsigned char *field, *value; + int64_t field_len, value_len; + streamIteratorGetField(&si,&field,&value,&field_len,&value_len); + if (rioWriteBulkString(r,(char*)field,field_len) == 0) return 0; + if (rioWriteBulkString(r,(char*)value,value_len) == 0) return 0; + } + } + } else { + /* Use the XADD MAXLEN 0 trick to generate an empty stream if + * the key we are serializing is an empty string, which is possible + * for the Stream type. */ + if (rioWriteBulkCount(r,'*',7) == 0) return 0; + if (rioWriteBulkString(r,"XADD",4) == 0) return 0; + if (rioWriteBulkObject(r,key) == 0) return 0; + if (rioWriteBulkString(r,"MAXLEN",6) == 0) return 0; + if (rioWriteBulkString(r,"0",1) == 0) return 0; + if (rioWriteBulkStreamID(r,&s->last_id) == 0) return 0; + if (rioWriteBulkString(r,"x",1) == 0) return 0; + if (rioWriteBulkString(r,"y",1) == 0) return 0; + } + + /* Append XSETID after XADD, make sure lastid is correct, + * in case of XDEL lastid. */ + if (rioWriteBulkCount(r,'*',3) == 0) return 0; + if (rioWriteBulkString(r,"XSETID",6) == 0) return 0; + if (rioWriteBulkObject(r,key) == 0) return 0; + if (rioWriteBulkStreamID(r,&s->last_id) == 0) return 0; + + + /* Create all the stream consumer groups. */ + if (s->cgroups) { + raxIterator ri; + raxStart(&ri,s->cgroups); + raxSeek(&ri,"^",NULL,0); + while(raxNext(&ri)) { + streamCG *group = ri.data; + /* Emit the XGROUP CREATE in order to create the group. */ + if (rioWriteBulkCount(r,'*',5) == 0) return 0; + if (rioWriteBulkString(r,"XGROUP",6) == 0) return 0; + if (rioWriteBulkString(r,"CREATE",6) == 0) return 0; + if (rioWriteBulkObject(r,key) == 0) return 0; + if (rioWriteBulkString(r,(char*)ri.key,ri.key_len) == 0) return 0; + if (rioWriteBulkStreamID(r,&group->last_id) == 0) return 0; + + /* Generate XCLAIMs for each consumer that happens to + * have pending entries. Empty consumers have no semantical + * value so they are discarded. 
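For a one-entry stream with one consumer group and one pending entry, the rewrite logic above boils down to replaying a short command sequence. A hiredis sketch of that replay; the key, group, consumer name, and address are illustrative:

```c
#include <hiredis/hiredis.h>

int main(void) {
    redisContext *c = redisConnect("127.0.0.1", 6379);
    if (c == NULL || c->err) return 1;
    redisReply *r;
    /* Rebuild the entry, pin last_id, recreate the group, then force a
     * pending entry back onto consumer1's PEL -- mirroring the XADD /
     * XSETID / XGROUP / XCLAIM sequence emitted above. */
    if ((r = redisCommand(c, "XADD s 1-1 field value"))) freeReplyObject(r);
    if ((r = redisCommand(c, "XSETID s 1-1"))) freeReplyObject(r);
    if ((r = redisCommand(c, "XGROUP CREATE s grp 0"))) freeReplyObject(r);
    if ((r = redisCommand(c, "XCLAIM s grp consumer1 0 1-1 "
                             "TIME 0 RETRYCOUNT 1 JUSTID FORCE")))
        freeReplyObject(r);
    redisFree(c);
    return 0;
}
```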
*/ + raxIterator ri_cons; + raxStart(&ri_cons,group->consumers); + raxSeek(&ri_cons,"^",NULL,0); + while(raxNext(&ri_cons)) { + streamConsumer *consumer = ri_cons.data; + /* For the current consumer, iterate all the PEL entries + * to emit the XCLAIM protocol. */ + raxIterator ri_pel; + raxStart(&ri_pel,consumer->pel); + raxSeek(&ri_pel,"^",NULL,0); + while(raxNext(&ri_pel)) { + streamNACK *nack = ri_pel.data; + if (rioWriteStreamPendingEntry(r,key,(char*)ri.key, + ri.key_len,consumer, + ri_pel.key,nack) == 0) + { + return 0; + } + } + raxStop(&ri_pel); + } + raxStop(&ri_cons); + } + raxStop(&ri); + } + + streamIteratorStop(&si); + return 1; +} + /* Call the module type callback in order to rewrite a data type * that is exported by a module and is not handled by Redis itself. * The function returns 0 on error, 1 on success. */ @@ -1109,7 +1267,6 @@ int rewriteAppendOnlyFileRio(rio *aof) { dictIterator *di = NULL; dictEntry *de; size_t processed = 0; - long long now = mstime(); int j; for (j = 0; j < server.dbnum; j++) { @@ -1135,9 +1292,6 @@ int rewriteAppendOnlyFileRio(rio *aof) { expiretime = getExpire(db,&key); - /* If this key is already expired skip it */ - if (expiretime != -1 && expiretime < now) continue; - /* Save the key and associated value */ if (o->type == OBJ_STRING) { /* Emit a SET command */ @@ -1154,6 +1308,8 @@ int rewriteAppendOnlyFileRio(rio *aof) { if (rewriteSortedSetObject(aof,&key,o) == 0) goto werr; } else if (o->type == OBJ_HASH) { if (rewriteHashObject(aof,&key,o) == 0) goto werr; + } else if (o->type == OBJ_STREAM) { + if (rewriteStreamObject(aof,&key,o) == 0) goto werr; } else if (o->type == OBJ_MODULE) { if (rewriteModuleObject(aof,&key,o) == 0) goto werr; } else { @@ -1208,7 +1364,7 @@ int rewriteAppendOnlyFile(char *filename) { rioInitWithFile(&aof,fp); if (server.aof_rewrite_incremental_fsync) - rioSetAutoSync(&aof,AOF_AUTOSYNC_BYTES); + rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES); if (server.aof_use_rdb_preamble) { int error; @@ -1576,7 +1732,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { oldfd = server.aof_fd; server.aof_fd = newfd; if (server.aof_fsync == AOF_FSYNC_ALWAYS) - aof_fsync(newfd); + redis_fsync(newfd); else if (server.aof_fsync == AOF_FSYNC_EVERYSEC) aof_background_fsync(newfd); server.aof_selected_db = -1; /* Make sure SELECT is re-issued */ @@ -1603,7 +1759,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { "Background AOF rewrite signal handler took %lldus", ustime()-now); } else if (!bysignal && exitcode != 0) { /* SIGUSR1 is whitelisted, so we have a way to kill a child without - * tirggering an error conditon. */ + * tirggering an error condition. 
*/
        if (bysignal != SIGUSR1)
            server.aof_lastbgrewrite_status = C_ERR;
        serverLog(LL_WARNING,
diff --git a/redis-android/src/main/jni/redis-4.0.11/src/asciilogo.h b/redis-android/src/main/jni/redis-5.0.0/src/asciilogo.h
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/src/asciilogo.h
rename to redis-android/src/main/jni/redis-5.0.0/src/asciilogo.h
diff --git a/redis-android/src/main/jni/redis-4.0.11/src/atomicvar.h b/redis-android/src/main/jni/redis-5.0.0/src/atomicvar.h
similarity index 98%
rename from redis-android/src/main/jni/redis-4.0.11/src/atomicvar.h
rename to redis-android/src/main/jni/redis-5.0.0/src/atomicvar.h
index 84a5bbc..173b045 100644
--- a/redis-android/src/main/jni/redis-4.0.11/src/atomicvar.h
+++ b/redis-android/src/main/jni/redis-5.0.0/src/atomicvar.h
@@ -16,7 +16,7 @@
 *  pthread_mutex_t myvar_mutex;
 *  atomicSet(myvar,12345);
 *
- * If atomic primitives are availble (tested in config.h) the mutex
+ * If atomic primitives are available (tested in config.h) the mutex
 * is not used.
 *
 * Never use return value from the macros, instead use the AtomicGetIncr()
diff --git a/redis-android/src/main/jni/redis-4.0.11/src/bio.c b/redis-android/src/main/jni/redis-5.0.0/src/bio.c
similarity index 98%
rename from redis-android/src/main/jni/redis-4.0.11/src/bio.c
rename to redis-android/src/main/jni/redis-5.0.0/src/bio.c
index 4a5d197..6faf578 100644
--- a/redis-android/src/main/jni/redis-4.0.11/src/bio.c
+++ b/redis-android/src/main/jni/redis-5.0.0/src/bio.c
@@ -17,7 +17,7 @@
 *
 * The design is trivial, we have a structure representing a job to perform
 * and a different thread and job queue for every job type.
- * Every thread wait for new jobs in its queue, and process every job
+ * Every thread waits for new jobs in its queue, and processes every job
 * sequentially.
 *
 * Jobs of the same type are guaranteed to be processed from the least
@@ -192,7 +192,7 @@ void *bioProcessBackgroundJobs(void *arg) {
        if (type == BIO_CLOSE_FILE) {
            close((long)job->arg1);
        } else if (type == BIO_AOF_FSYNC) {
-            aof_fsync((long)job->arg1);
+            redis_fsync((long)job->arg1);
        } else if (type == BIO_LAZY_FREE) {
            /* What we free changes depending on what arguments are set:
             * arg1 -> free the object at pointer.
@@ -209,14 +209,14 @@ void *bioProcessBackgroundJobs(void *arg) {
        }
        zfree(job);

-        /* Unblock threads blocked on bioWaitStepOfType() if any. */
-        pthread_cond_broadcast(&bio_step_cond[type]);
-
        /* Lock again before reiterating the loop, if there are no longer
         * jobs to process we'll block again in pthread_cond_wait(). */
        pthread_mutex_lock(&bio_mutex[type]);
        listDelNode(bio_jobs[type],ln);
        bio_pending[type]--;
+
+        /* Unblock threads blocked on bioWaitStepOfType() if any.
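The bio.c hunk above moves the bioWaitStepOfType() wakeup so the broadcast happens only after bio_pending is decremented under the mutex, ensuring a woken waiter observes the new count. A stripped-down sketch of that ordering, with simplified stand-ins for the bio.c globals (link with -lpthread):

#include <pthread.h>

static pthread_mutex_t job_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  step_cond = PTHREAD_COND_INITIALIZER;
static unsigned long   pending = 1;       /* jobs not yet completed */

/* Worker side: update shared state under the mutex first, then broadcast. */
void job_done(void) {
    pthread_mutex_lock(&job_mutex);
    pending--;
    pthread_cond_broadcast(&step_cond);
    pthread_mutex_unlock(&job_mutex);
}

/* Waiter in the style of bioWaitStepOfType(): sleeps until the pending
 * count drops below 'target'. */
void wait_until_below(unsigned long target) {
    pthread_mutex_lock(&job_mutex);
    while (pending >= target)
        pthread_cond_wait(&step_cond, &job_mutex);
    pthread_mutex_unlock(&job_mutex);
}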
*/ + pthread_cond_broadcast(&bio_step_cond[type]); } } diff --git a/redis-android/src/main/jni/redis-4.0.11/src/bio.h b/redis-android/src/main/jni/redis-5.0.0/src/bio.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/bio.h rename to redis-android/src/main/jni/redis-5.0.0/src/bio.h diff --git a/redis-android/src/main/jni/redis-4.0.11/src/bitops.c b/redis-android/src/main/jni/redis-5.0.0/src/bitops.c similarity index 99% rename from redis-android/src/main/jni/redis-4.0.11/src/bitops.c rename to redis-android/src/main/jni/redis-5.0.0/src/bitops.c index 43450fc..23f2266 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/bitops.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/bitops.c @@ -918,7 +918,7 @@ void bitfieldCommand(client *c) { struct bitfieldOp *ops = NULL; /* Array of ops to execute at end. */ int owtype = BFOVERFLOW_WRAP; /* Overflow type. */ int readonly = 1; - size_t higest_write_offset = 0; + size_t highest_write_offset = 0; for (j = 2; j < c->argc; j++) { int remargs = c->argc-j-1; /* Remaining args other than current. */ @@ -968,8 +968,8 @@ void bitfieldCommand(client *c) { if (opcode != BITFIELDOP_GET) { readonly = 0; - if (higest_write_offset < bitoffset + bits - 1) - higest_write_offset = bitoffset + bits - 1; + if (highest_write_offset < bitoffset + bits - 1) + highest_write_offset = bitoffset + bits - 1; /* INCRBY and SET require another argument. */ if (getLongLongFromObjectOrReply(c,c->argv[j+3],&i64,NULL) != C_OK){ zfree(ops); @@ -999,7 +999,7 @@ void bitfieldCommand(client *c) { /* Lookup by making room up to the farest bit reached by * this operation. */ if ((o = lookupStringForBitCommand(c, - higest_write_offset)) == NULL) return; + highest_write_offset)) == NULL) return; } addReplyMultiBulkLen(c,numops); diff --git a/redis-android/src/main/jni/redis-5.0.0/src/blocked.c b/redis-android/src/main/jni/redis-5.0.0/src/blocked.c new file mode 100644 index 0000000..2b43f2b --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/src/blocked.c @@ -0,0 +1,608 @@ +/* blocked.c - generic support for blocking operations like BLPOP & WAIT. + * + * Copyright (c) 2009-2012, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * API:
+ *
+ * getTimeoutFromObjectOrReply() is just a utility function to parse a
+ * timeout argument since blocking operations usually require a timeout.
+ *
+ * blockClient() sets the CLIENT_BLOCKED flag in the client, and sets the
+ * specified block type 'btype' field to one of the BLOCKED_* macros.
+ *
+ * unblockClient() unblocks the client doing the following:
+ * 1) It calls the btype-specific function to clean up the state.
+ * 2) It unblocks the client by unsetting the CLIENT_BLOCKED flag.
+ * 3) It puts the client into a list of just unblocked clients that are
+ *    processed ASAP in the beforeSleep() event loop callback, so that
+ *    if there is some query buffer to process, we do it. This is also
+ *    required because otherwise no 'readable' event is fired: we
+ *    already read the pending commands. We also set the CLIENT_UNBLOCKED
+ *    flag to remember the client is in the unblocked_clients list.
+ *
+ * processUnblockedClients() is called inside the beforeSleep() function
+ * to process the query buffer from unblocked clients and remove the clients
+ * from the blocked_clients queue.
+ *
+ * replyToBlockedClientTimedOut() is called by the cron function when
+ * a blocked client reaches the specified timeout (if the timeout is set
+ * to 0, no timeout is processed).
+ * It usually just needs to send a reply to the client.
+ *
+ * When implementing a new type of blocking operation, the implementation
+ * should modify unblockClient() and replyToBlockedClientTimedOut() in order
+ * to handle the btype-specific behavior of these two functions.
+ * If the blocking operation waits for certain keys to change state, the
+ * clusterRedirectBlockedClientIfNeeded() function should also be updated.
+ */
+
+#include "server.h"
+
+int serveClientBlockedOnList(client *receiver, robj *key, robj *dstkey, redisDb *db, robj *value, int where);
+
+/* Get a timeout value from an object and store it into 'timeout'.
+ * The final timeout is always stored as milliseconds, as the time at which
+ * the timeout will expire, however the parsing is performed according to
+ * the 'unit' that can be seconds or milliseconds.
+ *
+ * Note that if the timeout is zero (usually from the point of view of
+ * commands API this means no timeout) the value stored into 'timeout'
+ * is zero. */
+int getTimeoutFromObjectOrReply(client *c, robj *object, mstime_t *timeout, int unit) {
+    long long tval;
+
+    if (getLongLongFromObjectOrReply(c,object,&tval,
+        "timeout is not an integer or out of range") != C_OK)
+        return C_ERR;
+
+    if (tval < 0) {
+        addReplyError(c,"timeout is negative");
+        return C_ERR;
+    }
+
+    if (tval > 0) {
+        if (unit == UNIT_SECONDS) tval *= 1000;
+        tval += mstime();
+    }
+    *timeout = tval;
+
+    return C_OK;
+}
+
+/* Block a client for the specific operation type.
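getTimeoutFromObjectOrReply() stores relative timeouts as absolute millisecond deadlines. A standalone sketch of the same normalization, assuming now_ms() as a stand-in for mstime() and plain 0/-1 return codes instead of C_OK/C_ERR:

#include <stdint.h>
#include <time.h>

typedef int64_t mstime_t;

static mstime_t now_ms(void) {
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    return (mstime_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

#define UNIT_SECONDS 0
#define UNIT_MILLISECONDS 1

int parse_timeout(long long tval, int unit, mstime_t *timeout) {
    if (tval < 0) return -1;          /* "timeout is negative" */
    if (tval > 0) {
        if (unit == UNIT_SECONDS) tval *= 1000;
        tval += now_ms();             /* store as an absolute deadline */
    }
    *timeout = tval;                  /* 0 still means "block forever" */
    return 0;
}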
Once the CLIENT_BLOCKED + * flag is set client query buffer is not longer processed, but accumulated, + * and will be processed when the client is unblocked. */ +void blockClient(client *c, int btype) { + c->flags |= CLIENT_BLOCKED; + c->btype = btype; + server.blocked_clients++; + server.blocked_clients_by_type[btype]++; +} + +/* This function is called in the beforeSleep() function of the event loop + * in order to process the pending input buffer of clients that were + * unblocked after a blocking operation. */ +void processUnblockedClients(void) { + listNode *ln; + client *c; + + while (listLength(server.unblocked_clients)) { + ln = listFirst(server.unblocked_clients); + serverAssert(ln != NULL); + c = ln->value; + listDelNode(server.unblocked_clients,ln); + c->flags &= ~CLIENT_UNBLOCKED; + + /* Process remaining data in the input buffer, unless the client + * is blocked again. Actually processInputBuffer() checks that the + * client is not blocked before to proceed, but things may change and + * the code is conceptually more correct this way. */ + if (!(c->flags & CLIENT_BLOCKED)) { + if (c->querybuf && sdslen(c->querybuf) > 0) { + processInputBufferAndReplicate(c); + } + } + } +} + +/* This function will schedule the client for reprocessing at a safe time. + * + * This is useful when a client was blocked for some reason (blocking opeation, + * CLIENT PAUSE, or whatever), because it may end with some accumulated query + * buffer that needs to be processed ASAP: + * + * 1. When a client is blocked, its readable handler is still active. + * 2. However in this case it only gets data into the query buffer, but the + * query is not parsed or executed once there is enough to proceed as + * usually (because the client is blocked... so we can't execute commands). + * 3. When the client is unblocked, without this function, the client would + * have to write some query in order for the readable handler to finally + * call processQueryBuffer*() on it. + * 4. With this function instead we can put the client in a queue that will + * process it for queries ready to be executed at a safe time. + */ +void queueClientForReprocessing(client *c) { + /* The client may already be into the unblocked list because of a previous + * blocking operation, don't add back it into the list multiple times. */ + if (!(c->flags & CLIENT_UNBLOCKED)) { + c->flags |= CLIENT_UNBLOCKED; + listAddNodeTail(server.unblocked_clients,c); + } +} + +/* Unblock a client calling the right function depending on the kind + * of operation the client is blocking for. */ +void unblockClient(client *c) { + if (c->btype == BLOCKED_LIST || + c->btype == BLOCKED_ZSET || + c->btype == BLOCKED_STREAM) { + unblockClientWaitingData(c); + } else if (c->btype == BLOCKED_WAIT) { + unblockClientWaitingReplicas(c); + } else if (c->btype == BLOCKED_MODULE) { + unblockClientFromModule(c); + } else { + serverPanic("Unknown btype in unblockClient()."); + } + /* Clear the flags, and put the client in the unblocked list so that + * we'll process new commands in its query buffer ASAP. */ + server.blocked_clients--; + server.blocked_clients_by_type[c->btype]--; + c->flags &= ~CLIENT_BLOCKED; + c->btype = BLOCKED_NONE; + queueClientForReprocessing(c); +} + +/* This function gets called when a blocked client timed out in order to + * send it a reply of some kind. After this function is called, + * unblockClient() will be called with the same client as argument. 
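The CLIENT_UNBLOCKED flag doubles as a membership marker for the unblocked_clients list, so a client is queued at most once per event loop iteration. A compilable toy version of that dedup logic; the fixed-size array replaces the adlist Redis actually uses:

#include <stdio.h>

#define CLIENT_BLOCKED   (1 << 0)
#define CLIENT_UNBLOCKED (1 << 1)

typedef struct { int id; int flags; } client;

static client *unblocked[16];
static int n_unblocked = 0;

void queue_for_reprocessing(client *c) {
    if (!(c->flags & CLIENT_UNBLOCKED)) {   /* already queued? skip */
        c->flags |= CLIENT_UNBLOCKED;
        unblocked[n_unblocked++] = c;
    }
}

int main(void) {
    client c = { 1, CLIENT_BLOCKED };
    c.flags &= ~CLIENT_BLOCKED;      /* unblockClient() clears the flag... */
    queue_for_reprocessing(&c);      /* ...and queues the client */
    queue_for_reprocessing(&c);      /* second call is a no-op */
    printf("queued %d time(s)\n", n_unblocked);   /* prints 1 */
    return 0;
}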
*/ +void replyToBlockedClientTimedOut(client *c) { + if (c->btype == BLOCKED_LIST || + c->btype == BLOCKED_ZSET || + c->btype == BLOCKED_STREAM) { + addReply(c,shared.nullmultibulk); + } else if (c->btype == BLOCKED_WAIT) { + addReplyLongLong(c,replicationCountAcksByOffset(c->bpop.reploffset)); + } else if (c->btype == BLOCKED_MODULE) { + moduleBlockedClientTimedOut(c); + } else { + serverPanic("Unknown btype in replyToBlockedClientTimedOut()."); + } +} + +/* Mass-unblock clients because something changed in the instance that makes + * blocking no longer safe. For example clients blocked in list operations + * in an instance which turns from master to slave is unsafe, so this function + * is called when a master turns into a slave. + * + * The semantics is to send an -UNBLOCKED error to the client, disconnecting + * it at the same time. */ +void disconnectAllBlockedClients(void) { + listNode *ln; + listIter li; + + listRewind(server.clients,&li); + while((ln = listNext(&li))) { + client *c = listNodeValue(ln); + + if (c->flags & CLIENT_BLOCKED) { + addReplySds(c,sdsnew( + "-UNBLOCKED force unblock from blocking operation, " + "instance state changed (master -> replica?)\r\n")); + unblockClient(c); + c->flags |= CLIENT_CLOSE_AFTER_REPLY; + } + } +} + +/* This function should be called by Redis every time a single command, + * a MULTI/EXEC block, or a Lua script, terminated its execution after + * being called by a client. It handles serving clients blocked in + * lists, streams, and sorted sets, via a blocking commands. + * + * All the keys with at least one client blocked that received at least + * one new element via some write operation are accumulated into + * the server.ready_keys list. This function will run the list and will + * serve clients accordingly. Note that the function will iterate again and + * again as a result of serving BRPOPLPUSH we can have new blocking clients + * to serve because of the PUSH side of BRPOPLPUSH. + * + * This function is normally "fair", that is, it will server clients + * using a FIFO behavior. However this fairness is violated in certain + * edge cases, that is, when we have clients blocked at the same time + * in a sorted set and in a list, for the same key (a very odd thing to + * do client side, indeed!). Because mismatching clients (blocking for + * a different type compared to the current key type) are moved in the + * other side of the linked list. However as long as the key starts to + * be used only for a single type, like virtually any Redis application will + * do, the function is already fair. */ +void handleClientsBlockedOnKeys(void) { + while(listLength(server.ready_keys) != 0) { + list *l; + + /* Point server.ready_keys to a fresh list and save the current one + * locally. This way as we run the old list we are free to call + * signalKeyAsReady() that may push new elements in server.ready_keys + * when handling clients blocked into BRPOPLPUSH. */ + l = server.ready_keys; + server.ready_keys = listCreate(); + + while(listLength(l) != 0) { + listNode *ln = listFirst(l); + readyList *rl = ln->value; + + /* First of all remove this key from db->ready_keys so that + * we can safely call signalKeyAsReady() against this key. */ + dictDelete(rl->db->ready_keys,rl->key); + + /* Serve clients blocked on list key. */ + robj *o = lookupKeyWrite(rl->db,rl->key); + if (o != NULL && o->type == OBJ_LIST) { + dictEntry *de; + + /* We serve clients in the same order they blocked for + * this key, from the first blocked to the last. 
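A toy model of the fairness rule described above: waiters on one key are served FIFO, and a waiter blocked for the wrong type is rotated to the tail so the scan can continue. The array queue and the client names are illustrative only:

#include <stdio.h>

enum { BLOCKED_LIST, BLOCKED_ZSET };

typedef struct { const char *name; int btype; } waiter;

static void serve_key(waiter *q, int n, int keytype) {
    int budget = n;                       /* examine each waiter once */
    while (budget-- > 0 && n > 0) {
        waiter w = q[0];                  /* take the oldest waiter... */
        for (int i = 0; i < n - 1; i++) q[i] = q[i + 1];
        q[n - 1] = w;                     /* ...and rotate it to the tail */
        if (w.btype != keytype) continue; /* wrong type: left at the tail */
        printf("serving %s\n", w.name);
        n--;                              /* served: drop it from the queue */
    }
}

int main(void) {
    waiter q[] = {{"A", BLOCKED_LIST}, {"B", BLOCKED_ZSET}, {"C", BLOCKED_LIST}};
    serve_key(q, 3, BLOCKED_LIST);        /* serves A, then C; B stays blocked */
    return 0;
}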
*/ + de = dictFind(rl->db->blocking_keys,rl->key); + if (de) { + list *clients = dictGetVal(de); + int numclients = listLength(clients); + + while(numclients--) { + listNode *clientnode = listFirst(clients); + client *receiver = clientnode->value; + + if (receiver->btype != BLOCKED_LIST) { + /* Put at the tail, so that at the next call + * we'll not run into it again. */ + listDelNode(clients,clientnode); + listAddNodeTail(clients,receiver); + continue; + } + + robj *dstkey = receiver->bpop.target; + int where = (receiver->lastcmd && + receiver->lastcmd->proc == blpopCommand) ? + LIST_HEAD : LIST_TAIL; + robj *value = listTypePop(o,where); + + if (value) { + /* Protect receiver->bpop.target, that will be + * freed by the next unblockClient() + * call. */ + if (dstkey) incrRefCount(dstkey); + unblockClient(receiver); + + if (serveClientBlockedOnList(receiver, + rl->key,dstkey,rl->db,value, + where) == C_ERR) + { + /* If we failed serving the client we need + * to also undo the POP operation. */ + listTypePush(o,value,where); + } + + if (dstkey) decrRefCount(dstkey); + decrRefCount(value); + } else { + break; + } + } + } + + if (listTypeLength(o) == 0) { + dbDelete(rl->db,rl->key); + notifyKeyspaceEvent(NOTIFY_GENERIC,"del",rl->key,rl->db->id); + } + /* We don't call signalModifiedKey() as it was already called + * when an element was pushed on the list. */ + } + + /* Serve clients blocked on sorted set key. */ + else if (o != NULL && o->type == OBJ_ZSET) { + dictEntry *de; + + /* We serve clients in the same order they blocked for + * this key, from the first blocked to the last. */ + de = dictFind(rl->db->blocking_keys,rl->key); + if (de) { + list *clients = dictGetVal(de); + int numclients = listLength(clients); + unsigned long zcard = zsetLength(o); + + while(numclients-- && zcard) { + listNode *clientnode = listFirst(clients); + client *receiver = clientnode->value; + + if (receiver->btype != BLOCKED_ZSET) { + /* Put at the tail, so that at the next call + * we'll not run into it again. */ + listDelNode(clients,clientnode); + listAddNodeTail(clients,receiver); + continue; + } + + int where = (receiver->lastcmd && + receiver->lastcmd->proc == bzpopminCommand) + ? ZSET_MIN : ZSET_MAX; + unblockClient(receiver); + genericZpopCommand(receiver,&rl->key,1,where,1,NULL); + zcard--; + + /* Replicate the command. */ + robj *argv[2]; + struct redisCommand *cmd = where == ZSET_MIN ? + server.zpopminCommand : + server.zpopmaxCommand; + argv[0] = createStringObject(cmd->name,strlen(cmd->name)); + argv[1] = rl->key; + incrRefCount(rl->key); + propagate(cmd,receiver->db->id, + argv,2,PROPAGATE_AOF|PROPAGATE_REPL); + decrRefCount(argv[0]); + decrRefCount(argv[1]); + } + } + } + + /* Serve clients blocked on stream key. */ + else if (o != NULL && o->type == OBJ_STREAM) { + dictEntry *de = dictFind(rl->db->blocking_keys,rl->key); + stream *s = o->ptr; + + /* We need to provide the new data arrived on the stream + * to all the clients that are waiting for an offset smaller + * than the current top item. 
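Serving a list waiter above pops the element first and pushes it back at the same end if delivery fails, so no data is lost. A self-contained sketch of that pop-with-undo step, with deliver() as an invented stand-in for serveClientBlockedOnList():

#include <stdio.h>

static int deliver(int value) { (void)value; return -1; /* simulate failure */ }

/* Returns 1 if served, -1 if delivery failed (value restored), 0 if empty. */
static int pop_and_serve(int *list, int *len) {
    if (*len == 0) return 0;
    int value = list[0];                             /* LPOP */
    for (int i = 0; i < *len - 1; i++) list[i] = list[i + 1];
    (*len)--;
    if (deliver(value) != 0) {
        for (int i = *len; i > 0; i--) list[i] = list[i - 1];
        list[0] = value;                             /* undo: push it back */
        (*len)++;
        return -1;
    }
    return 1;
}

int main(void) {
    int list[4] = {7, 8, 9}, len = 3;
    pop_and_serve(list, &len);
    printf("len=%d head=%d\n", len, list[0]);        /* 3 and 7: restored */
    return 0;
}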
*/ + if (de) { + list *clients = dictGetVal(de); + listNode *ln; + listIter li; + listRewind(clients,&li); + + while((ln = listNext(&li))) { + client *receiver = listNodeValue(ln); + if (receiver->btype != BLOCKED_STREAM) continue; + streamID *gt = dictFetchValue(receiver->bpop.keys, + rl->key); + + /* If we blocked in the context of a consumer + * group, we need to resolve the group and update the + * last ID the client is blocked for: this is needed + * because serving other clients in the same consumer + * group will alter the "last ID" of the consumer + * group, and clients blocked in a consumer group are + * always blocked for the ">" ID: we need to deliver + * only new messages and avoid unblocking the client + * otherwise. */ + streamCG *group = NULL; + if (receiver->bpop.xread_group) { + group = streamLookupCG(s, + receiver->bpop.xread_group->ptr); + /* If the group was not found, send an error + * to the consumer. */ + if (!group) { + addReplyError(receiver, + "-NOGROUP the consumer group this client " + "was blocked on no longer exists"); + unblockClient(receiver); + continue; + } else { + *gt = group->last_id; + } + } + + if (streamCompareID(&s->last_id, gt) > 0) { + streamID start = *gt; + start.seq++; /* Can't overflow, it's an uint64_t */ + + /* Lookup the consumer for the group, if any. */ + streamConsumer *consumer = NULL; + int noack = 0; + + if (group) { + consumer = streamLookupConsumer(group, + receiver->bpop.xread_consumer->ptr, + 1); + noack = receiver->bpop.xread_group_noack; + } + + /* Emit the two elements sub-array consisting of + * the name of the stream and the data we + * extracted from it. Wrapped in a single-item + * array, since we have just one key. */ + addReplyMultiBulkLen(receiver,1); + addReplyMultiBulkLen(receiver,2); + addReplyBulk(receiver,rl->key); + + streamPropInfo pi = { + rl->key, + receiver->bpop.xread_group + }; + streamReplyWithRange(receiver,s,&start,NULL, + receiver->bpop.xread_count, + 0, group, consumer, noack, &pi); + + /* Note that after we unblock the client, 'gt' + * and other receiver->bpop stuff are no longer + * valid, so we must do the setup above before + * this call. */ + unblockClient(receiver); + } + } + } + } + + /* Free this item. */ + decrRefCount(rl->key); + zfree(rl); + listDelNode(l,ln); + } + listRelease(l); /* We have the new list on place at this point. */ + } +} + +/* This is how the current blocking lists/sorted sets/streams work, we use + * BLPOP as example, but the concept is the same for other list ops, sorted + * sets and XREAD. + * - If the user calls BLPOP and the key exists and contains a non empty list + * then LPOP is called instead. So BLPOP is semantically the same as LPOP + * if blocking is not required. + * - If instead BLPOP is called and the key does not exists or the list is + * empty we need to block. In order to do so we remove the notification for + * new data to read in the client socket (so that we'll not serve new + * requests if the blocking request is not served). Also we put the client + * in a dictionary (db->blocking_keys) mapping keys to a list of clients + * blocking for this keys. + * - If a PUSH operation against a key with blocked clients waiting is + * performed, we mark this key as "ready", and after the current command, + * MULTI/EXEC block, or script, is executed, we serve all the clients waiting + * for this list, from the one that blocked first, to the last, accordingly + * to the number of elements we have in the ready list. 
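The stream case above unblocks a client only when the stream's last_id is strictly greater than the ID the client is blocked on, and the reply range starts one sequence step later (the start.seq++ step). A standalone version of that comparison:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t ms, seq; } streamID;

int stream_cmp(const streamID *a, const streamID *b) {
    if (a->ms != b->ms) return a->ms > b->ms ? 1 : -1;
    if (a->seq != b->seq) return a->seq > b->seq ? 1 : -1;
    return 0;
}

int main(void) {
    streamID last = { 1540500000000ULL, 1 };  /* stream's top item */
    streamID gt   = { 1540500000000ULL, 0 };  /* client blocked for > this */
    if (stream_cmp(&last, &gt) > 0) {
        streamID start = gt;
        start.seq++;                          /* first ID to deliver */
        printf("deliver from %llu-%llu\n",
               (unsigned long long)start.ms, (unsigned long long)start.seq);
    }
    return 0;
}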
+ */ + +/* Set a client in blocking mode for the specified key (list, zset or stream), + * with the specified timeout. The 'type' argument is BLOCKED_LIST, + * BLOCKED_ZSET or BLOCKED_STREAM depending on the kind of operation we are + * waiting for an empty key in order to awake the client. The client is blocked + * for all the 'numkeys' keys as in the 'keys' argument. When we block for + * stream keys, we also provide an array of streamID structures: clients will + * be unblocked only when items with an ID greater or equal to the specified + * one is appended to the stream. */ +void blockForKeys(client *c, int btype, robj **keys, int numkeys, mstime_t timeout, robj *target, streamID *ids) { + dictEntry *de; + list *l; + int j; + + c->bpop.timeout = timeout; + c->bpop.target = target; + + if (target != NULL) incrRefCount(target); + + for (j = 0; j < numkeys; j++) { + /* The value associated with the key name in the bpop.keys dictionary + * is NULL for lists and sorted sets, or the stream ID for streams. */ + void *key_data = NULL; + if (btype == BLOCKED_STREAM) { + key_data = zmalloc(sizeof(streamID)); + memcpy(key_data,ids+j,sizeof(streamID)); + } + + /* If the key already exists in the dictionary ignore it. */ + if (dictAdd(c->bpop.keys,keys[j],key_data) != DICT_OK) { + zfree(key_data); + continue; + } + incrRefCount(keys[j]); + + /* And in the other "side", to map keys -> clients */ + de = dictFind(c->db->blocking_keys,keys[j]); + if (de == NULL) { + int retval; + + /* For every key we take a list of clients blocked for it */ + l = listCreate(); + retval = dictAdd(c->db->blocking_keys,keys[j],l); + incrRefCount(keys[j]); + serverAssertWithInfo(c,keys[j],retval == DICT_OK); + } else { + l = dictGetVal(de); + } + listAddNodeTail(l,c); + } + blockClient(c,btype); +} + +/* Unblock a client that's waiting in a blocking operation such as BLPOP. + * You should never call this function directly, but unblockClient() instead. */ +void unblockClientWaitingData(client *c) { + dictEntry *de; + dictIterator *di; + list *l; + + serverAssertWithInfo(c,NULL,dictSize(c->bpop.keys) != 0); + di = dictGetIterator(c->bpop.keys); + /* The client may wait for multiple keys, so unblock it for every key. */ + while((de = dictNext(di)) != NULL) { + robj *key = dictGetKey(de); + + /* Remove this client from the list of clients waiting for this key. */ + l = dictFetchValue(c->db->blocking_keys,key); + serverAssertWithInfo(c,key,l != NULL); + listDelNode(l,listSearchKey(l,c)); + /* If the list is empty we need to remove it to avoid wasting memory */ + if (listLength(l) == 0) + dictDelete(c->db->blocking_keys,key); + } + dictReleaseIterator(di); + + /* Cleanup the client structure */ + dictEmpty(c->bpop.keys,NULL); + if (c->bpop.target) { + decrRefCount(c->bpop.target); + c->bpop.target = NULL; + } + if (c->bpop.xread_group) { + decrRefCount(c->bpop.xread_group); + decrRefCount(c->bpop.xread_consumer); + c->bpop.xread_group = NULL; + c->bpop.xread_consumer = NULL; + } +} + +/* If the specified key has clients blocked waiting for list pushes, this + * function will put the key reference into the server.ready_keys list. + * Note that db->ready_keys is a hash table that allows us to avoid putting + * the same key again and again in the list in case of multiple pushes + * made by a script or in the context of MULTI/EXEC. + * + * The list will be finally processed by handleClientsBlockedOnLists() */ +void signalKeyAsReady(redisDb *db, robj *key) { + readyList *rl; + + /* No clients blocking for this key? 
No need to queue it. */ + if (dictFind(db->blocking_keys,key) == NULL) return; + + /* Key was already signaled? No need to queue it again. */ + if (dictFind(db->ready_keys,key) != NULL) return; + + /* Ok, we need to queue this key into server.ready_keys. */ + rl = zmalloc(sizeof(*rl)); + rl->key = key; + rl->db = db; + incrRefCount(key); + listAddNodeTail(server.ready_keys,rl); + + /* We also add the key in the db->ready_keys dictionary in order + * to avoid adding it multiple times into a list with a simple O(1) + * check. */ + incrRefCount(key); + serverAssert(dictAdd(db->ready_keys,key,NULL) == DICT_OK); +} + + diff --git a/redis-android/src/main/jni/redis-4.0.11/src/childinfo.c b/redis-android/src/main/jni/redis-5.0.0/src/childinfo.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/childinfo.c rename to redis-android/src/main/jni/redis-5.0.0/src/childinfo.c diff --git a/redis-android/src/main/jni/redis-4.0.11/src/cluster.c b/redis-android/src/main/jni/redis-5.0.0/src/cluster.c similarity index 95% rename from redis-android/src/main/jni/redis-4.0.11/src/cluster.c rename to redis-android/src/main/jni/redis-5.0.0/src/cluster.c index f853ff9..e82f256 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/cluster.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/cluster.c @@ -56,7 +56,7 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request); void clusterUpdateState(void); int clusterNodeGetSlotBit(clusterNode *n, int slot); sds clusterGenNodesDescription(int filter); -clusterNode *clusterLookupNode(char *name); +clusterNode *clusterLookupNode(const char *name); int clusterNodeAddSlave(clusterNode *master, clusterNode *slave); int clusterAddSlot(clusterNode *n, int slot); int clusterDelSlot(int slot); @@ -75,6 +75,7 @@ void clusterDelNode(clusterNode *delnode); sds representClusterNodeFlags(sds ci, uint16_t flags); uint64_t clusterGetMaxEpoch(void); int clusterBumpConfigEpochWithoutConsensus(void); +void moduleCallClusterReceivers(const char *sender_id, uint64_t module_id, uint8_t type, const unsigned char *payload, uint32_t len); /* ----------------------------------------------------------------------------- * Initialization @@ -600,8 +601,7 @@ clusterLink *createClusterLink(clusterNode *node) { * with this link will have the 'link' field set to NULL. */ void freeClusterLink(clusterLink *link) { if (link->fd != -1) { - aeDeleteFileEvent(server.el, link->fd, AE_WRITABLE); - aeDeleteFileEvent(server.el, link->fd, AE_READABLE); + aeDeleteFileEvent(server.el, link->fd, AE_READABLE|AE_WRITABLE); } sdsfree(link->sndbuf); sdsfree(link->rcvbuf); @@ -672,7 +672,7 @@ unsigned int keyHashSlot(char *key, int keylen) { for (e = s+1; e < keylen; e++) if (key[e] == '}') break; - /* No '}' or nothing betweeen {} ? Hash the whole key. */ + /* No '}' or nothing between {} ? Hash the whole key. */ if (e == keylen || e == s+1) return crc16(key,keylen) & 0x3FFF; /* If we are here there is both a { and a } on its right. Hash @@ -932,7 +932,7 @@ void clusterDelNode(clusterNode *delnode) { } /* Node lookup by name */ -clusterNode *clusterLookupNode(char *name) { +clusterNode *clusterLookupNode(const char *name) { sds s = sdsnewlen(name, CLUSTER_NAMELEN); dictEntry *de; @@ -1230,7 +1230,7 @@ void clearNodeFailureIfNeeded(clusterNode *node) { serverLog(LL_NOTICE, "Clear FAIL state for node %.40s: %s is reachable again.", node->name, - nodeIsSlave(node) ? "slave" : "master without slots"); + nodeIsSlave(node) ? 
"replica" : "master without slots"); node->flags &= ~CLUSTER_NODE_FAIL; clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG); } @@ -1589,6 +1589,12 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc } } + /* After updating the slots configuration, don't do any actual change + * in the state of the server if a module disabled Redis Cluster + * keys redirections. */ + if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) + return; + /* If at least one slot was reassigned from a node to another node * with a greater configEpoch, it is possible that: * 1) We are a master left without slots. This means that we were @@ -1683,6 +1689,12 @@ int clusterProcessPacket(clusterLink *link) { explen += sizeof(clusterMsgDataUpdate); if (totlen != explen) return 1; + } else if (type == CLUSTERMSG_TYPE_MODULE) { + uint32_t explen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + + explen += sizeof(clusterMsgDataPublish) - + 3 + ntohl(hdr->data.module.msg.len); + if (totlen != explen) return 1; } /* Check if the sender is a known node. */ @@ -2053,7 +2065,7 @@ int clusterProcessPacket(clusterLink *link) { server.cluster->mf_end = mstime() + CLUSTER_MF_TIMEOUT; server.cluster->mf_slave = sender; pauseClients(mstime()+(CLUSTER_MF_TIMEOUT*2)); - serverLog(LL_WARNING,"Manual failover requested by slave %.40s.", + serverLog(LL_WARNING,"Manual failover requested by replica %.40s.", sender->name); } else if (type == CLUSTERMSG_TYPE_UPDATE) { clusterNode *n; /* The node the update is about. */ @@ -2077,6 +2089,15 @@ int clusterProcessPacket(clusterLink *link) { * config accordingly. */ clusterUpdateSlotsConfigWith(n,reportedConfigEpoch, hdr->data.update.nodecfg.slots); + } else if (type == CLUSTERMSG_TYPE_MODULE) { + if (!sender) return 1; /* Protect the module from unknown nodes. */ + /* We need to route this message back to the right module subscribed + * for the right message type. */ + uint64_t module_id = hdr->data.module.msg.module_id; /* Endian-safe ID */ + uint32_t len = ntohl(hdr->data.module.msg.len); + uint8_t type = hdr->data.module.msg.type; + unsigned char *payload = hdr->data.module.msg.bulk_data; + moduleCallClusterReceivers(sender->name,module_id,type,payload,len); } else { serverLog(LL_WARNING,"Received unknown packet type: %d", type); } @@ -2105,7 +2126,7 @@ void clusterWriteHandler(aeEventLoop *el, int fd, void *privdata, int mask) { nwritten = write(fd, link->sndbuf, sdslen(link->sndbuf)); if (nwritten <= 0) { serverLog(LL_DEBUG,"I/O error writing to node link: %s", - strerror(errno)); + (nwritten == -1) ? strerror(errno) : "short write"); handleLinkIOError(link); return; } @@ -2362,7 +2383,7 @@ void clusterSendPing(clusterLink *link, int type) { * same time. * * Since we have non-voting slaves that lower the probability of an entry - * to feature our node, we set the number of entires per packet as + * to feature our node, we set the number of entries per packet as * 10% of the total nodes we have. */ wanted = floor(dictSize(server.cluster->nodes)/10); if (wanted < 3) wanted = 3; @@ -2564,6 +2585,61 @@ void clusterSendUpdate(clusterLink *link, clusterNode *node) { clusterSendMessage(link,buf,ntohl(hdr->totlen)); } +/* Send a MODULE message. + * + * If link is NULL, then the message is broadcasted to the whole cluster. 
*/ +void clusterSendModule(clusterLink *link, uint64_t module_id, uint8_t type, + unsigned char *payload, uint32_t len) { + unsigned char buf[sizeof(clusterMsg)], *heapbuf; + clusterMsg *hdr = (clusterMsg*) buf; + uint32_t totlen; + + clusterBuildMessageHdr(hdr,CLUSTERMSG_TYPE_MODULE); + totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); + totlen += sizeof(clusterMsgModule) - 3 + len; + + hdr->data.module.msg.module_id = module_id; /* Already endian adjusted. */ + hdr->data.module.msg.type = type; + hdr->data.module.msg.len = htonl(len); + hdr->totlen = htonl(totlen); + + /* Try to use the local buffer if possible */ + if (totlen < sizeof(buf)) { + heapbuf = buf; + } else { + heapbuf = zmalloc(totlen); + memcpy(heapbuf,hdr,sizeof(*hdr)); + hdr = (clusterMsg*) heapbuf; + } + memcpy(hdr->data.module.msg.bulk_data,payload,len); + + if (link) + clusterSendMessage(link,heapbuf,totlen); + else + clusterBroadcastMessage(heapbuf,totlen); + + if (heapbuf != buf) zfree(heapbuf); +} + +/* This function gets a cluster node ID string as target, the same way the nodes + * addresses are represented in the modules side, resolves the node, and sends + * the message. If the target is NULL the message is broadcasted. + * + * The function returns C_OK if the target is valid, otherwise C_ERR is + * returned. */ +int clusterSendModuleMessageToTarget(const char *target, uint64_t module_id, uint8_t type, unsigned char *payload, uint32_t len) { + clusterNode *node = NULL; + + if (target != NULL) { + node = clusterLookupNode(target); + if (node == NULL || node->link == NULL) return C_ERR; + } + + clusterSendModule(target ? node->link : NULL, + module_id, type, payload, len); + return C_OK; +} + /* ----------------------------------------------------------------------------- * CLUSTER Pub/Sub support * @@ -2803,7 +2879,7 @@ void clusterLogCantFailover(int reason) { switch(reason) { case CLUSTER_CANT_FAILOVER_DATA_AGE: msg = "Disconnected from master for longer than allowed. " - "Please check the 'cluster-slave-validity-factor' configuration " + "Please check the 'cluster-replica-validity-factor' configuration " "option."; break; case CLUSTER_CANT_FAILOVER_WAITING_DELAY: @@ -2984,7 +3060,7 @@ void clusterHandleSlaveFailover(void) { server.cluster->failover_auth_time += added_delay; server.cluster->failover_auth_rank = newrank; serverLog(LL_WARNING, - "Slave rank updated to #%d, added %lld milliseconds of delay.", + "Replica rank updated to #%d, added %lld milliseconds of delay.", newrank, added_delay); } } @@ -3030,7 +3106,7 @@ void clusterHandleSlaveFailover(void) { (unsigned long long) myself->configEpoch); } - /* Take responsability for the cluster slots. */ + /* Take responsibility for the cluster slots. */ clusterFailoverReplaceYourMaster(); } else { clusterLogCantFailover(CLUSTER_CANT_FAILOVER_WAITING_VOTES); @@ -3081,11 +3157,11 @@ void clusterHandleSlaveMigration(int max_slaves) { !nodeTimedOut(mymaster->slaves[j])) okslaves++; if (okslaves <= server.cluster_migration_barrier) return; - /* Step 3: Idenitfy a candidate for migration, and check if among the + /* Step 3: Identify a candidate for migration, and check if among the * masters with the greatest number of ok slaves, I'm the one with the * smallest node ID (the "candidate slave"). * - * Note: this means that eventually a replica migration will occurr + * Note: this means that eventually a replica migration will occur * since slaves that are reachable again always have their FAIL flag * cleared, so eventually there must be a candidate. 
At the same time * this does not mean that there are no race conditions possible (two @@ -3140,7 +3216,8 @@ void clusterHandleSlaveMigration(int max_slaves) { * the natural slaves of this instance to advertise their switch from * the old master to the new one. */ if (target && candidate == myself && - (mstime()-target->orphaned_time) > CLUSTER_SLAVE_MIGRATION_DELAY) + (mstime()-target->orphaned_time) > CLUSTER_SLAVE_MIGRATION_DELAY && + !(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) { serverLog(LL_WARNING,"Migrating to orphaned master %.40s", target->name); @@ -3251,14 +3328,18 @@ void clusterCron(void) { int changed = 0; if (prev_ip == NULL && curr_ip != NULL) changed = 1; - if (prev_ip != NULL && curr_ip == NULL) changed = 1; - if (prev_ip && curr_ip && strcmp(prev_ip,curr_ip)) changed = 1; + else if (prev_ip != NULL && curr_ip == NULL) changed = 1; + else if (prev_ip && curr_ip && strcmp(prev_ip,curr_ip)) changed = 1; if (changed) { + if (prev_ip) zfree(prev_ip); prev_ip = curr_ip; - if (prev_ip) prev_ip = zstrdup(prev_ip); if (curr_ip) { + /* We always take a copy of the previous IP address, by + * duplicating the string. This way later we can check if + * the address really changed. */ + prev_ip = zstrdup(prev_ip); strncpy(myself->ip,server.cluster_announce_ip,NET_IP_STR_LEN); myself->ip[NET_IP_STR_LEN-1] = '\0'; } else { @@ -3489,7 +3570,8 @@ void clusterCron(void) { if (nodeIsSlave(myself)) { clusterHandleManualFailover(); - clusterHandleSlaveFailover(); + if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) + clusterHandleSlaveFailover(); /* If there are orphaned slaves, and we are a slave among the masters * with the max number of non-failing slaves, consider migrating to * the orphaned masters. Note that it does not make sense to try @@ -3666,7 +3748,7 @@ void clusterCloseAllSlots(void) { * -------------------------------------------------------------------------- */ /* The following are defines that are only used in the evaluation function - * and are based on heuristics. Actaully the main point about the rejoin and + * and are based on heuristics. Actually the main point about the rejoin and * writable delay is that they should be a few orders of magnitude larger * than the network latency. */ #define CLUSTER_MAX_REJOIN_DELAY 5000 @@ -3795,6 +3877,11 @@ int verifyClusterConfigWithData(void) { int j; int update_config = 0; + /* Return ASAP if a module disabled cluster redirections. In that case + * every master can store keys about every possible hash slot. */ + if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) + return C_OK; + /* If this node is a slave, don't perform the check at all as we * completely depend on the replication stream. */ if (nodeIsSlave(myself)) return C_OK; @@ -4009,6 +4096,7 @@ const char *clusterGetMessageTypeString(int type) { case CLUSTERMSG_TYPE_FAILOVER_AUTH_ACK: return "auth-ack"; case CLUSTERMSG_TYPE_UPDATE: return "update"; case CLUSTERMSG_TYPE_MFSTART: return "mfstart"; + case CLUSTERMSG_TYPE_MODULE: return "module"; } return "unknown"; } @@ -4105,7 +4193,34 @@ void clusterCommand(client *c) { return; } - if (!strcasecmp(c->argv[1]->ptr,"meet") && (c->argc == 4 || c->argc == 5)) { + if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"help")) { + const char *help[] = { +"ADDSLOTS [slot ...] 
-- Assign slots to current node.",
+"BUMPEPOCH -- Advance the cluster config epoch.",
+"COUNT-failure-reports <node-id> -- Return number of failure reports for <node-id>.",
+"COUNTKEYSINSLOT <slot> - Return the number of keys in <slot>.",
+"DELSLOTS <slot> [slot ...] -- Delete slots information from current node.",
+"FAILOVER [force|takeover] -- Promote current replica node to being a master.",
+"FORGET <node-id> -- Remove a node from the cluster.",
+"GETKEYSINSLOT <slot> <count> -- Return key names stored by current node in a slot.",
+"FLUSHSLOTS -- Delete current node own slots information.",
+"INFO - Return information about the cluster.",
+"KEYSLOT <key> -- Return the hash slot for <key>.",
+"MEET <ip> <port> [bus-port] -- Connect nodes into a working cluster.",
+"MYID -- Return the node id.",
+"NODES -- Return cluster configuration seen by node. Output format:",
+"    <id> <ip:port> <flags> <master> <pings> <pongs> <epoch> <link> <slot> ... <slot>",
+"REPLICATE <node-id> -- Configure current node as replica to <node-id>.",
+"RESET [hard|soft] -- Reset current node (default: soft).",
+"SET-config-epoch <epoch> - Set config epoch of current node.",
+"SETSLOT <slot> (importing|migrating|stable|node <node-id>) -- Set slot state.",
+"REPLICAS <node-id> -- Return <node-id> replicas.",
+"SLOTS -- Return information about slots range mappings. Each range is made of:",
+"    start, end, master and replicas IP addresses, ports and ids",
+NULL
+        };
+        addReplyHelp(c, help);
+    } else if (!strcasecmp(c->argv[1]->ptr,"meet") && (c->argc == 4 || c->argc == 5)) {
        /* CLUSTER MEET <ip> <port> [cport] */
        long long port, cport;
@@ -4298,7 +4413,7 @@ void clusterCommand(client *c) {
            clusterAddSlot(n,slot);
        } else {
            addReplyError(c,
-                "Invalid CLUSTER SETSLOT action or number of arguments");
+                "Invalid CLUSTER SETSLOT action or number of arguments. Try CLUSTER HELP");
            return;
        }
        clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG|CLUSTER_TODO_UPDATE_STATE);
@@ -4476,7 +4591,7 @@ void clusterCommand(client *c) {

        /* Can't replicate a slave. */
        if (nodeIsSlave(n)) {
-            addReplyError(c,"I can only replicate a master, not a slave.");
+            addReplyError(c,"I can only replicate a master, not a replica.");
            return;
        }
@@ -4495,7 +4610,8 @@ void clusterCommand(client *c) {
        clusterSetMaster(n);
        clusterDoBeforeSleep(CLUSTER_TODO_UPDATE_STATE|CLUSTER_TODO_SAVE_CONFIG);
        addReply(c,shared.ok);
-    } else if (!strcasecmp(c->argv[1]->ptr,"slaves") && c->argc == 3) {
+    } else if ((!strcasecmp(c->argv[1]->ptr,"slaves") ||
+                !strcasecmp(c->argv[1]->ptr,"replicas")) && c->argc == 3) {
        /* CLUSTER SLAVES <NODE ID> */
        clusterNode *n = clusterLookupNode(c->argv[2]->ptr);
        int j;
@@ -4549,10 +4665,10 @@ void clusterCommand(client *c) {
        /* Check preconditions.
*/ if (nodeIsMaster(myself)) { - addReplyError(c,"You should send CLUSTER FAILOVER to a slave"); + addReplyError(c,"You should send CLUSTER FAILOVER to a replica"); return; } else if (myself->slaveof == NULL) { - addReplyError(c,"I'm a slave but my master is unknown to me"); + addReplyError(c,"I'm a replica but my master is unknown to me"); return; } else if (!force && (nodeFailed(myself->slaveof) || @@ -4648,7 +4764,8 @@ void clusterCommand(client *c) { clusterReset(hard); addReply(c,shared.ok); } else { - addReplyError(c,"Wrong CLUSTER subcommand or number of arguments"); + addReplySubcommandSyntaxError(c); + return; } } @@ -4735,15 +4852,39 @@ void dumpCommand(client *c) { /* RESTORE key ttl serialized-value [REPLACE] */ void restoreCommand(client *c) { - long long ttl; + long long ttl, lfu_freq = -1, lru_idle = -1, lru_clock = -1; rio payload; - int j, type, replace = 0; + int j, type, replace = 0, absttl = 0; robj *obj; /* Parse additional options */ for (j = 4; j < c->argc; j++) { + int additional = c->argc-j-1; if (!strcasecmp(c->argv[j]->ptr,"replace")) { replace = 1; + } else if (!strcasecmp(c->argv[j]->ptr,"absttl")) { + absttl = 1; + } else if (!strcasecmp(c->argv[j]->ptr,"idletime") && additional >= 1 && + lfu_freq == -1) + { + if (getLongLongFromObjectOrReply(c,c->argv[j+1],&lru_idle,NULL) + != C_OK) return; + if (lru_idle < 0) { + addReplyError(c,"Invalid IDLETIME value, must be >= 0"); + return; + } + lru_clock = LRU_CLOCK(); + j++; /* Consume additional arg. */ + } else if (!strcasecmp(c->argv[j]->ptr,"freq") && additional >= 1 && + lru_idle == -1) + { + if (getLongLongFromObjectOrReply(c,c->argv[j+1],&lfu_freq,NULL) + != C_OK) return; + if (lfu_freq < 0 || lfu_freq > 255) { + addReplyError(c,"Invalid FREQ value, must be >= 0 and <= 255"); + return; + } + j++; /* Consume additional arg. */ } else { addReply(c,shared.syntaxerr); return; @@ -4784,7 +4925,11 @@ void restoreCommand(client *c) { /* Create the key and set the TTL if any */ dbAdd(c->db,c->argv[1],obj); - if (ttl) setExpire(c,c->db,c->argv[1],mstime()+ttl); + if (ttl) { + if (!absttl) ttl+=mstime(); + setExpire(c,c->db,c->argv[1],ttl); + } + objectSetLRUOrLFU(obj,lfu_freq,lru_idle,lru_clock); signalModifiedKey(c->db,c->argv[1]); addReply(c,shared.ok); server.dirty++; @@ -5019,6 +5164,11 @@ void migrateCommand(client *c) { serverAssertWithInfo(c,NULL,rioWriteBulkLongLong(&cmd,dbid)); } + int expired = 0; /* Number of keys that we'll find already expired. + Note that serializing large keys may take some time + so certain keys that were found non expired by the + lookupKey() function, may be expired later. */ + /* Create RESTORE payload and generate the protocol to call the command. */ for (j = 0; j < num_keys; j++) { long long ttl = 0; @@ -5026,6 +5176,10 @@ void migrateCommand(client *c) { if (expireat != -1) { ttl = expireat-mstime(); + if (ttl < 0) { + expired++; + continue; + } if (ttl < 1) ttl = 1; } serverAssertWithInfo(c,NULL, @@ -5090,9 +5244,13 @@ void migrateCommand(client *c) { int socket_error = 0; int del_idx = 1; /* Index of the key argument for the replicated DEL op. */ + /* Allocate the new argument vector that will replace the current command, + * to propagate the MIGRATE as a DEL command (if no COPY option was given). + * We allocate num_keys+1 because the additional argument is for "DEL" + * command name itself. 
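migrateCommand() above counts keys whose TTL turned negative between lookup and serialization and skips them, while a TTL that rounds down to zero is clamped to 1 ms so the destination does not treat it as "no expire". A small sketch of that decision; ttl_for_migration() is an invented helper name:

#include <stdio.h>
#include <stdint.h>

typedef int64_t mstime_t;

/* Returns 1 when the key should be serialized and sent, 0 when it is
 * already expired and must be skipped (bumping the 'expired' counter). */
int ttl_for_migration(mstime_t expireat, mstime_t now,
                      long long *ttl, int *expired) {
    *ttl = 0;
    if (expireat != -1) {                 /* -1 means the key never expires */
        *ttl = expireat - now;
        if (*ttl < 0) { (*expired)++; return 0; }
        if (*ttl < 1) *ttl = 1;           /* clamp zero to 1 ms */
    }
    return 1;
}

int main(void) {
    long long ttl; int expired = 0;
    if (!ttl_for_migration(999, 1000, &ttl, &expired))
        printf("skipped, expired=%d\n", expired);   /* already expired */
    return 0;
}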
*/ if (!copy) newargv = zmalloc(sizeof(robj*)*(num_keys+1)); - for (j = 0; j < num_keys; j++) { + for (j = 0; j < num_keys-expired; j++) { if (syncReadLine(cs->fd, buf2, sizeof(buf2), timeout) <= 0) { socket_error = 1; break; @@ -5290,9 +5448,17 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in multiCmd mc; int i, slot = 0, migrating_slot = 0, importing_slot = 0, missing_keys = 0; + /* Allow any key to be set if a module disabled cluster redirections. */ + if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) + return myself; + /* Set error code optimistically for the base case. */ if (error_code) *error_code = CLUSTER_REDIR_NONE; + /* Modules can turn off Redis Cluster redirection: this is useful + * when writing a module that implements a completely different + * distributed system. */ + /* We handle all the cases as if they were EXEC commands, so we have * a common code path for everything */ if (cmd->proc == execCommand) { @@ -5457,7 +5623,7 @@ void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_co if (error_code == CLUSTER_REDIR_CROSS_SLOT) { addReplySds(c,sdsnew("-CROSSSLOT Keys in request don't hash to the same slot\r\n")); } else if (error_code == CLUSTER_REDIR_UNSTABLE) { - /* The request spawns mutliple keys in the same slot, + /* The request spawns multiple keys in the same slot, * but the slot is not "stable" currently as there is * a migration or import in progress. */ addReplySds(c,sdsnew("-TRYAGAIN Multiple keys request during rehashing of slot\r\n")); @@ -5489,7 +5655,11 @@ void clusterRedirectClient(client *c, clusterNode *n, int hashslot, int error_co * longer handles, the client is sent a redirection error, and the function * returns 1. Otherwise 0 is returned and no operation is performed. */ int clusterRedirectBlockedClientIfNeeded(client *c) { - if (c->flags & CLIENT_BLOCKED && c->btype == BLOCKED_LIST) { + if (c->flags & CLIENT_BLOCKED && + (c->btype == BLOCKED_LIST || + c->btype == BLOCKED_ZSET || + c->btype == BLOCKED_STREAM)) + { dictEntry *de; dictIterator *di; diff --git a/redis-android/src/main/jni/redis-4.0.11/src/cluster.h b/redis-android/src/main/jni/redis-5.0.0/src/cluster.h similarity index 93% rename from redis-android/src/main/jni/redis-4.0.11/src/cluster.h rename to redis-android/src/main/jni/redis-5.0.0/src/cluster.h index f2b9a4e..571b9c5 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/cluster.h +++ b/redis-android/src/main/jni/redis-5.0.0/src/cluster.h @@ -97,7 +97,15 @@ typedef struct clusterLink { #define CLUSTERMSG_TYPE_FAILOVER_AUTH_ACK 6 /* Yes, you have my vote */ #define CLUSTERMSG_TYPE_UPDATE 7 /* Another node slots configuration */ #define CLUSTERMSG_TYPE_MFSTART 8 /* Pause clients for manual failover */ -#define CLUSTERMSG_TYPE_COUNT 9 /* Total number of message types. */ +#define CLUSTERMSG_TYPE_MODULE 9 /* Module cluster API message. */ +#define CLUSTERMSG_TYPE_COUNT 10 /* Total number of message types. */ + +/* Flags that a module can set in order to prevent certain Redis Cluster + * features to be enabled. Useful when implementing a different distributed + * system on top of Redis Cluster message bus, using modules. */ +#define CLUSTER_MODULE_FLAG_NONE 0 +#define CLUSTER_MODULE_FLAG_NO_FAILOVER (1<<1) +#define CLUSTER_MODULE_FLAG_NO_REDIRECTION (1<<2) /* This structure represent elements of node->fail_reports. 
*/
typedef struct clusterNodeFailReport {
@@ -195,10 +203,7 @@ typedef struct {
     uint32_t channel_len;
     uint32_t message_len;
-    /* We can't reclare bulk_data as bulk_data[] since this structure is
-     * nested. The 8 bytes are removed from the count during the message
-     * length computation. */
-    unsigned char bulk_data[8];
+    unsigned char bulk_data[8]; /* 8 bytes just as placeholder. */
 } clusterMsgDataPublish;

 typedef struct {
@@ -207,6 +212,13 @@ typedef struct {
     unsigned char slots[CLUSTER_SLOTS/8]; /* Slots bitmap. */
 } clusterMsgDataUpdate;

+typedef struct {
+    uint64_t module_id;     /* ID of the sender module. */
+    uint32_t len;           /* Length of the payload. */
+    uint8_t type;           /* Type from 0 to 255. */
+    unsigned char bulk_data[3]; /* 3 bytes just as placeholder. */
+} clusterMsgModule;
+
 union clusterMsgData {
     /* PING, MEET and PONG */
     struct {
@@ -228,12 +240,17 @@ union clusterMsgData {
     struct {
         clusterMsgDataUpdate nodecfg;
     } update;
+
+    /* MODULE */
+    struct {
+        clusterMsgModule msg;
+    } module;
 };

 #define CLUSTER_PROTO_VER 1 /* Cluster bus protocol version. */

 typedef struct {
-    char sig[4];        /* Siganture "RCmb" (Redis Cluster message bus). */
+    char sig[4];        /* Signature "RCmb" (Redis Cluster message bus). */
     uint32_t totlen;    /* Total length of this message */
     uint16_t ver;       /* Protocol version, currently set to 1. */
     uint16_t port;      /* TCP base port number. */
diff --git a/redis-android/src/main/jni/redis-4.0.11/src/config.c b/redis-android/src/main/jni/redis-5.0.0/src/config.c
similarity index 86%
rename from redis-android/src/main/jni/redis-4.0.11/src/config.c
rename to redis-android/src/main/jni/redis-5.0.0/src/config.c
index e283e29..86548bc 100644
--- a/redis-android/src/main/jni/redis-4.0.11/src/config.c
+++ b/redis-android/src/main/jni/redis-5.0.0/src/config.c
@@ -344,15 +344,19 @@ void loadServerConfigFromString(char *config) {
             err = "lfu-decay-time must be 0 or greater";
             goto loaderr;
         }
-    } else if (!strcasecmp(argv[0],"slaveof") && argc == 3) {
+    } else if ((!strcasecmp(argv[0],"slaveof") ||
+                !strcasecmp(argv[0],"replicaof")) && argc == 3) {
         slaveof_linenum = linenum;
         server.masterhost = sdsnew(argv[1]);
         server.masterport = atoi(argv[2]);
         server.repl_state = REPL_STATE_CONNECT;
-    } else if (!strcasecmp(argv[0],"repl-ping-slave-period") && argc == 2) {
+    } else if ((!strcasecmp(argv[0],"repl-ping-slave-period") ||
+                !strcasecmp(argv[0],"repl-ping-replica-period")) &&
+                argc == 2)
+    {
         server.repl_ping_slave_period = atoi(argv[1]);
         if (server.repl_ping_slave_period <= 0) {
-            err = "repl-ping-slave-period must be 1 or greater";
+            err = "repl-ping-replica-period must be 1 or greater";
             goto loaderr;
         }
     } else if (!strcasecmp(argv[0],"repl-timeout") && argc == 2) {
@@ -390,15 +394,28 @@ void loadServerConfigFromString(char *config) {
         }
     } else if (!strcasecmp(argv[0],"masterauth") && argc == 2) {
         zfree(server.masterauth);
-        server.masterauth = zstrdup(argv[1]);
-    } else if (!strcasecmp(argv[0],"slave-serve-stale-data") && argc == 2) {
+        server.masterauth = argv[1][0] ?
zstrdup(argv[1]) : NULL; + } else if ((!strcasecmp(argv[0],"slave-serve-stale-data") || + !strcasecmp(argv[0],"replica-serve-stale-data")) + && argc == 2) + { if ((server.repl_serve_stale_data = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } - } else if (!strcasecmp(argv[0],"slave-read-only") && argc == 2) { + } else if ((!strcasecmp(argv[0],"slave-read-only") || + !strcasecmp(argv[0],"replica-read-only")) + && argc == 2) + { if ((server.repl_slave_ro = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } + } else if ((!strcasecmp(argv[0],"slave-ignore-maxmemory") || + !strcasecmp(argv[0],"replica-ignore-maxmemory")) + && argc == 2) + { + if ((server.repl_slave_ignore_maxmemory = yesnotoi(argv[1])) == -1) { + err = "argument must be 'yes' or 'no'"; goto loaderr; + } } else if (!strcasecmp(argv[0],"rdbcompression") && argc == 2) { if ((server.rdb_compression = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; @@ -423,7 +440,9 @@ void loadServerConfigFromString(char *config) { if ((server.lazyfree_lazy_server_del = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } - } else if (!strcasecmp(argv[0],"slave-lazy-flush") && argc == 2) { + } else if ((!strcasecmp(argv[0],"slave-lazy-flush") || + !strcasecmp(argv[0],"replica-lazy-flush")) && argc == 2) + { if ((server.repl_slave_lazy_flush = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } @@ -431,14 +450,23 @@ void loadServerConfigFromString(char *config) { if ((server.active_defrag_enabled = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } + if (server.active_defrag_enabled) { +#ifndef HAVE_DEFRAG + err = "active defrag can't be enabled without proper jemalloc support"; goto loaderr; +#endif + } } else if (!strcasecmp(argv[0],"daemonize") && argc == 2) { if ((server.daemonize = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } + } else if (!strcasecmp(argv[0],"dynamic-hz") && argc == 2) { + if ((server.dynamic_hz = yesnotoi(argv[1])) == -1) { + err = "argument must be 'yes' or 'no'"; goto loaderr; + } } else if (!strcasecmp(argv[0],"hz") && argc == 2) { - server.hz = atoi(argv[1]); - if (server.hz < CONFIG_MIN_HZ) server.hz = CONFIG_MIN_HZ; - if (server.hz > CONFIG_MAX_HZ) server.hz = CONFIG_MAX_HZ; + server.config_hz = atoi(argv[1]); + if (server.config_hz < CONFIG_MIN_HZ) server.config_hz = CONFIG_MIN_HZ; + if (server.config_hz > CONFIG_MAX_HZ) server.config_hz = CONFIG_MAX_HZ; } else if (!strcasecmp(argv[0],"appendonly") && argc == 2) { int yes; @@ -483,6 +511,13 @@ void loadServerConfigFromString(char *config) { yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } + } else if (!strcasecmp(argv[0],"rdb-save-incremental-fsync") && + argc == 2) + { + if ((server.rdb_save_incremental_fsync = + yesnotoi(argv[1])) == -1) { + err = "argument must be 'yes' or 'no'"; goto loaderr; + } } else if (!strcasecmp(argv[0],"aof-load-truncated") && argc == 2) { if ((server.aof_load_truncated = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; @@ -496,7 +531,7 @@ void loadServerConfigFromString(char *config) { err = "Password is longer than CONFIG_AUTHPASS_MAX_LEN"; goto loaderr; } - server.requirepass = zstrdup(argv[1]); + server.requirepass = argv[1][0] ? 
zstrdup(argv[1]) : NULL; } else if (!strcasecmp(argv[0],"pidfile") && argc == 2) { zfree(server.pidfile); server.pidfile = zstrdup(argv[1]); @@ -509,14 +544,16 @@ void loadServerConfigFromString(char *config) { server.rdb_filename = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"active-defrag-threshold-lower") && argc == 2) { server.active_defrag_threshold_lower = atoi(argv[1]); - if (server.active_defrag_threshold_lower < 0) { - err = "active-defrag-threshold-lower must be 0 or greater"; + if (server.active_defrag_threshold_lower < 0 || + server.active_defrag_threshold_lower > 1000) { + err = "active-defrag-threshold-lower must be between 0 and 1000"; goto loaderr; } } else if (!strcasecmp(argv[0],"active-defrag-threshold-upper") && argc == 2) { server.active_defrag_threshold_upper = atoi(argv[1]); - if (server.active_defrag_threshold_upper < 0) { - err = "active-defrag-threshold-upper must be 0 or greater"; + if (server.active_defrag_threshold_upper < 0 || + server.active_defrag_threshold_upper > 1000) { + err = "active-defrag-threshold-upper must be between 0 and 1000"; goto loaderr; } } else if (!strcasecmp(argv[0],"active-defrag-ignore-bytes") && argc == 2) { @@ -537,10 +574,20 @@ void loadServerConfigFromString(char *config) { err = "active-defrag-cycle-max must be between 1 and 99"; goto loaderr; } + } else if (!strcasecmp(argv[0],"active-defrag-max-scan-fields") && argc == 2) { + server.active_defrag_max_scan_fields = strtoll(argv[1],NULL,10); + if (server.active_defrag_max_scan_fields < 1) { + err = "active-defrag-max-scan-fields must be positive"; + goto loaderr; + } } else if (!strcasecmp(argv[0],"hash-max-ziplist-entries") && argc == 2) { server.hash_max_ziplist_entries = memtoll(argv[1], NULL); } else if (!strcasecmp(argv[0],"hash-max-ziplist-value") && argc == 2) { server.hash_max_ziplist_value = memtoll(argv[1], NULL); + } else if (!strcasecmp(argv[0],"stream-node-max-bytes") && argc == 2) { + server.stream_node_max_bytes = memtoll(argv[1], NULL); + } else if (!strcasecmp(argv[0],"stream-node-max-entries") && argc == 2) { + server.stream_node_max_entries = atoi(argv[1]); } else if (!strcasecmp(argv[0],"list-max-ziplist-entries") && argc == 2){ /* DEAD OPTION */ } else if (!strcasecmp(argv[0],"list-max-ziplist-value") && argc == 2) { @@ -627,15 +674,17 @@ void loadServerConfigFromString(char *config) { err = "cluster migration barrier must be zero or positive"; goto loaderr; } - } else if (!strcasecmp(argv[0],"cluster-slave-validity-factor") + } else if ((!strcasecmp(argv[0],"cluster-slave-validity-factor") || + !strcasecmp(argv[0],"cluster-replica-validity-factor")) && argc == 2) { server.cluster_slave_validity_factor = atoi(argv[1]); if (server.cluster_slave_validity_factor < 0) { - err = "cluster slave validity factor must be zero or positive"; + err = "cluster replica validity factor must be zero or positive"; goto loaderr; } - } else if (!strcasecmp(argv[0],"cluster-slave-no-failover") && + } else if ((!strcasecmp(argv[0],"cluster-slave-no-failover") || + !strcasecmp(argv[0],"cluster-replica-no-failover")) && argc == 2) { server.cluster_slave_no_failover = yesnotoi(argv[1]); @@ -645,6 +694,8 @@ void loadServerConfigFromString(char *config) { } } else if (!strcasecmp(argv[0],"lua-time-limit") && argc == 2) { server.lua_time_limit = strtoll(argv[1],NULL,10); + } else if (!strcasecmp(argv[0],"lua-replicate-commands") && argc == 2) { + server.lua_always_replicate_commands = yesnotoi(argv[1]); } else if (!strcasecmp(argv[0],"slowlog-log-slower-than") && argc == 2) { @@
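The two defrag thresholds above gain an upper bound: instead of accepting any non-negative value they are now confined to 0..1000 (they are fragmentation percentages, and fragmentation can exceed 100%). A minimal stand-alone sketch of that guard; the function name is illustrative, not part of the patch:

/* Accept only values in [0, 1000]; return an error string the caller can
 * report via the loaderr path, or NULL when the value is valid. */
static const char *check_defrag_threshold(int v) {
    if (v < 0 || v > 1000)
        return "active-defrag-threshold-* must be between 0 and 1000";
    return NULL;
}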
-686,27 +737,37 @@ void loadServerConfigFromString(char *config) { if ((server.stop_writes_on_bgsave_err = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } - } else if (!strcasecmp(argv[0],"slave-priority") && argc == 2) { + } else if ((!strcasecmp(argv[0],"slave-priority") || + !strcasecmp(argv[0],"replica-priority")) && argc == 2) + { server.slave_priority = atoi(argv[1]); - } else if (!strcasecmp(argv[0],"slave-announce-ip") && argc == 2) { + } else if ((!strcasecmp(argv[0],"slave-announce-ip") || + !strcasecmp(argv[0],"replica-announce-ip")) && argc == 2) + { zfree(server.slave_announce_ip); server.slave_announce_ip = zstrdup(argv[1]); - } else if (!strcasecmp(argv[0],"slave-announce-port") && argc == 2) { + } else if ((!strcasecmp(argv[0],"slave-announce-port") || + !strcasecmp(argv[0],"replica-announce-port")) && argc == 2) + { server.slave_announce_port = atoi(argv[1]); if (server.slave_announce_port < 0 || server.slave_announce_port > 65535) { err = "Invalid port"; goto loaderr; } - } else if (!strcasecmp(argv[0],"min-slaves-to-write") && argc == 2) { + } else if ((!strcasecmp(argv[0],"min-slaves-to-write") || + !strcasecmp(argv[0],"min-replicas-to-write")) && argc == 2) + { server.repl_min_slaves_to_write = atoi(argv[1]); if (server.repl_min_slaves_to_write < 0) { - err = "Invalid value for min-slaves-to-write."; goto loaderr; + err = "Invalid value for min-replicas-to-write."; goto loaderr; } - } else if (!strcasecmp(argv[0],"min-slaves-max-lag") && argc == 2) { + } else if ((!strcasecmp(argv[0],"min-slaves-max-lag") || + !strcasecmp(argv[0],"min-replicas-max-lag")) && argc == 2) + { server.repl_min_slaves_max_lag = atoi(argv[1]); if (server.repl_min_slaves_max_lag < 0) { - err = "Invalid value for min-slaves-max-lag."; goto loaderr; + err = "Invalid value for min-replicas-max-lag."; goto loaderr; } } else if (!strcasecmp(argv[0],"notify-keyspace-events") && argc == 2) { int flags = keyspaceEventsStringToFlags(argv[1]); @@ -748,7 +809,7 @@ void loadServerConfigFromString(char *config) { if (server.cluster_enabled && server.masterhost) { linenum = slaveof_linenum; i = linenum-1; - err = "slaveof directive not allowed in cluster mode"; + err = "replicaof directive not allowed in cluster mode"; goto loaderr; } @@ -832,6 +893,10 @@ void loadServerConfig(char *filename, char *options) { #define config_set_special_field(_name) \ } else if (!strcasecmp(c->argv[2]->ptr,_name)) { +#define config_set_special_field_with_alias(_name1,_name2) \ + } else if (!strcasecmp(c->argv[2]->ptr,_name1) || \ + !strcasecmp(c->argv[2]->ptr,_name2)) { + #define config_set_else } else void configSetCommand(client *c) { @@ -991,7 +1056,9 @@ void configSetCommand(client *c) { if (flags == -1) goto badfmt; server.notify_keyspace_events = flags; - } config_set_special_field("slave-announce-ip") { + } config_set_special_field_with_alias("slave-announce-ip", + "replica-announce-ip") + { zfree(server.slave_announce_ip); server.slave_announce_ip = ((char*)o->ptr)[0] ? 
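The config_set_special_field_with_alias macro defined above simply chains two case-insensitive comparisons, so one setting answers to both its legacy "slave" spelling and the new "replica" spelling. A sketch of the test the macro expands to (the helper name is illustrative):

#include <strings.h>

/* A parameter matches when the client used either spelling. */
static int option_matches(const char *arg, const char *oldname, const char *newname) {
    return !strcasecmp(arg, oldname) || !strcasecmp(arg, newname);
}

/* e.g. option_matches(argv[2], "slave-announce-ip", "replica-announce-ip") */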
zstrdup(o->ptr) : NULL; @@ -1007,16 +1074,28 @@ void configSetCommand(client *c) { "cluster-require-full-coverage",server.cluster_require_full_coverage) { } config_set_bool_field( "cluster-slave-no-failover",server.cluster_slave_no_failover) { + } config_set_bool_field( + "cluster-replica-no-failover",server.cluster_slave_no_failover) { } config_set_bool_field( "aof-rewrite-incremental-fsync",server.aof_rewrite_incremental_fsync) { + } config_set_bool_field( + "rdb-save-incremental-fsync",server.rdb_save_incremental_fsync) { } config_set_bool_field( "aof-load-truncated",server.aof_load_truncated) { } config_set_bool_field( "aof-use-rdb-preamble",server.aof_use_rdb_preamble) { } config_set_bool_field( "slave-serve-stale-data",server.repl_serve_stale_data) { + } config_set_bool_field( + "replica-serve-stale-data",server.repl_serve_stale_data) { } config_set_bool_field( "slave-read-only",server.repl_slave_ro) { + } config_set_bool_field( + "replica-read-only",server.repl_slave_ro) { + } config_set_bool_field( + "slave-ignore-maxmemory",server.repl_slave_ignore_maxmemory) { + } config_set_bool_field( + "replica-ignore-maxmemory",server.repl_slave_ignore_maxmemory) { } config_set_bool_field( "activerehashing",server.activerehashing) { } config_set_bool_field( @@ -1025,9 +1104,10 @@ void configSetCommand(client *c) { if (server.active_defrag_enabled) { server.active_defrag_enabled = 0; addReplyError(c, - "Active defragmentation cannot be enabled: it requires a " - "Redis server compiled with a modified Jemalloc like the " - "one shipped by default with the Redis source distribution"); + "-DISABLED Active defragmentation cannot be enabled: it " + "requires a Redis server compiled with a modified Jemalloc " + "like the one shipped by default with the Redis source " + "distribution"); return; } #endif @@ -1043,21 +1123,25 @@ void configSetCommand(client *c) { "lazyfree-lazy-server-del",server.lazyfree_lazy_server_del) { } config_set_bool_field( "slave-lazy-flush",server.repl_slave_lazy_flush) { + } config_set_bool_field( + "replica-lazy-flush",server.repl_slave_lazy_flush) { } config_set_bool_field( "no-appendfsync-on-rewrite",server.aof_no_fsync_on_rewrite) { + } config_set_bool_field( + "dynamic-hz",server.dynamic_hz) { /* Numerical fields. 
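All the boolean fields in this hunk are parsed through yesnotoi(), whose contract explains the recurring "argument must be 'yes' or 'no'" error: 1 for "yes", 0 for "no", -1 for anything else. A sketch of that helper as used here:

#include <strings.h>

/* Map "yes"/"no" to 1/0; anything else yields -1 so the caller can
 * reject the value with a syntax error. */
static int yesnotoi(const char *s) {
    if (!strcasecmp(s, "yes")) return 1;
    if (!strcasecmp(s, "no")) return 0;
    return -1;
}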
* config_set_numerical_field(name,var,min,max) */ } config_set_numerical_field( - "tcp-keepalive",server.tcpkeepalive,0,LLONG_MAX) { + "tcp-keepalive",server.tcpkeepalive,0,INT_MAX) { } config_set_numerical_field( - "maxmemory-samples",server.maxmemory_samples,1,LLONG_MAX) { + "maxmemory-samples",server.maxmemory_samples,1,INT_MAX) { } config_set_numerical_field( - "lfu-log-factor",server.lfu_log_factor,0,LLONG_MAX) { + "lfu-log-factor",server.lfu_log_factor,0,INT_MAX) { } config_set_numerical_field( - "lfu-decay-time",server.lfu_decay_time,0,LLONG_MAX) { + "lfu-decay-time",server.lfu_decay_time,0,INT_MAX) { } config_set_numerical_field( - "timeout",server.maxidletime,0,LONG_MAX) { + "timeout",server.maxidletime,0,INT_MAX) { } config_set_numerical_field( "active-defrag-threshold-lower",server.active_defrag_threshold_lower,0,1000) { } config_set_numerical_field( @@ -1069,50 +1153,68 @@ void configSetCommand(client *c) { } config_set_numerical_field( "active-defrag-cycle-max",server.active_defrag_cycle_max,1,99) { } config_set_numerical_field( - "auto-aof-rewrite-percentage",server.aof_rewrite_perc,0,LLONG_MAX){ + "active-defrag-max-scan-fields",server.active_defrag_max_scan_fields,1,LONG_MAX) { + } config_set_numerical_field( + "auto-aof-rewrite-percentage",server.aof_rewrite_perc,0,INT_MAX){ } config_set_numerical_field( - "hash-max-ziplist-entries",server.hash_max_ziplist_entries,0,LLONG_MAX) { + "hash-max-ziplist-entries",server.hash_max_ziplist_entries,0,LONG_MAX) { } config_set_numerical_field( - "hash-max-ziplist-value",server.hash_max_ziplist_value,0,LLONG_MAX) { + "hash-max-ziplist-value",server.hash_max_ziplist_value,0,LONG_MAX) { + } config_set_numerical_field( + "stream-node-max-bytes",server.stream_node_max_bytes,0,LONG_MAX) { + } config_set_numerical_field( + "stream-node-max-entries",server.stream_node_max_entries,0,LLONG_MAX) { } config_set_numerical_field( "list-max-ziplist-size",server.list_max_ziplist_size,INT_MIN,INT_MAX) { } config_set_numerical_field( "list-compress-depth",server.list_compress_depth,0,INT_MAX) { } config_set_numerical_field( - "set-max-intset-entries",server.set_max_intset_entries,0,LLONG_MAX) { + "set-max-intset-entries",server.set_max_intset_entries,0,LONG_MAX) { } config_set_numerical_field( - "zset-max-ziplist-entries",server.zset_max_ziplist_entries,0,LLONG_MAX) { + "zset-max-ziplist-entries",server.zset_max_ziplist_entries,0,LONG_MAX) { } config_set_numerical_field( - "zset-max-ziplist-value",server.zset_max_ziplist_value,0,LLONG_MAX) { + "zset-max-ziplist-value",server.zset_max_ziplist_value,0,LONG_MAX) { } config_set_numerical_field( - "hll-sparse-max-bytes",server.hll_sparse_max_bytes,0,LLONG_MAX) { + "hll-sparse-max-bytes",server.hll_sparse_max_bytes,0,LONG_MAX) { } config_set_numerical_field( - "lua-time-limit",server.lua_time_limit,0,LLONG_MAX) { + "lua-time-limit",server.lua_time_limit,0,LONG_MAX) { } config_set_numerical_field( - "slowlog-log-slower-than",server.slowlog_log_slower_than,0,LLONG_MAX) { + "slowlog-log-slower-than",server.slowlog_log_slower_than,-1,LLONG_MAX) { } config_set_numerical_field( - "slowlog-max-len",ll,0,LLONG_MAX) { + "slowlog-max-len",ll,0,LONG_MAX) { /* Cast to unsigned. 
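Note the pattern running through this hunk: upper bounds shrink from LLONG_MAX to INT_MAX or LONG_MAX so that the accepted range matches the C type of the destination field, rather than truncating on assignment. A sketch of the guard, assuming config_set_numerical_field reduces to a simple range check:

#include <limits.h>

/* Reject values outside [min, max] before storing them. */
static int in_range(long long v, long long min, long long max) {
    return v >= min && v <= max;
}

/* An int-typed field such as tcp-keepalive is now bounded by INT_MAX. */
static int fits_int_field(long long v) { return in_range(v, 0, INT_MAX); }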
*/ - server.slowlog_max_len = (unsigned)ll; + server.slowlog_max_len = (unsigned long)ll; } config_set_numerical_field( "latency-monitor-threshold",server.latency_monitor_threshold,0,LLONG_MAX){ } config_set_numerical_field( - "repl-ping-slave-period",server.repl_ping_slave_period,1,LLONG_MAX) { + "repl-ping-slave-period",server.repl_ping_slave_period,1,INT_MAX) { + } config_set_numerical_field( + "repl-ping-replica-period",server.repl_ping_slave_period,1,INT_MAX) { + } config_set_numerical_field( + "repl-timeout",server.repl_timeout,1,INT_MAX) { } config_set_numerical_field( - "repl-timeout",server.repl_timeout,1,LLONG_MAX) { + "repl-backlog-ttl",server.repl_backlog_time_limit,0,LONG_MAX) { } config_set_numerical_field( - "repl-backlog-ttl",server.repl_backlog_time_limit,0,LLONG_MAX) { + "repl-diskless-sync-delay",server.repl_diskless_sync_delay,0,INT_MAX) { } config_set_numerical_field( - "repl-diskless-sync-delay",server.repl_diskless_sync_delay,0,LLONG_MAX) { + "slave-priority",server.slave_priority,0,INT_MAX) { } config_set_numerical_field( - "slave-priority",server.slave_priority,0,LLONG_MAX) { + "replica-priority",server.slave_priority,0,INT_MAX) { } config_set_numerical_field( "slave-announce-port",server.slave_announce_port,0,65535) { } config_set_numerical_field( - "min-slaves-to-write",server.repl_min_slaves_to_write,0,LLONG_MAX) { + "replica-announce-port",server.slave_announce_port,0,65535) { + } config_set_numerical_field( + "min-slaves-to-write",server.repl_min_slaves_to_write,0,INT_MAX) { + refreshGoodSlavesCount(); + } config_set_numerical_field( + "min-replicas-to-write",server.repl_min_slaves_to_write,0,INT_MAX) { refreshGoodSlavesCount(); } config_set_numerical_field( - "min-slaves-max-lag",server.repl_min_slaves_max_lag,0,LLONG_MAX) { + "min-slaves-max-lag",server.repl_min_slaves_max_lag,0,INT_MAX) { + refreshGoodSlavesCount(); + } config_set_numerical_field( + "min-replicas-max-lag",server.repl_min_slaves_max_lag,0,INT_MAX) { refreshGoodSlavesCount(); } config_set_numerical_field( "cluster-node-timeout",server.cluster_node_timeout,0,LLONG_MAX) { @@ -1121,17 +1223,19 @@ void configSetCommand(client *c) { } config_set_numerical_field( "cluster-announce-bus-port",server.cluster_announce_bus_port,0,65535) { } config_set_numerical_field( - "cluster-migration-barrier",server.cluster_migration_barrier,0,LLONG_MAX){ + "cluster-migration-barrier",server.cluster_migration_barrier,0,INT_MAX){ } config_set_numerical_field( - "cluster-slave-validity-factor",server.cluster_slave_validity_factor,0,LLONG_MAX) { + "cluster-slave-validity-factor",server.cluster_slave_validity_factor,0,INT_MAX) { } config_set_numerical_field( - "hz",server.hz,0,LLONG_MAX) { + "cluster-replica-validity-factor",server.cluster_slave_validity_factor,0,INT_MAX) { + } config_set_numerical_field( + "hz",server.config_hz,0,INT_MAX) { /* Hz is more an hint from the user, so we accept values out of range * but cap them to reasonable values. 
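Each replica-* alias registered above writes to the same server field as its slave-* counterpart (for example replica-priority and slave-priority both target server.slave_priority), so setting either name updates one value. A sketch of that shared-storage idea; the struct and names are illustrative:

#include <stdio.h>

struct cfg { int slave_priority; };

/* Both names read the same storage; only the label differs. */
static void report_both(const struct cfg *c) {
    printf("slave-priority -> %d\n", c->slave_priority);
    printf("replica-priority -> %d\n", c->slave_priority);
}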
*/ - if (server.hz < CONFIG_MIN_HZ) server.hz = CONFIG_MIN_HZ; - if (server.hz > CONFIG_MAX_HZ) server.hz = CONFIG_MAX_HZ; + if (server.config_hz < CONFIG_MIN_HZ) server.config_hz = CONFIG_MIN_HZ; + if (server.config_hz > CONFIG_MAX_HZ) server.config_hz = CONFIG_MAX_HZ; } config_set_numerical_field( - "watchdog-period",ll,0,LLONG_MAX) { + "watchdog-period",ll,0,INT_MAX) { if (ll) enableWatchdog(ll); else @@ -1142,7 +1246,7 @@ void configSetCommand(client *c) { } config_set_memory_field("maxmemory",server.maxmemory) { if (server.maxmemory) { if (server.maxmemory < zmalloc_used_memory()) { - serverLog(LL_WARNING,"WARNING: the new maxmemory value set via CONFIG SET is smaller than the current memory usage. This will result in keys eviction and/or inability to accept new write commands depending on the maxmemory-policy."); + serverLog(LL_WARNING,"WARNING: the new maxmemory value set via CONFIG SET is smaller than the current memory usage. This will result in key eviction and/or the inability to accept new write commands depending on the maxmemory-policy."); } freeMemoryIfNeeded(); } @@ -1235,6 +1339,7 @@ void configGetCommand(client *c) { config_get_string_field("logfile",server.logfile); config_get_string_field("pidfile",server.pidfile); config_get_string_field("slave-announce-ip",server.slave_announce_ip); + config_get_string_field("replica-announce-ip",server.slave_announce_ip); /* Numerical values */ config_get_numerical_field("maxmemory",server.maxmemory); @@ -1249,6 +1354,7 @@ void configGetCommand(client *c) { config_get_numerical_field("active-defrag-ignore-bytes",server.active_defrag_ignore_bytes); config_get_numerical_field("active-defrag-cycle-min",server.active_defrag_cycle_min); config_get_numerical_field("active-defrag-cycle-max",server.active_defrag_cycle_max); + config_get_numerical_field("active-defrag-max-scan-fields",server.active_defrag_max_scan_fields); config_get_numerical_field("auto-aof-rewrite-percentage", server.aof_rewrite_perc); config_get_numerical_field("auto-aof-rewrite-min-size", @@ -1257,6 +1363,10 @@ void configGetCommand(client *c) { server.hash_max_ziplist_entries); config_get_numerical_field("hash-max-ziplist-value", server.hash_max_ziplist_value); + config_get_numerical_field("stream-node-max-bytes", + server.stream_node_max_bytes); + config_get_numerical_field("stream-node-max-entries", + server.stream_node_max_entries); config_get_numerical_field("list-max-ziplist-size", server.list_max_ziplist_size); config_get_numerical_field("list-compress-depth", @@ -1282,19 +1392,25 @@ void configGetCommand(client *c) { config_get_numerical_field("tcp-backlog",server.tcp_backlog); config_get_numerical_field("databases",server.dbnum); config_get_numerical_field("repl-ping-slave-period",server.repl_ping_slave_period); + config_get_numerical_field("repl-ping-replica-period",server.repl_ping_slave_period); config_get_numerical_field("repl-timeout",server.repl_timeout); config_get_numerical_field("repl-backlog-size",server.repl_backlog_size); config_get_numerical_field("repl-backlog-ttl",server.repl_backlog_time_limit); config_get_numerical_field("maxclients",server.maxclients); config_get_numerical_field("watchdog-period",server.watchdog_period); config_get_numerical_field("slave-priority",server.slave_priority); + config_get_numerical_field("replica-priority",server.slave_priority); config_get_numerical_field("slave-announce-port",server.slave_announce_port); + config_get_numerical_field("replica-announce-port",server.slave_announce_port); 
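The hz handler earlier in this hunk accepts out-of-range values and caps them rather than failing, since the frequency is only a hint; the same clamp runs at config-load time. A sketch of the clamp (CONFIG_MIN_HZ and CONFIG_MAX_HZ are 1 and 500 in the Redis tree; treat those numbers as assumptions here):

#define CONFIG_MIN_HZ 1
#define CONFIG_MAX_HZ 500

/* Cap the user-supplied frequency into the supported window. */
static int clamp_hz(int hz) {
    if (hz < CONFIG_MIN_HZ) return CONFIG_MIN_HZ;
    if (hz > CONFIG_MAX_HZ) return CONFIG_MAX_HZ;
    return hz;
}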
config_get_numerical_field("min-slaves-to-write",server.repl_min_slaves_to_write); + config_get_numerical_field("min-replicas-to-write",server.repl_min_slaves_to_write); config_get_numerical_field("min-slaves-max-lag",server.repl_min_slaves_max_lag); - config_get_numerical_field("hz",server.hz); + config_get_numerical_field("min-replicas-max-lag",server.repl_min_slaves_max_lag); + config_get_numerical_field("hz",server.config_hz); config_get_numerical_field("cluster-node-timeout",server.cluster_node_timeout); config_get_numerical_field("cluster-migration-barrier",server.cluster_migration_barrier); config_get_numerical_field("cluster-slave-validity-factor",server.cluster_slave_validity_factor); + config_get_numerical_field("cluster-replica-validity-factor",server.cluster_slave_validity_factor); config_get_numerical_field("repl-diskless-sync-delay",server.repl_diskless_sync_delay); config_get_numerical_field("tcp-keepalive",server.tcpkeepalive); @@ -1303,12 +1419,22 @@ void configGetCommand(client *c) { server.cluster_require_full_coverage); config_get_bool_field("cluster-slave-no-failover", server.cluster_slave_no_failover); + config_get_bool_field("cluster-replica-no-failover", + server.cluster_slave_no_failover); config_get_bool_field("no-appendfsync-on-rewrite", server.aof_no_fsync_on_rewrite); config_get_bool_field("slave-serve-stale-data", server.repl_serve_stale_data); + config_get_bool_field("replica-serve-stale-data", + server.repl_serve_stale_data); config_get_bool_field("slave-read-only", server.repl_slave_ro); + config_get_bool_field("replica-read-only", + server.repl_slave_ro); + config_get_bool_field("slave-ignore-maxmemory", + server.repl_slave_ignore_maxmemory); + config_get_bool_field("replica-ignore-maxmemory", + server.repl_slave_ignore_maxmemory); config_get_bool_field("stop-writes-on-bgsave-error", server.stop_writes_on_bgsave_err); config_get_bool_field("daemonize", server.daemonize); @@ -1323,6 +1449,8 @@ void configGetCommand(client *c) { server.repl_diskless_sync); config_get_bool_field("aof-rewrite-incremental-fsync", server.aof_rewrite_incremental_fsync); + config_get_bool_field("rdb-save-incremental-fsync", + server.rdb_save_incremental_fsync); config_get_bool_field("aof-load-truncated", server.aof_load_truncated); config_get_bool_field("aof-use-rdb-preamble", @@ -1335,6 +1463,10 @@ void configGetCommand(client *c) { server.lazyfree_lazy_server_del); config_get_bool_field("slave-lazy-flush", server.repl_slave_lazy_flush); + config_get_bool_field("replica-lazy-flush", + server.repl_slave_lazy_flush); + config_get_bool_field("dynamic-hz", + server.dynamic_hz); /* Enum values */ config_get_enum_field("maxmemory-policy", @@ -1406,10 +1538,14 @@ void configGetCommand(client *c) { addReplyBulkCString(c,buf); matches++; } - if (stringmatch(pattern,"slaveof",1)) { + if (stringmatch(pattern,"slaveof",1) || + stringmatch(pattern,"replicaof",1)) + { + char *optname = stringmatch(pattern,"slaveof",1) ? + "slaveof" : "replicaof"; char buf[256]; - addReplyBulkCString(c,"slaveof"); + addReplyBulkCString(c,optname); if (server.masterhost) snprintf(buf,sizeof(buf),"%s %d", server.masterhost, server.masterport); @@ -1565,8 +1701,20 @@ struct rewriteConfigState *rewriteConfigReadOldFile(char *path) { /* Now we populate the state according to the content of this line. * Append the line and populate the option -> line numbers map. 
*/ rewriteConfigAppendLine(state,line); - rewriteConfigAddLineNumberToOption(state,argv[0],linenum); + /* Translate options using the word "slave" to the corresponding name + * "replica", before adding such option to the config name -> lines + * mapping. */ + char *p = strstr(argv[0],"slave"); + if (p) { + sds alt = sdsempty(); + alt = sdscatlen(alt,argv[0],p-argv[0]);; + alt = sdscatlen(alt,"replica",7); + alt = sdscatlen(alt,p+5,strlen(p+5)); + sdsfree(argv[0]); + argv[0] = alt; + } + rewriteConfigAddLineNumberToOption(state,argv[0],linenum); sdsfreesplitres(argv,argc); } fclose(fp); @@ -1753,15 +1901,14 @@ void rewriteConfigDirOption(struct rewriteConfigState *state) { } /* Rewrite the slaveof option. */ -void rewriteConfigSlaveofOption(struct rewriteConfigState *state) { - char *option = "slaveof"; +void rewriteConfigSlaveofOption(struct rewriteConfigState *state, char *option) { sds line; /* If this is a master, we want all the slaveof config options * in the file to be removed. Note that if this is a cluster instance * we don't want a slaveof directive inside redis.conf. */ if (server.cluster_enabled || server.masterhost == NULL) { - rewriteConfigMarkAsProcessed(state,"slaveof"); + rewriteConfigMarkAsProcessed(state,option); return; } line = sdscatprintf(sdsempty(),"%s %s %d", option, @@ -1803,8 +1950,10 @@ void rewriteConfigClientoutputbufferlimitOption(struct rewriteConfigState *state rewriteConfigFormatMemory(soft,sizeof(soft), server.client_obuf_limits[j].soft_limit_bytes); + char *typename = getClientTypeName(j); + if (!strcmp(typename,"slave")) typename = "replica"; line = sdscatprintf(sdsempty(),"%s %s %s %s %ld", - option, getClientTypeName(j), hard, soft, + option, typename, hard, soft, (long) server.client_obuf_limits[j].soft_limit_seconds); rewriteConfigRewriteLine(state,option,line,force); } @@ -1982,7 +2131,7 @@ int rewriteConfig(char *path) { rewriteConfigOctalOption(state,"unixsocketperm",server.unixsocketperm,CONFIG_DEFAULT_UNIX_SOCKET_PERM); rewriteConfigNumericalOption(state,"timeout",server.maxidletime,CONFIG_DEFAULT_CLIENT_TIMEOUT); rewriteConfigNumericalOption(state,"tcp-keepalive",server.tcpkeepalive,CONFIG_DEFAULT_TCP_KEEPALIVE); - rewriteConfigNumericalOption(state,"slave-announce-port",server.slave_announce_port,CONFIG_DEFAULT_SLAVE_ANNOUNCE_PORT); + rewriteConfigNumericalOption(state,"replica-announce-port",server.slave_announce_port,CONFIG_DEFAULT_SLAVE_ANNOUNCE_PORT); rewriteConfigEnumOption(state,"loglevel",server.verbosity,loglevel_enum,CONFIG_DEFAULT_VERBOSITY); rewriteConfigStringOption(state,"logfile",server.logfile,CONFIG_DEFAULT_LOGFILE); rewriteConfigYesNoOption(state,"syslog-enabled",server.syslog_enabled,CONFIG_DEFAULT_SYSLOG_ENABLED); @@ -1995,22 +2144,23 @@ int rewriteConfig(char *path) { rewriteConfigYesNoOption(state,"rdbchecksum",server.rdb_checksum,CONFIG_DEFAULT_RDB_CHECKSUM); rewriteConfigStringOption(state,"dbfilename",server.rdb_filename,CONFIG_DEFAULT_RDB_FILENAME); rewriteConfigDirOption(state); - rewriteConfigSlaveofOption(state); - rewriteConfigStringOption(state,"slave-announce-ip",server.slave_announce_ip,CONFIG_DEFAULT_SLAVE_ANNOUNCE_IP); + rewriteConfigSlaveofOption(state,"replicaof"); + rewriteConfigStringOption(state,"replica-announce-ip",server.slave_announce_ip,CONFIG_DEFAULT_SLAVE_ANNOUNCE_IP); rewriteConfigStringOption(state,"masterauth",server.masterauth,NULL); rewriteConfigStringOption(state,"cluster-announce-ip",server.cluster_announce_ip,NULL); - 
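The strstr-based translation above is what lets CONFIG REWRITE emit the new option names even for files written with the old ones: any occurrence of "slave" inside an option name is spliced out and replaced by "replica" before the name is indexed. A stand-alone sketch using plain libc strings in place of sds:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Rebuild an option name with "slave" (5 chars) replaced by "replica"
 * (7 chars); returns a heap copy either way. */
static char *rename_option(const char *opt) {
    const char *p = strstr(opt, "slave");
    if (!p) return strdup(opt);
    size_t head = (size_t)(p - opt);
    char *alt = malloc(head + 7 + strlen(p + 5) + 1);
    memcpy(alt, opt, head);           /* prefix before "slave" */
    memcpy(alt + head, "replica", 7); /* substituted word      */
    strcpy(alt + head + 7, p + 5);    /* suffix after "slave"  */
    return alt;
}

int main(void) {
    char *s = rename_option("min-slaves-to-write");
    printf("%s\n", s); /* prints: min-replicas-to-write */
    free(s);
    return 0;
}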
rewriteConfigYesNoOption(state,"slave-serve-stale-data",server.repl_serve_stale_data,CONFIG_DEFAULT_SLAVE_SERVE_STALE_DATA); - rewriteConfigYesNoOption(state,"slave-read-only",server.repl_slave_ro,CONFIG_DEFAULT_SLAVE_READ_ONLY); - rewriteConfigNumericalOption(state,"repl-ping-slave-period",server.repl_ping_slave_period,CONFIG_DEFAULT_REPL_PING_SLAVE_PERIOD); + rewriteConfigYesNoOption(state,"replica-serve-stale-data",server.repl_serve_stale_data,CONFIG_DEFAULT_SLAVE_SERVE_STALE_DATA); + rewriteConfigYesNoOption(state,"replica-read-only",server.repl_slave_ro,CONFIG_DEFAULT_SLAVE_READ_ONLY); + rewriteConfigYesNoOption(state,"replica-ignore-maxmemory",server.repl_slave_ignore_maxmemory,CONFIG_DEFAULT_SLAVE_IGNORE_MAXMEMORY); + rewriteConfigNumericalOption(state,"repl-ping-replica-period",server.repl_ping_slave_period,CONFIG_DEFAULT_REPL_PING_SLAVE_PERIOD); rewriteConfigNumericalOption(state,"repl-timeout",server.repl_timeout,CONFIG_DEFAULT_REPL_TIMEOUT); rewriteConfigBytesOption(state,"repl-backlog-size",server.repl_backlog_size,CONFIG_DEFAULT_REPL_BACKLOG_SIZE); rewriteConfigBytesOption(state,"repl-backlog-ttl",server.repl_backlog_time_limit,CONFIG_DEFAULT_REPL_BACKLOG_TIME_LIMIT); rewriteConfigYesNoOption(state,"repl-disable-tcp-nodelay",server.repl_disable_tcp_nodelay,CONFIG_DEFAULT_REPL_DISABLE_TCP_NODELAY); rewriteConfigYesNoOption(state,"repl-diskless-sync",server.repl_diskless_sync,CONFIG_DEFAULT_REPL_DISKLESS_SYNC); rewriteConfigNumericalOption(state,"repl-diskless-sync-delay",server.repl_diskless_sync_delay,CONFIG_DEFAULT_REPL_DISKLESS_SYNC_DELAY); - rewriteConfigNumericalOption(state,"slave-priority",server.slave_priority,CONFIG_DEFAULT_SLAVE_PRIORITY); - rewriteConfigNumericalOption(state,"min-slaves-to-write",server.repl_min_slaves_to_write,CONFIG_DEFAULT_MIN_SLAVES_TO_WRITE); - rewriteConfigNumericalOption(state,"min-slaves-max-lag",server.repl_min_slaves_max_lag,CONFIG_DEFAULT_MIN_SLAVES_MAX_LAG); + rewriteConfigNumericalOption(state,"replica-priority",server.slave_priority,CONFIG_DEFAULT_SLAVE_PRIORITY); + rewriteConfigNumericalOption(state,"min-replicas-to-write",server.repl_min_slaves_to_write,CONFIG_DEFAULT_MIN_SLAVES_TO_WRITE); + rewriteConfigNumericalOption(state,"min-replicas-max-lag",server.repl_min_slaves_max_lag,CONFIG_DEFAULT_MIN_SLAVES_MAX_LAG); rewriteConfigStringOption(state,"requirepass",server.requirepass,NULL); rewriteConfigNumericalOption(state,"maxclients",server.maxclients,CONFIG_DEFAULT_MAX_CLIENTS); rewriteConfigBytesOption(state,"maxmemory",server.maxmemory,CONFIG_DEFAULT_MAXMEMORY); @@ -2025,6 +2175,7 @@ int rewriteConfig(char *path) { rewriteConfigBytesOption(state,"active-defrag-ignore-bytes",server.active_defrag_ignore_bytes,CONFIG_DEFAULT_DEFRAG_IGNORE_BYTES); rewriteConfigNumericalOption(state,"active-defrag-cycle-min",server.active_defrag_cycle_min,CONFIG_DEFAULT_DEFRAG_CYCLE_MIN); rewriteConfigNumericalOption(state,"active-defrag-cycle-max",server.active_defrag_cycle_max,CONFIG_DEFAULT_DEFRAG_CYCLE_MAX); + rewriteConfigNumericalOption(state,"active-defrag-max-scan-fields",server.active_defrag_max_scan_fields,CONFIG_DEFAULT_DEFRAG_MAX_SCAN_FIELDS); rewriteConfigYesNoOption(state,"appendonly",server.aof_state != AOF_OFF,0); rewriteConfigStringOption(state,"appendfilename",server.aof_filename,CONFIG_DEFAULT_AOF_FILENAME); rewriteConfigEnumOption(state,"appendfsync",server.aof_fsync,aof_fsync_enum,CONFIG_DEFAULT_AOF_FSYNC); @@ -2035,16 +2186,18 @@ int rewriteConfig(char *path) { 
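The client-output-buffer-limit rewrite above swaps only the displayed class name: getClientTypeName() still reports "slave" internally, and the string is substituted at the moment the line is emitted. A sketch of that substitution (the helper name is illustrative):

#include <string.h>

/* Render the internal client class name with the modern spelling. */
static const char *display_class(const char *name) {
    return strcmp(name, "slave") == 0 ? "replica" : name;
}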
rewriteConfigYesNoOption(state,"cluster-enabled",server.cluster_enabled,0); rewriteConfigStringOption(state,"cluster-config-file",server.cluster_configfile,CONFIG_DEFAULT_CLUSTER_CONFIG_FILE); rewriteConfigYesNoOption(state,"cluster-require-full-coverage",server.cluster_require_full_coverage,CLUSTER_DEFAULT_REQUIRE_FULL_COVERAGE); - rewriteConfigYesNoOption(state,"cluster-slave-no-failover",server.cluster_slave_no_failover,CLUSTER_DEFAULT_SLAVE_NO_FAILOVER); + rewriteConfigYesNoOption(state,"cluster-replica-no-failover",server.cluster_slave_no_failover,CLUSTER_DEFAULT_SLAVE_NO_FAILOVER); rewriteConfigNumericalOption(state,"cluster-node-timeout",server.cluster_node_timeout,CLUSTER_DEFAULT_NODE_TIMEOUT); rewriteConfigNumericalOption(state,"cluster-migration-barrier",server.cluster_migration_barrier,CLUSTER_DEFAULT_MIGRATION_BARRIER); - rewriteConfigNumericalOption(state,"cluster-slave-validity-factor",server.cluster_slave_validity_factor,CLUSTER_DEFAULT_SLAVE_VALIDITY); + rewriteConfigNumericalOption(state,"cluster-replica-validity-factor",server.cluster_slave_validity_factor,CLUSTER_DEFAULT_SLAVE_VALIDITY); rewriteConfigNumericalOption(state,"slowlog-log-slower-than",server.slowlog_log_slower_than,CONFIG_DEFAULT_SLOWLOG_LOG_SLOWER_THAN); rewriteConfigNumericalOption(state,"latency-monitor-threshold",server.latency_monitor_threshold,CONFIG_DEFAULT_LATENCY_MONITOR_THRESHOLD); rewriteConfigNumericalOption(state,"slowlog-max-len",server.slowlog_max_len,CONFIG_DEFAULT_SLOWLOG_MAX_LEN); rewriteConfigNotifykeyspaceeventsOption(state); rewriteConfigNumericalOption(state,"hash-max-ziplist-entries",server.hash_max_ziplist_entries,OBJ_HASH_MAX_ZIPLIST_ENTRIES); rewriteConfigNumericalOption(state,"hash-max-ziplist-value",server.hash_max_ziplist_value,OBJ_HASH_MAX_ZIPLIST_VALUE); + rewriteConfigNumericalOption(state,"stream-node-max-bytes",server.stream_node_max_bytes,OBJ_STREAM_NODE_MAX_BYTES); + rewriteConfigNumericalOption(state,"stream-node-max-entries",server.stream_node_max_entries,OBJ_STREAM_NODE_MAX_ENTRIES); rewriteConfigNumericalOption(state,"list-max-ziplist-size",server.list_max_ziplist_size,OBJ_LIST_MAX_ZIPLIST_SIZE); rewriteConfigNumericalOption(state,"list-compress-depth",server.list_compress_depth,OBJ_LIST_COMPRESS_DEPTH); rewriteConfigNumericalOption(state,"set-max-intset-entries",server.set_max_intset_entries,OBJ_SET_MAX_INTSET_ENTRIES); @@ -2055,15 +2208,17 @@ int rewriteConfig(char *path) { rewriteConfigYesNoOption(state,"activedefrag",server.active_defrag_enabled,CONFIG_DEFAULT_ACTIVE_DEFRAG); rewriteConfigYesNoOption(state,"protected-mode",server.protected_mode,CONFIG_DEFAULT_PROTECTED_MODE); rewriteConfigClientoutputbufferlimitOption(state); - rewriteConfigNumericalOption(state,"hz",server.hz,CONFIG_DEFAULT_HZ); + rewriteConfigNumericalOption(state,"hz",server.config_hz,CONFIG_DEFAULT_HZ); rewriteConfigYesNoOption(state,"aof-rewrite-incremental-fsync",server.aof_rewrite_incremental_fsync,CONFIG_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC); + rewriteConfigYesNoOption(state,"rdb-save-incremental-fsync",server.rdb_save_incremental_fsync,CONFIG_DEFAULT_RDB_SAVE_INCREMENTAL_FSYNC); rewriteConfigYesNoOption(state,"aof-load-truncated",server.aof_load_truncated,CONFIG_DEFAULT_AOF_LOAD_TRUNCATED); rewriteConfigYesNoOption(state,"aof-use-rdb-preamble",server.aof_use_rdb_preamble,CONFIG_DEFAULT_AOF_USE_RDB_PREAMBLE); rewriteConfigEnumOption(state,"supervised",server.supervised_mode,supervised_mode_enum,SUPERVISED_NONE); 
rewriteConfigYesNoOption(state,"lazyfree-lazy-eviction",server.lazyfree_lazy_eviction,CONFIG_DEFAULT_LAZYFREE_LAZY_EVICTION); rewriteConfigYesNoOption(state,"lazyfree-lazy-expire",server.lazyfree_lazy_expire,CONFIG_DEFAULT_LAZYFREE_LAZY_EXPIRE); rewriteConfigYesNoOption(state,"lazyfree-lazy-server-del",server.lazyfree_lazy_server_del,CONFIG_DEFAULT_LAZYFREE_LAZY_SERVER_DEL); - rewriteConfigYesNoOption(state,"slave-lazy-flush",server.repl_slave_lazy_flush,CONFIG_DEFAULT_SLAVE_LAZY_FLUSH); + rewriteConfigYesNoOption(state,"replica-lazy-flush",server.repl_slave_lazy_flush,CONFIG_DEFAULT_SLAVE_LAZY_FLUSH); + rewriteConfigYesNoOption(state,"dynamic-hz",server.dynamic_hz,CONFIG_DEFAULT_DYNAMIC_HZ); /* Rewrite Sentinel config if in Sentinel mode. */ if (server.sentinel_mode) rewriteConfigSentinelOption(state); @@ -2094,19 +2249,24 @@ void configCommand(client *c) { return; } - if (!strcasecmp(c->argv[1]->ptr,"set")) { - if (c->argc != 4) goto badarity; + if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"help")) { + const char *help[] = { +"GET -- Return parameters matching the glob-like and their values.", +"SET -- Set parameter to value.", +"RESETSTAT -- Reset statistics reported by INFO.", +"REWRITE -- Rewrite the configuration file.", +NULL + }; + addReplyHelp(c, help); + } else if (!strcasecmp(c->argv[1]->ptr,"set") && c->argc == 4) { configSetCommand(c); - } else if (!strcasecmp(c->argv[1]->ptr,"get")) { - if (c->argc != 3) goto badarity; + } else if (!strcasecmp(c->argv[1]->ptr,"get") && c->argc == 3) { configGetCommand(c); - } else if (!strcasecmp(c->argv[1]->ptr,"resetstat")) { - if (c->argc != 2) goto badarity; + } else if (!strcasecmp(c->argv[1]->ptr,"resetstat") && c->argc == 2) { resetServerStats(); resetCommandTableStats(); addReply(c,shared.ok); - } else if (!strcasecmp(c->argv[1]->ptr,"rewrite")) { - if (c->argc != 2) goto badarity; + } else if (!strcasecmp(c->argv[1]->ptr,"rewrite") && c->argc == 2) { if (server.configfile == NULL) { addReplyError(c,"The server is running without a config file"); return; @@ -2119,12 +2279,7 @@ void configCommand(client *c) { addReply(c,shared.ok); } } else { - addReplyError(c, - "CONFIG subcommand must be one of GET, SET, RESETSTAT, REWRITE"); + addReplySubcommandSyntaxError(c); + return; } - return; - -badarity: - addReplyErrorFormat(c,"Wrong number of arguments for CONFIG %s", - (char*) c->argv[1]->ptr); } diff --git a/redis-android/src/main/jni/redis-4.0.11/src/config.h b/redis-android/src/main/jni/redis-5.0.0/src/config.h similarity index 91% rename from redis-android/src/main/jni/redis-4.0.11/src/config.h rename to redis-android/src/main/jni/redis-5.0.0/src/config.h index 612b4e1..9ad12ba 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/config.h +++ b/redis-android/src/main/jni/redis-5.0.0/src/config.h @@ -30,10 +30,6 @@ #ifndef __CONFIG_H #define __CONFIG_H -#ifdef __ANDROID__ -#include "../redis-android.h" -#endif - #ifdef __APPLE__ #include #endif @@ -91,33 +87,33 @@ #endif #endif -/* Define aof_fsync to fdatasync() in Linux and fsync() for all the rest */ +/* Define redis_fsync to fdatasync() in Linux and fsync() for all the rest */ #ifdef __linux__ -#define aof_fsync fdatasync +#define redis_fsync fdatasync #else -#define aof_fsync fsync +#define redis_fsync fsync #endif /* Define rdb_fsync_range to sync_file_range() on Linux, otherwise we use * the plain fsync() call. 
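The rename from aof_fsync to redis_fsync in config.h reflects that the macro is no longer AOF-specific (RDB writing uses it too, via rdb-save-incremental-fsync). Reproducing the macro stand-alone: on Linux fdatasync() skips the metadata flush that a full fsync() performs.

#include <unistd.h>

#ifdef __linux__
#define redis_fsync fdatasync
#else
#define redis_fsync fsync
#endif

/* Flush a descriptor's data to stable storage. */
static int flush_fd(int fd) { return redis_fsync(fd); }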
*/ #ifndef __ANDROID__ -#ifdef __linux__ -#if defined(__GLIBC__) && defined(__GLIBC_PREREQ) -#if (LINUX_VERSION_CODE >= 0x020611 && __GLIBC_PREREQ(2, 6)) -#define HAVE_SYNC_FILE_RANGE 1 -#endif -#else -#if (LINUX_VERSION_CODE >= 0x020611) -#define HAVE_SYNC_FILE_RANGE 1 -#endif -#endif -#endif + #ifdef __linux__ + #if defined(__GLIBC__) && defined(__GLIBC_PREREQ) + #if (LINUX_VERSION_CODE >= 0x020611 && __GLIBC_PREREQ(2, 6)) + #define HAVE_SYNC_FILE_RANGE 1 + #endif + #else + #if (LINUX_VERSION_CODE >= 0x020611) + #define HAVE_SYNC_FILE_RANGE 1 + #endif + #endif + #endif #endif #ifdef HAVE_SYNC_FILE_RANGE -#define rdb_fsync_range(fd,off,size) sync_file_range(fd,off,size,SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE) + #define rdb_fsync_range(fd,off,size) sync_file_range(fd,off,size,SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE) #else -#define rdb_fsync_range(fd,off,size) fsync(fd) + #define rdb_fsync_range(fd,off,size) fsync(fd) #endif /* Check if we can use setproctitle(). diff --git a/redis-android/src/main/jni/redis-4.0.11/src/crc16.c b/redis-android/src/main/jni/redis-5.0.0/src/crc16.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/crc16.c rename to redis-android/src/main/jni/redis-5.0.0/src/crc16.c diff --git a/redis-android/src/main/jni/redis-4.0.11/src/crc64.c b/redis-android/src/main/jni/redis-5.0.0/src/crc64.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/crc64.c rename to redis-android/src/main/jni/redis-5.0.0/src/crc64.c diff --git a/redis-android/src/main/jni/redis-4.0.11/src/crc64.h b/redis-android/src/main/jni/redis-5.0.0/src/crc64.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/crc64.h rename to redis-android/src/main/jni/redis-5.0.0/src/crc64.h diff --git a/redis-android/src/main/jni/redis-4.0.11/src/db.c b/redis-android/src/main/jni/redis-5.0.0/src/db.c similarity index 94% rename from redis-android/src/main/jni/redis-4.0.11/src/db.c rename to redis-android/src/main/jni/redis-5.0.0/src/db.c index 8449ee3..3e21a10 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/db.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/db.c @@ -30,6 +30,7 @@ #include "server.h" #include "cluster.h" #include "atomicvar.h" +#include "redis-android.h" #include #include @@ -90,7 +91,7 @@ robj *lookupKey(redisDb *db, robj *key, int flags) { * LOOKUP_NONE (or zero): no special flags are passed. * LOOKUP_NOTOUCH: don't alter the last access time of the key. * - * Note: this function also returns NULL is the key is logically expired + * Note: this function also returns NULL if the key is logically expired * but still existing, in case this is a slave, since this API is called only * for read operations. Even if the key expiry is master-driven, we can * correctly report a key is expired on slaves even if the master is lagging @@ -113,7 +114,7 @@ robj *lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) { * safety measure, the command invoked is a read-only command, we can * safely return NULL here, and provide a more consistent behavior * to clients accessign expired values in a read-only fashion, that - * will say the key as non exisitng. + * will say the key as non existing. * * Notably this covers GETs when slaves are used to scale reads. 
*/ if (server.current_client && @@ -169,9 +170,11 @@ void dbAdd(redisDb *db, robj *key, robj *val) { int retval = dictAdd(db->dict, copy, val); serverAssertWithInfo(NULL,key,retval == DICT_OK); - if (val->type == OBJ_LIST) signalListAsReady(db, key); + if (val->type == OBJ_LIST || + val->type == OBJ_ZSET) + signalKeyAsReady(db, key); if (server.cluster_enabled) slotToKeyAdd(key); - } +} /* Overwrite an existing key with a new value. Incrementing the reference * count of the new value is up to the caller. @@ -182,17 +185,19 @@ void dbOverwrite(redisDb *db, robj *key, robj *val) { dictEntry *de = dictFind(db->dict,key->ptr); serverAssertWithInfo(NULL,key,de != NULL); + dictEntry auxentry = *de; + robj *old = dictGetVal(de); if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) { - robj *old = dictGetVal(de); - int saved_lru = old->lru; - dictReplace(db->dict, key->ptr, val); - val->lru = saved_lru; - /* LFU should be not only copied but also updated - * when a key is overwritten. */ - updateLFU(val); - } else { - dictReplace(db->dict, key->ptr, val); + val->lru = old->lru; + } + dictSetVal(db->dict, de, val); + + if (server.lazyfree_lazy_server_del) { + freeObjAsync(old); + dictSetVal(db->dict, &auxentry, NULL); } + + dictFreeVal(db->dict, &auxentry); } /* High level Set operation. This function can be used in order to set @@ -319,7 +324,7 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o) { * If callback is given the function is called from time to time to * signal that work is in progress. * - * The dbnum can be -1 if all teh DBs should be flushed, or the specified + * The dbnum can be -1 if all the DBs should be flushed, or the specified * DB number if we want to flush only a single Redis database number. * * Flags are be EMPTYDB_NO_FLAGS if no special flags are specified or @@ -330,7 +335,7 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o) { * database(s). Otherwise -1 is returned in the specific case the * DB number is out of range, and errno is set to EINVAL. */ long long emptyDb(int dbnum, int flags, void(callback)(void*)) { - int j, async = (flags & EMPTYDB_ASYNC); + int async = (flags & EMPTYDB_ASYNC); long long removed = 0; if (dbnum < -1 || dbnum >= server.dbnum) { @@ -338,8 +343,15 @@ long long emptyDb(int dbnum, int flags, void(callback)(void*)) { return -1; } - for (j = 0; j < server.dbnum; j++) { - if (dbnum != -1 && dbnum != j) continue; + int startdb, enddb; + if (dbnum == -1) { + startdb = 0; + enddb = server.dbnum-1; + } else { + startdb = enddb = dbnum; + } + + for (int j = startdb; j <= enddb; j++) { removed += dictSize(server.db[j].dict); if (async) { emptyDbAsync(&server.db[j]); @@ -810,6 +822,7 @@ void typeCommand(client *c) { case OBJ_SET: type = "set"; break; case OBJ_ZSET: type = "zset"; break; case OBJ_HASH: type = "hash"; break; + case OBJ_STREAM: type = "stream"; break; case OBJ_MODULE: { moduleValue *mv = o->ptr; type = mv->type->name; @@ -969,17 +982,19 @@ void moveCommand(client *c) { } /* Helper function for dbSwapDatabases(): scans the list of keys that have - * one or more blocked clients for B[LR]POP or other list blocking commands - * and signal the keys are ready if they are lists. See the comment where - * the function is used for more info. */ + * one or more blocked clients for B[LR]POP or other blocking commands + * and signal the keys as ready if they are of the right type. See the comment + * where the function is used for more info. 
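The reworked emptyDb() above replaces the old "loop over every DB and skip" pattern with an explicit inclusive range, so flushing a single database no longer touches the others at all. A sketch of the range selection (the function name is illustrative):

/* -1 selects every database; any other value selects exactly one. */
static void select_db_range(int dbnum, int total, int *startdb, int *enddb) {
    if (dbnum == -1) {
        *startdb = 0;
        *enddb = total - 1;
    } else {
        *startdb = *enddb = dbnum;
    }
}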
*/ void scanDatabaseForReadyLists(redisDb *db) { dictEntry *de; dictIterator *di = dictGetSafeIterator(db->blocking_keys); while((de = dictNext(di)) != NULL) { robj *key = dictGetKey(de); robj *value = lookupKey(db,key,LOOKUP_NOTOUCH); - if (value && value->type == OBJ_LIST) - signalListAsReady(db, key); + if (value && (value->type == OBJ_LIST || + value->type == OBJ_STREAM || + value->type == OBJ_ZSET)) + signalKeyAsReady(db, key); } dictReleaseIterator(di); } @@ -1198,7 +1213,7 @@ int *getKeysUsingCommandTable(struct redisCommand *cmd,robj **argv, int argc, in for (j = cmd->firstkey; j <= last; j += cmd->keystep) { if (j >= argc) { /* Modules commands, and standard commands with a not fixed number - * of arugments (negative arity parameter) do not have dispatch + * of arguments (negative arity parameter) do not have dispatch * time arity checks, so we need to handle the case where the user * passed an invalid number of arguments here. In this case we * return no keys and expect the command implementation to report @@ -1253,7 +1268,7 @@ int *zunionInterGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *nu num = atoi(argv[2]->ptr); /* Sanity check. Don't return any key if the command is going to * reply with syntax error. */ - if (num > (argc-3)) { + if (num < 1 || num > (argc-3)) { *numkeys = 0; return NULL; } @@ -1282,7 +1297,7 @@ int *evalGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numkeys) num = atoi(argv[2]->ptr); /* Sanity check. Don't return any key if the command is going to * reply with syntax error. */ - if (num > (argc-3)) { + if (num <= 0 || num > (argc-3)) { *numkeys = 0; return NULL; } @@ -1385,7 +1400,7 @@ int *georadiusGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numk for (i = 5; i < argc; i++) { char *arg = argv[i]->ptr; /* For the case when user specifies both "store" and "storedist" options, the - * second key specified would override the first key. This behavior is kept + * second key specified would override the first key. This behavior is kept * the same as in georadiusCommand method. */ if ((!strcasecmp(arg, "store") || !strcasecmp(arg, "storedist")) && ((i+1) < argc)) { @@ -1406,7 +1421,51 @@ int *georadiusGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numk if(num > 1) { keys[1] = stored_key; } - *numkeys = num; + *numkeys = num; + return keys; +} + +/* XREAD [BLOCK ] [COUNT ] [GROUP ] + * STREAMS key_1 key_2 ... key_N ID_1 ID_2 ... ID_N */ +int *xreadGetKeys(struct redisCommand *cmd, robj **argv, int argc, int *numkeys) { + int i, num = 0, *keys; + UNUSED(cmd); + + /* We need to parse the options of the command in order to seek the first + * "STREAMS" string which is actually the option. This is needed because + * "STREAMS" could also be the name of the consumer group and even the + * name of the stream key. */ + int streams_pos = -1; + for (i = 1; i < argc; i++) { + char *arg = argv[i]->ptr; + if (!strcasecmp(arg, "block")) { + i++; /* Skip option argument. */ + } else if (!strcasecmp(arg, "count")) { + i++; /* Skip option argument. */ + } else if (!strcasecmp(arg, "group")) { + i += 2; /* Skip option argument. */ + } else if (!strcasecmp(arg, "noack")) { + /* Nothing to do. */ + } else if (!strcasecmp(arg, "streams")) { + streams_pos = i; + break; + } else { + break; /* Syntax error. */ + } + } + if (streams_pos != -1) num = argc - streams_pos - 1; + + /* Syntax error. 
*/ + if (streams_pos == -1 || num == 0 || num % 2 != 0) { + *numkeys = 0; + return NULL; + } + num /= 2; /* We have half the keys as there are arguments because + there are also the IDs, one per key. */ + + keys = zmalloc(sizeof(int) * num); + for (i = streams_pos+1; i < argc-num; i++) keys[i-streams_pos-1] = i; + *numkeys = num; return keys; } diff --git a/redis-android/src/main/jni/redis-4.0.11/src/debug.c b/redis-android/src/main/jni/redis-5.0.0/src/debug.c similarity index 90% rename from redis-android/src/main/jni/redis-4.0.11/src/debug.c rename to redis-android/src/main/jni/redis-5.0.0/src/debug.c index d145102..8cc53d9 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/debug.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/debug.c @@ -239,6 +239,27 @@ void computeDatasetDigest(unsigned char *final) { xorDigest(digest,eledigest,20); } hashTypeReleaseIterator(hi); + } else if (o->type == OBJ_STREAM) { + streamIterator si; + streamIteratorStart(&si,o->ptr,NULL,NULL,0); + streamID id; + int64_t numfields; + + while(streamIteratorGetID(&si,&id,&numfields)) { + sds itemid = sdscatfmt(sdsempty(),"%U.%U",id.ms,id.seq); + mixDigest(digest,itemid,sdslen(itemid)); + sdsfree(itemid); + + while(numfields--) { + unsigned char *field, *value; + int64_t field_len, value_len; + streamIteratorGetField(&si,&field,&value, + &field_len,&value_len); + mixDigest(digest,field,field_len); + mixDigest(digest,value,value_len); + } + } + streamIteratorStop(&si); } else if (o->type == OBJ_MODULE) { RedisModuleDigest md; moduleValue *mv = o->ptr; @@ -262,55 +283,32 @@ void computeDatasetDigest(unsigned char *final) { } void debugCommand(client *c) { - if (c->argc == 1) { - addReplyError(c,"You must specify a subcommand for DEBUG. Try DEBUG HELP for info."); - return; - } - - if (!strcasecmp(c->argv[1]->ptr,"help")) { - void *blenp = addDeferredMultiBulkLength(c); - int blen = 0; - blen++; addReplyStatus(c, - "DEBUG arg arg ... arg. Subcommands:"); - blen++; addReplyStatus(c, - "segfault -- Crash the server with sigsegv."); - blen++; addReplyStatus(c, - "panic -- Crash the server simulating a panic."); - blen++; addReplyStatus(c, - "restart -- Graceful restart: save config, db, restart."); - blen++; addReplyStatus(c, - "crash-and-recovery -- Hard crash and restart after delay."); - blen++; addReplyStatus(c, - "assert -- Crash by assertion failed."); - blen++; addReplyStatus(c, - "reload -- Save the RDB on disk and reload it back in memory."); - blen++; addReplyStatus(c, - "loadaof -- Flush the AOF buffers on disk and reload the AOF in memory."); - blen++; addReplyStatus(c, - "object -- Show low level info about key and associated value."); - blen++; addReplyStatus(c, - "sdslen -- Show low level SDS string info representing key and value."); - blen++; addReplyStatus(c, - "ziplist -- Show low level info about the ziplist encoding."); - blen++; addReplyStatus(c, - "populate [prefix] [size] -- Create string keys named key:. If a prefix is specified is used instead of the 'key' prefix."); - blen++; addReplyStatus(c, - "digest -- Outputs an hex signature representing the current DB content."); - blen++; addReplyStatus(c, - "sleep -- Stop the server for . Decimals allowed."); - blen++; addReplyStatus(c, - "set-active-expire (0|1) -- Setting it to 0 disables expiring keys in background when they are not accessed (otherwise the Redis behavior). 
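The key extraction implemented by xreadGetKeys() above hinges on the STREAMS token: everything after it must split evenly into N keys followed by N IDs. A stand-alone sketch of that scan, simplified to return the first key position and the key count:

#include <stdio.h>
#include <strings.h>

static int find_stream_keys(int argc, char **argv, int *first_key, int *nkeys) {
    int streams_pos = -1;
    for (int i = 1; i < argc; i++) {
        if (!strcasecmp(argv[i], "block") || !strcasecmp(argv[i], "count"))
            i++;                      /* skip the option's argument */
        else if (!strcasecmp(argv[i], "group"))
            i += 2;                   /* skip the two GROUP arguments */
        else if (!strcasecmp(argv[i], "noack"))
            continue;
        else if (!strcasecmp(argv[i], "streams")) { streams_pos = i; break; }
        else break;                   /* syntax error */
    }
    int num = (streams_pos == -1) ? 0 : argc - streams_pos - 1;
    if (num == 0 || num % 2) return -1;  /* malformed STREAMS section */
    *first_key = streams_pos + 1;
    *nkeys = num / 2;                    /* half keys, half IDs */
    return 0;
}

int main(void) {
    char *argv[] = {"XREAD","COUNT","10","STREAMS","s1","s2","0","0"};
    int first, n;
    if (find_stream_keys(8, argv, &first, &n) == 0)
        printf("%d keys starting at argv[%d]\n", n, first); /* 2 at argv[4] */
    return 0;
}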
Setting it to 1 reenables back the default."); - blen++; addReplyStatus(c, - "lua-always-replicate-commands (0|1) -- Setting it to 1 makes Lua replication defaulting to replicating single commands, without the script having to enable effects replication."); - blen++; addReplyStatus(c, - "error -- Return a Redis protocol error with as message. Useful for clients unit tests to simulate Redis errors."); - blen++; addReplyStatus(c, - "structsize -- Return the size of different Redis core C structures."); - blen++; addReplyStatus(c, - "htstats -- Return hash table statistics of the specified Redis database."); - blen++; addReplyStatus(c, - "change-repl-id -- Change the replication IDs of the instance. Dangerous, should be used only for testing the replication subsystem."); - setDeferredMultiBulkLength(c,blenp,blen); + if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"help")) { + const char *help[] = { +"ASSERT -- Crash by assertion failed.", +"CHANGE-REPL-ID -- Change the replication IDs of the instance. Dangerous, should be used only for testing the replication subsystem.", +"CRASH-AND-RECOVER -- Hard crash and restart after delay.", +"DIGEST -- Output a hex signature representing the current DB content.", +"ERROR -- Return a Redis protocol error with as message. Useful for clients unit tests to simulate Redis errors.", +"LOG -- write message to the server log.", +"HTSTATS -- Return hash table statistics of the specified Redis database.", +"HTSTATS-KEY -- Like htstats but for the hash table stored as key's value.", +"LOADAOF -- Flush the AOF buffers on disk and reload the AOF in memory.", +"LUA-ALWAYS-REPLICATE-COMMANDS <0|1> -- Setting it to 1 makes Lua replication defaulting to replicating single commands, without the script having to enable effects replication.", +"OBJECT -- Show low level info about key and associated value.", +"PANIC -- Crash the server simulating a panic.", +"POPULATE [prefix] [size] -- Create string keys named key:. If a prefix is specified is used instead of the 'key' prefix.", +"RELOAD -- Save the RDB on disk and reload it back in memory.", +"RESTART -- Graceful restart: save config, db, restart.", +"SDSLEN -- Show low level SDS string info representing key and value.", +"SEGFAULT -- Crash the server with sigsegv.", +"SET-ACTIVE-EXPIRE <0|1> -- Setting it to 0 disables expiring keys in background when they are not accessed (otherwise the Redis behavior). Setting it to 1 reenables back the default.", +"SLEEP -- Stop the server for . 
Decimals allowed.", +"STRUCTSIZE -- Return the size of different Redis core C structures.", +"ZIPLIST -- Show low level info about the ziplist encoding.", +NULL + }; + addReplyHelp(c, help); } else if (!strcasecmp(c->argv[1]->ptr,"segfault")) { *((char*)-1) = 'x'; } else if (!strcasecmp(c->argv[1]->ptr,"panic")) { @@ -336,6 +334,9 @@ void debugCommand(client *c) { } else if (!strcasecmp(c->argv[1]->ptr,"assert")) { if (c->argc >= 3) c->argv[2] = tryObjectEncoding(c->argv[2]); serverAssertWithInfo(c,c->argv[0],1 == 2); + } else if (!strcasecmp(c->argv[1]->ptr,"log") && c->argc == 3) { + serverLog(LL_WARNING, "DEBUG LOG: %s", (char*)c->argv[2]->ptr); + addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"reload")) { rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); @@ -344,16 +345,22 @@ void debugCommand(client *c) { return; } emptyDb(-1,EMPTYDB_NO_FLAGS,NULL); - if (rdbLoad(server.rdb_filename,NULL) != C_OK) { + protectClient(c); + int ret = rdbLoad(server.rdb_filename,NULL); + unprotectClient(c); + if (ret != C_OK) { addReplyError(c,"Error trying to load the RDB dump"); return; } serverLog(LL_WARNING,"DB reloaded by DEBUG RELOAD"); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"loadaof")) { - if (server.aof_state == AOF_ON) flushAppendOnlyFile(1); + if (server.aof_state != AOF_OFF) flushAppendOnlyFile(1); emptyDb(-1,EMPTYDB_NO_FLAGS,NULL); - if (loadAppendOnlyFile(server.aof_filename) != C_OK) { + protectClient(c); + int ret = loadAppendOnlyFile(server.aof_filename); + unprotectClient(c); + if (ret != C_OK) { addReply(c,shared.err); return; } @@ -551,14 +558,42 @@ void debugCommand(client *c) { stats = sdscat(stats,buf); addReplyBulkSds(c,stats); + } else if (!strcasecmp(c->argv[1]->ptr,"htstats-key") && c->argc == 3) { + robj *o; + dict *ht = NULL; + + if ((o = objectCommandLookupOrReply(c,c->argv[2],shared.nokeyerr)) + == NULL) return; + + /* Get the hash table reference from the object, if possible. 
*/ + switch (o->encoding) { + case OBJ_ENCODING_SKIPLIST: + { + zset *zs = o->ptr; + ht = zs->dict; + } + break; + case OBJ_ENCODING_HT: + ht = o->ptr; + break; + } + + if (ht == NULL) { + addReplyError(c,"The value stored at the specified key is not " + "represented using an hash table"); + } else { + char buf[4096]; + dictGetStats(buf,sizeof(buf),ht); + addReplyBulkCString(c,buf); + } } else if (!strcasecmp(c->argv[1]->ptr,"change-repl-id") && c->argc == 2) { serverLog(LL_WARNING,"Changing replication IDs after receiving DEBUG change-repl-id"); changeReplicationId(); clearReplicationId2(); addReply(c,shared.ok); } else { - addReplyErrorFormat(c, "Unknown DEBUG subcommand or wrong number of arguments for '%s'", - (char*)c->argv[1]->ptr); + addReplySubcommandSyntaxError(c); + return; } } @@ -691,6 +726,8 @@ static void *getMcontextEip(ucontext_t *uc) { return (void*) uc->uc_mcontext.sc_ip; #elif defined(__arm__) /* Linux ARM */ return (void*) uc->uc_mcontext.arm_pc; + #elif defined(__aarch64__) /* Linux AArch64 */ + return (void*) uc->uc_mcontext.pc; #endif #else return NULL; @@ -1049,7 +1086,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { infostring = genRedisInfoString("all"); serverLogRaw(LL_WARNING|LL_RAW, infostring); serverLogRaw(LL_WARNING|LL_RAW, "\n------ CLIENT LIST OUTPUT ------\n"); - clients = getAllClientsInfoString(); + clients = getAllClientsInfoString(-1); serverLogRaw(LL_WARNING|LL_RAW, clients); sdsfree(infostring); sdsfree(clients); diff --git a/redis-android/src/main/jni/redis-4.0.11/src/debugmacro.h b/redis-android/src/main/jni/redis-5.0.0/src/debugmacro.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/debugmacro.h rename to redis-android/src/main/jni/redis-5.0.0/src/debugmacro.h diff --git a/redis-android/src/main/jni/redis-5.0.0/src/defrag.c b/redis-android/src/main/jni/redis-5.0.0/src/defrag.c new file mode 100644 index 0000000..d67b6e2 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/src/defrag.c @@ -0,0 +1,1140 @@ +/* + * Active memory defragmentation + * Try to find key / value allocations that need to be re-allocated in order + * to reduce external fragmentation. + * We do that by scanning the keyspace and for each pointer we have, we can try to + * ask the allocator if moving it to a new address will help reduce fragmentation. + * + * Copyright (c) 2017, Oran Agra + * Copyright (c) 2017, Redis Labs, Inc + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
+ +/* Defrag helper for sds strings + * + * returns NULL in case the allocation wasn't moved. + * when it returns a non-null value, the old pointer was already released + * and should NOT be accessed. */ +sds activeDefragSds(sds sdsptr) { + void* ptr = sdsAllocPtr(sdsptr); + void* newptr = activeDefragAlloc(ptr); + if (newptr) { + size_t offset = sdsptr - (char*)ptr; + sdsptr = (char*)newptr + offset; + return sdsptr; + } + return NULL; +} + +/* Defrag helper for robj and/or string objects + * + * returns NULL in case the allocation wasn't moved. + * when it returns a non-null value, the old pointer was already released + * and should NOT be accessed. */ +robj *activeDefragStringOb(robj* ob, long *defragged) { + robj *ret = NULL; + if (ob->refcount!=1) + return NULL; + + /* try to defrag robj (only if not an EMBSTR type, which is handled below). */ + if (ob->type!=OBJ_STRING || ob->encoding!=OBJ_ENCODING_EMBSTR) { + if ((ret = activeDefragAlloc(ob))) { + ob = ret; + (*defragged)++; + } + } + + /* try to defrag string object */ + if (ob->type == OBJ_STRING) { + if(ob->encoding==OBJ_ENCODING_RAW) { + sds newsds = activeDefragSds((sds)ob->ptr); + if (newsds) { + ob->ptr = newsds; + (*defragged)++; + } + } else if (ob->encoding==OBJ_ENCODING_EMBSTR) { + /* The sds is embedded in the object allocation, calculate the + * offset and update the pointer in the new allocation. */ + long ofs = (intptr_t)ob->ptr - (intptr_t)ob; + if ((ret = activeDefragAlloc(ob))) { + ret->ptr = (void*)((intptr_t)ret + ofs); + (*defragged)++; + } + } else if (ob->encoding!=OBJ_ENCODING_INT) { + serverPanic("Unknown string encoding"); + } + } + return ret; +}
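The OBJ_ENCODING_EMBSTR branch above works because the sds is embedded in the same allocation as the robj header: the interior pointer's offset from the base survives the move, so it can be re-derived from the new base address. The same trick applies to any struct holding a pointer into its own allocation; a small self-contained sketch (the types here are hypothetical, not the Redis ones):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* A struct whose 'data' points into its own allocation, like an
 * OBJ_ENCODING_EMBSTR robj whose ptr points just past the header. */
struct embedded {
    char *data;
    char buf[16];
};

int main(void) {
    struct embedded *e = malloc(sizeof(*e));
    e->data = e->buf;
    strcpy(e->buf, "hello");

    /* Move the allocation, then fix the interior pointer by offset. */
    long ofs = (intptr_t)e->data - (intptr_t)e;
    struct embedded *moved = malloc(sizeof(*moved));
    memcpy(moved, e, sizeof(*e));
    free(e);
    moved->data = (char *)((intptr_t)moved + ofs);

    printf("%s\n", moved->data);   /* prints "hello" */
    free(moved);
    return 0;
}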
+ +/* Defrag helper for dictEntries to be used during dict iteration (called on + * each step). Returns a stat of how many pointers were moved. */ +long dictIterDefragEntry(dictIterator *iter) { + /* This function is a little bit dirty since it messes with the internals + * of the dict and its iterator, but the benefit is that it is very easy + * to use, and requires no other changes in the dict. */ + long defragged = 0; + dictht *ht; + /* Handle the next entry (if there is one), and update the pointer in the + * current entry. */ + if (iter->nextEntry) { + dictEntry *newde = activeDefragAlloc(iter->nextEntry); + if (newde) { + defragged++; + iter->nextEntry = newde; + iter->entry->next = newde; + } + } + /* handle the case of the first entry in the hash bucket. */ + ht = &iter->d->ht[iter->table]; + if (ht->table[iter->index] == iter->entry) { + dictEntry *newde = activeDefragAlloc(iter->entry); + if (newde) { + iter->entry = newde; + ht->table[iter->index] = newde; + defragged++; + } + } + return defragged; +} + +/* Defrag helper for dict main allocations (dict struct, and hash tables). + * Receives a pointer to the dict* and implicitly updates it when the dict + * struct itself was moved. Returns a stat of how many pointers were moved. */ +long dictDefragTables(dict* d) { + dictEntry **newtable; + long defragged = 0; + /* handle the first hash table */ + newtable = activeDefragAlloc(d->ht[0].table); + if (newtable) + defragged++, d->ht[0].table = newtable; + /* handle the second hash table */ + if (d->ht[1].table) { + newtable = activeDefragAlloc(d->ht[1].table); + if (newtable) + defragged++, d->ht[1].table = newtable; + } + return defragged; +} + +/* Internal function used by zslDefrag */ +void zslUpdateNode(zskiplist *zsl, zskiplistNode *oldnode, zskiplistNode *newnode, zskiplistNode **update) { + int i; + for (i = 0; i < zsl->level; i++) { + if (update[i]->level[i].forward == oldnode) + update[i]->level[i].forward = newnode; + } + serverAssert(zsl->header!=oldnode); + if (newnode->level[0].forward) { + serverAssert(newnode->level[0].forward->backward==oldnode); + newnode->level[0].forward->backward = newnode; + } else { + serverAssert(zsl->tail==oldnode); + zsl->tail = newnode; + } +} + +/* Defrag helper for sorted set. + * Update the robj pointer, defrag the skiplist struct and return the new score + * reference. We may not access oldele pointer (not even the pointer stored in + * the skiplist), as it was already freed. Newele may be null, in which case we + * only need to defrag the skiplist, but not update the obj pointer. + * When return value is non-NULL, it is the score reference that must be updated + * in the dict record. */ +double *zslDefrag(zskiplist *zsl, double score, sds oldele, sds newele) { + zskiplistNode *update[ZSKIPLIST_MAXLEVEL], *x, *newx; + int i; + sds ele = newele? newele: oldele; + + /* find the skiplist node referring to the object that was moved, + * and all pointers that need to be updated if we'll end up moving the skiplist node. */ + x = zsl->header; + for (i = zsl->level-1; i >= 0; i--) { + while (x->level[i].forward && + x->level[i].forward->ele != oldele && /* make sure not to access the + ->obj pointer if it matches + oldele */ + (x->level[i].forward->score < score || + (x->level[i].forward->score == score && + sdscmp(x->level[i].forward->ele,ele) < 0))) + x = x->level[i].forward; + update[i] = x; + } + + /* update the robj pointer inside the skip list record. */ + x = x->level[0].forward; + serverAssert(x && score == x->score && x->ele==oldele); + if (newele) + x->ele = newele; + + /* try to defrag the skiplist record itself */ + newx = activeDefragAlloc(x); + if (newx) { + zslUpdateNode(zsl, x, newx, update); + return &newx->score; + } + return NULL; +}
+ +/* Defrag helper for sorted set. + * Defrag a single dict entry key name, and corresponding skiplist struct */ +long activeDefragZsetEntry(zset *zs, dictEntry *de) { + sds newsds; + double* newscore; + long defragged = 0; + sds sdsele = dictGetKey(de); + if ((newsds = activeDefragSds(sdsele))) + defragged++, de->key = newsds; + newscore = zslDefrag(zs->zsl, *(double*)dictGetVal(de), sdsele, newsds); + if (newscore) { + dictSetVal(zs->dict, de, newscore); + defragged++; + } + return defragged; +} + +#define DEFRAG_SDS_DICT_NO_VAL 0 +#define DEFRAG_SDS_DICT_VAL_IS_SDS 1 +#define DEFRAG_SDS_DICT_VAL_IS_STROB 2 +#define DEFRAG_SDS_DICT_VAL_VOID_PTR 3 + +/* Defrag a dict with sds key and optional value (either ptr, sds or robj string) */ +long activeDefragSdsDict(dict* d, int val_type) { + dictIterator *di; + dictEntry *de; + long defragged = 0; + di = dictGetIterator(d); + while((de = dictNext(di)) != NULL) { + sds sdsele = dictGetKey(de), newsds; + if ((newsds = activeDefragSds(sdsele))) + de->key = newsds, defragged++; + /* defrag the value */ + if (val_type == DEFRAG_SDS_DICT_VAL_IS_SDS) { + sdsele = dictGetVal(de); + if ((newsds = activeDefragSds(sdsele))) + de->v.val = newsds, defragged++; + } else if (val_type == DEFRAG_SDS_DICT_VAL_IS_STROB) { + robj *newele, *ele = dictGetVal(de); + if ((newele = activeDefragStringOb(ele, &defragged))) + de->v.val = newele; + } else if (val_type == DEFRAG_SDS_DICT_VAL_VOID_PTR) { + void *newptr, *ptr = dictGetVal(de); + if ((newptr = activeDefragAlloc(ptr))) + de->v.val = newptr, defragged++; + } + defragged += dictIterDefragEntry(di); + } + dictReleaseIterator(di); + return defragged; +} + +/* Defrag a list of ptr, sds or robj string values */ +long activeDefragList(list *l, int val_type) { + long defragged = 0; + listNode *ln, *newln; + for (ln = l->head; ln; ln = ln->next) { + if ((newln = activeDefragAlloc(ln))) { + if (newln->prev) + newln->prev->next = newln; + else + l->head = newln; + if (newln->next) + newln->next->prev = newln; + else + l->tail = newln; + ln = newln; + defragged++; + } + if (val_type == DEFRAG_SDS_DICT_VAL_IS_SDS) { + sds newsds, sdsele = ln->value; + if ((newsds = activeDefragSds(sdsele))) + ln->value = newsds, defragged++; + } else if (val_type == DEFRAG_SDS_DICT_VAL_IS_STROB) { + robj *newele, *ele = ln->value; + if ((newele = activeDefragStringOb(ele, &defragged))) + ln->value = newele; + } else if (val_type == DEFRAG_SDS_DICT_VAL_VOID_PTR) { + void *newptr, *ptr = ln->value; + if ((newptr = activeDefragAlloc(ptr))) + ln->value = newptr, defragged++; + } + } + return defragged; +}
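activeDefragList() above spells out the full relink dance for a moved doubly linked list node: patch the predecessor's next (or the head), patch the successor's prev (or the tail), then continue from the new node. A standalone sketch of just that relink step, using plain malloc in place of activeDefragAlloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct listNode { struct listNode *prev, *next; int value; } listNode;
typedef struct list { listNode *head, *tail; } list;

/* Move 'ln' and patch every reference to it, mirroring what
 * activeDefragList() does after activeDefragAlloc() succeeds. */
static listNode *relink(list *l, listNode *ln) {
    listNode *newln = malloc(sizeof(*newln));
    memcpy(newln, ln, sizeof(*ln));
    free(ln);
    if (newln->prev) newln->prev->next = newln; else l->head = newln;
    if (newln->next) newln->next->prev = newln; else l->tail = newln;
    return newln;
}

int main(void) {
    list l;
    listNode *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));
    a->next = b; b->prev = a; a->value = 1; b->value = 2;
    l.head = a; l.tail = b;
    relink(&l, b);                                     /* move the tail */
    printf("%d %d\n", l.head->value, l.tail->value);   /* 1 2 */
    free(l.head); free(l.tail);
    return 0;
}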
+ +/* Defrag a list of sds values and a dict with the same sds keys */ +long activeDefragSdsListAndDict(list *l, dict *d, int dict_val_type) { + long defragged = 0; + sds newsds, sdsele; + listNode *ln, *newln; + dictIterator *di; + dictEntry *de; + /* Defrag the list and its sds values */ + for (ln = l->head; ln; ln = ln->next) { + if ((newln = activeDefragAlloc(ln))) { + if (newln->prev) + newln->prev->next = newln; + else + l->head = newln; + if (newln->next) + newln->next->prev = newln; + else + l->tail = newln; + ln = newln; + defragged++; + } + sdsele = ln->value; + if ((newsds = activeDefragSds(sdsele))) { + /* When defragging an sds value, we need to update the dict key */ + unsigned int hash = dictGetHash(d, sdsele); + replaceSateliteDictKeyPtrAndOrDefragDictEntry(d, sdsele, newsds, hash, &defragged); + ln->value = newsds; + defragged++; + } + } + + /* Defrag the dict values (keys were already handled) */ + di = dictGetIterator(d); + while((de = dictNext(di)) != NULL) { + if (dict_val_type == DEFRAG_SDS_DICT_VAL_IS_SDS) { + sds newsds, sdsele = dictGetVal(de); + if ((newsds = activeDefragSds(sdsele))) + de->v.val = newsds, defragged++; + } else if (dict_val_type == DEFRAG_SDS_DICT_VAL_IS_STROB) { + robj *newele, *ele = dictGetVal(de); + if ((newele = activeDefragStringOb(ele, &defragged))) + de->v.val = newele, defragged++; + } else if (dict_val_type == DEFRAG_SDS_DICT_VAL_VOID_PTR) { + void *newptr, *ptr = ln->value; + if ((newptr = activeDefragAlloc(ptr))) + ln->value = newptr, defragged++; + } + defragged += dictIterDefragEntry(di); + } + dictReleaseIterator(di); + + return defragged; +} + +/* Utility function that replaces an old key pointer in the dictionary with a + * new pointer. Additionally, we try to defrag the dictEntry in that dict. + * Oldkey may be a dead pointer and should not be accessed (we get a + * pre-calculated hash value). Newkey may be null if the key pointer wasn't + * moved. Return value is the dictEntry if found, or NULL if not found. + * NOTE: this is very ugly code, but it lets us avoid the complication of + * doing a scan on another dict. */ +dictEntry* replaceSateliteDictKeyPtrAndOrDefragDictEntry(dict *d, sds oldkey, sds newkey, unsigned int hash, long *defragged) { + dictEntry **deref = dictFindEntryRefByPtrAndHash(d, oldkey, hash); + if (deref) { + dictEntry *de = *deref; + dictEntry *newde = activeDefragAlloc(de); + if (newde) { + de = *deref = newde; + (*defragged)++; + } + if (newkey) + de->key = newkey; + return de; + } + return NULL; +}
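replaceSateliteDictKeyPtrAndOrDefragDictEntry() above leans on one subtlety: the hash is computed from the key bytes before the old sds is released. Afterwards only the stale pointer value is usable, and only for identity comparison against entries found via the precomputed hash (which is what dictFindEntryRefByPtrAndHash does). A sketch of that ordering, with a hypothetical hash function in place of the dict's:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the dict's hash function. */
static unsigned int hashBytes(const char *s) {
    unsigned int h = 5381;
    while (*s) h = h * 33 + (unsigned char)*s++;
    return h;
}

int main(void) {
    char *key = strdup("mykey");

    /* 1. Hash while the bytes are still readable. */
    unsigned int hash = hashBytes(key);
    uintptr_t old_id = (uintptr_t)key;     /* identity, not contents */

    /* 2. Move the key; from here on the old pointer is dead, and only
     *    its saved value (old_id) may be compared, never dereferenced. */
    char *newkey = strdup(key);
    free(key);

    /* 3. A satellite table is probed with 'hash', entries are matched by
     *    pointer identity against old_id, then repointed at newkey. */
    printf("hash=%u old=%#lx new=%p\n", hash, (unsigned long)old_id,
           (void *)newkey);
    free(newkey);
    return 0;
}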
+ +long activeDefragQuickListNodes(quicklist *ql) { + quicklistNode *node = ql->head, *newnode; + long defragged = 0; + unsigned char *newzl; + while (node) { + if ((newnode = activeDefragAlloc(node))) { + if (newnode->prev) + newnode->prev->next = newnode; + else + ql->head = newnode; + if (newnode->next) + newnode->next->prev = newnode; + else + ql->tail = newnode; + node = newnode; + defragged++; + } + if ((newzl = activeDefragAlloc(node->zl))) + defragged++, node->zl = newzl; + node = node->next; + } + return defragged; +} + +/* when the value has lots of elements, we want to handle it later and not as + * part of the main dictionary scan. This is needed in order to prevent latency + * spikes when handling large items */ +void defragLater(redisDb *db, dictEntry *kde) { + sds key = sdsdup(dictGetKey(kde)); + listAddNodeTail(db->defrag_later, key); +}
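defragLater() above implements the size cutoff policy: keys whose containers exceed active_defrag_max_scan_fields are queued onto db->defrag_later by a duplicated key name rather than by pointer, since the original sds may itself be moved or deleted before the deferred pass runs. A sketch of the same defer-by-name pattern, with a hypothetical fixed-size queue standing in for the Redis list:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_SCAN_FIELDS 1000

/* Hypothetical FIFO of duplicated key names, standing in for
 * db->defrag_later. */
static char *later[16];
static int nlater;

static void defragLaterSketch(const char *key, size_t fields) {
    if (fields > MAX_SCAN_FIELDS) {
        if (nlater < 16) later[nlater++] = strdup(key); /* defer big keys */
        return;
    }
    printf("defrag %s inline (%zu fields)\n", key, fields);
}

int main(void) {
    defragLaterSketch("small-hash", 10);
    defragLaterSketch("huge-hash", 100000);
    for (int i = 0; i < nlater; i++) {
        printf("deferred: %s\n", later[i]);
        free(later[i]);
    }
    return 0;
}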
+ +long scanLaterList(robj *ob) { + quicklist *ql = ob->ptr; + if (ob->type != OBJ_LIST || ob->encoding != OBJ_ENCODING_QUICKLIST) + return 0; + server.stat_active_defrag_scanned+=ql->len; + return activeDefragQuickListNodes(ql); +} + +typedef struct { + zset *zs; + long defragged; +} scanLaterZsetData; + +void scanLaterZsetCallback(void *privdata, const dictEntry *_de) { + dictEntry *de = (dictEntry*)_de; + scanLaterZsetData *data = privdata; + data->defragged += activeDefragZsetEntry(data->zs, de); + server.stat_active_defrag_scanned++; +} + +long scanLaterZset(robj *ob, unsigned long *cursor) { + if (ob->type != OBJ_ZSET || ob->encoding != OBJ_ENCODING_SKIPLIST) + return 0; + zset *zs = (zset*)ob->ptr; + dict *d = zs->dict; + scanLaterZsetData data = {zs, 0}; + *cursor = dictScan(d, *cursor, scanLaterZsetCallback, defragDictBucketCallback, &data); + return data.defragged; +} + +void scanLaterSetCallback(void *privdata, const dictEntry *_de) { + dictEntry *de = (dictEntry*)_de; + long *defragged = privdata; + sds sdsele = dictGetKey(de), newsds; + if ((newsds = activeDefragSds(sdsele))) + (*defragged)++, de->key = newsds; + server.stat_active_defrag_scanned++; +} + +long scanLaterSet(robj *ob, unsigned long *cursor) { + long defragged = 0; + if (ob->type != OBJ_SET || ob->encoding != OBJ_ENCODING_HT) + return 0; + dict *d = ob->ptr; + *cursor = dictScan(d, *cursor, scanLaterSetCallback, defragDictBucketCallback, &defragged); + return defragged; +} + +void scanLaterHashCallback(void *privdata, const dictEntry *_de) { + dictEntry *de = (dictEntry*)_de; + long *defragged = privdata; + sds sdsele = dictGetKey(de), newsds; + if ((newsds = activeDefragSds(sdsele))) + (*defragged)++, de->key = newsds; + sdsele = dictGetVal(de); + if ((newsds = activeDefragSds(sdsele))) + (*defragged)++, de->v.val = newsds; + server.stat_active_defrag_scanned++; +} + +long scanLaterHash(robj *ob, unsigned long *cursor) { + long defragged = 0; + if (ob->type != OBJ_HASH || ob->encoding != OBJ_ENCODING_HT) + return 0; + dict *d = ob->ptr; + *cursor = dictScan(d, *cursor, scanLaterHashCallback, defragDictBucketCallback, &defragged); + return defragged; +} + +long defragQuicklist(redisDb *db, dictEntry *kde) { + robj *ob = dictGetVal(kde); + long defragged = 0; + quicklist *ql = ob->ptr, *newql; + serverAssert(ob->type == OBJ_LIST && ob->encoding == OBJ_ENCODING_QUICKLIST); + if ((newql = activeDefragAlloc(ql))) + defragged++, ob->ptr = ql = newql; + if (ql->len > server.active_defrag_max_scan_fields) + defragLater(db, kde); + else + defragged += activeDefragQuickListNodes(ql); + return defragged; +} + +long defragZsetSkiplist(redisDb *db, dictEntry *kde) { + robj *ob = dictGetVal(kde); + long defragged = 0; + zset *zs = (zset*)ob->ptr; + zset *newzs; + zskiplist *newzsl; + dict *newdict; + dictEntry *de; + struct zskiplistNode *newheader; + serverAssert(ob->type == OBJ_ZSET && ob->encoding == OBJ_ENCODING_SKIPLIST); + if ((newzs = activeDefragAlloc(zs))) + defragged++, ob->ptr = zs = newzs; + if ((newzsl = activeDefragAlloc(zs->zsl))) + defragged++, zs->zsl = newzsl; + if ((newheader = activeDefragAlloc(zs->zsl->header))) + defragged++, zs->zsl->header = newheader; + if (dictSize(zs->dict) > server.active_defrag_max_scan_fields) + defragLater(db, kde); + else { + dictIterator *di = dictGetIterator(zs->dict); + while((de = dictNext(di)) != NULL) { + defragged += activeDefragZsetEntry(zs, de); + } + dictReleaseIterator(di); + } + /* handle the dict struct */ + if ((newdict = activeDefragAlloc(zs->dict))) + defragged++, zs->dict = newdict; + /* defrag the dict tables */ + defragged += dictDefragTables(zs->dict); + return defragged; +} + +long defragHash(redisDb *db, dictEntry *kde) { + long defragged = 0; + robj *ob = dictGetVal(kde); + dict *d, *newd; + serverAssert(ob->type == OBJ_HASH && ob->encoding == OBJ_ENCODING_HT); + d = ob->ptr; + if (dictSize(d) > server.active_defrag_max_scan_fields) + defragLater(db, kde); + else + defragged += activeDefragSdsDict(d, DEFRAG_SDS_DICT_VAL_IS_SDS); + /* handle the dict struct */ + if ((newd = activeDefragAlloc(ob->ptr))) + defragged++, ob->ptr = newd; + /* defrag the dict tables */ + defragged += dictDefragTables(ob->ptr); + return defragged; +} + +long defragSet(redisDb *db, dictEntry *kde) { + long defragged = 0; + robj *ob = dictGetVal(kde); + dict *d, *newd; + serverAssert(ob->type == OBJ_SET && ob->encoding == OBJ_ENCODING_HT); + d = ob->ptr; + if (dictSize(d) > server.active_defrag_max_scan_fields) + defragLater(db, kde); + else + defragged += activeDefragSdsDict(d, DEFRAG_SDS_DICT_NO_VAL); + /* handle the dict struct */ + if ((newd = activeDefragAlloc(ob->ptr))) + defragged++, ob->ptr = newd; + /* defrag the dict tables */ + defragged += dictDefragTables(ob->ptr); + return defragged; +} + +/* Defrag callback for radix tree iterator, called for each node, + * used in order to defrag the nodes allocations. */ +int defragRaxNode(raxNode **noderef) { + raxNode *newnode = activeDefragAlloc(*noderef); + if (newnode) { + *noderef = newnode; + return 1; + } + return 0; +}
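defragRaxNode() above shows the double-pointer convention used by the rax iterator's node callback: the callee may swap a moved node into the caller's slot and reports whether it did. A tiny standalone sketch of that convention (the always-move behavior is a simplification; the real callback moves only when activeDefragAlloc agrees):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct node { int payload; } node;

/* Callback shape mirroring defragRaxNode(): receive a reference to the
 * caller's pointer, possibly replace it with a moved copy, return 1 if
 * the node moved. */
static int maybeMoveNode(node **ref) {
    node *moved = malloc(sizeof(*moved));
    memcpy(moved, *ref, sizeof(node));
    free(*ref);
    *ref = moved;
    return 1;
}

int main(void) {
    node *n = malloc(sizeof(*n));
    n->payload = 7;
    int moved = maybeMoveNode(&n);   /* caller's pointer updated in place */
    printf("moved=%d payload=%d\n", moved, n->payload);
    free(n);
    return 0;
}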
+ +/* returns 0 if no more work needs to be done, and 1 if time is up and more work is needed. */ +int scanLaterStraemListpacks(robj *ob, unsigned long *cursor, long long endtime, long long *defragged) { + static unsigned char last[sizeof(streamID)]; + raxIterator ri; + long iterations = 0; + if (ob->type != OBJ_STREAM || ob->encoding != OBJ_ENCODING_STREAM) { + *cursor = 0; + return 0; + } + + stream *s = ob->ptr; + raxStart(&ri,s->rax); + if (*cursor == 0) { + /* if cursor is 0, we start new iteration */ + defragRaxNode(&s->rax->head); + /* assign the iterator node callback before the seek, so that the + * initial nodes that are processed till the first item are covered */ + ri.node_cb = defragRaxNode; + raxSeek(&ri,"^",NULL,0); + } else { + /* if cursor is non-zero, we seek to the static 'last' */ + if (!raxSeek(&ri,">", last, sizeof(last))) { + *cursor = 0; + return 0; + } + /* assign the iterator node callback after the seek, so that the + * initial nodes that are processed till now aren't covered */ + ri.node_cb = defragRaxNode; + } + + (*cursor)++; + while (raxNext(&ri)) { + void *newdata = activeDefragAlloc(ri.data); + if (newdata) + raxSetData(ri.node, ri.data=newdata), (*defragged)++; + if (++iterations > 16) { + if (ustime() > endtime) { + serverAssert(ri.key_len==sizeof(last)); + memcpy(last,ri.key,ri.key_len); + raxStop(&ri); + return 1; + } + iterations = 0; + } + } + raxStop(&ri); + *cursor = 0; + return 0; +}
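The cursor/endtime shape of scanLaterStraemListpacks() above recurs in every incremental pass in this file: do a bounded chunk of work, check the clock only every few iterations (ustime() costs a syscall), and return 1 with a resumable position when the budget is exhausted. A standalone skeleton of that loop, under the assumption that a simple counter stands in for the real cursor:

#include <stdio.h>
#include <sys/time.h>

static long long ustimeSketch(void) {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (long long)tv.tv_sec * 1000000 + tv.tv_usec;
}

/* Returns 0 if the work finished, 1 if time ran out and the caller
 * should resume from *cursor on the next cycle. */
static int workStep(unsigned long *cursor, unsigned long total,
                    long long endtime) {
    int iterations = 0;
    while (*cursor < total) {
        (*cursor)++;                  /* one unit of work */
        if (++iterations > 16) {      /* amortize the clock syscall */
            if (ustimeSketch() > endtime) return 1;
            iterations = 0;
        }
    }
    *cursor = 0;
    return 0;
}

int main(void) {
    unsigned long cursor = 0;
    long long endtime = ustimeSketch() + 1000;   /* 1ms budget */
    while (workStep(&cursor, 50000000UL, endtime))
        endtime = ustimeSketch() + 1000;         /* next cycle's budget */
    printf("done\n");
    return 0;
}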
+ +/* optional callback used to defrag each rax element (not including the element pointer itself) */ +typedef void *(raxDefragFunction)(raxIterator *ri, void *privdata, long *defragged); + +/* defrag radix tree including: + * 1) rax struct + * 2) rax nodes + * 3) rax entry data (only if defrag_data is specified) + * 4) call a callback per element, and allow the callback to return a new pointer for the element */ +long defragRadixTree(rax **raxref, int defrag_data, raxDefragFunction *element_cb, void *element_cb_data) { + long defragged = 0; + raxIterator ri; + rax* rax; + if ((rax = activeDefragAlloc(*raxref))) + defragged++, *raxref = rax; + rax = *raxref; + raxStart(&ri,rax); + ri.node_cb = defragRaxNode; + defragRaxNode(&rax->head); + raxSeek(&ri,"^",NULL,0); + while (raxNext(&ri)) { + void *newdata = NULL; + if (element_cb) + newdata = element_cb(&ri, element_cb_data, &defragged); + if (defrag_data && !newdata) + newdata = activeDefragAlloc(ri.data); + if (newdata) + raxSetData(ri.node, ri.data=newdata), defragged++; + } + raxStop(&ri); + return defragged; +} + +typedef struct { + streamCG *cg; + streamConsumer *c; +} PendingEntryContext; + +void* defragStreamConsumerPendingEntry(raxIterator *ri, void *privdata, long *defragged) { + UNUSED(defragged); + PendingEntryContext *ctx = privdata; + streamNACK *nack = ri->data, *newnack; + nack->consumer = ctx->c; /* update nack pointer to consumer */ + newnack = activeDefragAlloc(nack); + if (newnack) { + /* update consumer group pointer to the nack */ + void *prev; + raxInsert(ctx->cg->pel, ri->key, ri->key_len, newnack, &prev); + serverAssert(prev==nack); + /* note: we don't increment 'defragged' that's done by the caller */ + } + return newnack; +} + +void* defragStreamConsumer(raxIterator *ri, void *privdata, long *defragged) { + streamConsumer *c = ri->data; + streamCG *cg = privdata; + void *newc = activeDefragAlloc(c); + if (newc) { + /* note: we don't increment 'defragged' that's done by the caller */ + c = newc; + } + sds newsds = activeDefragSds(c->name); + if (newsds) + (*defragged)++, c->name = newsds; + if (c->pel) { + PendingEntryContext pel_ctx = {cg, c}; + *defragged += defragRadixTree(&c->pel, 0, defragStreamConsumerPendingEntry, &pel_ctx); + } + return newc; /* returns NULL if c was not defragged */ +} + +void* defragStreamConsumerGroup(raxIterator *ri, void *privdata, long *defragged) { + streamCG *cg = ri->data; + UNUSED(privdata); + if (cg->consumers) + *defragged += defragRadixTree(&cg->consumers, 0, defragStreamConsumer, cg); + if (cg->pel) + *defragged += defragRadixTree(&cg->pel, 0, NULL, NULL); + return NULL; +} + +long defragStream(redisDb *db, dictEntry *kde) { + long defragged = 0; + robj *ob = dictGetVal(kde); + serverAssert(ob->type == OBJ_STREAM && ob->encoding == OBJ_ENCODING_STREAM); + stream *s = ob->ptr, *news; + + /* handle the main struct */ + if ((news = activeDefragAlloc(s))) + defragged++, ob->ptr = s = news; + + if (raxSize(s->rax) > server.active_defrag_max_scan_fields) { + rax *newrax = activeDefragAlloc(s->rax); + if (newrax) + defragged++, s->rax = newrax; + defragLater(db, kde); + } else + defragged += defragRadixTree(&s->rax, 1, NULL, NULL); + + if (s->cgroups) + defragged += defragRadixTree(&s->cgroups, 1, defragStreamConsumerGroup, NULL); + return defragged; +} + +/* for each key we scan in the main dict, this function will attempt to defrag + * all the various pointers it has. Returns a stat of how many pointers were + * moved. */ +long defragKey(redisDb *db, dictEntry *de) { + sds keysds = dictGetKey(de); + robj *newob, *ob; + unsigned char *newzl; + long defragged = 0; + sds newsds; + + /* Try to defrag the key name. */ + newsds = activeDefragSds(keysds); + if (newsds) + defragged++, de->key = newsds; + if (dictSize(db->expires)) { + /* Dirty code: + * I can't search in db->expires for that key after I've already released + * the pointer it holds, since it won't be able to do the string compare */ + uint64_t hash = dictGetHash(db->dict, de->key); + replaceSateliteDictKeyPtrAndOrDefragDictEntry(db->expires, keysds, newsds, hash, &defragged); + } + + /* Try to defrag robj and / or string value. */ + ob = dictGetVal(de); + if ((newob = activeDefragStringOb(ob, &defragged))) { + de->v.val = newob; + ob = newob; + } + + if (ob->type == OBJ_STRING) { + /* Already handled in activeDefragStringOb. 
*/ } else if (ob->type == OBJ_LIST) { + if (ob->encoding == OBJ_ENCODING_QUICKLIST) { + defragged += defragQuicklist(db, de); + } else if (ob->encoding == OBJ_ENCODING_ZIPLIST) { + if ((newzl = activeDefragAlloc(ob->ptr))) + defragged++, ob->ptr = newzl; + } else { + serverPanic("Unknown list encoding"); + } + } else if (ob->type == OBJ_SET) { + if (ob->encoding == OBJ_ENCODING_HT) { + defragged += defragSet(db, de); + } else if (ob->encoding == OBJ_ENCODING_INTSET) { + intset *newis, *is = ob->ptr; + if ((newis = activeDefragAlloc(is))) + defragged++, ob->ptr = newis; + } else { + serverPanic("Unknown set encoding"); + } + } else if (ob->type == OBJ_ZSET) { + if (ob->encoding == OBJ_ENCODING_ZIPLIST) { + if ((newzl = activeDefragAlloc(ob->ptr))) + defragged++, ob->ptr = newzl; + } else if (ob->encoding == OBJ_ENCODING_SKIPLIST) { + defragged += defragZsetSkiplist(db, de); + } else { + serverPanic("Unknown sorted set encoding"); + } + } else if (ob->type == OBJ_HASH) { + if (ob->encoding == OBJ_ENCODING_ZIPLIST) { + if ((newzl = activeDefragAlloc(ob->ptr))) + defragged++, ob->ptr = newzl; + } else if (ob->encoding == OBJ_ENCODING_HT) { + defragged += defragHash(db, de); + } else { + serverPanic("Unknown hash encoding"); + } + } else if (ob->type == OBJ_STREAM) { + defragged += defragStream(db, de); + } else if (ob->type == OBJ_MODULE) { + /* Currently defragmenting modules private data types + * is not supported. */ + } else { + serverPanic("Unknown object type"); + } + return defragged; +} + +/* Defrag scan callback for the main db dictionary. */ +void defragScanCallback(void *privdata, const dictEntry *de) { + long defragged = defragKey((redisDb*)privdata, (dictEntry*)de); + server.stat_active_defrag_hits += defragged; + if(defragged) + server.stat_active_defrag_key_hits++; + else + server.stat_active_defrag_key_misses++; + server.stat_active_defrag_scanned++; +} + +/* Defrag scan callback for each hash table bucket, + * used in order to defrag the dictEntry allocations. */ +void defragDictBucketCallback(void *privdata, dictEntry **bucketref) { + UNUSED(privdata); /* NOTE: this function is also used by both activeDefragCycle and scanLaterHash, etc., so don't use privdata */ + while(*bucketref) { + dictEntry *de = *bucketref, *newde; + if ((newde = activeDefragAlloc(de))) { + *bucketref = newde; + } + bucketref = &(*bucketref)->next; + } +} + +/* Utility function to get the fragmentation ratio from jemalloc. + * It is critical to do that by comparing only heap maps that belong to + * jemalloc, and skip the ones jemalloc keeps as spare. Since we use this + * fragmentation ratio in order to decide if a defrag action should be taken + * or not, a false detection can cause the defragmenter to waste a lot of CPU + * without the possibility of getting any results. */ +float getAllocatorFragmentation(size_t *out_frag_bytes) { + size_t resident, active, allocated; + zmalloc_get_allocator_info(&allocated, &active, &resident); + float frag_pct = ((float)active / allocated)*100 - 100; + size_t frag_bytes = active - allocated; + float rss_pct = ((float)resident / allocated)*100 - 100; + size_t rss_bytes = resident - allocated; + if(out_frag_bytes) + *out_frag_bytes = frag_bytes; + serverLog(LL_DEBUG, + "allocated=%zu, active=%zu, resident=%zu, frag=%.0f%% (%.0f%% rss), frag_bytes=%zu (%zu rss)", + allocated, active, resident, frag_pct, rss_pct, frag_bytes, rss_bytes); + return frag_pct; +}
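getAllocatorFragmentation() above reduces to two ratios over jemalloc's counters: frag = active/allocated - 1 and rss overhead = resident/allocated - 1, and only the first drives the defrag decision. A worked example with hypothetical counter values (allocated = 100MB, active = 130MB, resident = 150MB gives 30% and 50% respectively):

#include <stdio.h>
#include <stddef.h>

int main(void) {
    /* Example counters, in bytes (hypothetical values). */
    size_t allocated = 100 * 1024 * 1024;
    size_t active    = 130 * 1024 * 1024;
    size_t resident  = 150 * 1024 * 1024;

    float frag_pct = ((float)active / allocated) * 100 - 100;
    float rss_pct  = ((float)resident / allocated) * 100 - 100;

    printf("frag=%.0f%% rss=%.0f%%\n", frag_pct, rss_pct); /* 30%, 50% */
    return 0;
}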
+ +/* We may need to defrag other globals, one small allocation can hold a full allocator run, + * so although small, it is still important to defrag these */ +long defragOtherGlobals() { + long defragged = 0; + + /* there are many more pointers to defrag (e.g. client argv, output / aof buffers, etc.), + * but we assume most of these are short lived, we only need to defrag allocations + * that remain static for a long time */ + defragged += activeDefragSdsDict(server.lua_scripts, DEFRAG_SDS_DICT_VAL_IS_STROB); + defragged += activeDefragSdsListAndDict(server.repl_scriptcache_fifo, server.repl_scriptcache_dict, DEFRAG_SDS_DICT_NO_VAL); + return defragged; +} + +/* returns 0 if more work may or may not be needed (see non-zero cursor), + * and 1 if time is up and more work is needed. */ +int defragLaterItem(dictEntry *de, unsigned long *cursor, long long endtime) { + if (de) { + robj *ob = dictGetVal(de); + if (ob->type == OBJ_LIST) { + server.stat_active_defrag_hits += scanLaterList(ob); + *cursor = 0; /* list has no scan, we must finish it in one go */ + } else if (ob->type == OBJ_SET) { + server.stat_active_defrag_hits += scanLaterSet(ob, cursor); + } else if (ob->type == OBJ_ZSET) { + server.stat_active_defrag_hits += scanLaterZset(ob, cursor); + } else if (ob->type == OBJ_HASH) { + server.stat_active_defrag_hits += scanLaterHash(ob, cursor); + } else if (ob->type == OBJ_STREAM) { + return scanLaterStraemListpacks(ob, cursor, endtime, &server.stat_active_defrag_hits); + } else { + *cursor = 0; /* object type may have changed since we scheduled it for later */ + } + } else { + *cursor = 0; /* object may have been deleted already */ + } + return 0; +} + +/* returns 0 if no more work needs to be done, and 1 if time is up and more work is needed. */ +int defragLaterStep(redisDb *db, long long endtime) { + static sds current_key = NULL; + static unsigned long cursor = 0; + unsigned int iterations = 0; + unsigned long long prev_defragged = server.stat_active_defrag_hits; + unsigned long long prev_scanned = server.stat_active_defrag_scanned; + long long key_defragged; + + do { + /* if we're not continuing a scan from the last call or loop, start a new one */ + if (!cursor) { + listNode *head = listFirst(db->defrag_later); + + /* Move on to next key */ + if (current_key) { + serverAssert(current_key == head->value); + sdsfree(head->value); + listDelNode(db->defrag_later, head); + cursor = 0; + current_key = NULL; + } + + /* stop if we reached the last one. */ + head = listFirst(db->defrag_later); + if (!head) + return 0; + + /* start a new key */ + current_key = head->value; + cursor = 0; + } + + /* each time we enter this function we need to fetch the key from the dict again (if it still exists) */ + dictEntry *de = dictFind(db->dict, current_key); + key_defragged = server.stat_active_defrag_hits; + do { + int quit = 0; + if (defragLaterItem(de, &cursor, endtime)) + quit = 1; /* time is up, we didn't finish all the work */ + + /* Don't start a new BIG key in this loop, this is because the + * next key can be a list, and scanLaterList must be done in one cycle */ + if (!cursor) + quit = 1; + + /* Once in 16 scan iterations, 512 pointer reallocations, or 64 fields + * (if we have a lot of pointers in one hash bucket, or rehashing), + * check if we reached the time limit. 
*/ + if (quit || (++iterations > 16 || + server.stat_active_defrag_hits - prev_defragged > 512 || + server.stat_active_defrag_scanned - prev_scanned > 64)) { + if (quit || ustime() > endtime) { + if(key_defragged != server.stat_active_defrag_hits) + server.stat_active_defrag_key_hits++; + else + server.stat_active_defrag_key_misses++; + return 1; + } + iterations = 0; + prev_defragged = server.stat_active_defrag_hits; + prev_scanned = server.stat_active_defrag_scanned; + } + } while(cursor); + if(key_defragged != server.stat_active_defrag_hits) + server.stat_active_defrag_key_hits++; + else + server.stat_active_defrag_key_misses++; + } while(1); +} + +#define INTERPOLATE(x, x1, x2, y1, y2) ( (y1) + ((x)-(x1)) * ((y2)-(y1)) / ((x2)-(x1)) ) +#define LIMIT(y, min, max) ((y)<(min)? min: ((y)>(max)? max: (y))) + +/* decide if defrag is needed, and at what CPU effort to invest in it */ +void computeDefragCycles() { + size_t frag_bytes; + float frag_pct = getAllocatorFragmentation(&frag_bytes); + /* If we're not already running, and below the threshold, exit. */ + if (!server.active_defrag_running) { + if(frag_pct < server.active_defrag_threshold_lower || frag_bytes < server.active_defrag_ignore_bytes) + return; + } + + /* Calculate the adaptive aggressiveness of the defrag */ + int cpu_pct = INTERPOLATE(frag_pct, + server.active_defrag_threshold_lower, + server.active_defrag_threshold_upper, + server.active_defrag_cycle_min, + server.active_defrag_cycle_max); + cpu_pct = LIMIT(cpu_pct, + server.active_defrag_cycle_min, + server.active_defrag_cycle_max); + /* We allow increasing the aggressiveness during a scan, but don't + * reduce it. */ + if (!server.active_defrag_running || + cpu_pct > server.active_defrag_running) + { + server.active_defrag_running = cpu_pct; + serverLog(LL_VERBOSE, + "Starting active defrag, frag=%.0f%%, frag_bytes=%zu, cpu=%d%%", + frag_pct, frag_bytes, cpu_pct); + } +}
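computeDefragCycles() above maps the fragmentation percentage linearly onto a CPU budget between active_defrag_cycle_min and _max, and activeDefragCycle() below converts that percentage into a per-cron-tick time budget via timelimit = 1000000*active_defrag_running/hz/100. Worked numbers, under hypothetical config values (thresholds 10%..100%, effort 5%..75%, hz 10): 40% fragmentation interpolates to 5 + (40-10)*(75-5)/(100-10), about 28% CPU, which at hz=10 is 28000 microseconds of defrag work per tick:

#include <stdio.h>

#define INTERPOLATE(x, x1, x2, y1, y2) \
    ((y1) + ((x) - (x1)) * ((y2) - (y1)) / ((x2) - (x1)))
#define LIMIT(y, min, max) ((y) < (min) ? min : ((y) > (max) ? max : (y)))

int main(void) {
    /* Hypothetical config: thresholds 10%..100%, CPU effort 5%..75%. */
    float frag_pct = 40;
    int hz = 10;
    int cpu_pct = INTERPOLATE(frag_pct, 10, 100, 5, 75);
    cpu_pct = LIMIT(cpu_pct, 5, 75);
    long long timelimit = 1000000LL * cpu_pct / hz / 100;
    printf("cpu=%d%% budget=%lld us per tick\n", cpu_pct, timelimit);
    /* prints: cpu=28% budget=28000 us per tick */
    return 0;
}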
+ +/* Perform incremental defragmentation work from the serverCron. + * This works in a similar way to activeExpireCycle, in the sense that + * we do incremental work across calls. */ +void activeDefragCycle(void) { + static int current_db = -1; + static unsigned long cursor = 0; + static redisDb *db = NULL; + static long long start_scan, start_stat; + unsigned int iterations = 0; + unsigned long long prev_defragged = server.stat_active_defrag_hits; + unsigned long long prev_scanned = server.stat_active_defrag_scanned; + long long start, timelimit, endtime; + mstime_t latency; + int quit = 0; + + if (server.aof_child_pid!=-1 || server.rdb_child_pid!=-1) + return; /* Defragging memory while there's a fork will just do damage. */ + + /* Once a second, check if the fragmentation justifies starting a scan + * or making it more aggressive. */ + run_with_period(1000) { + computeDefragCycles(); + } + if (!server.active_defrag_running) + return; + + /* See activeExpireCycle for how timelimit is handled. */ + start = ustime(); + timelimit = 1000000*server.active_defrag_running/server.hz/100; + if (timelimit <= 0) timelimit = 1; + endtime = start + timelimit; + latencyStartMonitor(latency); + + do { + /* if we're not continuing a scan from the last call or loop, start a new one */ + if (!cursor) { + /* finish any leftovers from previous db before moving to the next one */ + if (db && defragLaterStep(db, endtime)) { + quit = 1; /* time is up, we didn't finish all the work */ + break; /* this will exit the function and we'll continue on the next cycle */ + } + + /* Move on to next database, and stop if we reached the last one. */ + if (++current_db >= server.dbnum) { + /* defrag other items not part of the db / keys */ + defragOtherGlobals(); + + long long now = ustime(); + size_t frag_bytes; + float frag_pct = getAllocatorFragmentation(&frag_bytes); + serverLog(LL_VERBOSE, + "Active defrag done in %dms, reallocated=%d, frag=%.0f%%, frag_bytes=%zu", + (int)((now - start_scan)/1000), (int)(server.stat_active_defrag_hits - start_stat), frag_pct, frag_bytes); + + start_scan = now; + current_db = -1; + cursor = 0; + db = NULL; + server.active_defrag_running = 0; + + computeDefragCycles(); /* if another scan is needed, start it right away */ + if (server.active_defrag_running != 0 && ustime() < endtime) + continue; + break; + } + else if (current_db==0) { + /* Start a scan from the first database. */ + start_scan = ustime(); + start_stat = server.stat_active_defrag_hits; + } + + db = &server.db[current_db]; + cursor = 0; + } + + do { + /* before scanning the next bucket, see if we have big keys left from the previous bucket to scan */ + if (defragLaterStep(db, endtime)) { + quit = 1; /* time is up, we didn't finish all the work */ + break; /* this will exit the function and we'll continue on the next cycle */ + } + + cursor = dictScan(db->dict, cursor, defragScanCallback, defragDictBucketCallback, db); + + /* Once in 16 scan iterations, 512 pointer reallocations, or 64 keys + * (if we have a lot of pointers in one hash bucket or rehashing), + * check if we reached the time limit. + * But regardless, don't start a new db in this loop, this is because after + * the last db we call defragOtherGlobals, which must be done in one cycle */ + if (!cursor || (++iterations > 16 || + server.stat_active_defrag_hits - prev_defragged > 512 || + server.stat_active_defrag_scanned - prev_scanned > 64)) { + if (!cursor || ustime() > endtime) { + quit = 1; + break; + } + iterations = 0; + prev_defragged = server.stat_active_defrag_hits; + prev_scanned = server.stat_active_defrag_scanned; + } + } while(cursor && !quit); + } while(!quit); + + latencyEndMonitor(latency); + latencyAddSampleIfNeeded("active-defrag-cycle",latency); +} + +#else /* HAVE_DEFRAG */ + +void activeDefragCycle(void) { + /* Not implemented yet. 
*/ +} + +#endif diff --git a/redis-android/src/main/jni/redis-4.0.11/src/dict.c b/redis-android/src/main/jni/redis-5.0.0/src/dict.c similarity index 99% rename from redis-android/src/main/jni/redis-4.0.11/src/dict.c rename to redis-android/src/main/jni/redis-5.0.0/src/dict.c index 18cb9ee..2cf9d48 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/dict.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/dict.c @@ -146,14 +146,14 @@ int dictResize(dict *d) /* Expand or create the hash table */ int dictExpand(dict *d, unsigned long size) { - dictht n; /* the new hash table */ - unsigned long realsize = _dictNextPower(size); - /* the size is invalid if it is smaller than the number of * elements already inside the hash table */ if (dictIsRehashing(d) || d->ht[0].used > size) return DICT_ERR; + dictht n; /* the new hash table */ + unsigned long realsize = _dictNextPower(size); + /* Rehashing to the same table size is not useful. */ if (realsize == d->ht[0].size) return DICT_ERR; @@ -327,7 +327,7 @@ int dictReplace(dict *d, void *key, void *val) dictEntry *entry, *existing, auxentry; /* Try to add the element. If the key - * does not exists dictAdd will suceed. */ + * does not exists dictAdd will succeed. */ entry = dictAddRaw(d,key,&existing); if (entry) { dictSetVal(d, entry, val); @@ -705,8 +705,10 @@ unsigned int dictGetSomeKeys(dict *d, dictEntry **des, unsigned int count) { * table, there will be no elements in both tables up to * the current rehashing index, so we jump if possible. * (this happens when going from big to small table). */ - if (i >= d->ht[1].size) i = d->rehashidx; - continue; + if (i >= d->ht[1].size) + i = d->rehashidx; + else + continue; } if (i >= d->ht[j].size) continue; /* Out of range for this table. */ dictEntry *he = d->ht[j].table[i]; diff --git a/redis-android/src/main/jni/redis-4.0.11/src/dict.h b/redis-android/src/main/jni/redis-5.0.0/src/dict.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/dict.h rename to redis-android/src/main/jni/redis-5.0.0/src/dict.h diff --git a/redis-android/src/main/jni/redis-4.0.11/src/endianconv.c b/redis-android/src/main/jni/redis-5.0.0/src/endianconv.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/endianconv.c rename to redis-android/src/main/jni/redis-5.0.0/src/endianconv.c diff --git a/redis-android/src/main/jni/redis-4.0.11/src/endianconv.h b/redis-android/src/main/jni/redis-5.0.0/src/endianconv.h similarity index 94% rename from redis-android/src/main/jni/redis-4.0.11/src/endianconv.h rename to redis-android/src/main/jni/redis-5.0.0/src/endianconv.h index 08f5531..475f72b 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/endianconv.h +++ b/redis-android/src/main/jni/redis-5.0.0/src/endianconv.h @@ -43,12 +43,12 @@ uint16_t intrev16(uint16_t v); uint32_t intrev32(uint32_t v); uint64_t intrev64(uint64_t v); -/* variants of the function doing the actual convertion only if the target +/* variants of the function doing the actual conversion only if the target * host is big endian */ #if (BYTE_ORDER == LITTLE_ENDIAN) -#define memrev16ifbe(p) -#define memrev32ifbe(p) -#define memrev64ifbe(p) +#define memrev16ifbe(p) ((void)(0)) +#define memrev32ifbe(p) ((void)(0)) +#define memrev64ifbe(p) ((void)(0)) #define intrev16ifbe(v) (v) #define intrev32ifbe(v) (v) #define intrev64ifbe(v) (v) diff --git a/redis-android/src/main/jni/redis-4.0.11/src/evict.c b/redis-android/src/main/jni/redis-5.0.0/src/evict.c similarity index 89% rename from 
redis-android/src/main/jni/redis-4.0.11/src/evict.c rename to redis-android/src/main/jni/redis-5.0.0/src/evict.c index eacdf12..cdb49a5 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/evict.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/evict.c @@ -369,21 +369,41 @@ size_t freeMemoryGetNotCountedMemory(void) { return overhead; } -int freeMemoryIfNeeded(void) { - size_t mem_reported, mem_used, mem_tofree, mem_freed; - mstime_t latency, eviction_latency; - long long delta; - int slaves = listLength(server.slaves); - - /* When clients are paused the dataset should be static not just from the - * POV of clients not being able to write, but also from the POV of - * expires and evictions of keys not being performed. */ - if (clientsArePaused()) return C_OK; +/* Get the memory status from the point of view of the maxmemory directive: + * if the memory used is under the maxmemory setting then C_OK is returned. + * Otherwise, if we are over the memory limit, the function returns + * C_ERR. + * + * The function may return additional info via reference, only if the + * pointers to the respective arguments is not NULL. Certain fields are + * populated only when C_ERR is returned: + * + * 'total' total amount of bytes used. + * (Populated both for C_ERR and C_OK) + * + * 'logical' the amount of memory used minus the slaves/AOF buffers. + * (Populated when C_ERR is returned) + * + * 'tofree' the amount of memory that should be released + * in order to return back into the memory limits. + * (Populated when C_ERR is returned) + * + * 'level' this usually ranges from 0 to 1, and reports the amount of + * memory currently used. May be > 1 if we are over the memory + * limit. + * (Populated both for C_ERR and C_OK) + */ +int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *level) { + size_t mem_reported, mem_used, mem_tofree; /* Check if we are over the memory usage limit. If we are not, no need * to subtract the slaves output buffers. We can just return ASAP. */ mem_reported = zmalloc_used_memory(); - if (mem_reported <= server.maxmemory) return C_OK; + if (total) *total = mem_reported; + + /* We may return ASAP if there is no need to compute the level. */ + int return_ok_asap = !server.maxmemory || mem_reported <= server.maxmemory; + if (return_ok_asap && !level) return C_OK; /* Remove the size of slaves output buffers and AOF buffer from the * count of used memory. */ @@ -391,11 +411,55 @@ int freeMemoryIfNeeded(void) { size_t overhead = freeMemoryGetNotCountedMemory(); mem_used = (mem_used > overhead) ? mem_used-overhead : 0; + /* Compute the ratio of memory usage. */ + if (level) { + if (!server.maxmemory) { + *level = 0; + } else { + *level = (float)mem_used / (float)server.maxmemory; + } + } + + if (return_ok_asap) return C_OK; + /* Check if we are still over the memory limit. */ if (mem_used <= server.maxmemory) return C_OK; /* Compute how much memory we need to free. */ mem_tofree = mem_used - server.maxmemory; + + if (logical) *logical = mem_used; + if (tofree) *tofree = mem_tofree; + + return C_ERR; +} + +/* This function is periodically called to see if there is memory to free + * according to the current "maxmemory" settings. In case we are over the + * memory limit, the function will try to free some memory to return back + * under the limit. + * + * The function returns C_OK if we are under the memory limit or if we + * were over the limit, but the attempt to free memory was successful. 
+ * Otherwise if we are over the memory limit, but not enough memory + * was freed to return back under the limit, the function returns C_ERR. */ +int freeMemoryIfNeeded(void) { + /* By default slaves should ignore maxmemory and just be masters exact + * copies. */ + if (server.masterhost && server.repl_slave_ignore_maxmemory) return C_OK; + + size_t mem_reported, mem_tofree, mem_freed; + mstime_t latency, eviction_latency; + long long delta; + int slaves = listLength(server.slaves); + + /* When clients are paused the dataset should be static not just from the + * POV of clients not being able to write, but also from the POV of + * expires and evictions of keys not being performed. */ + if (clientsArePaused()) return C_OK; + if (getMaxmemoryState(&mem_reported,NULL,&mem_tofree,NULL) == C_OK) + return C_OK; + mem_freed = 0; if (server.maxmemory_policy == MAXMEMORY_NO_EVICTION) @@ -529,10 +593,8 @@ int freeMemoryIfNeeded(void) { * across the dbAsyncDelete() call, while the thread can * release the memory all the time. */ if (server.lazyfree_lazy_eviction && !(keys_freed % 16)) { - overhead = freeMemoryGetNotCountedMemory(); - mem_used = zmalloc_used_memory(); - mem_used = (mem_used > overhead) ? mem_used-overhead : 0; - if (mem_used <= server.maxmemory) { + if (getMaxmemoryState(NULL,NULL,NULL,NULL) == C_OK) { + /* Let's satisfy our stop condition. */ mem_freed = mem_tofree; } } diff --git a/redis-android/src/main/jni/redis-4.0.11/src/expire.c b/redis-android/src/main/jni/redis-5.0.0/src/expire.c similarity index 99% rename from redis-android/src/main/jni/redis-4.0.11/src/expire.c rename to redis-android/src/main/jni/redis-5.0.0/src/expire.c index ce7882e..0b92ee3 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/expire.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/expire.c @@ -112,7 +112,7 @@ void activeExpireCycle(int type) { if (type == ACTIVE_EXPIRE_CYCLE_FAST) { /* Don't start a fast cycle if the previous cycle did not exit - * for time limt. Also don't repeat a fast cycle for the same period + * for time limit. Also don't repeat a fast cycle for the same period * as the fast cycle total duration itself. */ if (!timelimit_exit) return; if (start < last_fast_cycle + ACTIVE_EXPIRE_CYCLE_FAST_DURATION*2) return; diff --git a/redis-android/src/main/jni/redis-4.0.11/src/fmacros.h b/redis-android/src/main/jni/redis-5.0.0/src/fmacros.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/fmacros.h rename to redis-android/src/main/jni/redis-5.0.0/src/fmacros.h diff --git a/redis-android/src/main/jni/redis-4.0.11/src/geo.c b/redis-android/src/main/jni/redis-5.0.0/src/geo.c similarity index 99% rename from redis-android/src/main/jni/redis-4.0.11/src/geo.c rename to redis-android/src/main/jni/redis-5.0.0/src/geo.c index 90216e7..c78fadf 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/geo.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/geo.c @@ -145,7 +145,7 @@ double extractUnitOrReply(client *c, robj *unit) { /* Input Argument Helper. * Extract the dinstance from the specified two arguments starting at 'argv' * that shouldbe in the form: and return the dinstance in the - * specified unit on success. *conversino is populated with the coefficient + * specified unit on success. *conversions is populated with the coefficient * to use in order to convert meters to the unit. * * On error a value less than zero is returned. 
*/ diff --git a/redis-android/src/main/jni/redis-4.0.11/src/geo.h b/redis-android/src/main/jni/redis-5.0.0/src/geo.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/geo.h rename to redis-android/src/main/jni/redis-5.0.0/src/geo.h diff --git a/redis-android/src/main/jni/redis-4.0.11/src/geohash.c b/redis-android/src/main/jni/redis-5.0.0/src/geohash.c similarity index 97% rename from redis-android/src/main/jni/redis-4.0.11/src/geohash.c rename to redis-android/src/main/jni/redis-5.0.0/src/geohash.c index 1ae7a7e..db5ae02 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/geohash.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/geohash.c @@ -127,8 +127,8 @@ int geohashEncode(const GeoHashRange *long_range, const GeoHashRange *lat_range, /* Return an error when trying to index outside the supported * constraints. */ - if (longitude > 180 || longitude < -180 || - latitude > 85.05112878 || latitude < -85.05112878) return 0; + if (longitude > GEO_LONG_MAX || longitude < GEO_LONG_MIN || + latitude > GEO_LAT_MAX || latitude < GEO_LAT_MIN) return 0; hash->bits = 0; hash->step = step; @@ -144,8 +144,8 @@ int geohashEncode(const GeoHashRange *long_range, const GeoHashRange *lat_range, (longitude - long_range->min) / (long_range->max - long_range->min); /* convert to fixed point based on the step size */ - lat_offset *= (1 << step); - long_offset *= (1 << step); + lat_offset *= (1ULL << step); + long_offset *= (1ULL << step); hash->bits = interleave64(lat_offset, long_offset); return 1; } diff --git a/redis-android/src/main/jni/redis-4.0.11/src/geohash.h b/redis-android/src/main/jni/redis-5.0.0/src/geohash.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/geohash.h rename to redis-android/src/main/jni/redis-5.0.0/src/geohash.h diff --git a/redis-android/src/main/jni/redis-4.0.11/src/geohash_helper.c b/redis-android/src/main/jni/redis-5.0.0/src/geohash_helper.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/geohash_helper.c rename to redis-android/src/main/jni/redis-5.0.0/src/geohash_helper.c diff --git a/redis-android/src/main/jni/redis-4.0.11/src/geohash_helper.h b/redis-android/src/main/jni/redis-5.0.0/src/geohash_helper.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/geohash_helper.h rename to redis-android/src/main/jni/redis-5.0.0/src/geohash_helper.h diff --git a/redis-android/src/main/jni/redis-4.0.11/src/help.h b/redis-android/src/main/jni/redis-5.0.0/src/help.h similarity index 80% rename from redis-android/src/main/jni/redis-4.0.11/src/help.h rename to redis-android/src/main/jni/redis-5.0.0/src/help.h index 5f927c3..184d767 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/help.h +++ b/redis-android/src/main/jni/redis-5.0.0/src/help.h @@ -1,4 +1,4 @@ -/* Automatically generated by utils/generate-command-help.rb, do not edit. */ +/* Automatically generated by generate-command-help.rb, do not edit. */ #ifndef __REDIS_HELP_H #define __REDIS_HELP_H @@ -17,7 +17,8 @@ static char *commandGroups[] = { "scripting", "hyperloglog", "cluster", - "geo" + "geo", + "stream" }; struct commandHelp { @@ -82,11 +83,26 @@ struct commandHelp { "Pop a value from a list, push it to another list and return it; or block until one is available", 2, "2.2.0" }, + { "BZPOPMAX", + "key [key ...] timeout", + "Remove and return the member with the highest score from one or more sorted sets, or block until one is available", + 4, + "5.0.0" }, + { "BZPOPMIN", + "key [key ...] 
timeout", + "Remove and return the member with the lowest score from one or more sorted sets, or block until one is available", + 4, + "5.0.0" }, { "CLIENT GETNAME", "-", "Get the current connection name", 9, "2.6.9" }, + { "CLIENT ID", + "-", + "Returns the client ID for the current connection", + 9, + "5.0.0" }, { "CLIENT KILL", "[ip:port] [ID client-id] [TYPE normal|master|slave|pubsub] [ADDR ip:port] [SKIPME yes/no]", "Kill the connection of a client", @@ -112,6 +128,11 @@ struct commandHelp { "Set the current connection name", 9, "2.6.9" }, + { "CLIENT UNBLOCK", + "client-id [TIMEOUT|ERROR]", + "Unblock a client blocked in a blocking command from a different connection", + 9, + "5.0.0" }, { "CLUSTER ADDSLOTS", "slot [slot ...]", "Assign new hash slots to receiving node", @@ -134,7 +155,7 @@ struct commandHelp { "3.0.0" }, { "CLUSTER FAILOVER", "[FORCE|TAKEOVER]", - "Forces a slave to perform a manual failover of its master.", + "Forces a replica to perform a manual failover of its master.", 12, "3.0.0" }, { "CLUSTER FORGET", @@ -167,9 +188,14 @@ struct commandHelp { "Get Cluster config for the node", 12, "3.0.0" }, + { "CLUSTER REPLICAS", + "node-id", + "List replica nodes of the specified master node", + 12, + "5.0.0" }, { "CLUSTER REPLICATE", "node-id", - "Reconfigure a node as a slave of the specified master node", + "Reconfigure a node as a replica of the specified master node", 12, "3.0.0" }, { "CLUSTER RESET", @@ -194,7 +220,7 @@ struct commandHelp { "3.0.0" }, { "CLUSTER SLAVES", "node-id", - "List slave nodes of the specified master node", + "List replica nodes of the specified master node", 12, "3.0.0" }, { "CLUSTER SLOTS", @@ -318,12 +344,12 @@ struct commandHelp { 0, "1.2.0" }, { "FLUSHALL", - "-", + "[ASYNC]", "Remove all keys from all databases", 9, "1.0.0" }, { "FLUSHDB", - "-", + "[ASYNC]", "Remove all keys from the current database", 9, "1.0.0" }, @@ -532,6 +558,36 @@ struct commandHelp { "Trim a list to the specified range", 2, "1.0.0" }, + { "MEMORY DOCTOR", + "-", + "Outputs memory problems report", + 9, + "4.0.0" }, + { "MEMORY HELP", + "-", + "Show helpful text about the different subcommands", + 9, + "4.0.0" }, + { "MEMORY MALLOC-STATS", + "-", + "Show allocator internal stats", + 9, + "4.0.0" }, + { "MEMORY PURGE", + "-", + "Ask the allocator to release memory", + 9, + "4.0.0" }, + { "MEMORY STATS", + "-", + "Show memory usage details", + 9, + "4.0.0" }, + { "MEMORY USAGE", + "key [SAMPLES count]", + "Estimate the memory usage of a key", + 9, + "4.0.0" }, { "MGET", "key [key ...]", "Get the values of all the given keys", @@ -649,12 +705,12 @@ struct commandHelp { "1.0.0" }, { "READONLY", "-", - "Enables read queries for a connection to a cluster slave node", + "Enables read queries for a connection to a cluster replica node", 12, "3.0.0" }, { "READWRITE", "-", - "Disables read queries for a connection to a cluster slave node", + "Disables read queries for a connection to a cluster replica node", 12, "3.0.0" }, { "RENAME", @@ -667,6 +723,11 @@ struct commandHelp { "Rename a key, only if the new key does not exist", 0, "1.0.0" }, + { "REPLICAOF", + "host port", + "Make the server a replica of another instance, or promote it as master.", + 9, + "5.0.0" }, { "RESTORE", "key ttl serialized-value [REPLACE]", "Create a key using the provided serialized value, previously obtained using DUMP.", @@ -723,7 +784,7 @@ struct commandHelp { 10, "3.2.0" }, { "SCRIPT EXISTS", - "script [script ...]", + "sha1 [sha1 ...]", "Check existence of scripts in the script cache.", 10, 
"2.6.0" }, @@ -758,7 +819,7 @@ struct commandHelp { 8, "1.0.0" }, { "SET", - "key value [EX seconds] [PX milliseconds] [NX|XX]", + "key value [expiration EX seconds|PX milliseconds] [NX|XX]", "Set the string value of a key", 1, "1.0.0" }, @@ -804,7 +865,7 @@ struct commandHelp { "1.0.0" }, { "SLAVEOF", "host port", - "Make the server a slave of another instance, or promote it as master", + "Make the server a replica of another instance, or promote it as master. Deprecated starting with Redis 5. Use REPLICAOF instead.", 9, "1.0.0" }, { "SLOWLOG", @@ -867,6 +928,11 @@ struct commandHelp { "Add multiple sets and store the resulting set in a key", 3, "1.0.0" }, + { "SWAPDB", + "index index", + "Swaps two Redis databases", + 8, + "4.0.0" }, { "SYNC", "-", "Internal command used for replication", @@ -877,6 +943,11 @@ struct commandHelp { "Return the current server time", 9, "2.6.0" }, + { "TOUCH", + "key [key ...]", + "Alters the last access time of a key(s). Returns the number of existing keys specified.", + 0, + "3.2.1" }, { "TTL", "key", "Get the time to live for a key", @@ -887,6 +958,11 @@ struct commandHelp { "Determine the type stored at key", 0, "1.0.0" }, + { "UNLINK", + "key [key ...]", + "Delete a key asynchronously in another thread. Otherwise it is just as DEL, but non blocking.", + 0, + "4.0.0" }, { "UNSUBSCRIBE", "[channel [channel ...]]", "Stop listening for messages posted to the given channels", @@ -898,7 +974,7 @@ struct commandHelp { 7, "2.2.0" }, { "WAIT", - "numslaves timeout", + "numreplicas timeout", "Wait for the synchronous replication of all the write commands sent in the context of the current connection", 0, "3.0.0" }, @@ -907,6 +983,71 @@ struct commandHelp { "Watch the given keys to determine execution of the MULTI/EXEC block", 7, "2.2.0" }, + { "XACK", + "key group ID [ID ...]", + "Marks a pending message as correctly processed, effectively removing it from the pending entries list of the consumer group. Return value of the command is the number of messages successfully acknowledged, that is, the IDs we were actually able to resolve in the PEL.", + 14, + "5.0.0" }, + { "XADD", + "key ID field string [field string ...]", + "Appends a new entry to a stream", + 14, + "5.0.0" }, + { "XCLAIM", + "key group consumer min-idle-time ID [ID ...] [IDLE ms] [TIME ms-unix-time] [RETRYCOUNT count] [force] [justid]", + "Changes (or acquires) ownership of a message in a consumer group, as if the message was delivered to the specified consumer.", + 14, + "5.0.0" }, + { "XDEL", + "key ID [ID ...]", + "Removes the specified entries from the stream. 
Returns the number of items actually deleted, that may be different from the number of IDs passed in case certain IDs do not exist.", + 14, + "5.0.0" }, + { "XGROUP", + "[CREATE key groupname id-or-$] [SETID key id-or-$] [DESTROY key groupname] [DELCONSUMER key groupname consumername]", + "Create, destroy, and manage consumer groups.", + 14, + "5.0.0" }, + { "XINFO", + "[CONSUMERS key groupname] [GROUPS key] [STREAM key] [HELP]", + "Get information on streams and consumer groups", + 14, + "5.0.0" }, + { "XLEN", + "key", + "Return the number of entires in a stream", + 14, + "5.0.0" }, + { "XPENDING", + "key group [start end count] [consumer]", + "Return information and entries from a stream consumer group pending entries list, that are messages fetched but never acknowledged.", + 14, + "5.0.0" }, + { "XRANGE", + "key start end [COUNT count]", + "Return a range of elements in a stream, with IDs matching the specified IDs interval", + 14, + "5.0.0" }, + { "XREAD", + "[COUNT count] [BLOCK milliseconds] STREAMS key [key ...] ID [ID ...]", + "Return never seen elements in multiple streams, with IDs greater than the ones reported by the caller for each stream. Can block.", + 14, + "5.0.0" }, + { "XREADGROUP", + "GROUP group consumer [COUNT count] [BLOCK milliseconds] STREAMS key [key ...] ID [ID ...]", + "Return new entries from a stream using a consumer group, or access the history of the pending entries for a given consumer. Can block.", + 14, + "5.0.0" }, + { "XREVRANGE", + "key end start [COUNT count]", + "Return a range of elements in a stream, with IDs matching the specified IDs interval, in reverse order (from greater to smaller IDs) compared to XRANGE", + 14, + "5.0.0" }, + { "XTRIM", + "key MAXLEN [~] count", + "Trims the stream to (approximately if '~' is passed) a certain size", + 14, + "5.0.0" }, { "ZADD", "key [NX|XX] [CH] [INCR] score member [score member ...]", "Add one or more members to a sorted set, or update its score if it already exists", @@ -937,6 +1078,16 @@ struct commandHelp { "Count the number of members in a sorted set between a given lexicographical range", 4, "2.8.9" }, + { "ZPOPMAX", + "key [count]", + "Remove and return members with the highest scores in a sorted set", + 4, + "5.0.0" }, + { "ZPOPMIN", + "key [count]", + "Remove and return members with the lowest scores in a sorted set", + 4, + "5.0.0" }, { "ZRANGE", "key start stop [WITHSCORES]", "Return a range of members in a sorted set, by index", diff --git a/redis-android/src/main/jni/redis-4.0.11/src/hyperloglog.c b/redis-android/src/main/jni/redis-5.0.0/src/hyperloglog.c similarity index 90% rename from redis-android/src/main/jni/redis-4.0.11/src/hyperloglog.c rename to redis-android/src/main/jni/redis-5.0.0/src/hyperloglog.c index ef33979..ba3a3ab 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/hyperloglog.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/hyperloglog.c @@ -192,6 +192,8 @@ struct hllhdr { #define HLL_VALID_CACHE(hdr) (((hdr)->card[7] & (1<<7)) == 0) #define HLL_P 14 /* The greater is P, the smaller the error. */ +#define HLL_Q (64-HLL_P) /* The number of bits of the hash value used for + determining the number of leading zeros. 
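With P=14 this gives Q = 64-14 = 50: the low fourteen bits of the hash select one of 16384 registers, and the remaining fifty bits feed the leading-zero count, so a stored register value ranges from 0 (never set) to Q+1. A hedged sketch of the split that the updated hllPatLen() below performs (illustrative helper, hash function omitted):

#include <stdint.h>

/* Split a 64-bit hash into register index and run length, mirroring the
 * updated hllPatLen(): the low P bits index the register, and a sentinel
 * bit at position Q caps the run of zeros at Q+1. */
static void hll_split(uint64_t hash, long *index, int *count) {
    *index = hash & ((1 << 14) - 1);   /* HLL_P_MASK */
    hash >>= 14;                       /* drop the index bits */
    hash |= (uint64_t)1 << 50;         /* sentinel: loop below must stop */
    *count = 1;                        /* the terminating 1 is counted too */
    while ((hash & 1) == 0) { (*count)++; hash >>= 1; }
}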
*/ #define HLL_REGISTERS (1<>8) | HLL_SPARSE_XZERO_BIT; \ *((p)+1) = (_l&0xff); \ } while(0) +#define HLL_ALPHA_INF 0.721347520444481703680 /* constant for 0.5/ln(2) */ /* ========================= HyperLogLog algorithm ========================= */ @@ -401,11 +404,11 @@ uint64_t MurmurHash64A (const void * key, int len, unsigned int seed) { uint64_t k; #if (BYTE_ORDER == LITTLE_ENDIAN) - #ifdef USE_ALIGNED_ACCESS - memcpy(&k,data,sizeof(uint64_t)); - #else + #ifdef USE_ALIGNED_ACCESS + memcpy(&k,data,sizeof(uint64_t)); + #else k = *((uint64_t*)data); - #endif + #endif #else k = (uint64_t) data[0]; k |= (uint64_t) data[1] << 8; @@ -426,14 +429,14 @@ uint64_t MurmurHash64A (const void * key, int len, unsigned int seed) { } switch(len & 7) { - case 7: h ^= (uint64_t)data[6] << 48; - case 6: h ^= (uint64_t)data[5] << 40; - case 5: h ^= (uint64_t)data[4] << 32; - case 4: h ^= (uint64_t)data[3] << 24; - case 3: h ^= (uint64_t)data[2] << 16; - case 2: h ^= (uint64_t)data[1] << 8; + case 7: h ^= (uint64_t)data[6] << 48; /* fall-thru */ + case 6: h ^= (uint64_t)data[5] << 40; /* fall-thru */ + case 5: h ^= (uint64_t)data[4] << 32; /* fall-thru */ + case 4: h ^= (uint64_t)data[3] << 24; /* fall-thru */ + case 3: h ^= (uint64_t)data[2] << 16; /* fall-thru */ + case 2: h ^= (uint64_t)data[1] << 8; /* fall-thru */ case 1: h ^= (uint64_t)data[0]; - h *= m; + h *= m; /* fall-thru */ }; h ^= h >> r; @@ -451,7 +454,7 @@ int hllPatLen(unsigned char *ele, size_t elesize, long *regp) { /* Count the number of zeroes starting from bit HLL_REGISTERS * (that is a power of two corresponding to the first bit we don't use - * as index). The max run can be 64-P+1 bits. + * as index). The max run can be 64-P+1 = Q+1 bits. * * Note that the final "1" ending the sequence of zeroes must be * included in the count, so if we find "001" the count is 3, and @@ -462,8 +465,10 @@ int hllPatLen(unsigned char *ele, size_t elesize, long *regp) { * there are high probabilities to find a 1 after a few iterations. */ hash = MurmurHash64A(ele,elesize,0xadc83b19ULL); index = hash & HLL_P_MASK; /* Register index. */ - hash |= ((uint64_t)1<<63); /* Make sure the loop terminates. */ - bit = HLL_REGISTERS; /* First bit not used to address the register. */ + hash >>= HLL_P; /* Remove bits used to address the register. */ + hash |= ((uint64_t)1<> 6 | r[1] << 2) & 63; if (r1 == 0) ez++; - r2 = (r[1] >> 4 | r[2] << 4) & 63; if (r2 == 0) ez++; - r3 = (r[2] >> 2) & 63; if (r3 == 0) ez++; - r4 = r[3] & 63; if (r4 == 0) ez++; - r5 = (r[3] >> 6 | r[4] << 2) & 63; if (r5 == 0) ez++; - r6 = (r[4] >> 4 | r[5] << 4) & 63; if (r6 == 0) ez++; - r7 = (r[5] >> 2) & 63; if (r7 == 0) ez++; - r8 = r[6] & 63; if (r8 == 0) ez++; - r9 = (r[6] >> 6 | r[7] << 2) & 63; if (r9 == 0) ez++; - r10 = (r[7] >> 4 | r[8] << 4) & 63; if (r10 == 0) ez++; - r11 = (r[8] >> 2) & 63; if (r11 == 0) ez++; - r12 = r[9] & 63; if (r12 == 0) ez++; - r13 = (r[9] >> 6 | r[10] << 2) & 63; if (r13 == 0) ez++; - r14 = (r[10] >> 4 | r[11] << 4) & 63; if (r14 == 0) ez++; - r15 = (r[11] >> 2) & 63; if (r15 == 0) ez++; - - /* Additional parens will allow the compiler to optimize the - * code more with a loss of precision that is not very relevant - * here (floating point math is not commutative!). 
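The hunk here retires hllDenseSum() in favour of hllDenseRegHisto(): rather than accumulating SUM(2^-reg) through the PE[] table, the new code only counts how many registers hold each value. The unrolled fast path above pulls sixteen 6-bit registers out of every 12 bytes; the generic path uses HLL_DENSE_GET_REGISTER, whose bit arithmetic this hedged sketch re-derives (illustrative name, not part of the patch):

#include <stdint.h>

/* Register j starts at bit j*6, so it straddles at most two bytes; the
 * final &63 keeps only the six wanted bits. Per the upstream comments,
 * reading one byte past the last register is safe inside Redis because
 * sds buffers carry an implicit trailing NUL; a stand-alone caller would
 * need one byte of padding. */
static unsigned dense_get_register(const uint8_t *regs, unsigned long j) {
    unsigned long byte = j * 6 / 8;   /* first byte holding register j */
    unsigned long fb   = j * 6 % 8;   /* bit offset inside that byte   */
    unsigned long b0 = regs[byte], b1 = regs[byte + 1];
    return ((b0 >> fb) | (b1 << (8 - fb))) & 63;
}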
*/ - E += (PE[r0] + PE[r1]) + (PE[r2] + PE[r3]) + (PE[r4] + PE[r5]) + - (PE[r6] + PE[r7]) + (PE[r8] + PE[r9]) + (PE[r10] + PE[r11]) + - (PE[r12] + PE[r13]) + (PE[r14] + PE[r15]); + r0 = r[0] & 63; + r1 = (r[0] >> 6 | r[1] << 2) & 63; + r2 = (r[1] >> 4 | r[2] << 4) & 63; + r3 = (r[2] >> 2) & 63; + r4 = r[3] & 63; + r5 = (r[3] >> 6 | r[4] << 2) & 63; + r6 = (r[4] >> 4 | r[5] << 4) & 63; + r7 = (r[5] >> 2) & 63; + r8 = r[6] & 63; + r9 = (r[6] >> 6 | r[7] << 2) & 63; + r10 = (r[7] >> 4 | r[8] << 4) & 63; + r11 = (r[8] >> 2) & 63; + r12 = r[9] & 63; + r13 = (r[9] >> 6 | r[10] << 2) & 63; + r14 = (r[10] >> 4 | r[11] << 4) & 63; + r15 = (r[11] >> 2) & 63; + + reghisto[r0]++; + reghisto[r1]++; + reghisto[r2]++; + reghisto[r3]++; + reghisto[r4]++; + reghisto[r5]++; + reghisto[r6]++; + reghisto[r7]++; + reghisto[r8]++; + reghisto[r9]++; + reghisto[r10]++; + reghisto[r11]++; + reghisto[r12]++; + reghisto[r13]++; + reghisto[r14]++; + reghisto[r15]++; + r += 12; } } else { - for (j = 0; j < HLL_REGISTERS; j++) { + for(j = 0; j < HLL_REGISTERS; j++) { unsigned long reg; - HLL_DENSE_GET_REGISTER(reg,registers,j); - if (reg == 0) { - ez++; - /* Increment E at the end of the loop. */ - } else { - E += PE[reg]; /* Precomputed 2^(-reg[j]). */ - } + reghisto[reg]++; } - E += ez; /* Add 2^0 'ez' times. */ } - *ezp = ez; - return E; } /* ================== Sparse representation implementation ================= */ @@ -670,7 +673,7 @@ int hllSparseSet(robj *o, long index, uint8_t count) { end = p + sdslen(o->ptr) - HLL_HDR_SIZE; first = 0; - prev = NULL; /* Points to previos opcode at the end of the loop. */ + prev = NULL; /* Points to previous opcode at the end of the loop. */ next = NULL; /* Points to the next opcode at the end of the loop. */ span = 0; while(p < end) { @@ -761,7 +764,7 @@ int hllSparseSet(robj *o, long index, uint8_t count) { * and is either currently represented by a VAL opcode with len > 1, * by a ZERO opcode with len > 1, or by an XZERO opcode. * - * In those cases the original opcode must be split into muliple + * In those cases the original opcode must be split into multiple * opcodes. The worst case is an XZERO split in the middle resuling into * XZERO - VAL - XZERO, so the resulting sequence max length is * 5 bytes. @@ -884,7 +887,7 @@ int hllSparseSet(robj *o, long index, uint8_t count) { * * Note that this in turn means that PFADD will make sure the command * is propagated to slaves / AOF, so if there is a sparse -> dense - * convertion, it will be performed in all the slaves as well. */ + * conversion, it will be performed in all the slaves as well. */ int dense_retval = hllDenseSet(hdr->registers,index,count); serverAssert(dense_retval == 1); return dense_retval; @@ -903,76 +906,96 @@ int hllSparseAdd(robj *o, unsigned char *ele, size_t elesize) { return hllSparseSet(o,index,count); } -/* Compute SUM(2^-reg) in the sparse representation. - * PE is an array with a pre-computer table of values 2^-reg indexed by reg. - * As a side effect the integer pointed by 'ezp' is set to the number - * of zero registers. */ -double hllSparseSum(uint8_t *sparse, int sparselen, double *PE, int *ezp, int *invalid) { - double E = 0; - int ez = 0, idx = 0, runlen, regval; +/* Compute the register histogram in the sparse representation. 
*/ +void hllSparseRegHisto(uint8_t *sparse, int sparselen, int *invalid, int* reghisto) { + int idx = 0, runlen, regval; uint8_t *end = sparse+sparselen, *p = sparse; while(p < end) { if (HLL_SPARSE_IS_ZERO(p)) { runlen = HLL_SPARSE_ZERO_LEN(p); idx += runlen; - ez += runlen; - /* Increment E at the end of the loop. */ + reghisto[0] += runlen; p++; } else if (HLL_SPARSE_IS_XZERO(p)) { runlen = HLL_SPARSE_XZERO_LEN(p); idx += runlen; - ez += runlen; - /* Increment E at the end of the loop. */ + reghisto[0] += runlen; p += 2; } else { runlen = HLL_SPARSE_VAL_LEN(p); regval = HLL_SPARSE_VAL_VALUE(p); idx += runlen; - E += PE[regval]*runlen; + reghisto[regval] += runlen; p++; } } if (idx != HLL_REGISTERS && invalid) *invalid = 1; - E += ez; /* Add 2^0 'ez' times. */ - *ezp = ez; - return E; } /* ========================= HyperLogLog Count ============================== * This is the core of the algorithm where the approximated count is computed. - * The function uses the lower level hllDenseSum() and hllSparseSum() functions - * as helpers to compute the SUM(2^-reg) part of the computation, which is - * representation-specific, while all the rest is common. */ - -/* Implements the SUM operation for uint8_t data type which is only used - * internally as speedup for PFCOUNT with multiple keys. */ -double hllRawSum(uint8_t *registers, double *PE, int *ezp) { - double E = 0; - int j, ez = 0; + * The function uses the lower level hllDenseRegHisto() and hllSparseRegHisto() + * functions as helpers to compute histogram of register values part of the + * computation, which is representation-specific, while all the rest is common. */ + +/* Implements the register histogram calculation for uint8_t data type + * which is only used internally as speedup for PFCOUNT with multiple keys. */ +void hllRawRegHisto(uint8_t *registers, int* reghisto) { uint64_t *word = (uint64_t*) registers; uint8_t *bytes; + int j; for (j = 0; j < HLL_REGISTERS/8; j++) { if (*word == 0) { - ez += 8; + reghisto[0] += 8; } else { bytes = (uint8_t*) word; - if (bytes[0]) E += PE[bytes[0]]; else ez++; - if (bytes[1]) E += PE[bytes[1]]; else ez++; - if (bytes[2]) E += PE[bytes[2]]; else ez++; - if (bytes[3]) E += PE[bytes[3]]; else ez++; - if (bytes[4]) E += PE[bytes[4]]; else ez++; - if (bytes[5]) E += PE[bytes[5]]; else ez++; - if (bytes[6]) E += PE[bytes[6]]; else ez++; - if (bytes[7]) E += PE[bytes[7]]; else ez++; + reghisto[bytes[0]]++; + reghisto[bytes[1]]++; + reghisto[bytes[2]]++; + reghisto[bytes[3]]++; + reghisto[bytes[4]]++; + reghisto[bytes[5]]++; + reghisto[bytes[6]]++; + reghisto[bytes[7]]++; } word++; } - E += ez; /* 2^(-reg[j]) is 1 when m is 0, add it 'ez' times for every - zero register in the HLL. */ - *ezp = ez; - return E; +} + +/* Helper function sigma as defined in + * "New cardinality estimation algorithms for HyperLogLog sketches" + * Otmar Ertl, arXiv:1702.01284 */ +double hllSigma(double x) { + if (x == 1.) return INFINITY; + double zPrime; + double y = 1; + double z = x; + do { + x *= x; + zPrime = z; + z += x * y; + y += y; + } while(zPrime != z); + return z; +} + +/* Helper function tau as defined in + * "New cardinality estimation algorithms for HyperLogLog sketches" + * Otmar Ertl, arXiv:1702.01284 */ +double hllTau(double x) { + if (x == 0. || x == 1.) 
return 0.; + double zPrime; + double y = 1.0; + double z = 1 - x; + do { + x = sqrt(x); + zPrime = z; + y *= 0.5; + z -= pow(1 - x, 2)*y; + } while(zPrime != z); + return z / 3; } /* Return the approximated cardinality of the set based on the harmonic @@ -988,49 +1011,33 @@ double hllRawSum(uint8_t *registers, double *PE, int *ezp) { * keys (no need to work with 6-bit integers encoding). */ uint64_t hllCount(struct hllhdr *hdr, int *invalid) { double m = HLL_REGISTERS; - double E, alpha = 0.7213/(1+1.079/m); - int j, ez; /* Number of registers equal to 0. */ - - /* We precompute 2^(-reg[j]) in a small table in order to - * speedup the computation of SUM(2^-register[0..i]). */ - static int initialized = 0; - static double PE[64]; - if (!initialized) { - PE[0] = 1; /* 2^(-reg[j]) is 1 when m is 0. */ - for (j = 1; j < 64; j++) { - /* 2^(-reg[j]) is the same as 1/2^reg[j]. */ - PE[j] = 1.0/(1ULL << j); - } - initialized = 1; - } + double E; + int j; + int reghisto[HLL_Q+2] = {0}; - /* Compute SUM(2^-register[0..i]). */ + /* Compute register histogram */ if (hdr->encoding == HLL_DENSE) { - E = hllDenseSum(hdr->registers,PE,&ez); + hllDenseRegHisto(hdr->registers,reghisto); } else if (hdr->encoding == HLL_SPARSE) { - E = hllSparseSum(hdr->registers, - sdslen((sds)hdr)-HLL_HDR_SIZE,PE,&ez,invalid); + hllSparseRegHisto(hdr->registers, + sdslen((sds)hdr)-HLL_HDR_SIZE,invalid,reghisto); } else if (hdr->encoding == HLL_RAW) { - E = hllRawSum(hdr->registers,PE,&ez); + hllRawRegHisto(hdr->registers,reghisto); } else { serverPanic("Unknown HyperLogLog encoding in hllCount()"); } - /* Apply loglog-beta to the raw estimate. See: - * "LogLog-Beta and More: A New Algorithm for Cardinality Estimation - * Based on LogLog Counting" Jason Qin, Denys Kim, Yumei Tung - * arXiv:1612.02284 */ - double zl = log(ez + 1); - double beta = -0.370393911*ez + - 0.070471823*zl + - 0.17393686*pow(zl,2) + - 0.16339839*pow(zl,3) + - -0.09237745*pow(zl,4) + - 0.03738027*pow(zl,5) + - -0.005384159*pow(zl,6) + - 0.00042419*pow(zl,7); - - E = llroundl(alpha*m*(m-ez)*(1/(E+beta))); + /* Estimate cardinality form register histogram. 
See: + * "New cardinality estimation algorithms for HyperLogLog sketches" + * Otmar Ertl, arXiv:1702.01284 */ + double z = m * hllTau((m-reghisto[HLL_Q+1])/(double)m); + for (j = HLL_Q; j >= 1; --j) { + z += reghisto[j]; + z *= 0.5; + } + z += m * hllSigma(reghisto[0]/(double)m); + E = llroundl(HLL_ALPHA_INF*m*m/z); + return (uint64_t) E; } diff --git a/redis-android/src/main/jni/redis-4.0.11/src/intset.c b/redis-android/src/main/jni/redis-5.0.0/src/intset.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/intset.c rename to redis-android/src/main/jni/redis-5.0.0/src/intset.c diff --git a/redis-android/src/main/jni/redis-4.0.11/src/intset.h b/redis-android/src/main/jni/redis-5.0.0/src/intset.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/intset.h rename to redis-android/src/main/jni/redis-5.0.0/src/intset.h diff --git a/redis-android/src/main/jni/redis-4.0.11/src/latency.c b/redis-android/src/main/jni/redis-5.0.0/src/latency.c similarity index 98% rename from redis-android/src/main/jni/redis-4.0.11/src/latency.c rename to redis-android/src/main/jni/redis-5.0.0/src/latency.c index 292720a..d89c48d 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/latency.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/latency.c @@ -152,7 +152,7 @@ int latencyResetEvent(char *event_to_reset) { /* ------------------------ Latency reporting (doctor) ---------------------- */ -/* Analyze the samples avaialble for a given event and return a structure +/* Analyze the samples available for a given event and return a structure * populate with different metrics, average, MAD, min, max, and so forth. * Check latency.h definition of struct latenctStat for more info. * If the specified event has no elements the structure is populate with @@ -294,7 +294,7 @@ sds createLatencyReport(void) { /* Potentially commands. */ if (!strcasecmp(event,"command")) { - if (server.slowlog_log_slower_than == 0) { + if (server.slowlog_log_slower_than < 0) { advise_slowlog_enabled = 1; advices++; } else if (server.slowlog_log_slower_than/1000 > @@ -560,10 +560,11 @@ sds latencyCommandGenSparkeline(char *event, struct latencyTimeSeries *ts) { /* LATENCY command implementations. * - * LATENCY SAMPLES: return time-latency samples for the specified event. + * LATENCY HISTORY: return time-latency samples for the specified event. * LATENCY LATEST: return the latest latency for all the events classes. * LATENCY DOCTOR: returns an human readable analysis of instance latency. * LATENCY GRAPH: provide an ASCII graph of the latency of the specified event. + * LATENCY RESET: reset data of a specified event or all the data if no event provided. 
*/ void latencyCommand(client *c) { struct latencyTimeSeries *ts; diff --git a/redis-android/src/main/jni/redis-4.0.11/src/latency.h b/redis-android/src/main/jni/redis-5.0.0/src/latency.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/latency.h rename to redis-android/src/main/jni/redis-5.0.0/src/latency.h diff --git a/redis-android/src/main/jni/redis-4.0.11/src/lazyfree.c b/redis-android/src/main/jni/redis-5.0.0/src/lazyfree.c similarity index 91% rename from redis-android/src/main/jni/redis-4.0.11/src/lazyfree.c rename to redis-android/src/main/jni/redis-5.0.0/src/lazyfree.c index f1de0c8..3d3159c 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/lazyfree.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/lazyfree.c @@ -23,10 +23,10 @@ size_t lazyfreeGetPendingObjectsCount(void) { * the function just returns the number of elements the object is composed of. * * Objects composed of single allocations are always reported as having a - * single item even if they are actaully logical composed of multiple + * single item even if they are actually logical composed of multiple * elements. * - * For lists the funciton returns the number of elements in the quicklist + * For lists the function returns the number of elements in the quicklist * representing the list. */ size_t lazyfreeGetFreeEffort(robj *obj) { if (obj->type == OBJ_LIST) { @@ -90,6 +90,17 @@ int dbAsyncDelete(redisDb *db, robj *key) { } } +/* Free an object, if the object is huge enough, free it in async way. */ +void freeObjAsync(robj *o) { + size_t free_effort = lazyfreeGetFreeEffort(o); + if (free_effort > LAZYFREE_THRESHOLD && o->refcount == 1) { + atomicIncr(lazyfree_objects,1); + bioCreateBackgroundJob(BIO_LAZY_FREE,o,NULL,NULL); + } else { + decrRefCount(o); + } +} + /* Empty a Redis DB asynchronously. What the function does actually is to * create a new empty set of hash tables and scheduling the old ones for * lazy freeing. */ diff --git a/redis-android/src/main/jni/redis-5.0.0/src/listpack.c b/redis-android/src/main/jni/redis-5.0.0/src/listpack.c new file mode 100644 index 0000000..e1f4d9a --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/src/listpack.c @@ -0,0 +1,803 @@ +/* Listpack -- A lists of strings serialization format + * + * This file implements the specification you can find at: + * + * https://github.com/antirez/listpack + * + * Copyright (c) 2017, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include + +#include "listpack.h" +#include "listpack_malloc.h" + +#define LP_HDR_SIZE 6 /* 32 bit total len + 16 bit number of elements. */ +#define LP_HDR_NUMELE_UNKNOWN UINT16_MAX +#define LP_MAX_INT_ENCODING_LEN 9 +#define LP_MAX_BACKLEN_SIZE 5 +#define LP_MAX_ENTRY_BACKLEN 34359738367ULL +#define LP_ENCODING_INT 0 +#define LP_ENCODING_STRING 1 + +#define LP_ENCODING_7BIT_UINT 0 +#define LP_ENCODING_7BIT_UINT_MASK 0x80 +#define LP_ENCODING_IS_7BIT_UINT(byte) (((byte)&LP_ENCODING_7BIT_UINT_MASK)==LP_ENCODING_7BIT_UINT) + +#define LP_ENCODING_6BIT_STR 0x80 +#define LP_ENCODING_6BIT_STR_MASK 0xC0 +#define LP_ENCODING_IS_6BIT_STR(byte) (((byte)&LP_ENCODING_6BIT_STR_MASK)==LP_ENCODING_6BIT_STR) + +#define LP_ENCODING_13BIT_INT 0xC0 +#define LP_ENCODING_13BIT_INT_MASK 0xE0 +#define LP_ENCODING_IS_13BIT_INT(byte) (((byte)&LP_ENCODING_13BIT_INT_MASK)==LP_ENCODING_13BIT_INT) + +#define LP_ENCODING_12BIT_STR 0xE0 +#define LP_ENCODING_12BIT_STR_MASK 0xF0 +#define LP_ENCODING_IS_12BIT_STR(byte) (((byte)&LP_ENCODING_12BIT_STR_MASK)==LP_ENCODING_12BIT_STR) + +#define LP_ENCODING_16BIT_INT 0xF1 +#define LP_ENCODING_16BIT_INT_MASK 0xFF +#define LP_ENCODING_IS_16BIT_INT(byte) (((byte)&LP_ENCODING_16BIT_INT_MASK)==LP_ENCODING_16BIT_INT) + +#define LP_ENCODING_24BIT_INT 0xF2 +#define LP_ENCODING_24BIT_INT_MASK 0xFF +#define LP_ENCODING_IS_24BIT_INT(byte) (((byte)&LP_ENCODING_24BIT_INT_MASK)==LP_ENCODING_24BIT_INT) + +#define LP_ENCODING_32BIT_INT 0xF3 +#define LP_ENCODING_32BIT_INT_MASK 0xFF +#define LP_ENCODING_IS_32BIT_INT(byte) (((byte)&LP_ENCODING_32BIT_INT_MASK)==LP_ENCODING_32BIT_INT) + +#define LP_ENCODING_64BIT_INT 0xF4 +#define LP_ENCODING_64BIT_INT_MASK 0xFF +#define LP_ENCODING_IS_64BIT_INT(byte) (((byte)&LP_ENCODING_64BIT_INT_MASK)==LP_ENCODING_64BIT_INT) + +#define LP_ENCODING_32BIT_STR 0xF0 +#define LP_ENCODING_32BIT_STR_MASK 0xFF +#define LP_ENCODING_IS_32BIT_STR(byte) (((byte)&LP_ENCODING_32BIT_STR_MASK)==LP_ENCODING_32BIT_STR) + +#define LP_EOF 0xFF + +#define LP_ENCODING_6BIT_STR_LEN(p) ((p)[0] & 0x3F) +#define LP_ENCODING_12BIT_STR_LEN(p) ((((p)[0] & 0xF) << 8) | (p)[1]) +#define LP_ENCODING_32BIT_STR_LEN(p) (((uint32_t)(p)[1]<<0) | \ + ((uint32_t)(p)[2]<<8) | \ + ((uint32_t)(p)[3]<<16) | \ + ((uint32_t)(p)[4]<<24)) + +#define lpGetTotalBytes(p) (((uint32_t)(p)[0]<<0) | \ + ((uint32_t)(p)[1]<<8) | \ + ((uint32_t)(p)[2]<<16) | \ + ((uint32_t)(p)[3]<<24)) + +#define lpGetNumElements(p) (((uint32_t)(p)[4]<<0) | \ + ((uint32_t)(p)[5]<<8)) +#define lpSetTotalBytes(p,v) do { \ + (p)[0] = (v)&0xff; \ + (p)[1] = ((v)>>8)&0xff; \ + (p)[2] = ((v)>>16)&0xff; \ + (p)[3] = ((v)>>24)&0xff; \ +} while(0) + +#define lpSetNumElements(p,v) do { \ + (p)[4] = (v)&0xff; \ + (p)[5] = ((v)>>8)&0xff; \ +} while(0) + +/* Convert a string into a signed 64 bit integer. + * The function returns 1 if the string could be parsed into a (non-overflowing) + * signed 64 bit int, 0 otherwise. 
The 'value' will be set to the parsed value + * when the function returns success. + * + * Note that this function demands that the string strictly represents + * a int64 value: no spaces or other characters before or after the string + * representing the number are accepted, nor zeroes at the start if not + * for the string "0" representing the zero number. + * + * Because of its strictness, it is safe to use this function to check if + * you can convert a string into a long long, and obtain back the string + * from the number without any loss in the string representation. * + * + * ----------------------------------------------------------------------------- + * + * Credits: this function was adapted from the Redis source code, file + * "utils.c", function string2ll(), and is copyright: + * + * Copyright(C) 2011, Pieter Noordhuis + * Copyright(C) 2011, Salvatore Sanfilippo + * + * The function is released under the BSD 3-clause license. + */ +int lpStringToInt64(const char *s, unsigned long slen, int64_t *value) { + const char *p = s; + unsigned long plen = 0; + int negative = 0; + uint64_t v; + + if (plen == slen) + return 0; + + /* Special case: first and only digit is 0. */ + if (slen == 1 && p[0] == '0') { + if (value != NULL) *value = 0; + return 1; + } + + if (p[0] == '-') { + negative = 1; + p++; plen++; + + /* Abort on only a negative sign. */ + if (plen == slen) + return 0; + } + + /* First digit should be 1-9, otherwise the string should just be 0. */ + if (p[0] >= '1' && p[0] <= '9') { + v = p[0]-'0'; + p++; plen++; + } else if (p[0] == '0' && slen == 1) { + *value = 0; + return 1; + } else { + return 0; + } + + while (plen < slen && p[0] >= '0' && p[0] <= '9') { + if (v > (UINT64_MAX / 10)) /* Overflow. */ + return 0; + v *= 10; + + if (v > (UINT64_MAX - (p[0]-'0'))) /* Overflow. */ + return 0; + v += p[0]-'0'; + + p++; plen++; + } + + /* Return if not all bytes were used. */ + if (plen < slen) + return 0; + + if (negative) { + if (v > ((uint64_t)(-(INT64_MIN+1))+1)) /* Overflow. */ + return 0; + if (value != NULL) *value = -v; + } else { + if (v > INT64_MAX) /* Overflow. */ + return 0; + if (value != NULL) *value = v; + } + return 1; +} + +/* Create a new, empty listpack. + * On success the new listpack is returned, otherwise an error is returned. */ +unsigned char *lpNew(void) { + unsigned char *lp = lp_malloc(LP_HDR_SIZE+1); + if (lp == NULL) return NULL; + lpSetTotalBytes(lp,LP_HDR_SIZE+1); + lpSetNumElements(lp,0); + lp[LP_HDR_SIZE] = LP_EOF; + return lp; +} + +/* Free the specified listpack. */ +void lpFree(unsigned char *lp) { + lp_free(lp); +} + +/* Given an element 'ele' of size 'size', determine if the element can be + * represented inside the listpack encoded as integer, and returns + * LP_ENCODING_INT if so. Otherwise returns LP_ENCODING_STR if no integer + * encoding is possible. + * + * If the LP_ENCODING_INT is returned, the function stores the integer encoded + * representation of the element in the 'intenc' buffer. + * + * Regardless of the returned encoding, 'enclen' is populated by reference to + * the number of bytes that the string or integer encoded element will require + * in order to be represented. */ +int lpEncodeGetType(unsigned char *ele, uint32_t size, unsigned char *intenc, uint64_t *enclen) { + int64_t v; + if (lpStringToInt64((const char*)ele, size, &v)) { + if (v >= 0 && v <= 127) { + /* Single byte 0-127 integer. */ + intenc[0] = v; + *enclen = 1; + } else if (v >= -4096 && v <= 4095) { + /* 13 bit integer. 
*/ + if (v < 0) v = ((int64_t)1<<13)+v; + intenc[0] = (v>>8)|LP_ENCODING_13BIT_INT; + intenc[1] = v&0xff; + *enclen = 2; + } else if (v >= -32768 && v <= 32767) { + /* 16 bit integer. */ + if (v < 0) v = ((int64_t)1<<16)+v; + intenc[0] = LP_ENCODING_16BIT_INT; + intenc[1] = v&0xff; + intenc[2] = v>>8; + *enclen = 3; + } else if (v >= -8388608 && v <= 8388607) { + /* 24 bit integer. */ + if (v < 0) v = ((int64_t)1<<24)+v; + intenc[0] = LP_ENCODING_24BIT_INT; + intenc[1] = v&0xff; + intenc[2] = (v>>8)&0xff; + intenc[3] = v>>16; + *enclen = 4; + } else if (v >= -2147483648 && v <= 2147483647) { + /* 32 bit integer. */ + if (v < 0) v = ((int64_t)1<<32)+v; + intenc[0] = LP_ENCODING_32BIT_INT; + intenc[1] = v&0xff; + intenc[2] = (v>>8)&0xff; + intenc[3] = (v>>16)&0xff; + intenc[4] = v>>24; + *enclen = 5; + } else { + /* 64 bit integer. */ + uint64_t uv = v; + intenc[0] = LP_ENCODING_64BIT_INT; + intenc[1] = uv&0xff; + intenc[2] = (uv>>8)&0xff; + intenc[3] = (uv>>16)&0xff; + intenc[4] = (uv>>24)&0xff; + intenc[5] = (uv>>32)&0xff; + intenc[6] = (uv>>40)&0xff; + intenc[7] = (uv>>48)&0xff; + intenc[8] = uv>>56; + *enclen = 9; + } + return LP_ENCODING_INT; + } else { + if (size < 64) *enclen = 1+size; + else if (size < 4096) *enclen = 2+size; + else *enclen = 5+size; + return LP_ENCODING_STRING; + } +} + +/* Store a reverse-encoded variable length field, representing the length + * of the previous element of size 'l', in the target buffer 'buf'. + * The function returns the number of bytes used to encode it, from + * 1 to 5. If 'buf' is NULL the function just returns the number of bytes + * needed in order to encode the backlen. */ +unsigned long lpEncodeBacklen(unsigned char *buf, uint64_t l) { + if (l <= 127) { + if (buf) buf[0] = l; + return 1; + } else if (l < 16383) { + if (buf) { + buf[0] = l>>7; + buf[1] = (l&127)|128; + } + return 2; + } else if (l < 2097151) { + if (buf) { + buf[0] = l>>14; + buf[1] = ((l>>7)&127)|128; + buf[2] = (l&127)|128; + } + return 3; + } else if (l < 268435455) { + if (buf) { + buf[0] = l>>21; + buf[1] = ((l>>14)&127)|128; + buf[2] = ((l>>7)&127)|128; + buf[3] = (l&127)|128; + } + return 4; + } else { + if (buf) { + buf[0] = l>>28; + buf[1] = ((l>>21)&127)|128; + buf[2] = ((l>>14)&127)|128; + buf[3] = ((l>>7)&127)|128; + buf[4] = (l&127)|128; + } + return 5; + } +} + +/* Decode the backlen and returns it. If the encoding looks invalid (more than + * 5 bytes are used), UINT64_MAX is returned to report the problem. */ +uint64_t lpDecodeBacklen(unsigned char *p) { + uint64_t val = 0; + uint64_t shift = 0; + do { + val |= (uint64_t)(p[0] & 127) << shift; + if (!(p[0] & 128)) break; + shift += 7; + p--; + if (shift > 28) return UINT64_MAX; + } while(1); + return val; +} + +/* Encode the string element pointed by 's' of size 'len' in the target + * buffer 's'. The function should be called with 'buf' having always enough + * space for encoding the string. This is done by calling lpEncodeGetType() + * before calling this function. */ +void lpEncodeString(unsigned char *buf, unsigned char *s, uint32_t len) { + if (len < 64) { + buf[0] = len | LP_ENCODING_6BIT_STR; + memcpy(buf+1,s,len); + } else if (len < 4096) { + buf[0] = (len >> 8) | LP_ENCODING_12BIT_STR; + buf[1] = len & 0xff; + memcpy(buf+2,s,len); + } else { + buf[0] = LP_ENCODING_32BIT_STR; + buf[1] = len & 0xff; + buf[2] = (len >> 8) & 0xff; + buf[3] = (len >> 16) & 0xff; + buf[4] = (len >> 24) & 0xff; + memcpy(buf+5,s,len); + } +} + +/* Return the encoded length of the listpack element pointed by 'p'. 
If the + * element encoding is wrong then 0 is returned. */ +uint32_t lpCurrentEncodedSize(unsigned char *p) { + if (LP_ENCODING_IS_7BIT_UINT(p[0])) return 1; + if (LP_ENCODING_IS_6BIT_STR(p[0])) return 1+LP_ENCODING_6BIT_STR_LEN(p); + if (LP_ENCODING_IS_13BIT_INT(p[0])) return 2; + if (LP_ENCODING_IS_16BIT_INT(p[0])) return 3; + if (LP_ENCODING_IS_24BIT_INT(p[0])) return 4; + if (LP_ENCODING_IS_32BIT_INT(p[0])) return 5; + if (LP_ENCODING_IS_64BIT_INT(p[0])) return 9; + if (LP_ENCODING_IS_12BIT_STR(p[0])) return 2+LP_ENCODING_12BIT_STR_LEN(p); + if (LP_ENCODING_IS_32BIT_STR(p[0])) return 5+LP_ENCODING_32BIT_STR_LEN(p); + if (p[0] == LP_EOF) return 1; + return 0; +} + +/* Skip the current entry returning the next. It is invalid to call this + * function if the current element is the EOF element at the end of the + * listpack, however, while this function is used to implement lpNext(), + * it does not return NULL when the EOF element is encountered. */ +unsigned char *lpSkip(unsigned char *p) { + unsigned long entrylen = lpCurrentEncodedSize(p); + entrylen += lpEncodeBacklen(NULL,entrylen); + p += entrylen; + return p; +} + +/* If 'p' points to an element of the listpack, calling lpNext() will return + * the pointer to the next element (the one on the right), or NULL if 'p' + * already pointed to the last element of the listpack. */ +unsigned char *lpNext(unsigned char *lp, unsigned char *p) { + ((void) lp); /* lp is not used for now. However lpPrev() uses it. */ + p = lpSkip(p); + if (p[0] == LP_EOF) return NULL; + return p; +} + +/* If 'p' points to an element of the listpack, calling lpPrev() will return + * the pointer to the preivous element (the one on the left), or NULL if 'p' + * already pointed to the first element of the listpack. */ +unsigned char *lpPrev(unsigned char *lp, unsigned char *p) { + if (p-lp == LP_HDR_SIZE) return NULL; + p--; /* Seek the first backlen byte of the last element. */ + uint64_t prevlen = lpDecodeBacklen(p); + prevlen += lpEncodeBacklen(NULL,prevlen); + return p-prevlen+1; /* Seek the first byte of the previous entry. */ +} + +/* Return a pointer to the first element of the listpack, or NULL if the + * listpack has no elements. */ +unsigned char *lpFirst(unsigned char *lp) { + lp += LP_HDR_SIZE; /* Skip the header. */ + if (lp[0] == LP_EOF) return NULL; + return lp; +} + +/* Return a pointer to the last element of the listpack, or NULL if the + * listpack has no elements. */ +unsigned char *lpLast(unsigned char *lp) { + unsigned char *p = lp+lpGetTotalBytes(lp)-1; /* Seek EOF element. */ + return lpPrev(lp,p); /* Will return NULL if EOF is the only element. */ +} + +/* Return the number of elements inside the listpack. This function attempts + * to use the cached value when within range, otherwise a full scan is + * needed. As a side effect of calling this function, the listpack header + * could be modified, because if the count is found to be already within + * the 'numele' header field range, the new value is set. */ +uint32_t lpLength(unsigned char *lp) { + uint32_t numele = lpGetNumElements(lp); + if (numele != LP_HDR_NUMELE_UNKNOWN) return numele; + + /* Too many elements inside the listpack. We need to scan in order + * to get the total number. */ + uint32_t count = 0; + unsigned char *p = lpFirst(lp); + while(p) { + count++; + p = lpNext(lp,p); + } + + /* If the count is again within range of the header numele field, + * set it. 
*/ + if (count < LP_HDR_NUMELE_UNKNOWN) lpSetNumElements(lp,count); + return count; +} + +/* Return the listpack element pointed by 'p'. + * + * The function changes behavior depending on the passed 'intbuf' value. + * Specifically, if 'intbuf' is NULL: + * + * If the element is internally encoded as an integer, the function returns + * NULL and populates the integer value by reference in 'count'. Otherwise if + * the element is encoded as a string a pointer to the string (pointing inside + * the listpack itself) is returned, and 'count' is set to the length of the + * string. + * + * If instead 'intbuf' points to a buffer passed by the caller, that must be + * at least LP_INTBUF_SIZE bytes, the function always returns the element as + * it was a string (returning the pointer to the string and setting the + * 'count' argument to the string length by reference). However if the element + * is encoded as an integer, the 'intbuf' buffer is used in order to store + * the string representation. + * + * The user should use one or the other form depending on what the value will + * be used for. If there is immediate usage for an integer value returned + * by the function, than to pass a buffer (and convert it back to a number) + * is of course useless. + * + * If the function is called against a badly encoded ziplist, so that there + * is no valid way to parse it, the function returns like if there was an + * integer encoded with value 12345678900000000 + , this may + * be an hint to understand that something is wrong. To crash in this case is + * not sensible because of the different requirements of the application using + * this lib. + * + * Similarly, there is no error returned since the listpack normally can be + * assumed to be valid, so that would be a very high API cost. However a function + * in order to check the integrity of the listpack at load time is provided, + * check lpIsValid(). */ +unsigned char *lpGet(unsigned char *p, int64_t *count, unsigned char *intbuf) { + int64_t val; + uint64_t uval, negstart, negmax; + + if (LP_ENCODING_IS_7BIT_UINT(p[0])) { + negstart = UINT64_MAX; /* 7 bit ints are always positive. 
*/ + negmax = 0; + uval = p[0] & 0x7f; + } else if (LP_ENCODING_IS_6BIT_STR(p[0])) { + *count = LP_ENCODING_6BIT_STR_LEN(p); + return p+1; + } else if (LP_ENCODING_IS_13BIT_INT(p[0])) { + uval = ((p[0]&0x1f)<<8) | p[1]; + negstart = (uint64_t)1<<12; + negmax = 8191; + } else if (LP_ENCODING_IS_16BIT_INT(p[0])) { + uval = (uint64_t)p[1] | + (uint64_t)p[2]<<8; + negstart = (uint64_t)1<<15; + negmax = UINT16_MAX; + } else if (LP_ENCODING_IS_24BIT_INT(p[0])) { + uval = (uint64_t)p[1] | + (uint64_t)p[2]<<8 | + (uint64_t)p[3]<<16; + negstart = (uint64_t)1<<23; + negmax = UINT32_MAX>>8; + } else if (LP_ENCODING_IS_32BIT_INT(p[0])) { + uval = (uint64_t)p[1] | + (uint64_t)p[2]<<8 | + (uint64_t)p[3]<<16 | + (uint64_t)p[4]<<24; + negstart = (uint64_t)1<<31; + negmax = UINT32_MAX; + } else if (LP_ENCODING_IS_64BIT_INT(p[0])) { + uval = (uint64_t)p[1] | + (uint64_t)p[2]<<8 | + (uint64_t)p[3]<<16 | + (uint64_t)p[4]<<24 | + (uint64_t)p[5]<<32 | + (uint64_t)p[6]<<40 | + (uint64_t)p[7]<<48 | + (uint64_t)p[8]<<56; + negstart = (uint64_t)1<<63; + negmax = UINT64_MAX; + } else if (LP_ENCODING_IS_12BIT_STR(p[0])) { + *count = LP_ENCODING_12BIT_STR_LEN(p); + return p+2; + } else if (LP_ENCODING_IS_32BIT_STR(p[0])) { + *count = LP_ENCODING_32BIT_STR_LEN(p); + return p+5; + } else { + uval = 12345678900000000ULL + p[0]; + negstart = UINT64_MAX; + negmax = 0; + } + + /* We reach this code path only for integer encodings. + * Convert the unsigned value to the signed one using two's complement + * rule. */ + if (uval >= negstart) { + /* This three steps conversion should avoid undefined behaviors + * in the unsigned -> signed conversion. */ + uval = negmax-uval; + val = uval; + val = -val-1; + } else { + val = uval; + } + + /* Return the string representation of the integer or the value itself + * depending on intbuf being NULL or not. */ + if (intbuf) { + *count = snprintf((char*)intbuf,LP_INTBUF_SIZE,"%lld",(long long)val); + return intbuf; + } else { + *count = val; + return NULL; + } +} + +/* Insert, delete or replace the specified element 'ele' of length 'len' at + * the specified position 'p', with 'p' being a listpack element pointer + * obtained with lpFirst(), lpLast(), lpIndex(), lpNext(), lpPrev() or + * lpSeek(). + * + * The element is inserted before, after, or replaces the element pointed + * by 'p' depending on the 'where' argument, that can be LP_BEFORE, LP_AFTER + * or LP_REPLACE. + * + * If 'ele' is set to NULL, the function removes the element pointed by 'p' + * instead of inserting one. + * + * Returns NULL on out of memory or when the listpack total length would exceed + * the max allowed size of 2^32-1, otherwise the new pointer to the listpack + * holding the new element is returned (and the old pointer passed is no longer + * considered valid) + * + * If 'newp' is not NULL, at the end of a successful call '*newp' will be set + * to the address of the element just added, so that it will be possible to + * continue an interation with lpNext() and lpPrev(). + * + * For deletion operations ('ele' set to NULL) 'newp' is set to the next + * element, on the right of the deleted one, or to NULL if the deleted element + * was the last one. */ +unsigned char *lpInsert(unsigned char *lp, unsigned char *ele, uint32_t size, unsigned char *p, int where, unsigned char **newp) { + unsigned char intenc[LP_MAX_INT_ENCODING_LEN]; + unsigned char backlen[LP_MAX_BACKLEN_SIZE]; + + uint64_t enclen; /* The length of the encoded element. 
*/ + + /* An element pointer set to NULL means deletion, which is conceptually + * replacing the element with a zero-length element. So whatever we + * get passed as 'where', set it to LP_REPLACE. */ + if (ele == NULL) where = LP_REPLACE; + + /* If we need to insert after the current element, we just jump to the + * next element (that could be the EOF one) and handle the case of + * inserting before. So the function will actually deal with just two + * cases: LP_BEFORE and LP_REPLACE. */ + if (where == LP_AFTER) { + p = lpSkip(p); + where = LP_BEFORE; + } + + /* Store the offset of the element 'p', so that we can obtain its + * address again after a reallocation. */ + unsigned long poff = p-lp; + + /* Calling lpEncodeGetType() results into the encoded version of the + * element to be stored into 'intenc' in case it is representable as + * an integer: in that case, the function returns LP_ENCODING_INT. + * Otherwise if LP_ENCODING_STR is returned, we'll have to call + * lpEncodeString() to actually write the encoded string on place later. + * + * Whatever the returned encoding is, 'enclen' is populated with the + * length of the encoded element. */ + int enctype; + if (ele) { + enctype = lpEncodeGetType(ele,size,intenc,&enclen); + } else { + enctype = -1; + enclen = 0; + } + + /* We need to also encode the backward-parsable length of the element + * and append it to the end: this allows to traverse the listpack from + * the end to the start. */ + unsigned long backlen_size = ele ? lpEncodeBacklen(backlen,enclen) : 0; + uint64_t old_listpack_bytes = lpGetTotalBytes(lp); + uint32_t replaced_len = 0; + if (where == LP_REPLACE) { + replaced_len = lpCurrentEncodedSize(p); + replaced_len += lpEncodeBacklen(NULL,replaced_len); + } + + uint64_t new_listpack_bytes = old_listpack_bytes + enclen + backlen_size + - replaced_len; + if (new_listpack_bytes > UINT32_MAX) return NULL; + + /* We now need to reallocate in order to make space or shrink the + * allocation (in case 'when' value is LP_REPLACE and the new element is + * smaller). However we do that before memmoving the memory to + * make room for the new element if the final allocation will get + * larger, or we do it after if the final allocation will get smaller. */ + + unsigned char *dst = lp + poff; /* May be updated after reallocation. */ + + /* Realloc before: we need more room. */ + if (new_listpack_bytes > old_listpack_bytes) { + if ((lp = lp_realloc(lp,new_listpack_bytes)) == NULL) return NULL; + dst = lp + poff; + } + + /* Setup the listpack relocating the elements to make the exact room + * we need to store the new one. */ + if (where == LP_BEFORE) { + memmove(dst+enclen+backlen_size,dst,old_listpack_bytes-poff); + } else { /* LP_REPLACE. */ + long lendiff = (enclen+backlen_size)-replaced_len; + memmove(dst+replaced_len+lendiff, + dst+replaced_len, + old_listpack_bytes-poff-replaced_len); + } + + /* Realloc after: we need to free space. */ + if (new_listpack_bytes < old_listpack_bytes) { + if ((lp = lp_realloc(lp,new_listpack_bytes)) == NULL) return NULL; + dst = lp + poff; + } + + /* Store the entry. */ + if (newp) { + *newp = dst; + /* In case of deletion, set 'newp' to NULL if the next element is + * the EOF element. */ + if (!ele && dst[0] == LP_EOF) *newp = NULL; + } + if (ele) { + if (enctype == LP_ENCODING_INT) { + memcpy(dst,intenc,enclen); + } else { + lpEncodeString(dst,ele,size); + } + dst += enclen; + memcpy(dst,backlen,backlen_size); + dst += backlen_size; + } + + /* Update header. 
*/ + if (where != LP_REPLACE || ele == NULL) { + uint32_t num_elements = lpGetNumElements(lp); + if (num_elements != LP_HDR_NUMELE_UNKNOWN) { + if (ele) + lpSetNumElements(lp,num_elements+1); + else + lpSetNumElements(lp,num_elements-1); + } + } + lpSetTotalBytes(lp,new_listpack_bytes); + +#if 0 + /* This code path is normally disabled: what it does is to force listpack + * to return *always* a new pointer after performing some modification to + * the listpack, even if the previous allocation was enough. This is useful + * in order to spot bugs in code using listpacks: by doing so we can find + * if the caller forgets to set the new pointer where the listpack reference + * is stored, after an update. */ + unsigned char *oldlp = lp; + lp = lp_malloc(new_listpack_bytes); + memcpy(lp,oldlp,new_listpack_bytes); + if (newp) { + unsigned long offset = (*newp)-oldlp; + *newp = lp + offset; + } + /* Make sure the old allocation contains garbage. */ + memset(oldlp,'A',new_listpack_bytes); + lp_free(oldlp); +#endif + + return lp; +} + +/* Append the specified element 'ele' of length 'len' at the end of the + * listpack. It is implemented in terms of lpInsert(), so the return value is + * the same as lpInsert(). */ +unsigned char *lpAppend(unsigned char *lp, unsigned char *ele, uint32_t size) { + uint64_t listpack_bytes = lpGetTotalBytes(lp); + unsigned char *eofptr = lp + listpack_bytes - 1; + return lpInsert(lp,ele,size,eofptr,LP_BEFORE,NULL); +} + +/* Remove the element pointed by 'p', and return the resulting listpack. + * If 'newp' is not NULL, the next element pointer (to the right of the + * deleted one) is returned by reference. If the deleted element was the + * last one, '*newp' is set to NULL. */ +unsigned char *lpDelete(unsigned char *lp, unsigned char *p, unsigned char **newp) { + return lpInsert(lp,NULL,0,p,LP_REPLACE,newp); +} + +/* Return the total number of bytes the listpack is composed of. */ +uint32_t lpBytes(unsigned char *lp) { + return lpGetTotalBytes(lp); +} + +/* Seek the specified element and returns the pointer to the seeked element. + * Positive indexes specify the zero-based element to seek from the head to + * the tail, negative indexes specify elements starting from the tail, where + * -1 means the last element, -2 the penultimate and so forth. If the index + * is out of range, NULL is returned. */ +unsigned char *lpSeek(unsigned char *lp, long index) { + int forward = 1; /* Seek forward by default. */ + + /* We want to seek from left to right or the other way around + * depending on the listpack length and the element position. + * However if the listpack length cannot be obtained in constant time, + * we always seek from left to right. */ + uint32_t numele = lpGetNumElements(lp); + if (numele != LP_HDR_NUMELE_UNKNOWN) { + if (index < 0) index = (long)numele+index; + if (index < 0) return NULL; /* Index still < 0 means out of range. */ + if (index >= numele) return NULL; /* Out of range the other side. */ + /* We want to scan right-to-left if the element we are looking for + * is past the half of the listpack. */ + if (index > numele/2) { + forward = 0; + /* Left to right scanning always expects a negative index. Convert + * our index to negative form. */ + index -= numele; + } + } else { + /* If the listpack length is unspecified, for negative indexes we + * want to always scan left-to-right. */ + if (index < 0) forward = 0; + } + + /* Forward and backward scanning is trivially based on lpNext()/lpPrev(). 
*/ + if (forward) { + unsigned char *ele = lpFirst(lp); + while (index > 0 && ele) { + ele = lpNext(lp,ele); + index--; + } + return ele; + } else { + unsigned char *ele = lpLast(lp); + while (index < -1 && ele) { + ele = lpPrev(lp,ele); + index++; + } + return ele; + } +} + diff --git a/redis-android/src/main/jni/redis-5.0.0/src/listpack.h b/redis-android/src/main/jni/redis-5.0.0/src/listpack.h new file mode 100644 index 0000000..af67b4b --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/src/listpack.h @@ -0,0 +1,61 @@ +/* Listpack -- A lists of strings serialization format + * + * This file implements the specification you can find at: + * + * https://github.com/antirez/listpack + * + * Copyright (c) 2017, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __LISTPACK_H +#define __LISTPACK_H + +#include + +#define LP_INTBUF_SIZE 21 /* 20 digits of -2^63 + 1 null term = 21. 
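The prototypes that follow are the whole public surface of the new listpack. A minimal usage sketch (hedged: out-of-memory handling elided, and lpGet() is called with an intbuf so every element, integer-encoded or not, comes back as a string):

#include <stdio.h>
#include <stdint.h>
#include "listpack.h"

int main(void) {
    unsigned char *lp = lpNew();
    lp = lpAppend(lp, (unsigned char*)"foo", 3);
    lp = lpAppend(lp, (unsigned char*)"1024", 4);  /* stored integer-encoded */

    unsigned char *p = lpFirst(lp);
    while (p) {
        int64_t count;
        unsigned char buf[LP_INTBUF_SIZE];
        unsigned char *ele = lpGet(p, &count, buf); /* string form, len in count */
        printf("%.*s\n", (int)count, (char*)ele);
        p = lpNext(lp, p);
    }
    lpFree(lp);
    return 0;
}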
*/ + +/* lpInsert() where argument possible values: */ +#define LP_BEFORE 0 +#define LP_AFTER 1 +#define LP_REPLACE 2 + +unsigned char *lpNew(void); +void lpFree(unsigned char *lp); +unsigned char *lpInsert(unsigned char *lp, unsigned char *ele, uint32_t size, unsigned char *p, int where, unsigned char **newp); +unsigned char *lpAppend(unsigned char *lp, unsigned char *ele, uint32_t size); +unsigned char *lpDelete(unsigned char *lp, unsigned char *p, unsigned char **newp); +uint32_t lpLength(unsigned char *lp); +unsigned char *lpGet(unsigned char *p, int64_t *count, unsigned char *intbuf); +unsigned char *lpFirst(unsigned char *lp); +unsigned char *lpLast(unsigned char *lp); +unsigned char *lpNext(unsigned char *lp, unsigned char *p); +unsigned char *lpPrev(unsigned char *lp, unsigned char *p); +uint32_t lpBytes(unsigned char *lp); +unsigned char *lpSeek(unsigned char *lp, long index); + +#endif diff --git a/redis-android/src/main/jni/redis-5.0.0/src/listpack_malloc.h b/redis-android/src/main/jni/redis-5.0.0/src/listpack_malloc.h new file mode 100644 index 0000000..401ab6f --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/src/listpack_malloc.h @@ -0,0 +1,45 @@ +/* Listpack -- A lists of strings serialization format + * https://github.com/antirez/listpack + * + * Copyright (c) 2017, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* Allocator selection. + * + * This file is used in order to change the Rax allocator at compile time. + * Just define the following defines to what you want to use. Also add + * the include of your alternate allocator if needed (not needed in order + * to use the default libc allocator). 
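For instance, a hypothetical stand-alone build could route the shim to libc (this is not what the patch ships; the actual defines below map onto zmalloc):

#ifndef LISTPACK_ALLOC_H
#define LISTPACK_ALLOC_H
#include <stdlib.h>
#define lp_malloc malloc
#define lp_realloc realloc
#define lp_free free
#endif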
*/ + +#ifndef LISTPACK_ALLOC_H +#define LISTPACK_ALLOC_H +#include "zmalloc.h" +#define lp_malloc zmalloc +#define lp_realloc zrealloc +#define lp_free zfree +#endif diff --git a/redis-android/src/main/jni/redis-5.0.0/src/localtime.c b/redis-android/src/main/jni/redis-5.0.0/src/localtime.c new file mode 100644 index 0000000..3f59a33 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/src/localtime.c @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2018, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +/* This is a safe version of localtime() which contains no locks and is + * fork() friendly. Even the _r version of localtime() cannot be used safely + * in Redis. Another thread may be calling localtime() while the main thread + * forks(). Later when the child process calls localtime() again, for instance + * in order to log something to the Redis log, it may deadlock: in the copy + * of the address space of the forked process the lock will never be released. + * + * This function takes the timezone 'tz' as argument, and the 'dst' flag is + * used to check if daylight saving time is currently in effect. The caller + * of this function should obtain such information calling tzset() ASAP in the + * main() function to obtain the timezone offset from the 'timezone' global + * variable. To obtain the daylight information, if it is currently active or not, + * one trick is to call localtime() in main() ASAP as well, and get the + * information from the tm_isdst field of the tm structure. However the daylight + * time may switch in the future for long running processes, so this information + * should be refreshed at safe times. + * + * Note that this function does not work for dates < 1/1/1970, it is solely + * designed to work with what time(NULL) may return, and to support Redis + * logging of the dates, it's not really a complete implementation. */ +static int is_leap_year(time_t year) { + if (year % 4) return 0; /* A year not divisible by 4 is not leap. 
*/ + else if (year % 100) return 1; /* If div by 4 and not by 100 it is surely leap. */ + else if (year % 400) return 0; /* If div by 100 and not by 400 it is not leap. */ + else return 1; /* If div by 400 it is surely leap. */ +} + +void nolocks_localtime(struct tm *tmp, time_t t, time_t tz, int dst) { + const time_t secs_min = 60; + const time_t secs_hour = 3600; + const time_t secs_day = 3600*24; + + t -= tz; /* Adjust for timezone. */ + t += 3600*dst; /* Adjust for daylight time. */ + time_t days = t / secs_day; /* Days passed since epoch. */ + time_t seconds = t % secs_day; /* Remaining seconds. */ + + tmp->tm_isdst = dst; + tmp->tm_hour = seconds / secs_hour; + tmp->tm_min = (seconds % secs_hour) / secs_min; + tmp->tm_sec = (seconds % secs_hour) % secs_min; + + /* 1/1/1970 was a Thursday, that is, day 4 from the POV of the tm structure + * where Sunday = 0, so to calculate the day of the week we have to add 4 + * and take the modulo by 7. */ + tmp->tm_wday = (days+4)%7; + + /* Calculate the current year. */ + tmp->tm_year = 1970; + while(1) { + /* Leap years have one day more. */ + time_t days_this_year = 365 + is_leap_year(tmp->tm_year); + if (days_this_year > days) break; + days -= days_this_year; + tmp->tm_year++; + } + tmp->tm_yday = days; /* Day of the current year, zero-based. */ + + /* We need to calculate in which month and day of the month we are. To do + * so we need to skip days according to how many days there are in each + * month, and adjust for the leap year that has one more day in February. */ + int mdays[12] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + mdays[1] += is_leap_year(tmp->tm_year); + + tmp->tm_mon = 0; + while(days >= mdays[tmp->tm_mon]) { + days -= mdays[tmp->tm_mon]; + tmp->tm_mon++; + } + + tmp->tm_mday = days+1; /* Add 1 since our 'days' is zero-based. */ + tmp->tm_year -= 1900; /* Surprisingly tm_year is year-1900. */ +} + +#ifdef LOCALTIME_TEST_MAIN +#include <stdio.h> + +int main(void) { + /* Obtain timezone and daylight info. */ + tzset(); /* Now 'timezone' global is populated. */ + time_t t = time(NULL); + struct tm *aux = localtime(&t); + int daylight_active = aux->tm_isdst; + + struct tm tm; + char buf[1024]; + + nolocks_localtime(&tm,t,timezone,daylight_active); + strftime(buf,sizeof(buf),"%d %b %H:%M:%S",&tm); + printf("[timezone: %d, dl: %d] %s\n", (int)timezone, (int)daylight_active, buf); +} +#endif diff --git a/redis-android/src/main/jni/redis-5.0.0/src/lolwut.c b/redis-android/src/main/jni/redis-5.0.0/src/lolwut.c new file mode 100644 index 0000000..19cbcf6 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/src/lolwut.c @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2018, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission.
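The intended call pattern for nolocks_localtime(), per its comment, is to cache the timezone and DST state at startup and pass them in explicitly. A hedged sketch of that pattern (the caching variables are illustrative, not part of Redis; the #ifdef'd test main above does essentially the same):

    #include <stdio.h>
    #include <time.h>

    void nolocks_localtime(struct tm *tmp, time_t t, time_t tz, int dst);

    static time_t cached_timezone;  /* Illustrative caches, set once at startup. */
    static int cached_dst;

    int main(void) {
        tzset();                                /* Populates the 'timezone' global. */
        cached_timezone = timezone;
        time_t now = time(NULL);
        cached_dst = localtime(&now)->tm_isdst; /* Safe here: no fork() happened yet. */

        /* Later, e.g. in a forked child, this call is lock-free and safe: */
        struct tm tm;
        char buf[64];
        nolocks_localtime(&tm, time(NULL), cached_timezone, cached_dst);
        strftime(buf, sizeof(buf), "%d %b %H:%M:%S", &tm);
        printf("%s\n", buf);
        return 0;
    }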
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * ---------------------------------------------------------------------------- + * + * This file implements the LOLWUT command. The command should do something + * fun and interesting, and should be replaced by a new implementation at + * each new version of Redis. + */ + +#include "server.h" + +void lolwut5Command(client *c); + +/* The default target for LOLWUT if no matching version was found. + * This is what unstable versions of Redis will display. */ +void lolwutUnstableCommand(client *c) { + sds rendered = sdsnew("Redis ver. "); + rendered = sdscat(rendered,REDIS_VERSION); + rendered = sdscatlen(rendered,"\n",1); + addReplyBulkSds(c,rendered); +} + +void lolwutCommand(client *c) { + char *v = REDIS_VERSION; + if ((v[0] == '5' && v[1] == '.') || + (v[0] == '4' && v[1] == '.' && v[2] == '9')) + lolwut5Command(c); + else + lolwutUnstableCommand(c); +} diff --git a/redis-android/src/main/jni/redis-5.0.0/src/lolwut5.c b/redis-android/src/main/jni/redis-5.0.0/src/lolwut5.c new file mode 100644 index 0000000..8408b37 --- /dev/null +++ b/redis-android/src/main/jni/redis-5.0.0/src/lolwut5.c @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2018, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + * ---------------------------------------------------------------------------- + * + * This file implements the LOLWUT command. The command should do something + * fun and interesting, and should be replaced by a new implementation at + * each new version of Redis. + */ + +#include "server.h" +#include <math.h> + +/* This structure represents our canvas. Drawing functions will take a pointer + * to a canvas to write to it. Later the canvas can be rendered to a string + * suitable to be printed on the screen, using unicode Braille characters. */ +typedef struct lwCanvas { + int width; + int height; + char *pixels; +} lwCanvas; + +/* Translate a group of 8 pixels (2x4 vertical rectangle) to the corresponding + * braille character. The byte should correspond to the pixels arranged as + * follows, where 0 is the least significant bit, and 7 the most significant + * bit: + * + * 0 3 + * 1 4 + * 2 5 + * 6 7 + * + * The corresponding utf8 encoded character is set into the three bytes + * pointed by 'output'. + */ +#include <stdio.h> +void lwTranslatePixelsGroup(int byte, char *output) { + int code = 0x2800 + byte; + /* Convert to unicode. This is in the U0800-UFFFF range, so we need to + * emit it like this in three bytes: + * 1110xxxx 10xxxxxx 10xxxxxx. */ + output[0] = 0xE0 | (code >> 12); /* 1110-xxxx */ + output[1] = 0x80 | ((code >> 6) & 0x3F); /* 10-xxxxxx */ + output[2] = 0x80 | (code & 0x3F); /* 10-xxxxxx */ +} + +/* Allocate and return a new canvas of the specified size. */ +lwCanvas *lwCreateCanvas(int width, int height) { + lwCanvas *canvas = zmalloc(sizeof(*canvas)); + canvas->width = width; + canvas->height = height; + canvas->pixels = zmalloc(width*height); + memset(canvas->pixels,0,width*height); + return canvas; +} + +/* Free the canvas created by lwCreateCanvas(). */ +void lwFreeCanvas(lwCanvas *canvas) { + zfree(canvas->pixels); + zfree(canvas); +} + +/* Set a pixel to the specified color. Color is 0 or 1, where zero means the + * dot will not be displayed, and 1 means the dot will be displayed. + * Coordinates are arranged so that the top-left corner is 0,0. Writing + * outside the canvas bounds is safely ignored. */ +void lwDrawPixel(lwCanvas *canvas, int x, int y, int color) { + if (x < 0 || x >= canvas->width || + y < 0 || y >= canvas->height) return; + canvas->pixels[x+y*canvas->width] = color; +} + +/* Return the value of the specified pixel on the canvas. */ +int lwGetPixel(lwCanvas *canvas, int x, int y) { + if (x < 0 || x >= canvas->width || + y < 0 || y >= canvas->height) return 0; + return canvas->pixels[x+y*canvas->width]; +} + +/* Draw a line from x1,y1 to x2,y2 using the Bresenham algorithm. */ +void lwDrawLine(lwCanvas *canvas, int x1, int y1, int x2, int y2, int color) { + int dx = abs(x2-x1); + int dy = abs(y2-y1); + int sx = (x1 < x2) ? 1 : -1; + int sy = (y1 < y2) ? 1 : -1; + int err = dx-dy, e2; + + while(1) { + lwDrawPixel(canvas,x1,y1,color); + if (x1 == x2 && y1 == y2) break; + e2 = err*2; + if (e2 > -dy) { + err -= dy; + x1 += sx; + } + if (e2 < dx) { + err += dx; + y1 += sy; + } + } +} + +/* Draw a square centered at the specified x,y coordinates, with the specified + * rotation angle and size. In order to write a rotated square, we use the + * trivial fact that the parametric equation: + * + * x = sin(k) + * y = cos(k) + * + * Describes a circle for values going from 0 to 2*PI.
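A quick numeric check of this parametric trick, as a standalone sketch mirroring the math in lwDrawSquare() below (compile with -lm): with a desired side of 10 and angle 0, the four sampled points form an axis-aligned square.

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        float size = 10;                 /* Desired side length. */
        size /= 1.4142135623;            /* Radius of the circumscribed circle. */
        size = round(size);
        float k = M_PI/4;                /* angle = 0 starts at 45 degrees. */
        for (int j = 0; j < 4; j++) {
            printf("corner %d: (%d,%d)\n", j,
                   (int)round(sin(k)*size), (int)round(cos(k)*size));
            k += M_PI/2;                 /* Next corner, 90 degrees away. */
        }
        return 0;                        /* Prints (5,5) (5,-5) (-5,-5) (-5,5). */
    }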
So basically if we start + * at 45 degrees, that is k = PI/4, with the first point, and then we find + * the other three points incrementing k by PI/2 (90 degrees), we'll have the + * points of the square. In order to rotate the square, we just start with + * k = PI/4 + rotation_angle, and we are done. + * + * Of course the vanilla equations above will describe the square inside a + * circle of radius 1, so in order to draw larger squares we'll have to + * multiply the obtained coordinates, and then translate them. However this + * is much simpler than implementing the abstract concept of 2D shape and then + * performing the rotation/translation transformation, so for LOLWUT it's + * a good approach. */ +void lwDrawSquare(lwCanvas *canvas, int x, int y, float size, float angle) { + int px[4], py[4]; + + /* Adjust the desired size according to the fact that the square inscribed + * into a circle of radius 1 has the side of length SQRT(2). This way + * size becomes a simple multiplication factor we can use with our + * coordinates to magnify them. */ + size /= 1.4142135623; + size = round(size); + + /* Compute the four points. */ + float k = M_PI/4 + angle; + for (int j = 0; j < 4; j++) { + px[j] = round(sin(k) * size + x); + py[j] = round(cos(k) * size + y); + k += M_PI/2; + } + + /* Draw the square. */ + for (int j = 0; j < 4; j++) + lwDrawLine(canvas,px[j],py[j],px[(j+1)%4],py[(j+1)%4],1); +} + +/* Schotter, the output of LOLWUT of Redis 5, is a computer graphic art piece + * generated by Georg Nees in the 60s. It explores the relationship between + * chaos and order. + * + * The function creates the canvas itself, depending on the columns available + * in the output display and the number of squares per row and per column + * requested by the caller. */ +lwCanvas *lwDrawSchotter(int console_cols, int squares_per_row, int squares_per_col) { + /* Calculate the canvas size. */ + int canvas_width = console_cols*2; + int padding = canvas_width > 4 ? 2 : 0; + float square_side = (float)(canvas_width-padding*2) / squares_per_row; + int canvas_height = square_side * squares_per_col + padding*2; + lwCanvas *canvas = lwCreateCanvas(canvas_width, canvas_height); + + for (int y = 0; y < squares_per_col; y++) { + for (int x = 0; x < squares_per_row; x++) { + int sx = x * square_side + square_side/2 + padding; + int sy = y * square_side + square_side/2 + padding; + /* Rotate and translate randomly as we go down to lower + * rows. */ + float angle = 0; + if (y > 1) { + float r1 = (float)rand() / RAND_MAX / squares_per_col * y; + float r2 = (float)rand() / RAND_MAX / squares_per_col * y; + float r3 = (float)rand() / RAND_MAX / squares_per_col * y; + if (rand() % 2) r1 = -r1; + if (rand() % 2) r2 = -r2; + if (rand() % 2) r3 = -r3; + angle = r1; + sx += r2*square_side/3; + sy += r3*square_side/3; + } + lwDrawSquare(canvas,sx,sy,square_side,angle); + } + } + + return canvas; +} + +/* Converts the canvas to an SDS string representing the UTF8 characters to + * print to the terminal in order to obtain a graphical representation of the + * logical canvas. The actual returned string will require a terminal that is + * width/2 columns wide and height/4 rows tall in order to hold the whole image + * without overflowing or scrolling, since each Braille character is 2x4. */ +sds lwRenderCanvas(lwCanvas *canvas) { + sds text = sdsempty(); + for (int y = 0; y < canvas->height; y += 4) { + for (int x = 0; x < canvas->width; x += 2) { + /* We need to emit groups of 8 bits according to a specific + * arrangement.
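The byte-to-Braille mapping used here can be checked in isolation; a self-contained sketch of the same three-byte UTF-8 encoding, mirroring lwTranslatePixelsGroup() above:

    #include <stdio.h>

    /* Standalone mirror of lwTranslatePixelsGroup(), for checking by hand. */
    static void translate(int byte, unsigned char *out) {
        int code = 0x2800 + byte;              /* Braille block starts at U+2800. */
        out[0] = 0xE0 | (code >> 12);          /* 1110xxxx */
        out[1] = 0x80 | ((code >> 6) & 0x3F);  /* 10xxxxxx */
        out[2] = 0x80 | (code & 0x3F);         /* 10xxxxxx */
    }

    int main(void) {
        unsigned char utf8[4] = {0};
        translate(0x01, utf8);   /* Only bit 0 (top-left dot) set: U+2801. */
        printf("%02X %02X %02X -> %s\n", utf8[0], utf8[1], utf8[2], utf8);
        return 0;                /* Prints: E2 A0 81 -> one-dot Braille cell. */
    }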
See lwTranslatePixelsGroup() for more info. */ + int byte = 0; + if (lwGetPixel(canvas,x,y)) byte |= (1<<0); + if (lwGetPixel(canvas,x,y+1)) byte |= (1<<1); + if (lwGetPixel(canvas,x,y+2)) byte |= (1<<2); + if (lwGetPixel(canvas,x+1,y)) byte |= (1<<3); + if (lwGetPixel(canvas,x+1,y+1)) byte |= (1<<4); + if (lwGetPixel(canvas,x+1,y+2)) byte |= (1<<5); + if (lwGetPixel(canvas,x,y+3)) byte |= (1<<6); + if (lwGetPixel(canvas,x+1,y+3)) byte |= (1<<7); + char unicode[3]; + lwTranslatePixelsGroup(byte,unicode); + text = sdscatlen(text,unicode,3); + } + if (y != canvas->height-1) text = sdscatlen(text,"\n",1); + } + return text; +} + +/* The LOLWUT command: + * + * LOLWUT [terminal columns] [squares-per-row] [squares-per-col] + * + * By default the command uses 66 columns, 8 squares per row, 12 squares + * per column. + */ +void lolwut5Command(client *c) { + long cols = 66; + long squares_per_row = 8; + long squares_per_col = 12; + + /* Parse the optional arguments if any. */ + if (c->argc > 1 && + getLongFromObjectOrReply(c,c->argv[1],&cols,NULL) != C_OK) + return; + + if (c->argc > 2 && + getLongFromObjectOrReply(c,c->argv[2],&squares_per_row,NULL) != C_OK) + return; + + if (c->argc > 3 && + getLongFromObjectOrReply(c,c->argv[3],&squares_per_col,NULL) != C_OK) + return; + + /* Limits. We want LOLWUT to always be reasonably fast and cheap to execute, + * so we enforce a maximum number of columns, rows, and output resolution. */ + if (cols < 1) cols = 1; + if (cols > 1000) cols = 1000; + if (squares_per_row < 1) squares_per_row = 1; + if (squares_per_row > 200) squares_per_row = 200; + if (squares_per_col < 1) squares_per_col = 1; + if (squares_per_col > 200) squares_per_col = 200; + + /* Generate some computer art and reply. */ + lwCanvas *canvas = lwDrawSchotter(cols,squares_per_row,squares_per_col); + sds rendered = lwRenderCanvas(canvas); + rendered = sdscat(rendered, + "\nGeorg Nees - schotter, plotter on paper, 1968. Redis ver.
"); + rendered = sdscat(rendered,REDIS_VERSION); + rendered = sdscatlen(rendered,"\n",1); + addReplyBulkSds(c,rendered); + lwFreeCanvas(canvas); +} diff --git a/redis-android/src/main/jni/redis-4.0.11/src/lzf.h b/redis-android/src/main/jni/redis-5.0.0/src/lzf.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/lzf.h rename to redis-android/src/main/jni/redis-5.0.0/src/lzf.h diff --git a/redis-android/src/main/jni/redis-4.0.11/src/lzfP.h b/redis-android/src/main/jni/redis-5.0.0/src/lzfP.h similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/lzfP.h rename to redis-android/src/main/jni/redis-5.0.0/src/lzfP.h diff --git a/redis-android/src/main/jni/redis-4.0.11/src/lzf_c.c b/redis-android/src/main/jni/redis-5.0.0/src/lzf_c.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/lzf_c.c rename to redis-android/src/main/jni/redis-5.0.0/src/lzf_c.c diff --git a/redis-android/src/main/jni/redis-4.0.11/src/lzf_d.c b/redis-android/src/main/jni/redis-5.0.0/src/lzf_d.c similarity index 88% rename from redis-android/src/main/jni/redis-4.0.11/src/lzf_d.c rename to redis-android/src/main/jni/redis-5.0.0/src/lzf_d.c index c32be8e..d44bfcc 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/lzf_d.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/lzf_d.c @@ -52,6 +52,10 @@ #endif #endif +#if defined(__GNUC__) && __GNUC__ >= 5 +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wimplicit-fallthrough" +#endif unsigned int lzf_decompress (const void *const in_data, unsigned int in_len, void *out_data, unsigned int out_len) @@ -163,17 +167,17 @@ lzf_decompress (const void *const in_data, unsigned int in_len, break; - case 9: *op++ = *ref++; - case 8: *op++ = *ref++; - case 7: *op++ = *ref++; - case 6: *op++ = *ref++; - case 5: *op++ = *ref++; - case 4: *op++ = *ref++; - case 3: *op++ = *ref++; - case 2: *op++ = *ref++; - case 1: *op++ = *ref++; + case 9: *op++ = *ref++; /* fall-thru */ + case 8: *op++ = *ref++; /* fall-thru */ + case 7: *op++ = *ref++; /* fall-thru */ + case 6: *op++ = *ref++; /* fall-thru */ + case 5: *op++ = *ref++; /* fall-thru */ + case 4: *op++ = *ref++; /* fall-thru */ + case 3: *op++ = *ref++; /* fall-thru */ + case 2: *op++ = *ref++; /* fall-thru */ + case 1: *op++ = *ref++; /* fall-thru */ case 0: *op++ = *ref++; /* two octets more */ - *op++ = *ref++; + *op++ = *ref++; /* fall-thru */ } #endif } @@ -182,4 +186,6 @@ lzf_decompress (const void *const in_data, unsigned int in_len, return op - (u8 *)out_data; } - +#if defined(__GNUC__) && __GNUC__ >= 5 +#pragma GCC diagnostic pop +#endif diff --git a/redis-android/src/main/jni/redis-4.0.11/src/memtest.c b/redis-android/src/main/jni/redis-5.0.0/src/memtest.c similarity index 100% rename from redis-android/src/main/jni/redis-4.0.11/src/memtest.c rename to redis-android/src/main/jni/redis-5.0.0/src/memtest.c diff --git a/redis-android/src/main/jni/redis-4.0.11/src/mkreleasehdr.sh b/redis-android/src/main/jni/redis-5.0.0/src/mkreleasehdr.sh similarity index 77% rename from redis-android/src/main/jni/redis-4.0.11/src/mkreleasehdr.sh rename to redis-android/src/main/jni/redis-5.0.0/src/mkreleasehdr.sh index 1ae9588..e6d558b 100755 --- a/redis-android/src/main/jni/redis-4.0.11/src/mkreleasehdr.sh +++ b/redis-android/src/main/jni/redis-5.0.0/src/mkreleasehdr.sh @@ -2,6 +2,9 @@ GIT_SHA1=`(git show-ref --head --hash=8 2> /dev/null || echo 00000000) | head -n1` GIT_DIRTY=`git diff --no-ext-diff 2> /dev/null | wc -l` BUILD_ID=`uname -n`"-"`date +%s` +if [ -n 
"$SOURCE_DATE_EPOCH" ]; then + BUILD_ID=$(date -u -d "@$SOURCE_DATE_EPOCH" +%s 2>/dev/null || date -u -r "$SOURCE_DATE_EPOCH" +%s 2>/dev/null || date -u %s) +fi test -f release.h || touch release.h (cat release.h | grep SHA1 | grep $GIT_SHA1) && \ (cat release.h | grep DIRTY | grep $GIT_DIRTY) && exit 0 # Already up-to-date diff --git a/redis-android/src/main/jni/redis-4.0.11/src/module.c b/redis-android/src/main/jni/redis-5.0.0/src/module.c similarity index 80% rename from redis-android/src/main/jni/redis-4.0.11/src/module.c rename to redis-android/src/main/jni/redis-5.0.0/src/module.c index 8eb3f8a..20d159d 100644 --- a/redis-android/src/main/jni/redis-4.0.11/src/module.c +++ b/redis-android/src/main/jni/redis-5.0.0/src/module.c @@ -64,6 +64,7 @@ struct AutoMemEntry { #define REDISMODULE_AM_STRING 1 #define REDISMODULE_AM_REPLY 2 #define REDISMODULE_AM_FREED 3 /* Explicitly freed by user already. */ +#define REDISMODULE_AM_DICT 4 /* The pool allocator block. Redis Modules can allocate memory via this special * allocator that will automatically release it all once the callback returns. @@ -127,6 +128,7 @@ typedef struct RedisModuleCtx RedisModuleCtx; #define REDISMODULE_CTX_BLOCKED_REPLY (1<<3) #define REDISMODULE_CTX_BLOCKED_TIMEOUT (1<<4) #define REDISMODULE_CTX_THREAD_SAFE (1<<5) +#define REDISMODULE_CTX_BLOCKED_DISCONNECTED (1<<6) /* This represents a Redis key opened with RM_OpenKey(). */ struct RedisModuleKey { @@ -157,7 +159,9 @@ typedef struct RedisModuleKey RedisModuleKey; /* Function pointer type of a function representing a command inside * a Redis module. */ +struct RedisModuleBlockedClient; typedef int (*RedisModuleCmdFunc) (RedisModuleCtx *ctx, void **argv, int argc); +typedef void (*RedisModuleDisconnectFunc) (RedisModuleCtx *ctx, struct RedisModuleBlockedClient *bc); /* This struct holds the information about a command registered by a module.*/ struct RedisModuleCommandProxy { @@ -200,7 +204,8 @@ typedef struct RedisModuleBlockedClient { RedisModule *module; /* Module blocking the client. */ RedisModuleCmdFunc reply_callback; /* Reply callback on normal completion.*/ RedisModuleCmdFunc timeout_callback; /* Reply callback on timeout. */ - void (*free_privdata)(void *); /* privdata cleanup callback. */ + RedisModuleDisconnectFunc disconnect_callback; /* Called on disconnection.*/ + void (*free_privdata)(RedisModuleCtx*,void*);/* privdata cleanup callback.*/ void *privdata; /* Module private data that may be used by the reply or timeout callback. It is set via the RedisModule_UnblockClient() API. */ @@ -237,9 +242,21 @@ typedef struct RedisModuleKeyspaceSubscriber { /* The module keyspace notification subscribers list */ static list *moduleKeyspaceSubscribers; -/* Static client recycled for all notification clients, to avoid allocating - * per round. */ -static client *moduleKeyspaceSubscribersClient; +/* Static client recycled for when we need to provide a context with a client + * in a situation where there is no client to provide. This avoidsallocating + * a new client per round. For instance this is used in the keyspace + * notifications, timers and cluster messages callbacks. */ +static client *moduleFreeContextReusedClient; + +/* Data structures related to the exported dictionary data structure. */ +typedef struct RedisModuleDict { + rax *rax; /* The radix tree. 
*/ +} RedisModuleDict; + +typedef struct RedisModuleDictIter { + RedisModuleDict *dict; + raxIterator ri; +} RedisModuleDictIter; /* -------------------------------------------------------------------------- * Prototypes @@ -252,6 +269,7 @@ robj **moduleCreateArgvFromUserFormat(const char *cmdname, const char *fmt, int void moduleReplicateMultiIfNeeded(RedisModuleCtx *ctx); void RM_ZsetRangeStop(RedisModuleKey *kp); static void zsetKeyReset(RedisModuleKey *key); +void RM_FreeDict(RedisModuleCtx *ctx, RedisModuleDict *d); /* -------------------------------------------------------------------------- * Heap allocation raw functions @@ -470,7 +488,7 @@ void moduleHandlePropagationAfterCommandCallback(RedisModuleCtx *ctx) { if (c->flags & CLIENT_LUA) return; /* Handle the replication of the final EXEC, since whatever a command - * emits is always wrappered around MULTI/EXEC. */ + * emits is always wrapped around MULTI/EXEC. */ if (ctx->flags & REDISMODULE_CTX_MULTI_EMITTED) { robj *propargv[1]; propargv[0] = createStringObject("EXEC",4); @@ -544,7 +562,7 @@ void RM_KeyAtPos(RedisModuleCtx *ctx, int pos) { ctx->keys_pos[ctx->keys_count++] = pos; } -/* Helper for RM_CreateCommand(). Truns a string representing command +/* Helper for RM_CreateCommand(). Turns a string representing command * flags into the command flags used by the Redis core. * * It returns the set of flags, or -1 if unknown flags are found. */ @@ -591,7 +609,7 @@ int commandFlagsFromString(char *s) { * And is supposed to always return REDISMODULE_OK. * * The set of flags 'strflags' specify the behavior of the command, and should - * be passed as a C string compoesd of space separated words, like for + * be passed as a C string composed of space separated words, like for * example "write deny-oom". The set of flags are: * * * **"write"**: The command may modify the data set (it may also read @@ -612,7 +630,7 @@ int commandFlagsFromString(char *s) { * * **"allow-stale"**: The command is allowed to run on slaves that don't * serve stale data. Don't use if you don't know what * this means. - * * **"no-monitor"**: Don't propoagate the command on monitor. Use this if + * * **"no-monitor"**: Don't propagate the command on monitor. Use this if * the command has sensitive data among the arguments. * * **"fast"**: The command time complexity is not greater * than O(log(N)) where N is the size of the collection or @@ -773,6 +791,7 @@ void autoMemoryCollect(RedisModuleCtx *ctx) { case REDISMODULE_AM_STRING: decrRefCount(ptr); break; case REDISMODULE_AM_REPLY: RM_FreeCallReply(ptr); break; case REDISMODULE_AM_KEY: RM_CloseKey(ptr); break; + case REDISMODULE_AM_DICT: RM_FreeDict(NULL,ptr); break; } } ctx->flags |= REDISMODULE_CTX_AUTO_MEMORY; @@ -790,19 +809,26 @@ void autoMemoryCollect(RedisModuleCtx *ctx) { * with RedisModule_FreeString(), unless automatic memory is enabled. * * The string is created by copying the `len` bytes starting - * at `ptr`. No reference is retained to the passed buffer. */ + * at `ptr`. No reference is retained to the passed buffer. + * + * The module context 'ctx' is optional and may be NULL if you want to create + * a string outside the context scope. However in that case, the automatic + * memory management will not be available, and the string memory must be + * managed manually.
*/ RedisModuleString *RM_CreateString(RedisModuleCtx *ctx, const char *ptr, size_t len) { RedisModuleString *o = createStringObject(ptr,len); - autoMemoryAdd(ctx,REDISMODULE_AM_STRING,o); + if (ctx != NULL) autoMemoryAdd(ctx,REDISMODULE_AM_STRING,o); return o; } - /* Create a new module string object from a printf format and arguments. * The returned string must be freed with RedisModule_FreeString(), unless * automatic memory is enabled. * - * The string is created using the sds formatter function sdscatvprintf(). */ + * The string is created using the sds formatter function sdscatvprintf(). + * + * The passed context 'ctx' may be NULL if necessary, see the + * RedisModule_CreateString() documentation for more info. */ RedisModuleString *RM_CreateStringPrintf(RedisModuleCtx *ctx, const char *fmt, ...) { sds s = sdsempty(); @@ -812,7 +838,7 @@ RedisModuleString *RM_CreateStringPrintf(RedisModuleCtx *ctx, const char *fmt, . va_end(ap); RedisModuleString *o = createObject(OBJ_STRING, s); - autoMemoryAdd(ctx,REDISMODULE_AM_STRING,o); + if (ctx != NULL) autoMemoryAdd(ctx,REDISMODULE_AM_STRING,o); return o; } @@ -822,7 +848,10 @@ RedisModuleString *RM_CreateStringPrintf(RedisModuleCtx *ctx, const char *fmt, . * integer instead of taking a buffer and its length. * * The returned string must be released with RedisModule_FreeString() or by - * enabling automatic memory management. */ + * enabling automatic memory management. + * + * The passed context 'ctx' may be NULL if necessary, see the + * RedisModule_CreateString() documentation for more info. */ RedisModuleString *RM_CreateStringFromLongLong(RedisModuleCtx *ctx, long long ll) { char buf[LONG_STR_SIZE]; size_t len = ll2string(buf,sizeof(buf),ll); @@ -833,10 +862,13 @@ RedisModuleString *RM_CreateStringFromLongLong(RedisModuleCtx *ctx, long long ll * RedisModuleString. * * The returned string must be released with RedisModule_FreeString() or by - * enabling automatic memory management. */ + * enabling automatic memory management. + * + * The passed context 'ctx' may be NULL if necessary, see the + * RedisModule_CreateString() documentation for more info. */ RedisModuleString *RM_CreateStringFromString(RedisModuleCtx *ctx, const RedisModuleString *str) { RedisModuleString *o = dupStringObject(str); - autoMemoryAdd(ctx,REDISMODULE_AM_STRING,o); + if (ctx != NULL) autoMemoryAdd(ctx,REDISMODULE_AM_STRING,o); return o; } @@ -845,10 +877,16 @@ RedisModuleString *RM_CreateStringFromString(RedisModuleCtx *ctx, const RedisMod * * It is possible to call this function even when automatic memory management * is enabled. In that case the string will be released ASAP and removed - * from the pool of string to release at the end. */ + * from the pool of string to release at the end. + * + * If the string was created with a NULL context 'ctx', it is also possible to + * pass ctx as NULL when releasing the string (but passing a context will not + * create any issue). Strings created with a context should be freed also passing + * the context, so if you want to free a string out of context later, make sure + * to create it using a NULL context. 
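A hedged sketch of the pattern these docs describe (the function and variable names are illustrative, not part of the patch): create a string with a NULL context when it must outlive the callback, and free it the same way.

    #include "redismodule.h"

    static RedisModuleString *global_greeting; /* Illustrative module-level state. */

    static void createGreeting(void) {
        /* NULL context: no automatic memory management, survives callbacks. */
        global_greeting = RedisModule_CreateString(NULL, "hello", 5);
    }

    static void destroyGreeting(void) {
        /* Created with a NULL ctx, so it may be freed with a NULL ctx too. */
        RedisModule_FreeString(NULL, global_greeting);
        global_greeting = NULL;
    }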
*/ void RM_FreeString(RedisModuleCtx *ctx, RedisModuleString *str) { decrRefCount(str); - autoMemoryFreed(ctx,REDISMODULE_AM_STRING,str); + if (ctx != NULL) autoMemoryFreed(ctx,REDISMODULE_AM_STRING,str); } /* Every call to this function, will make the string 'str' requiring @@ -872,9 +910,11 @@ void RM_FreeString(RedisModuleCtx *ctx, RedisModuleString *str) { * Note that when memory management is turned off, you don't need * any call to RetainString() since creating a string will always result * into a string that lives after the callback function returns, if - * no FreeString() call is performed. */ + * no FreeString() call is performed. + * + * It is possible to call this function with a NULL context. */ void RM_RetainString(RedisModuleCtx *ctx, RedisModuleString *str) { - if (!autoMemoryFreed(ctx,REDISMODULE_AM_STRING,str)) { + if (ctx == NULL || !autoMemoryFreed(ctx,REDISMODULE_AM_STRING,str)) { /* Increment the string reference counting only if we can't * just remove the object from the list of objects that should * be reclaimed. Why we do that, instead of just incrementing @@ -952,9 +992,9 @@ RedisModuleString *moduleAssertUnsharedString(RedisModuleString *str) { return str; } -/* Append the specified buffere to the string 'str'. The string must be a +/* Append the specified buffer to the string 'str'. The string must be a * string created by the user that is referenced only a single time, otherwise - * REDISMODULE_ERR is returend and the operation is not performed. */ + * REDISMODULE_ERR is returned and the operation is not performed. */ int RM_StringAppendBuffer(RedisModuleCtx *ctx, RedisModuleString *str, const char *buf, size_t len) { UNUSED(ctx); str = moduleAssertUnsharedString(str); @@ -999,13 +1039,21 @@ int RM_WrongArity(RedisModuleCtx *ctx) { * The function returns the client pointer depending on the context, or * NULL if there is no potential client. This happens when we are in the * context of a thread safe context that was not initialized with a blocked - * client object. */ + * client object. Other contexts without associated clients are the ones + * initialized to run the timers callbacks. */ client *moduleGetReplyClient(RedisModuleCtx *ctx) { - if (!(ctx->flags & REDISMODULE_CTX_THREAD_SAFE) && ctx->client) + if (ctx->flags & REDISMODULE_CTX_THREAD_SAFE) { + if (ctx->blocked_client) + return ctx->blocked_client->reply_client; + else + return NULL; + } else { + /* If this is a non thread safe context, just return the client + * that is running the command if any. This may be NULL as well + * in the case of contexts that are not executed with associated + * clients, like timer contexts. */ return ctx->client; - if (ctx->blocked_client) - return ctx->blocked_client->reply_client; - return NULL; + } } /* Send an integer reply to the client, with the specified long long value. @@ -1106,7 +1154,7 @@ int RM_ReplyWithArray(RedisModuleCtx *ctx, long len) { * * Note that in the above example there is no reason to postpone the array * length, since we produce a fixed number of elements, but in the practice - * the code may use an interator or other ways of creating the output so + * the code may use an iterator or other ways of creating the output so * that is not easy to calculate in advance the number of elements. */ void RM_ReplySetArrayLength(RedisModuleCtx *ctx, long len) { @@ -1300,51 +1348,57 @@ int RM_GetSelectedDb(RedisModuleCtx *ctx) { } -/* Return the current context's flags. The flags provide information on the +/* Return the current context's flags. 
The flags provide information on the * current request context (whether the client is a Lua script or in a MULTI), - * and about the Redis instance in general, i.e replication and persistence. - * + * and about the Redis instance in general, i.e replication and persistence. + * * The available flags are: - * + * * * REDISMODULE_CTX_FLAGS_LUA: The command is running in a Lua script - * + * * * REDISMODULE_CTX_FLAGS_MULTI: The command is running inside a transaction - * + * * * REDISMODULE_CTX_FLAGS_MASTER: The Redis instance is a master - * + * * * REDISMODULE_CTX_FLAGS_SLAVE: The Redis instance is a slave - * + * * * REDISMODULE_CTX_FLAGS_READONLY: The Redis instance is read-only - * + * * * REDISMODULE_CTX_FLAGS_CLUSTER: The Redis instance is in cluster mode - * + * * * REDISMODULE_CTX_FLAGS_AOF: The Redis instance has AOF enabled - * + * * * REDISMODULE_CTX_FLAGS_RDB: The instance has RDB enabled - * + * * * REDISMODULE_CTX_FLAGS_MAXMEMORY: The instance has Maxmemory set - * + * * * REDISMODULE_CTX_FLAGS_EVICT: Maxmemory is set and has an eviction * policy that may delete keys + * + * * REDISMODULE_CTX_FLAGS_OOM: Redis is out of memory according to the + * maxmemory setting. + * + * * REDISMODULE_CTX_FLAGS_OOM_WARNING: Less than 25% of memory remains before + * reaching the maxmemory level. */ int RM_GetContextFlags(RedisModuleCtx *ctx) { - + int flags = 0; /* Client specific flags */ if (ctx->client) { - if (ctx->client->flags & CLIENT_LUA) + if (ctx->client->flags & CLIENT_LUA) flags |= REDISMODULE_CTX_FLAGS_LUA; - if (ctx->client->flags & CLIENT_MULTI) + if (ctx->client->flags & CLIENT_MULTI) flags |= REDISMODULE_CTX_FLAGS_MULTI; } if (server.cluster_enabled) flags |= REDISMODULE_CTX_FLAGS_CLUSTER; - + /* Maxmemory and eviction policy */ if (server.maxmemory > 0) { flags |= REDISMODULE_CTX_FLAGS_MAXMEMORY; - + if (server.maxmemory_policy != MAXMEMORY_NO_EVICTION) flags |= REDISMODULE_CTX_FLAGS_EVICT; } @@ -1363,7 +1417,13 @@ int RM_GetContextFlags(RedisModuleCtx *ctx) { if (server.repl_slave_ro) flags |= REDISMODULE_CTX_FLAGS_READONLY; } - + + /* OOM flag. */ + float level; + int retval = getMaxmemoryState(NULL,NULL,NULL,&level); + if (retval == C_ERR) flags |= REDISMODULE_CTX_FLAGS_OOM; + if (level > 0.75) flags |= REDISMODULE_CTX_FLAGS_OOM_WARNING; + return flags; } @@ -1386,7 +1446,7 @@ int RM_SelectDb(RedisModuleCtx *ctx, int newid) { * to call other APIs with the key handle as argument to perform * operations on the key. * - * The return value is the handle repesenting the key, that must be + * The return value is the handle representing the key, that must be * closed with RM_CloseKey(). * * If the key does not exist and WRITE mode is requested, the handle @@ -1640,7 +1700,7 @@ int RM_StringTruncate(RedisModuleKey *key, size_t newlen) { * Key API for List type * -------------------------------------------------------------------------- */ -/* Push an element into a list, on head or tail depending on 'where' argumnet. +/* Push an element into a list, on head or tail depending on 'where' argument. * If the key pointer is about an empty key opened for writing, the key * is created. On error (key opened for read-only operations or of the wrong * type) REDISMODULE_ERR is returned, otherwise REDISMODULE_OK is returned. 
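A sketch of how a module might consume the new OOM flags (the command function name is hypothetical; the flag tests follow the list above):

    #include "redismodule.h"

    /* Illustrative command: refuse expensive work when Redis reports OOM. */
    int MyCmd_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
        REDISMODULE_NOT_USED(argv);
        REDISMODULE_NOT_USED(argc);
        int flags = RedisModule_GetContextFlags(ctx);
        if (flags & REDISMODULE_CTX_FLAGS_OOM)
            return RedisModule_ReplyWithError(ctx, "ERR server is out of memory");
        return RedisModule_ReplyWithSimpleString(ctx, "OK");
    }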
*/ @@ -1745,7 +1805,7 @@ int RM_ZsetAdd(RedisModuleKey *key, double score, RedisModuleString *ele, int *f * The input and output flags, and the return value, have the same exact * meaning, with the only difference that this function will return * REDISMODULE_ERR even when 'score' is a valid double number, but adding it - * to the existing score resuts into a NaN (not a number) condition. + * to the existing score results into a NaN (not a number) condition. * * This function has an additional field 'newscore', if not NULL is filled * with the new score of the element after the increment, if no error @@ -2126,7 +2186,9 @@ int RM_ZsetRangePrev(RedisModuleKey *key) { * * The function is variadic and the user must specify pairs of field * names and values, both as RedisModuleString pointers (unless the - * CFIELD option is set, see later). + * CFIELD option is set, see later). At the end of the field/value-ptr pairs, + * NULL must be specified as last argument to signal the end of the arguments + * in the variadic function. * * Example to set the hash argv[1] to the value argv[2]: * @@ -2215,6 +2277,9 @@ int RM_HashSet(RedisModuleKey *key, int flags, ...) { * to avoid a useless copy. */ if (flags & REDISMODULE_HASH_CFIELDS) low_flags |= HASH_SET_TAKE_FIELD; + + robj *argv[2] = {field,value}; + hashTypeTryConversion(key->value,argv,0,1); updated += hashTypeSet(key->value, field->ptr, value->ptr, low_flags); /* If CFIELDS is active, SDS string ownership is now of hashTypeSet(), @@ -2685,9 +2750,9 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch sds proto = sdsnewlen(c->buf,c->bufpos); c->bufpos = 0; while(listLength(c->reply)) { - sds o = listNodeValue(listFirst(c->reply)); + clientReplyBlock *o = listNodeValue(listFirst(c->reply)); - proto = sdscatsds(proto,o); + proto = sdscatlen(proto,o->buf,o->used); listDelNode(c->reply,listFirst(c->reply)); } reply = moduleCreateCallReplyFromProto(ctx,proto); @@ -2960,7 +3025,7 @@ int RM_ModuleTypeSetValue(RedisModuleKey *key, moduleType *mt, void *value) { } /* Assuming RedisModule_KeyType() returned REDISMODULE_KEYTYPE_MODULE on - * the key, returns the moduel type pointer of the value stored at key. + * the key, returns the module type pointer of the value stored at key. * * If the key is NULL, is not associated with a module type, or is empty, * then NULL is returned instead. */ @@ -3260,7 +3325,7 @@ void RM_DigestAddLongLong(RedisModuleDigest *md, long long ll) { mixDigest(md->o,buf,len); } -/* See the doucmnetation for `RedisModule_DigestAddElement()`. */ +/* See the documentation for `RedisModule_DigestAddElement()`. */ void RM_DigestEndSequence(RedisModuleDigest *md) { xorDigest(md->x,md->o,sizeof(md->o)); memset(md->o,0,sizeof(md->o)); @@ -3372,7 +3437,7 @@ void RM_LogRaw(RedisModule *module, const char *levelstr, const char *fmt, va_li * * If the specified log level is invalid, verbose is used by default. * There is a fixed limit to the length of the log line this function is able - * to emit, this limti is not specified but is guaranteed to be more than + * to emit, this limit is not specified but is guaranteed to be more than * a few lines of text. */ void RM_Log(RedisModuleCtx *ctx, const char *levelstr, const char *fmt, ...) { @@ -3425,6 +3490,17 @@ void moduleBlockedClientPipeReadable(aeEventLoop *el, int fd, void *privdata, in * running the list of clients blocked by a module that need to be unblocked. 
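One detail from the RedisModule_HashSet() documentation above that is easy to miss: the variadic field/value list must be NULL terminated. A minimal hedged sketch (the helper name is illustrative; 'key' is assumed open for writing and 'argv' to hold field/value string pairs):

    #include "redismodule.h"

    /* Set two fields on an open hash key; NULL marks the end of the pairs. */
    static void setTwoFields(RedisModuleKey *key, RedisModuleString **argv) {
        RedisModule_HashSet(key, REDISMODULE_HASH_NONE,
                            argv[1], argv[2],
                            argv[3], argv[4],
                            NULL);
    }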
*/ void unblockClientFromModule(client *c) { RedisModuleBlockedClient *bc = c->bpop.module_blocked_handle; + + /* Call the disconnection callback if any. */ + if (bc->disconnect_callback) { + RedisModuleCtx ctx = REDISMODULE_CTX_INIT; + ctx.blocked_privdata = bc->privdata; + ctx.module = bc->module; + ctx.client = bc->client; + bc->disconnect_callback(&ctx,bc); + moduleFreeContext(&ctx); + } + bc->client = NULL; /* Reset the client for a new query since, for blocking commands implemented * into modules, we do not do it immediately after the command returns (and @@ -3446,10 +3522,10 @@ * reply_timeout: called when the timeout is reached in order to send an * error to the client. * - * free_privdata: called in order to free the privata data that is passed + * free_privdata: called in order to free the private data that is passed * by RedisModule_UnblockClient() call. */ -RedisModuleBlockedClient *RM_BlockClient(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(void*), long long timeout_ms) { +RedisModuleBlockedClient *RM_BlockClient(RedisModuleCtx *ctx, RedisModuleCmdFunc reply_callback, RedisModuleCmdFunc timeout_callback, void (*free_privdata)(RedisModuleCtx*,void*), long long timeout_ms) { client *c = ctx->client; int islua = c->flags & CLIENT_LUA; int ismulti = c->flags & CLIENT_MULTI; @@ -3465,6 +3541,7 @@ RedisModuleBlockedClient *RM_BlockClient(RedisModuleCtx *ctx, RedisModuleCmdFunc bc->module = ctx->module; bc->reply_callback = reply_callback; bc->timeout_callback = timeout_callback; + bc->disconnect_callback = NULL; /* Set by RM_SetDisconnectCallback() */ bc->free_privdata = free_privdata; bc->privdata = NULL; bc->reply_client = createClient(-1); @@ -3506,12 +3583,33 @@ int RM_UnblockClient(RedisModuleBlockedClient *bc, void *privdata) { } /* Abort a blocked client blocking operation: the client will be unblocked - * without firing the reply callback. */ + * without firing any callback. */ int RM_AbortBlock(RedisModuleBlockedClient *bc) { bc->reply_callback = NULL; + bc->disconnect_callback = NULL; return RM_UnblockClient(bc,NULL); } +/* Set a callback that will be called if a blocked client disconnects + * before the module has a chance to call RedisModule_UnblockClient(). + * + * Usually what you want to do here is to clean up your module state + * so that you can call RedisModule_UnblockClient() safely, otherwise + * the client will remain blocked forever if the timeout is large. + * + * Notes: + * + * 1. It is not safe to call Reply* family functions here; it is also + * useless since the client is gone. + * + * 2. This callback is not called if the client disconnects because of + * a timeout. In such a case, the client is unblocked automatically + * and the timeout callback is called. + */ +void RM_SetDisconnectCallback(RedisModuleBlockedClient *bc, RedisModuleDisconnectFunc callback) { + bc->disconnect_callback = callback; +} + /* This function will check the moduleUnblockedClients queue in order to * call the reply callback and really unblock the client. * @@ -3547,14 +3645,23 @@ void moduleHandleBlockedClients(void) { ctx.blocked_privdata = bc->privdata; ctx.module = bc->module; ctx.client = bc->client; + ctx.blocked_client = bc; bc->reply_callback(&ctx,(void**)c->argv,c->argc); moduleHandlePropagationAfterCommandCallback(&ctx); moduleFreeContext(&ctx); } /* Free privdata if any.
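Tying the new pieces together, a hedged sketch of a blocking command that registers the new disconnect callback (the callback names are illustrative; the free_privdata signature matches the new RedisModuleCtx-taking form introduced above):

    #include "redismodule.h"

    int myReply(RedisModuleCtx *ctx, RedisModuleString **argv, int argc);
    int myTimeout(RedisModuleCtx *ctx, RedisModuleString **argv, int argc);
    void myFreePrivdata(RedisModuleCtx *ctx, void *privdata);

    /* Called if the client goes away before RedisModule_UnblockClient(). */
    void myDisconnected(RedisModuleCtx *ctx, RedisModuleBlockedClient *bc) {
        RedisModule_Log(ctx, "warning", "Blocked client %p disconnected!",
                        (void*)bc);
        /* Flag 'bc' in module state; a worker must still unblock it once. */
    }

    int Block_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
        REDISMODULE_NOT_USED(argv);
        REDISMODULE_NOT_USED(argc);
        RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx,
            myReply, myTimeout, myFreePrivdata, 10000 /* ms */);
        RedisModule_SetDisconnectCallback(bc, myDisconnected);
        /* ... hand 'bc' to a worker thread that calls UnblockClient ... */
        return REDISMODULE_OK;
    }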
*/ - if (bc->privdata && bc->free_privdata) - bc->free_privdata(bc->privdata); + if (bc->privdata && bc->free_privdata) { + RedisModuleCtx ctx = REDISMODULE_CTX_INIT; + if (c == NULL) + ctx.flags |= REDISMODULE_CTX_BLOCKED_DISCONNECTED; + ctx.blocked_privdata = bc->privdata; + ctx.module = bc->module; + ctx.client = bc->client; + bc->free_privdata(&ctx,bc->privdata); + moduleFreeContext(&ctx); + } /* It is possible that this blocked client object accumulated * replies to send to the client in a thread safe context. @@ -3571,6 +3678,10 @@ freeClient(bc->reply_client); if (c != NULL) { + /* Before unblocking the client, set the disconnect callback + * to NULL, because if we reached this point, the client was + * properly unblocked by the module. */ + bc->disconnect_callback = NULL; unblockClient(c); /* Put the client in the list of clients that need to write * if there are pending replies here. This is needed since @@ -3604,8 +3715,13 @@ void moduleBlockedClientTimedOut(client *c) { ctx.flags |= REDISMODULE_CTX_BLOCKED_TIMEOUT; ctx.module = bc->module; ctx.client = bc->client; + ctx.blocked_client = bc; bc->timeout_callback(&ctx,(void**)c->argv,c->argc); moduleFreeContext(&ctx); + /* For timeout events, we do not want to call the disconnect callback, + * because the blocked client will be automatically disconnected in + * this case, and the user can still hook using the timeout callback. */ + bc->disconnect_callback = NULL; } /* Return non-zero if a module command was called in order to fill the @@ -3620,11 +3736,26 @@ int RM_IsBlockedTimeoutRequest(RedisModuleCtx *ctx) { return (ctx->flags & REDISMODULE_CTX_BLOCKED_TIMEOUT) != 0; } -/* Get the privata data set by RedisModule_UnblockClient() */ +/* Get the private data set by RedisModule_UnblockClient() */ void *RM_GetBlockedClientPrivateData(RedisModuleCtx *ctx) { return ctx->blocked_privdata; } +/* Get the blocked client associated with a given context. + * This is useful in the reply and timeout callbacks of blocked clients, + * because sometimes the module has references to the blocked client handle + * around, and wants to clean it up. */ +RedisModuleBlockedClient *RM_GetBlockedClientHandle(RedisModuleCtx *ctx) { + return ctx->blocked_client; +} + +/* Return true if, when the free callback of a blocked client is called, + * the reason for the client being unblocked is that it disconnected + * while it was blocked. */ +int RM_BlockedClientDisconnected(RedisModuleCtx *ctx) { + return (ctx->flags & REDISMODULE_CTX_BLOCKED_DISCONNECTED) != 0; +} + /* -------------------------------------------------------------------------- * Thread Safe Contexts * -------------------------------------------------------------------------- */ @@ -3676,13 +3807,13 @@ void RM_FreeThreadSafeContext(RedisModuleCtx *ctx) { * This is not needed for `RedisModule_Reply*` calls when there is * a blocked client connected to the thread safe context. */ void RM_ThreadSafeContextLock(RedisModuleCtx *ctx) { - DICT_NOTUSED(ctx); + UNUSED(ctx); moduleAcquireGIL(); } /* Release the server lock after a thread safe API call was executed. */ void RM_ThreadSafeContextUnlock(RedisModuleCtx *ctx) { - DICT_NOTUSED(ctx); + UNUSED(ctx); moduleReleaseGIL(); } @@ -3700,11 +3831,11 @@ void moduleReleaseGIL(void) { * -------------------------------------------------------------------------- */ /* Subscribe to keyspace notifications. This is a low-level version of the - * keyspace-notifications API. A module cand register callbacks to be notified + * keyspace-notifications API. A module can register callbacks to be notified + * when keyspace events occur. * * Notification events are filtered by their type (string events, set events, - * etc), and the subsriber callback receives only events that match a specific + * etc), and the subscriber callback receives only events that match a specific * mask of event types. * * When subscribing to notifications with RedisModule_SubscribeToKeyspaceEvents @@ -3737,9 +3868,9 @@ * * Notification callback gets executed with a redis context that can not be * used to send anything to the client, and has the db number where the event - * occured as its selected db number. + * occurred as its selected db number. * - * Notice that it is not necessary to enable norifications in redis.conf for + * Notice that it is not necessary to enable notifications in redis.conf for * module notifications to work. * * Warning: the notification callbacks are performed in a synchronous manner, @@ -3780,10 +3911,10 @@ void moduleNotifyKeyspaceEvent(int type, const char *event, robj *key, int dbid) if ((sub->event_mask & type) && sub->active == 0) { RedisModuleCtx ctx = REDISMODULE_CTX_INIT; ctx.module = sub->module; - ctx.client = moduleKeyspaceSubscribersClient; + ctx.client = moduleFreeContextReusedClient; selectDb(ctx.client, dbid); - /* mark the handler as activer to avoid reentrant loops. + /* mark the handler as active to avoid reentrant loops. * If the subscriber performs an action triggering itself, * it will not be notified about it. */ sub->active = 1; @@ -3794,7 +3925,7 @@ void moduleNotifyKeyspaceEvent(int type, const char *event, robj *key, int dbid) } } -/* Unsubscribe any notification subscirbers this module has upon unloading */ +/* Unsubscribe any notification subscribers this module has upon unloading */ void moduleUnsubscribeNotifications(RedisModule *module) { listIter li; listNode *ln; @@ -3808,6 +3939,682 @@ void moduleUnsubscribeNotifications(RedisModule *module) { } } +/* -------------------------------------------------------------------------- + * Modules Cluster API + * -------------------------------------------------------------------------- */ + +/* The Cluster message callback function pointer type. */ +typedef void (*RedisModuleClusterMessageReceiver)(RedisModuleCtx *ctx, const char *sender_id, uint8_t type, const unsigned char *payload, uint32_t len); + +/* This structure identifies a registered caller: it must match a given module + * ID, for a given message type. The callback function is just the function + * that was registered as receiver. */ +typedef struct moduleClusterReceiver { + uint64_t module_id; + RedisModuleClusterMessageReceiver callback; + struct RedisModule *module; + struct moduleClusterReceiver *next; +} moduleClusterReceiver; + +typedef struct moduleClusterNodeInfo { + int flags; + char ip[NET_IP_STR_LEN]; + int port; + char master_id[40]; /* Only if flags & REDISMODULE_NODE_MASTER is true. */ +} moduleClusterNodeInfo; + +/* We have an array of message types: each bucket is a linked list of + * configured receivers. */ +static moduleClusterReceiver *clusterReceivers[UINT8_MAX]; + +/* Dispatch the message to the right module receiver.
*/ +void moduleCallClusterReceivers(const char *sender_id, uint64_t module_id, uint8_t type, const unsigned char *payload, uint32_t len) { + moduleClusterReceiver *r = clusterReceivers[type]; + while(r) { + if (r->module_id == module_id) { + RedisModuleCtx ctx = REDISMODULE_CTX_INIT; + ctx.module = r->module; + ctx.client = moduleFreeContextReusedClient; + selectDb(ctx.client, 0); + r->callback(&ctx,sender_id,type,payload,len); + moduleFreeContext(&ctx); + return; + } + r = r->next; + } +} + +/* Register a callback receiver for cluster messages of type 'type'. If there + * was already a registered callback, this will replace the callback function + * with the one provided, otherwise if the callback is set to NULL and there + * is already a callback for this message type, the callback is unregistered + * (so this API call is also used in order to delete the receiver). */ +void RM_RegisterClusterMessageReceiver(RedisModuleCtx *ctx, uint8_t type, RedisModuleClusterMessageReceiver callback) { + if (!server.cluster_enabled) return; + + uint64_t module_id = moduleTypeEncodeId(ctx->module->name,0); + moduleClusterReceiver *r = clusterReceivers[type], *prev = NULL; + while(r) { + if (r->module_id == module_id) { + /* Found! Set or delete. */ + if (callback) { + r->callback = callback; + } else { + /* Delete the receiver entry if the user is setting + * it to NULL. Just unlink the receiver node from the + * linked list. */ + if (prev) + prev->next = r->next; + else + clusterReceivers[type] = r->next; + zfree(r); + } + return; + } + prev = r; + r = r->next; + } + + /* Not found, let's add it. */ + if (callback) { + r = zmalloc(sizeof(*r)); + r->module_id = module_id; + r->module = ctx->module; + r->callback = callback; + r->next = clusterReceivers[type]; + clusterReceivers[type] = r; + } +} + +/* Send a message to all the nodes in the cluster if `target` is NULL, otherwise + * at the specified target, which is a REDISMODULE_NODE_ID_LEN bytes node ID, as + * returned by the receiver callback or by the nodes iteration functions. + * + * The function returns REDISMODULE_OK if the message was successfully sent, + * otherwise if the node is not connected or such node ID does not map to any + * known cluster node, REDISMODULE_ERR is returned. */ +int RM_SendClusterMessage(RedisModuleCtx *ctx, char *target_id, uint8_t type, unsigned char *msg, uint32_t len) { + if (!server.cluster_enabled) return REDISMODULE_ERR; + uint64_t module_id = moduleTypeEncodeId(ctx->module->name,0); + if (clusterSendModuleMessageToTarget(target_id,module_id,type,msg,len) == C_OK) + return REDISMODULE_OK; + else + return REDISMODULE_ERR; +} + +/* Return an array of string pointers, each string pointer points to a cluster + * node ID of exactly REDISMODULE_NODE_ID_LEN bytes (without any null term). + * The number of returned node IDs is stored into `*numnodes`. + * However if this function is called by a module not running on a Redis + * instance with Redis Cluster enabled, NULL is returned instead. + * + * The IDs returned can be used with RedisModule_GetClusterNodeInfo() in order + * to get more information about single nodes. + * + * The array returned by this function must be freed using the function + * RedisModule_FreeClusterNodesList().
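Before the node listing helpers and their own usage example below, here is a hedged sketch of the register/send pair just defined, similar in spirit to the hellocluster.c example module (the message type value and names are illustrative):

    #include "redismodule.h"

    #define MSGTYPE_PING 1   /* Application-defined message type (0-255). */

    void PingReceiver(RedisModuleCtx *ctx, const char *sender_id,
                      uint8_t type, const unsigned char *payload, uint32_t len) {
        REDISMODULE_NOT_USED(payload);
        RedisModule_Log(ctx, "notice", "PING type %d, %u bytes, from %.*s",
                        type, len, REDISMODULE_NODE_ID_LEN, sender_id);
    }

    int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
        REDISMODULE_NOT_USED(argv);
        REDISMODULE_NOT_USED(argc);
        if (RedisModule_Init(ctx, "pingmod", 1, REDISMODULE_APIVER_1)
            == REDISMODULE_ERR) return REDISMODULE_ERR;
        RedisModule_RegisterClusterMessageReceiver(ctx, MSGTYPE_PING, PingReceiver);
        /* A NULL target broadcasts to all known cluster nodes: */
        RedisModule_SendClusterMessage(ctx, NULL, MSGTYPE_PING,
                                       (unsigned char*)"hi", 2);
        return REDISMODULE_OK;
    }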
+ * + * Example: + * + * size_t count, j; + * char **ids = RedisModule_GetClusterNodesList(ctx,&count); + * for (j = 0; j < count; j++) { + * RedisModule_Log(ctx,"notice","Node %.*s", + * REDISMODULE_NODE_ID_LEN,ids[j]); + * } + * RedisModule_FreeClusterNodesList(ids); + */ +char **RM_GetClusterNodesList(RedisModuleCtx *ctx, size_t *numnodes) { + UNUSED(ctx); + + if (!server.cluster_enabled) return NULL; + size_t count = dictSize(server.cluster->nodes); + char **ids = zmalloc((count+1)*REDISMODULE_NODE_ID_LEN); + dictIterator *di = dictGetIterator(server.cluster->nodes); + dictEntry *de; + int j = 0; + while((de = dictNext(di)) != NULL) { + clusterNode *node = dictGetVal(de); + if (node->flags & (CLUSTER_NODE_NOADDR|CLUSTER_NODE_HANDSHAKE)) continue; + ids[j] = zmalloc(REDISMODULE_NODE_ID_LEN); + memcpy(ids[j],node->name,REDISMODULE_NODE_ID_LEN); + j++; + } + *numnodes = j; + ids[j] = NULL; /* Null term so that FreeClusterNodesList does not need + * to also get the count argument. */ + dictReleaseIterator(di); + return ids; +} + +/* Free the node list obtained with RedisModule_GetClusterNodesList. */ +void RM_FreeClusterNodesList(char **ids) { + if (ids == NULL) return; + for (int j = 0; ids[j]; j++) zfree(ids[j]); + zfree(ids); +} + +/* Return this node ID (REDISMODULE_NODE_ID_LEN bytes) or NULL if the cluster + * is disabled. */ +const char *RM_GetMyClusterID(void) { + if (!server.cluster_enabled) return NULL; + return server.cluster->myself->name; +} + +/* Return the number of nodes in the cluster, regardless of their state + * (handshake, noaddress, ...) so that the number of active nodes may actually + * be smaller, but not greater than this number. If the instance is not in + * cluster mode, zero is returned. */ +size_t RM_GetClusterSize(void) { + if (!server.cluster_enabled) return 0; + return dictSize(server.cluster->nodes); +} + +/* Populate the specified info for the node having as ID the specified 'id', + * then returns REDISMODULE_OK. Otherwise if the node ID does not exist from + * the POV of this local node, REDISMODULE_ERR is returned. + * + * The arguments ip, master_id, port and flags can be NULL in case we don't + * need to populate back certain info. If an ip and master_id (only populated + * if the instance is a slave) are specified, they point to buffers holding + * at least REDISMODULE_NODE_ID_LEN bytes. The strings written back as ip + * and master_id are not null terminated. + * + * The list of flags reported is the following: + * + * * REDISMODULE_NODE_MYSELF This node + * * REDISMODULE_NODE_MASTER The node is a master + * * REDISMODULE_NODE_SLAVE The node is a replica + * * REDISMODULE_NODE_PFAIL We see the node as failing + * * REDISMODULE_NODE_FAIL The cluster agrees the node is failing + * * REDISMODULE_NODE_NOFAILOVER The slave is configured to never failover + */ + +clusterNode *clusterLookupNode(const char *name); /* We need access to internals */ + +int RM_GetClusterNodeInfo(RedisModuleCtx *ctx, const char *id, char *ip, char *master_id, int *port, int *flags) { + UNUSED(ctx); + + clusterNode *node = clusterLookupNode(id); + if (node == NULL || + node->flags & (CLUSTER_NODE_NOADDR|CLUSTER_NODE_HANDSHAKE)) + return REDISMODULE_ERR; + + if (ip) memcpy(ip,node->ip,REDISMODULE_NODE_ID_LEN); + + if (master_id) { + /* If the information is not available, the function will set the + * field to zero bytes, so that when the field can't be populated the + * function remains predictable.
*/ + if (node->flags & CLUSTER_NODE_SLAVE && node->slaveof) + memcpy(master_id,node->slaveof->name,REDISMODULE_NODE_ID_LEN); + else + memset(master_id,0,REDISMODULE_NODE_ID_LEN); + } + if (port) *port = node->port; + + /* As usual we have to remap flags for modules, in order to ensure + * we can provide binary compatibility. */ + if (flags) { + *flags = 0; + if (node->flags & CLUSTER_NODE_MYSELF) *flags |= REDISMODULE_NODE_MYSELF; + if (node->flags & CLUSTER_NODE_MASTER) *flags |= REDISMODULE_NODE_MASTER; + if (node->flags & CLUSTER_NODE_SLAVE) *flags |= REDISMODULE_NODE_SLAVE; + if (node->flags & CLUSTER_NODE_PFAIL) *flags |= REDISMODULE_NODE_PFAIL; + if (node->flags & CLUSTER_NODE_FAIL) *flags |= REDISMODULE_NODE_FAIL; + if (node->flags & CLUSTER_NODE_NOFAILOVER) *flags |= REDISMODULE_NODE_NOFAILOVER; + } + return REDISMODULE_OK; +} + +/* Set Redis Cluster flags in order to change the normal behavior of + * Redis Cluster, especially with the goal of disabling certain functions. + * This is useful for modules that use the Cluster API in order to create + * a different distributed system, but still want to use the Redis Cluster + * message bus. Flags that can be set: + * + * CLUSTER_MODULE_FLAG_NO_FAILOVER + * CLUSTER_MODULE_FLAG_NO_REDIRECTION + * + * With the following effects: + * + * NO_FAILOVER: prevents Redis Cluster slaves from failing over a failing + * master. Also disables the replica migration feature. + * + * NO_REDIRECTION: Every node will accept any key, without trying to perform + * partitioning according to the user Redis Cluster algorithm. + * Slot information will still be propagated across the + * cluster, but without effects. */ +void RM_SetClusterFlags(RedisModuleCtx *ctx, uint64_t flags) { + UNUSED(ctx); + if (flags & REDISMODULE_CLUSTER_FLAG_NO_FAILOVER) + server.cluster_module_flags |= CLUSTER_MODULE_FLAG_NO_FAILOVER; + if (flags & REDISMODULE_CLUSTER_FLAG_NO_REDIRECTION) + server.cluster_module_flags |= CLUSTER_MODULE_FLAG_NO_REDIRECTION; +} + +/* -------------------------------------------------------------------------- + * Modules Timers API + * + * Module timers are a high precision "green timers" abstraction where + * every module can register even millions of timers without problems, even if + * the actual event loop will just have a single timer that is used to awake the + * module timers subsystem in order to process the next event. + * + * All the timers are stored into a radix tree, ordered by expire time. When + * the main Redis event loop timer callback is called, we try to process all + * the timers already expired one after the other. Then we re-enter the event + * loop registering a timer that will expire when the next pending module + * timer will expire. + * + * Every time the list of active timers drops to zero, we unregister the + * main event loop timer, so that there is no overhead when such feature is + * not used. + * -------------------------------------------------------------------------- */ + +static rax *Timers; /* The radix tree of all the timers sorted by expire. */ +long long aeTimer = -1; /* Main event loop (ae.c) timer identifier. */ + +typedef void (*RedisModuleTimerProc)(RedisModuleCtx *ctx, void *data); + +/* The timer descriptor, stored as value in the radix tree. */ +typedef struct RedisModuleTimer { + RedisModule *module; /* Module reference. */ + RedisModuleTimerProc callback; /* The callback to invoke on expire. */ + void *data; /* Private data for the callback.
+    int dbid;                       /* Database number selected by the original client. */
+} RedisModuleTimer;
+
+/* This is the timer handler that is called by the main event loop. We schedule
+ * this timer to be called when the nearest of our module timers will expire. */
+int moduleTimerHandler(struct aeEventLoop *eventLoop, long long id, void *clientData) {
+    UNUSED(eventLoop);
+    UNUSED(id);
+    UNUSED(clientData);
+
+    /* To start let's try to fire all the timers already expired. */
+    raxIterator ri;
+    raxStart(&ri,Timers);
+    uint64_t now = ustime();
+    long long next_period = 0;
+    while(1) {
+        raxSeek(&ri,"^",NULL,0);
+        if (!raxNext(&ri)) break;
+        uint64_t expiretime;
+        memcpy(&expiretime,ri.key,sizeof(expiretime));
+        expiretime = ntohu64(expiretime);
+        if (now >= expiretime) {
+            RedisModuleTimer *timer = ri.data;
+            RedisModuleCtx ctx = REDISMODULE_CTX_INIT;
+
+            ctx.module = timer->module;
+            ctx.client = moduleFreeContextReusedClient;
+            selectDb(ctx.client, timer->dbid);
+            timer->callback(&ctx,timer->data);
+            moduleFreeContext(&ctx);
+            raxRemove(Timers,(unsigned char*)ri.key,ri.key_len,NULL);
+            zfree(timer);
+        } else {
+            next_period = (expiretime-now)/1000; /* Scale to milliseconds. */
+            break;
+        }
+    }
+    raxStop(&ri);
+
+    /* Reschedule the handler for the next pending timer, or return AE_NOMORE
+     * to cancel the main timer when no module timers are left. */
+    if (next_period <= 0) next_period = 1;
+    return (raxSize(Timers) > 0) ? next_period : AE_NOMORE;
+}
+
+/* Create a new timer that will fire after `period` milliseconds, and will call
+ * the specified function using `data` as argument. The returned timer ID can be
+ * used to get information from the timer or to stop it before it fires. */
+RedisModuleTimerID RM_CreateTimer(RedisModuleCtx *ctx, mstime_t period, RedisModuleTimerProc callback, void *data) {
+    RedisModuleTimer *timer = zmalloc(sizeof(*timer));
+    timer->module = ctx->module;
+    timer->callback = callback;
+    timer->data = data;
+    timer->dbid = ctx->client->db->id;
+    uint64_t expiretime = ustime()+period*1000;
+    uint64_t key;
+
+    while(1) {
+        key = htonu64(expiretime);
+        if (raxFind(Timers, (unsigned char*)&key,sizeof(key)) == raxNotFound) {
+            raxInsert(Timers,(unsigned char*)&key,sizeof(key),timer,NULL);
+            break;
+        } else {
+            expiretime++;
+        }
+    }
+
+    /* We need to install the main event loop timer if it's not already
+     * installed, or we may need to refresh its period if we just installed
+     * a timer that will expire sooner than any other. */
+    if (aeTimer != -1) {
+        raxIterator ri;
+        raxStart(&ri,Timers);
+        raxSeek(&ri,"^",NULL,0);
+        raxNext(&ri);
+        if (memcmp(ri.key,&key,sizeof(key)) == 0) {
+            /* This is the first key, we need to re-install the timer according
+             * to the just added event. */
+            aeDeleteTimeEvent(server.el,aeTimer);
+            aeTimer = -1;
+        }
+        raxStop(&ri);
+    }
+
+    /* If we have no main timer (the old one was invalidated, or this is the
+     * first module timer we have), install one. */
+    if (aeTimer == -1)
+        aeTimer = aeCreateTimeEvent(server.el,period,moduleTimerHandler,NULL,NULL);
+
+    return key;
+}
+
+/* Stop a timer. Returns REDISMODULE_OK if the timer was found, belonged to the
+ * calling module, and was stopped, otherwise REDISMODULE_ERR is returned.
+ * If not NULL, the data pointer is set to the value of the data argument when
+ * the timer was created.
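+ *
+ * A minimal usage sketch, assuming 'tid' was previously returned by
+ * RedisModule_CreateTimer() and the callback data was allocated with
+ * RedisModule_Alloc():
+ *
+ *     void *data;
+ *     if (RedisModule_StopTimer(ctx,tid,&data) == REDISMODULE_OK)
+ *         RedisModule_Free(data);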
+ */
+int RM_StopTimer(RedisModuleCtx *ctx, RedisModuleTimerID id, void **data) {
+    RedisModuleTimer *timer = raxFind(Timers,(unsigned char*)&id,sizeof(id));
+    if (timer == raxNotFound || timer->module != ctx->module)
+        return REDISMODULE_ERR;
+    if (data) *data = timer->data;
+    raxRemove(Timers,(unsigned char*)&id,sizeof(id),NULL);
+    zfree(timer);
+    return REDISMODULE_OK;
+}
+
+/* Obtain information about a timer: its remaining time before firing
+ * (in milliseconds), and the private data pointer associated with the timer.
+ * If the specified timer does not exist or belongs to a different module,
+ * no information is returned and the function returns REDISMODULE_ERR,
+ * otherwise REDISMODULE_OK is returned. The arguments remaining or data
+ * can be NULL if the caller does not need certain information. */
+int RM_GetTimerInfo(RedisModuleCtx *ctx, RedisModuleTimerID id, uint64_t *remaining, void **data) {
+    RedisModuleTimer *timer = raxFind(Timers,(unsigned char*)&id,sizeof(id));
+    if (timer == raxNotFound || timer->module != ctx->module)
+        return REDISMODULE_ERR;
+    if (remaining) {
+        int64_t rem = ntohu64(id)-ustime();
+        if (rem < 0) rem = 0;
+        *remaining = rem/1000; /* Scale to milliseconds. */
+    }
+    if (data) *data = timer->data;
+    return REDISMODULE_OK;
+}
+
+/* --------------------------------------------------------------------------
+ * Modules Dictionary API
+ *
+ * Implements a sorted dictionary (actually backed by a radix tree) with
+ * the usual get / set / del / num-items API, together with an iterator
+ * capable of going back and forth.
+ * -------------------------------------------------------------------------- */
+
+/* Create a new dictionary. The 'ctx' pointer can be the current module context
+ * or NULL, depending on what you want. Please follow these rules:
+ *
+ * 1. Use a NULL context if you plan to retain a reference to this dictionary
+ *    that will survive the time of the module callback where you created it.
+ * 2. Use a NULL context if no context is available at the time you are creating
+ *    the dictionary (of course...).
+ * 3. However use the current callback context as 'ctx' argument if the
+ *    dictionary's lifetime is limited to the callback scope. In this
+ *    case, if enabled, you can enjoy the automatic memory management that will
+ *    reclaim the dictionary memory, as well as the strings returned by the
+ *    Next / Prev dictionary iterator calls.
+ */
+RedisModuleDict *RM_CreateDict(RedisModuleCtx *ctx) {
+    struct RedisModuleDict *d = zmalloc(sizeof(*d));
+    d->rax = raxNew();
+    if (ctx != NULL) autoMemoryAdd(ctx,REDISMODULE_AM_DICT,d);
+    return d;
+}
+
+/* Free a dictionary created with RM_CreateDict(). You need to pass the
+ * context pointer 'ctx' only if the dictionary was created using the
+ * context instead of passing NULL. */
+void RM_FreeDict(RedisModuleCtx *ctx, RedisModuleDict *d) {
+    if (ctx != NULL) autoMemoryFreed(ctx,REDISMODULE_AM_DICT,d);
+    raxFree(d->rax);
+    zfree(d);
+}
+
+/* Return the size of the dictionary (number of keys). */
+uint64_t RM_DictSize(RedisModuleDict *d) {
+    return raxSize(d->rax);
+}
+
+/* Store the specified key into the dictionary, setting its value to the
+ * pointer 'ptr'. If the key was added with success, since it did not
+ * already exist, REDISMODULE_OK is returned. Otherwise if the key already
+ * exists the function returns REDISMODULE_ERR.
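+ *
+ * A minimal usage sketch, assuming 'd' was created with
+ * RedisModule_CreateDict() and 'valueptr' is an arbitrary payload:
+ *
+ *     if (RedisModule_DictSetC(d,"foo",3,valueptr) == REDISMODULE_ERR) {
+ *         // The key existed: overwrite the old value instead.
+ *         RedisModule_DictReplaceC(d,"foo",3,valueptr);
+ *     }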
+ */
+int RM_DictSetC(RedisModuleDict *d, void *key, size_t keylen, void *ptr) {
+    int retval = raxTryInsert(d->rax,key,keylen,ptr,NULL);
+    return (retval == 1) ? REDISMODULE_OK : REDISMODULE_ERR;
+}
+
+/* Like RedisModule_DictSetC() but will replace the key with the new
+ * value if the key already exists. */
+int RM_DictReplaceC(RedisModuleDict *d, void *key, size_t keylen, void *ptr) {
+    int retval = raxInsert(d->rax,key,keylen,ptr,NULL);
+    return (retval == 1) ? REDISMODULE_OK : REDISMODULE_ERR;
+}
+
+/* Like RedisModule_DictSetC() but takes the key as a RedisModuleString. */
+int RM_DictSet(RedisModuleDict *d, RedisModuleString *key, void *ptr) {
+    return RM_DictSetC(d,key->ptr,sdslen(key->ptr),ptr);
+}
+
+/* Like RedisModule_DictReplaceC() but takes the key as a RedisModuleString. */
+int RM_DictReplace(RedisModuleDict *d, RedisModuleString *key, void *ptr) {
+    return RM_DictReplaceC(d,key->ptr,sdslen(key->ptr),ptr);
+}
+
+/* Return the value stored at the specified key. The function returns NULL
+ * both in the case the key does not exist, or if you actually stored
+ * NULL at the key. So, optionally, if the 'nokey' pointer is not NULL, it will
+ * be set by reference to 1 if the key does not exist, or to 0 if the key
+ * exists. */
+void *RM_DictGetC(RedisModuleDict *d, void *key, size_t keylen, int *nokey) {
+    void *res = raxFind(d->rax,key,keylen);
+    if (nokey) *nokey = (res == raxNotFound);
+    return (res == raxNotFound) ? NULL : res;
+}
+
+/* Like RedisModule_DictGetC() but takes the key as a RedisModuleString. */
+void *RM_DictGet(RedisModuleDict *d, RedisModuleString *key, int *nokey) {
+    return RM_DictGetC(d,key->ptr,sdslen(key->ptr),nokey);
+}
+
+/* Remove the specified key from the dictionary, returning REDISMODULE_OK if
+ * the key was found and deleted, or REDISMODULE_ERR if instead there was
+ * no such key in the dictionary. When the operation is successful, if
+ * 'oldval' is not NULL, then '*oldval' is set to the value stored at the
+ * key before it was deleted. Using this feature it is possible to get
+ * a pointer to the value (for instance in order to release it), without
+ * having to call RedisModule_DictGet() before deleting the key. */
+int RM_DictDelC(RedisModuleDict *d, void *key, size_t keylen, void *oldval) {
+    int retval = raxRemove(d->rax,key,keylen,oldval);
+    return retval ? REDISMODULE_OK : REDISMODULE_ERR;
+}
+
+/* Like RedisModule_DictDelC() but gets the key as a RedisModuleString. */
+int RM_DictDel(RedisModuleDict *d, RedisModuleString *key, void *oldval) {
+    return RM_DictDelC(d,key->ptr,sdslen(key->ptr),oldval);
+}
+
+/* Return an iterator, set up in order to start iterating from the specified
+ * key by applying the operator 'op', which is just a string specifying the
+ * comparison operator to use in order to seek the first element. The
+ * operators available are:
+ *
+ * "^"  -- Seek the first (lexicographically smaller) key.
+ * "$"  -- Seek the last (lexicographically bigger) key.
+ * ">"  -- Seek the first element greater than the specified key.
+ * ">=" -- Seek the first element greater than or equal to the specified key.
+ * "<"  -- Seek the first element smaller than the specified key.
+ * "<=" -- Seek the first element smaller than or equal to the specified key.
+ * "==" -- Seek the first element matching exactly the specified key.
+ *
+ * Note that for "^" and "$" the passed key is not used, and the user may
+ * just pass NULL with a length of 0.
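+ *
+ * For example, to seek the first key greater than or equal to "key:"
+ * (a sketch; the full iteration loop is shown in the
+ * RedisModule_DictNextC() documentation below):
+ *
+ *     RedisModuleDictIter *iter =
+ *         RedisModule_DictIteratorStartC(d,">=",(void*)"key:",4);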
+ *
+ * If the element to start the iteration cannot be seeked based on the
+ * key and operator passed, RedisModule_DictNext() / Prev() will just return
+ * REDISMODULE_ERR at the first call, otherwise they'll produce elements.
+ */
+RedisModuleDictIter *RM_DictIteratorStartC(RedisModuleDict *d, const char *op, void *key, size_t keylen) {
+    RedisModuleDictIter *di = zmalloc(sizeof(*di));
+    di->dict = d;
+    raxStart(&di->ri,d->rax);
+    raxSeek(&di->ri,op,key,keylen);
+    return di;
+}
+
+/* Exactly like RedisModule_DictIteratorStartC, but the key is passed as a
+ * RedisModuleString. */
+RedisModuleDictIter *RM_DictIteratorStart(RedisModuleDict *d, const char *op, RedisModuleString *key) {
+    return RM_DictIteratorStartC(d,op,key->ptr,sdslen(key->ptr));
+}
+
+/* Release the iterator created with RedisModule_DictIteratorStart(). This call
+ * is mandatory, otherwise a memory leak is introduced in the module. */
+void RM_DictIteratorStop(RedisModuleDictIter *di) {
+    raxStop(&di->ri);
+    zfree(di);
+}
+
+/* After its creation with RedisModule_DictIteratorStart(), it is possible to
+ * change the currently selected element of the iterator by using this
+ * API call. The result based on the operator and key is exactly like
+ * the function RedisModule_DictIteratorStart(), however in this case the
+ * return value is just REDISMODULE_OK in case the seeked element was found,
+ * or REDISMODULE_ERR in case it was not possible to seek the specified
+ * element. It is possible to reseek an iterator as many times as you want. */
+int RM_DictIteratorReseekC(RedisModuleDictIter *di, const char *op, void *key, size_t keylen) {
+    return raxSeek(&di->ri,op,key,keylen);
+}
+
+/* Like RedisModule_DictIteratorReseekC() but takes the key as a
+ * RedisModuleString. */
+int RM_DictIteratorReseek(RedisModuleDictIter *di, const char *op, RedisModuleString *key) {
+    return RM_DictIteratorReseekC(di,op,key->ptr,sdslen(key->ptr));
+}
+
+/* Return the current item of the dictionary iterator 'di' and step to the
+ * next element. If the iterator already yielded the last element and there
+ * are no other elements to return, NULL is returned, otherwise a pointer
+ * to a string representing the key is provided, and the '*keylen' length
+ * is set by reference (if keylen is not NULL). The '*dataptr', if not NULL,
+ * is set to the value of the pointer stored at the returned key as auxiliary
+ * data (as set by the RedisModule_DictSet API).
+ *
+ * Usage example:
+ *
+ *      ... create the iterator here ...
+ *      char *key;
+ *      size_t keylen;
+ *      void *data;
+ *      while((key = RedisModule_DictNextC(iter,&keylen,&data)) != NULL) {
+ *          printf("%.*s %p\n", (int)keylen, key, data);
+ *      }
+ *
+ * The returned pointer is of type void because sometimes it makes sense
+ * to cast it to a char* and sometimes to an unsigned char*, depending on
+ * whether it contains binary data or not, so this API ends up being more
+ * comfortable to use.
+ *
+ * The returned pointer is only valid until the next call to the
+ * next/prev iterator step. Also the pointer is no longer valid once the
+ * iterator is released. */
+void *RM_DictNextC(RedisModuleDictIter *di, size_t *keylen, void **dataptr) {
+    if (!raxNext(&di->ri)) return NULL;
+    if (keylen) *keylen = di->ri.key_len;
+    if (dataptr) *dataptr = di->ri.data;
+    return di->ri.key;
+}
+
+/* This function is exactly like RedisModule_DictNextC() but after returning
+ * the currently selected element in the iterator, it selects the previous
+ * element (lexicographically smaller) instead of the next one.
+ */
+void *RM_DictPrevC(RedisModuleDictIter *di, size_t *keylen, void **dataptr) {
+    if (!raxPrev(&di->ri)) return NULL;
+    if (keylen) *keylen = di->ri.key_len;
+    if (dataptr) *dataptr = di->ri.data;
+    return di->ri.key;
+}
+
+/* Like RedisModule_DictNextC(), but instead of returning an internally allocated
+ * buffer and key length, it returns directly a module string object allocated
+ * in the specified context 'ctx' (that may be NULL exactly like for the main
+ * API RedisModule_CreateString).
+ *
+ * The returned string object should be deallocated after use, either manually
+ * or by using a context that has automatic memory management active. */
+RedisModuleString *RM_DictNext(RedisModuleCtx *ctx, RedisModuleDictIter *di, void **dataptr) {
+    size_t keylen;
+    void *key = RM_DictNextC(di,&keylen,dataptr);
+    if (key == NULL) return NULL;
+    return RM_CreateString(ctx,key,keylen);
+}
+
+/* Like RedisModule_DictNext() but after returning the currently selected
+ * element in the iterator, it selects the previous element (lexicographically
+ * smaller) instead of the next one. */
+RedisModuleString *RM_DictPrev(RedisModuleCtx *ctx, RedisModuleDictIter *di, void **dataptr) {
+    size_t keylen;
+    void *key = RM_DictPrevC(di,&keylen,dataptr);
+    if (key == NULL) return NULL;
+    return RM_CreateString(ctx,key,keylen);
+}
+
+/* Compare the element currently pointed to by the iterator to the specified
+ * element given by key/keylen, according to the operator 'op' (the set of
+ * valid operators is the same as for RedisModule_DictIteratorStart).
+ * If the comparison is successful the command returns REDISMODULE_OK,
+ * otherwise REDISMODULE_ERR is returned.
+ *
+ * This is useful when we want to just emit a lexicographical range, so
+ * in the loop, as we iterate elements, we can also check if we are still
+ * in range.
+ *
+ * The function returns REDISMODULE_ERR if the iterator reached the
+ * end of elements condition as well. */
+int RM_DictCompareC(RedisModuleDictIter *di, const char *op, void *key, size_t keylen) {
+    if (raxEOF(&di->ri)) return REDISMODULE_ERR;
+    int res = raxCompare(&di->ri,op,key,keylen);
+    return res ? REDISMODULE_OK : REDISMODULE_ERR;
+}
+
+/* Like RedisModule_DictCompareC but gets the key to compare with the current
+ * iterator key as a RedisModuleString. */
+int RM_DictCompare(RedisModuleDictIter *di, const char *op, RedisModuleString *key) {
+    if (raxEOF(&di->ri)) return REDISMODULE_ERR;
+    int res = raxCompare(&di->ri,op,key->ptr,sdslen(key->ptr));
+    return res ? REDISMODULE_OK : REDISMODULE_ERR;
+}
+
+/* --------------------------------------------------------------------------
+ * Modules utility APIs
+ * -------------------------------------------------------------------------- */
+
+/* Return random bytes using SHA1 in counter mode with a /dev/urandom
+ * initialized seed. This function is fast so can be used to generate
+ * many bytes without any effect on the operating system entropy pool.
+ * Currently this function is not thread safe. */
+void RM_GetRandomBytes(unsigned char *dst, size_t len) {
+    getRandomBytes(dst,len);
+}
+
+/* Like RedisModule_GetRandomBytes() but instead of setting the string to
+ * random bytes the string is set to random characters in the
+ * hex charset [0-9a-f].
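+ *
+ * For example, to fill a 40 character identifier (a sketch; note that the
+ * output is not null terminated):
+ *
+ *     char myid[40];
+ *     RedisModule_GetRandomHexChars(myid,sizeof(myid));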
+ */
+void RM_GetRandomHexChars(char *dst, size_t len) {
+    getRandomHexChars(dst,len);
+}
+
 /* --------------------------------------------------------------------------
  * Modules API internals
  * -------------------------------------------------------------------------- */
@@ -3820,7 +4627,7 @@ uint64_t dictCStringKeyHash(const void *key) {
 }
 
 int dictCStringKeyCompare(void *privdata, const void *key1, const void *key2) {
-    DICT_NOTUSED(privdata);
+    UNUSED(privdata);
     return strcmp(key1,key2) == 0;
 }
 
@@ -3850,8 +4657,8 @@ void moduleInitModulesSystem(void) {
 
     /* Set up the keyspace notification susbscriber list and static client */
     moduleKeyspaceSubscribers = listCreate();
-    moduleKeyspaceSubscribersClient = createClient(-1);
-    moduleKeyspaceSubscribersClient->flags |= CLIENT_MODULE;
+    moduleFreeContextReusedClient = createClient(-1);
+    moduleFreeContextReusedClient->flags |= CLIENT_MODULE;
 
     moduleRegisterCoreAPI();
     if (pipe(server.module_blocked_pipe) == -1) {
@@ -3865,6 +4672,9 @@ void moduleInitModulesSystem(void) {
     anetNonBlock(NULL,server.module_blocked_pipe[0]);
     anetNonBlock(NULL,server.module_blocked_pipe[1]);
 
+    /* Create the timers radix tree. */
+    Timers = raxNew();
+
     /* Our thread-safe contexts GIL must start with already locked:
      * it is just unlocked when it's safe. */
     pthread_mutex_lock(&moduleGIL);
@@ -3876,7 +4686,7 @@ void moduleInitModulesSystem(void) {
  * because the server must be fully initialized before loading modules.
  *
  * The function aborts the server on errors, since to start with missing
- * modules is not considered sane: clients may rely on the existance of
+ * modules is not considered sane: clients may rely on the existence of
 * given commands, loading AOF also may need some modules to exist, and
 * if this instance is a slave, it must understand commands from master. */
 void moduleLoadFromQueue(void) {
@@ -3939,6 +4749,7 @@ int moduleLoad(const char *path, void **module_argv, int module_argc) {
     }
     onload = (int (*)(void *, void **, int))(unsigned long) dlsym(handle,"RedisModule_OnLoad");
     if (onload == NULL) {
+        dlclose(handle);
         serverLog(LL_WARNING,
             "Module %s does not export RedisModule_OnLoad() "
            "symbol. Module not loaded.",path);
@@ -3985,7 +4796,7 @@ int moduleUnload(sds name) {
 
     moduleUnregisterCommands(module);
 
-    /* Remvoe any noification subscribers this module might have */
+    /* Remove any notification subscribers this module might have */
     moduleUnsubscribeNotifications(module);
 
     /* Unregister all the hooks. TODO: Yet no hooks support here. */
@@ -4012,7 +4823,15 @@ int moduleUnload(sds name) {
 * MODULE LOAD <path> [args...] */
 void moduleCommand(client *c) {
     char *subcmd = c->argv[1]->ptr;
-
+    if (c->argc == 2 && !strcasecmp(subcmd,"help")) {
+        const char *help[] = {
+"LIST -- Return a list of loaded modules.",
+"LOAD <path> [arg ...] -- Load a module library from <path>.",
+"UNLOAD <name> -- Unload a module.",
+NULL
+        };
+        addReplyHelp(c, help);
+    } else
     if (!strcasecmp(subcmd,"load") && c->argc >= 3) {
         robj **argv = NULL;
         int argc = 0;
@@ -4061,7 +4880,8 @@ void moduleCommand(client *c) {
         }
         dictReleaseIterator(di);
     } else {
-        addReply(c,shared.syntaxerr);
+        addReplySubcommandSyntaxError(c);
+        return;
     }
 }
 
@@ -4186,4 +5006,42 @@ void moduleRegisterCoreAPI(void) {
     REGISTER_API(DigestAddLongLong);
     REGISTER_API(DigestEndSequence);
     REGISTER_API(SubscribeToKeyspaceEvents);
+    REGISTER_API(RegisterClusterMessageReceiver);
+    REGISTER_API(SendClusterMessage);
+    REGISTER_API(GetClusterNodeInfo);
+    REGISTER_API(GetClusterNodesList);
+    REGISTER_API(FreeClusterNodesList);
+    REGISTER_API(CreateTimer);
+    REGISTER_API(StopTimer);
+    REGISTER_API(GetTimerInfo);
+    REGISTER_API(GetMyClusterID);
+    REGISTER_API(GetClusterSize);
+    REGISTER_API(GetRandomBytes);
+    REGISTER_API(GetRandomHexChars);
+    REGISTER_API(BlockedClientDisconnected);
+    REGISTER_API(SetDisconnectCallback);
+    REGISTER_API(GetBlockedClientHandle);
+    REGISTER_API(SetClusterFlags);
+    REGISTER_API(CreateDict);
+    REGISTER_API(FreeDict);
+    REGISTER_API(DictSize);
+    REGISTER_API(DictSetC);
+    REGISTER_API(DictReplaceC);
+    REGISTER_API(DictSet);
+    REGISTER_API(DictReplace);
+    REGISTER_API(DictGetC);
+    REGISTER_API(DictGet);
+    REGISTER_API(DictDelC);
+    REGISTER_API(DictDel);
+    REGISTER_API(DictIteratorStartC);
+    REGISTER_API(DictIteratorStart);
+    REGISTER_API(DictIteratorStop);
+    REGISTER_API(DictIteratorReseekC);
+    REGISTER_API(DictIteratorReseek);
+    REGISTER_API(DictNextC);
+    REGISTER_API(DictPrevC);
+    REGISTER_API(DictNext);
+    REGISTER_API(DictPrev);
+    REGISTER_API(DictCompareC);
+    REGISTER_API(DictCompare);
 }
diff --git a/redis-android/src/main/jni/redis-4.0.11/src/modules/.gitignore b/redis-android/src/main/jni/redis-5.0.0/src/modules/.gitignore
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/src/modules/.gitignore
rename to redis-android/src/main/jni/redis-5.0.0/src/modules/.gitignore
diff --git a/redis-android/src/main/jni/redis-4.0.11/src/modules/Makefile b/redis-android/src/main/jni/redis-5.0.0/src/modules/Makefile
similarity index 67%
rename from redis-android/src/main/jni/redis-4.0.11/src/modules/Makefile
rename to redis-android/src/main/jni/redis-5.0.0/src/modules/Makefile
index 066e65e..51ffac1 100644
--- a/redis-android/src/main/jni/redis-4.0.11/src/modules/Makefile
+++ b/redis-android/src/main/jni/redis-5.0.0/src/modules/Makefile
@@ -13,7 +13,7 @@ endif
 
 .SUFFIXES: .c .so .xo .o
 
-all: helloworld.so hellotype.so helloblock.so testmodule.so
+all: helloworld.so hellotype.so helloblock.so testmodule.so hellocluster.so hellotimer.so hellodict.so
 
 .c.xo:
 	$(CC) -I. $(CFLAGS) $(SHOBJ_CFLAGS) -fPIC -c $< -o $@
@@ -33,6 +33,21 @@ helloblock.xo: ../redismodule.h
 helloblock.so: helloblock.xo
 	$(LD) -o $@ $< $(SHOBJ_LDFLAGS) $(LIBS) -lpthread -lc
 
+hellocluster.xo: ../redismodule.h
+
+hellocluster.so: hellocluster.xo
+	$(LD) -o $@ $< $(SHOBJ_LDFLAGS) $(LIBS) -lc
+
+hellotimer.xo: ../redismodule.h
+
+hellotimer.so: hellotimer.xo
+	$(LD) -o $@ $< $(SHOBJ_LDFLAGS) $(LIBS) -lc
+
+hellodict.xo: ../redismodule.h
+
+hellodict.so: hellodict.xo
+	$(LD) -o $@ $< $(SHOBJ_LDFLAGS) $(LIBS) -lc
+
 testmodule.xo: ../redismodule.h
 
 testmodule.so: testmodule.xo
diff --git a/redis-android/src/main/jni/redis-4.0.11/src/modules/gendoc.rb b/redis-android/src/main/jni/redis-5.0.0/src/modules/gendoc.rb
similarity index 95%
rename from redis-android/src/main/jni/redis-4.0.11/src/modules/gendoc.rb
rename to redis-android/src/main/jni/redis-5.0.0/src/modules/gendoc.rb
index 516f5d7..ee65728 100644
--- a/redis-android/src/main/jni/redis-4.0.11/src/modules/gendoc.rb
+++ b/redis-android/src/main/jni/redis-5.0.0/src/modules/gendoc.rb
@@ -1,5 +1,5 @@
 # gendoc.rb -- Converts the top-comments inside module.c to modules API
-# reference documentaiton in markdown format.
+# reference documentation in markdown format.
 
 # Convert the C comment to markdown
 def markdown(s)
diff --git a/redis-android/src/main/jni/redis-4.0.11/src/modules/helloblock.c b/redis-android/src/main/jni/redis-5.0.0/src/modules/helloblock.c
similarity index 86%
rename from redis-android/src/main/jni/redis-4.0.11/src/modules/helloblock.c
rename to redis-android/src/main/jni/redis-5.0.0/src/modules/helloblock.c
index c74fcd3..b90ccaa 100644
--- a/redis-android/src/main/jni/redis-4.0.11/src/modules/helloblock.c
+++ b/redis-android/src/main/jni/redis-5.0.0/src/modules/helloblock.c
@@ -54,7 +54,8 @@ int HelloBlock_Timeout(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
 }
 
 /* Private data freeing callback for HELLO.BLOCK command. */
-void HelloBlock_FreeData(void *privdata) {
+void HelloBlock_FreeData(RedisModuleCtx *ctx, void *privdata) {
+    REDISMODULE_NOT_USED(ctx);
     RedisModule_Free(privdata);
 }
 
@@ -73,6 +74,23 @@ void *HelloBlock_ThreadMain(void *arg) {
     return NULL;
 }
 
+/* An example blocked client disconnection callback.
+ *
+ * Note that in the case of the HELLO.BLOCK command, the blocked client is now
+ * owned by the thread calling sleep(). In this specific case, there is not
+ * much we can do, however normally we could instead implement a way to
+ * signal the thread that the client disconnected, and sleep the specified
+ * amount of seconds with a while loop calling sleep(1), so that once we
+ * detect the client disconnection, we can terminate the thread ASAP. */
+void HelloBlock_Disconnected(RedisModuleCtx *ctx, RedisModuleBlockedClient *bc) {
+    RedisModule_Log(ctx,"warning","Blocked client %p disconnected!",
+        (void*)bc);
+
+    /* Here you should cleanup your state / threads, and if possible
+     * call RedisModule_UnblockClient(), or notify the thread that will
+     * call the function ASAP. */
+}
+
 /* HELLO.BLOCK <delay> <timeout> -- Block for <delay> seconds, then reply with
  * a random number. Timeout is the command timeout, so that you can test
  * what happens when the delay is greater than the timeout.
  */
@@ -92,6 +110,11 @@ int HelloBlock_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int a
     pthread_t tid;
     RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx,HelloBlock_Reply,HelloBlock_Timeout,HelloBlock_FreeData,timeout);
 
+    /* Here we set a disconnection handler, however since this module will
+     * block in sleep() in a thread, there is not much we can do in the
+     * callback, so this is just to show you the API. */
+    RedisModule_SetDisconnectCallback(bc,HelloBlock_Disconnected);
+
     /* Now that we setup a blocking client, we need to pass the control
      * to the thread. However we need to pass arguments to the thread:
      * the delay and a reference to the blocked client handle. */
diff --git a/redis-android/src/main/jni/redis-5.0.0/src/modules/hellocluster.c b/redis-android/src/main/jni/redis-5.0.0/src/modules/hellocluster.c
new file mode 100644
index 0000000..cb78187
--- /dev/null
+++ b/redis-android/src/main/jni/redis-5.0.0/src/modules/hellocluster.c
@@ -0,0 +1,118 @@
+/* Helloworld cluster -- A ping/pong cluster API example.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Copyright (c) 2018, Salvatore Sanfilippo <antirez at gmail dot com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of Redis nor the names of its contributors may be used
+ *     to endorse or promote products derived from this software without
+ *     specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define REDISMODULE_EXPERIMENTAL_API
+#include "../redismodule.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+
+#define MSGTYPE_PING 1
+#define MSGTYPE_PONG 2
+
+/* HELLOCLUSTER.PINGALL */
+int PingallCommand_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+    REDISMODULE_NOT_USED(argv);
+    REDISMODULE_NOT_USED(argc);
+
+    RedisModule_SendClusterMessage(ctx,NULL,MSGTYPE_PING,(unsigned char*)"Hey",3);
+    return RedisModule_ReplyWithSimpleString(ctx, "OK");
+}
+
+/* HELLOCLUSTER.LIST */
+int ListCommand_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+    REDISMODULE_NOT_USED(argv);
+    REDISMODULE_NOT_USED(argc);
+
+    size_t numnodes;
+    char **ids = RedisModule_GetClusterNodesList(ctx,&numnodes);
+    if (ids == NULL) {
+        return RedisModule_ReplyWithError(ctx,"Cluster not enabled");
+    }
+
+    RedisModule_ReplyWithArray(ctx,numnodes);
+    for (size_t j = 0; j < numnodes; j++) {
+        int port;
+        RedisModule_GetClusterNodeInfo(ctx,ids[j],NULL,NULL,&port,NULL);
+        RedisModule_ReplyWithArray(ctx,2);
+        RedisModule_ReplyWithStringBuffer(ctx,ids[j],REDISMODULE_NODE_ID_LEN);
+        RedisModule_ReplyWithLongLong(ctx,port);
+    }
+    RedisModule_FreeClusterNodesList(ids);
+    return REDISMODULE_OK;
+}
+
+/* Callback for message MSGTYPE_PING */
+void PingReceiver(RedisModuleCtx *ctx, const char *sender_id, uint8_t type, const unsigned char *payload, uint32_t len) {
+    RedisModule_Log(ctx,"notice","PING (type %d) RECEIVED from %.*s: '%.*s'",
+        type,REDISMODULE_NODE_ID_LEN,sender_id,(int)len, payload);
+    RedisModule_SendClusterMessage(ctx,NULL,MSGTYPE_PONG,(unsigned char*)"Ohi!",4);
+    RedisModule_Call(ctx, "INCR", "c", "pings_received");
+}
+
+/* Callback for message MSGTYPE_PONG. */
+void PongReceiver(RedisModuleCtx *ctx, const char *sender_id, uint8_t type, const unsigned char *payload, uint32_t len) {
+    RedisModule_Log(ctx,"notice","PONG (type %d) RECEIVED from %.*s: '%.*s'",
+        type,REDISMODULE_NODE_ID_LEN,sender_id,(int)len, payload);
+}
+
+/* This function must be present on each Redis module. It is used in order to
+ * register the commands into the Redis server. */
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+    REDISMODULE_NOT_USED(argv);
+    REDISMODULE_NOT_USED(argc);
+
+    if (RedisModule_Init(ctx,"hellocluster",1,REDISMODULE_APIVER_1)
+        == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+    if (RedisModule_CreateCommand(ctx,"hellocluster.pingall",
+        PingallCommand_RedisCommand,"readonly",0,0,0) == REDISMODULE_ERR)
+        return REDISMODULE_ERR;
+
+    if (RedisModule_CreateCommand(ctx,"hellocluster.list",
+        ListCommand_RedisCommand,"readonly",0,0,0) == REDISMODULE_ERR)
+        return REDISMODULE_ERR;
+
+    /* Disable Redis Cluster sharding and redirections. This way every node
+     * will be able to access every possible key, regardless of the hash slot.
+     * This way the PING message handler will be able to increment a specific
+     * variable. Normally you do that in order for the distributed system
+     * you create as a module to have total freedom in the keyspace
+     * manipulation. */
+    RedisModule_SetClusterFlags(ctx,REDISMODULE_CLUSTER_FLAG_NO_REDIRECTION);
+
+    /* Register our handlers for different message types. */
+    RedisModule_RegisterClusterMessageReceiver(ctx,MSGTYPE_PING,PingReceiver);
+    RedisModule_RegisterClusterMessageReceiver(ctx,MSGTYPE_PONG,PongReceiver);
+    return REDISMODULE_OK;
+}
diff --git a/redis-android/src/main/jni/redis-5.0.0/src/modules/hellodict.c b/redis-android/src/main/jni/redis-5.0.0/src/modules/hellodict.c
new file mode 100644
index 0000000..651615b
--- /dev/null
+++ b/redis-android/src/main/jni/redis-5.0.0/src/modules/hellodict.c
@@ -0,0 +1,132 @@
+/* Hellodict -- An example of modules dictionary API
+ *
+ * This module implements a volatile key-value store on top of the
+ * dictionary exported by the Redis modules API.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Copyright (c) 2018, Salvatore Sanfilippo <antirez at gmail dot com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of Redis nor the names of its contributors may be used
+ *     to endorse or promote products derived from this software without
+ *     specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define REDISMODULE_EXPERIMENTAL_API
+#include "../redismodule.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+
+static RedisModuleDict *Keyspace;
+
+/* HELLODICT.SET <key> <value>
+ *
+ * Set the specified key to the specified value. */
+int cmd_SET(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+    if (argc != 3) return RedisModule_WrongArity(ctx);
+    RedisModule_DictSet(Keyspace,argv[1],argv[2]);
+    /* We need to keep a reference to the value stored at the key, otherwise
+     * it would be freed when this callback returns. */
+    RedisModule_RetainString(NULL,argv[2]);
+    return RedisModule_ReplyWithSimpleString(ctx, "OK");
+}
+
+/* HELLODICT.GET <key>
+ *
+ * Return the value of the specified key, or a null reply if the key
+ * is not defined. */
+int cmd_GET(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+    if (argc != 2) return RedisModule_WrongArity(ctx);
+    RedisModuleString *val = RedisModule_DictGet(Keyspace,argv[1],NULL);
+    if (val == NULL) {
+        return RedisModule_ReplyWithNull(ctx);
+    } else {
+        return RedisModule_ReplyWithString(ctx, val);
+    }
+}
+
+/* HELLODICT.KEYRANGE <startkey> <endkey> <count>
+ *
+ * Return a list of matching keys, lexicographically between startkey
+ * and endkey. No more than 'count' items are emitted.
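+ *
+ * For example, assuming keys key:1 to key:9 were added with HELLODICT.SET,
+ * the call:
+ *
+ *     HELLODICT.KEYRANGE key:2 key:5 10
+ *
+ * replies with key:2, key:3, key:4 and key:5.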
+ */
+int cmd_KEYRANGE(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+    if (argc != 4) return RedisModule_WrongArity(ctx);
+
+    /* Parse the count argument. */
+    long long count;
+    if (RedisModule_StringToLongLong(argv[3],&count) != REDISMODULE_OK) {
+        return RedisModule_ReplyWithError(ctx,"ERR invalid count");
+    }
+
+    /* Seek the iterator. */
+    RedisModuleDictIter *iter = RedisModule_DictIteratorStart(
+        Keyspace, ">=", argv[1]);
+
+    /* Reply with the matching items. */
+    char *key;
+    size_t keylen;
+    long long replylen = 0; /* Keep track of the emitted array length. */
+    RedisModule_ReplyWithArray(ctx,REDISMODULE_POSTPONED_ARRAY_LEN);
+    while((key = RedisModule_DictNextC(iter,&keylen,NULL)) != NULL) {
+        if (replylen >= count) break;
+        if (RedisModule_DictCompare(iter,"<=",argv[2]) == REDISMODULE_ERR)
+            break;
+        RedisModule_ReplyWithStringBuffer(ctx,key,keylen);
+        replylen++;
+    }
+    RedisModule_ReplySetArrayLength(ctx,replylen);
+
+    /* Cleanup. */
+    RedisModule_DictIteratorStop(iter);
+    return REDISMODULE_OK;
+}
+
+/* This function must be present on each Redis module. It is used in order to
+ * register the commands into the Redis server. */
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+    REDISMODULE_NOT_USED(argv);
+    REDISMODULE_NOT_USED(argc);
+
+    if (RedisModule_Init(ctx,"hellodict",1,REDISMODULE_APIVER_1)
+        == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+    if (RedisModule_CreateCommand(ctx,"hellodict.set",
+        cmd_SET,"write deny-oom",1,1,0) == REDISMODULE_ERR)
+        return REDISMODULE_ERR;
+
+    if (RedisModule_CreateCommand(ctx,"hellodict.get",
+        cmd_GET,"readonly",1,1,0) == REDISMODULE_ERR)
+        return REDISMODULE_ERR;
+
+    if (RedisModule_CreateCommand(ctx,"hellodict.keyrange",
+        cmd_KEYRANGE,"readonly",1,1,0) == REDISMODULE_ERR)
+        return REDISMODULE_ERR;
+
+    /* Create our global dictionary. Here we'll store our keys and values. */
+    Keyspace = RedisModule_CreateDict(NULL);
+
+    return REDISMODULE_OK;
+}
diff --git a/redis-android/src/main/jni/redis-5.0.0/src/modules/hellotimer.c b/redis-android/src/main/jni/redis-5.0.0/src/modules/hellotimer.c
new file mode 100644
index 0000000..57b111b
--- /dev/null
+++ b/redis-android/src/main/jni/redis-5.0.0/src/modules/hellotimer.c
@@ -0,0 +1,76 @@
+/* Timer API example -- Register and handle timer events
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Copyright (c) 2018, Salvatore Sanfilippo <antirez at gmail dot com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of Redis nor the names of its contributors may be used
+ *     to endorse or promote products derived from this software without
+ *     specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define REDISMODULE_EXPERIMENTAL_API
+#include "../redismodule.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+
+/* Timer callback. */
+void timerHandler(RedisModuleCtx *ctx, void *data) {
+    REDISMODULE_NOT_USED(ctx);
+    printf("Fired %s!\n", (char *)data);
+    RedisModule_Free(data);
+}
+
+/* HELLOTIMER.TIMER */
+int TimerCommand_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+    REDISMODULE_NOT_USED(argv);
+    REDISMODULE_NOT_USED(argc);
+
+    for (int j = 0; j < 10; j++) {
+        int delay = rand() % 5000;
+        char *buf = RedisModule_Alloc(256);
+        snprintf(buf,256,"After %d", delay);
+        RedisModuleTimerID tid = RedisModule_CreateTimer(ctx,delay,timerHandler,buf);
+        REDISMODULE_NOT_USED(tid);
+    }
+    return RedisModule_ReplyWithSimpleString(ctx, "OK");
+}
+
+/* This function must be present on each Redis module. It is used in order to
+ * register the commands into the Redis server. */
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+    REDISMODULE_NOT_USED(argv);
+    REDISMODULE_NOT_USED(argc);
+
+    if (RedisModule_Init(ctx,"hellotimer",1,REDISMODULE_APIVER_1)
+        == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+    if (RedisModule_CreateCommand(ctx,"hellotimer.timer",
+        TimerCommand_RedisCommand,"readonly",0,0,0) == REDISMODULE_ERR)
+        return REDISMODULE_ERR;
+
+    return REDISMODULE_OK;
+}
diff --git a/redis-android/src/main/jni/redis-4.0.11/src/modules/hellotype.c b/redis-android/src/main/jni/redis-5.0.0/src/modules/hellotype.c
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/src/modules/hellotype.c
rename to redis-android/src/main/jni/redis-5.0.0/src/modules/hellotype.c
diff --git a/redis-android/src/main/jni/redis-4.0.11/src/modules/helloworld.c b/redis-android/src/main/jni/redis-5.0.0/src/modules/helloworld.c
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/src/modules/helloworld.c
rename to redis-android/src/main/jni/redis-5.0.0/src/modules/helloworld.c
diff --git a/redis-android/src/main/jni/redis-4.0.11/src/modules/testmodule.c b/redis-android/src/main/jni/redis-5.0.0/src/modules/testmodule.c
similarity index 100%
rename from redis-android/src/main/jni/redis-4.0.11/src/modules/testmodule.c
rename to redis-android/src/main/jni/redis-5.0.0/src/modules/testmodule.c
diff --git a/redis-android/src/main/jni/redis-4.0.11/src/multi.c b/redis-android/src/main/jni/redis-5.0.0/src/multi.c
similarity index 99%
rename from redis-android/src/main/jni/redis-4.0.11/src/multi.c
rename to redis-android/src/main/jni/redis-5.0.0/src/multi.c
index 112ce06..8159adc 100644
--- a/redis-android/src/main/jni/redis-4.0.11/src/multi.c
+++ b/redis-android/src/main/jni/redis-5.0.0/src/multi.c
@@ -158,7 +158,7 @@ void execCommand(client *c) {
             must_propagate = 1;
         }
 
-        call(c,CMD_CALL_FULL);
+        call(c,server.loading ? CMD_CALL_NONE : CMD_CALL_FULL);
 
         /* Commands may alter argc/argv, restore mstate. */
         c->mstate.commands[j].argc = c->argc;
diff --git a/redis-android/src/main/jni/redis-4.0.11/src/networking.c b/redis-android/src/main/jni/redis-5.0.0/src/networking.c
similarity index 78%
rename from redis-android/src/main/jni/redis-4.0.11/src/networking.c
rename to redis-android/src/main/jni/redis-5.0.0/src/networking.c
index 6df2631..e255e64 100644
--- a/redis-android/src/main/jni/redis-4.0.11/src/networking.c
+++ b/redis-android/src/main/jni/redis-5.0.0/src/networking.c
@@ -33,7 +33,7 @@
 #include
 #include
 
-static void setProtocolError(const char *errstr, client *c, long pos);
+static void setProtocolError(const char *errstr, client *c);
 
 /* Return the size consumed from the allocator, for the specified SDS string,
  * including internal fragmentation. This function is used in order to compute
@@ -56,17 +56,32 @@ size_t getStringObjectSdsUsedMemory(robj *o) {
 
 /* Client.reply list dup and free methods. */
 void *dupClientReplyValue(void *o) {
-    return sdsdup(o);
+    clientReplyBlock *old = o;
+    clientReplyBlock *buf = zmalloc(sizeof(clientReplyBlock) + old->size);
+    memcpy(buf, o, sizeof(clientReplyBlock) + old->size);
+    return buf;
 }
 
 void freeClientReplyValue(void *o) {
-    sdsfree(o);
+    zfree(o);
 }
 
 int listMatchObjects(void *a, void *b) {
     return equalStringObjects(a,b);
 }
 
+/* This function links the client to the global linked list of clients.
+ * unlinkClient() does the opposite, among other things. */
+void linkClient(client *c) {
+    listAddNodeTail(server.clients,c);
+    /* Note that we remember the linked list node where the client is stored,
+     * this way removing the client in unlinkClient() will not require
+     * a linear scan, but just a constant time operation. */
+    c->client_list_node = listLast(server.clients);
+    uint64_t id = htonu64(c->id);
+    raxInsert(server.clients_index,(unsigned char*)&id,sizeof(id),c,NULL);
+}
+
 client *createClient(int fd) {
     client *c = zmalloc(sizeof(client));
 
@@ -95,6 +110,7 @@ client *createClient(int fd) {
     c->fd = fd;
     c->name = NULL;
     c->bufpos = 0;
+    c->qb_pos = 0;
     c->querybuf = sdsempty();
     c->pending_querybuf = sdsempty();
     c->querybuf_peak = 0;
@@ -124,8 +140,11 @@ client *createClient(int fd) {
     listSetDupMethod(c->reply,dupClientReplyValue);
     c->btype = BLOCKED_NONE;
     c->bpop.timeout = 0;
-    c->bpop.keys = dictCreate(&objectKeyPointerValueDictType,NULL);
+    c->bpop.keys = dictCreate(&objectKeyHeapPointerValueDictType,NULL);
     c->bpop.target = NULL;
+    c->bpop.xread_group = NULL;
+    c->bpop.xread_consumer = NULL;
+    c->bpop.xread_group_noack = 0;
     c->bpop.numreplicas = 0;
     c->bpop.reploffset = 0;
     c->woff = 0;
@@ -133,13 +152,40 @@ client *createClient(int fd) {
     c->pubsub_channels = dictCreate(&objectKeyPointerValueDictType,NULL);
     c->pubsub_patterns = listCreate();
     c->peerid = NULL;
+    c->client_list_node = NULL;
     listSetFreeMethod(c->pubsub_patterns,decrRefCountVoid);
     listSetMatchMethod(c->pubsub_patterns,listMatchObjects);
-    if (fd != -1) listAddNodeTail(server.clients,c);
+    if (fd != -1) linkClient(c);
     initClientMultiState(c);
     return c;
 }
 
+/* This function puts the client in the queue of clients that should write
+ * their output buffers to the socket. Note that it does not *yet* install
+ * the write handler, to start clients are put in a queue of clients that need
+ * to write, so we try to do that before returning in the event loop (see the
+ * handleClientsWithPendingWrites() function).
+ * If we fail and there is more data to write, compared to what the socket
+ * buffers can hold, then we'll really install the handler.
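+ *
+ * To summarize, a command reply normally travels this path: addReply() ->
+ * prepareClientToWrite() -> clientInstallWriteHandler(), then before the
+ * next event loop iteration handleClientsWithPendingWrites() calls
+ * writeToClient() directly, and a write handler is installed for the
+ * socket only if the reply could not be transferred in one shot.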
+ */
+void clientInstallWriteHandler(client *c) {
+    /* Schedule the client to write the output buffers to the socket only
+     * if not already done and, for slaves, if the slave can actually receive
+     * writes at this stage. */
+    if (!(c->flags & CLIENT_PENDING_WRITE) &&
+        (c->replstate == REPL_STATE_NONE ||
+         (c->replstate == SLAVE_STATE_ONLINE && !c->repl_put_online_on_ack)))
+    {
+        /* Here instead of installing the write handler, we just flag the
+         * client and put it into a list of clients that have something
+         * to write to the socket. This way before re-entering the event
+         * loop, we can try to directly write to the client sockets avoiding
+         * a system call. We'll only really install the write handler if
+         * we'll not be able to write the whole reply at once. */
+        c->flags |= CLIENT_PENDING_WRITE;
+        listAddNodeHead(server.clients_pending_write,c);
+    }
+}
+
 /* This function is called every time we are going to transmit new data
  * to the client. The behavior is the following:
  *
@@ -177,24 +223,9 @@ int prepareClientToWrite(client *c) {
 
     if (c->fd <= 0) return C_ERR; /* Fake client for AOF loading. */
 
-    /* Schedule the client to write the output buffers to the socket only
-     * if not already done (there were no pending writes already and the client
-     * was yet not flagged), and, for slaves, if the slave can actually
-     * receive writes at this stage. */
-    if (!clientHasPendingReplies(c) &&
-        !(c->flags & CLIENT_PENDING_WRITE) &&
-        (c->replstate == REPL_STATE_NONE ||
-         (c->replstate == SLAVE_STATE_ONLINE && !c->repl_put_online_on_ack)))
-    {
-        /* Here instead of installing the write handler, we just flag the
-         * client and put it into a list of clients that have something
-         * to write to the socket. This way before re-entering the event
-         * loop, we can try to directly write to the client sockets avoiding
-         * a system call. We'll only really install the write handler if
-         * we'll not be able to write the whole reply at once. */
-        c->flags |= CLIENT_PENDING_WRITE;
-        listAddNodeHead(server.clients_pending_write,c);
-    }
+    /* Schedule the client to write the output buffers to the socket, unless
+     * it is already set up to do so (it already has pending data). */
+    if (!clientHasPendingReplies(c)) clientInstallWriteHandler(c);
 
     /* Authorize the caller to queue in the output buffer of this client. */
     return C_OK;
@@ -221,84 +252,38 @@ int _addReplyToBuffer(client *c, const char *s, size_t len) {
     return C_OK;
 }
 
-void _addReplyObjectToList(client *c, robj *o) {
-    if (c->flags & CLIENT_CLOSE_AFTER_REPLY) return;
-
-    if (listLength(c->reply) == 0) {
-        sds s = sdsdup(o->ptr);
-        listAddNodeTail(c->reply,s);
-        c->reply_bytes += sdslen(s);
-    } else {
-        listNode *ln = listLast(c->reply);
-        sds tail = listNodeValue(ln);
-
-        /* Append to this object when possible. If tail == NULL it was
-         * set via addDeferredMultiBulkLength(). */
-        if (tail && sdslen(tail)+sdslen(o->ptr) <= PROTO_REPLY_CHUNK_BYTES) {
-            tail = sdscatsds(tail,o->ptr);
-            listNodeValue(ln) = tail;
-            c->reply_bytes += sdslen(o->ptr);
-        } else {
-            sds s = sdsdup(o->ptr);
-            listAddNodeTail(c->reply,s);
-            c->reply_bytes += sdslen(s);
-        }
-    }
-    asyncCloseClientOnOutputBufferLimitReached(c);
-}
-
-/* This method takes responsibility over the sds. When it is no longer
- * needed it will be free'd, otherwise it ends up in a robj.
- */
-void _addReplySdsToList(client *c, sds s) {
-    if (c->flags & CLIENT_CLOSE_AFTER_REPLY) {
-        sdsfree(s);
-        return;
-    }
-
-    if (listLength(c->reply) == 0) {
-        listAddNodeTail(c->reply,s);
-        c->reply_bytes += sdslen(s);
-    } else {
-        listNode *ln = listLast(c->reply);
-        sds tail = listNodeValue(ln);
-
-        /* Append to this object when possible. If tail == NULL it was
-         * set via addDeferredMultiBulkLength(). */
-        if (tail && sdslen(tail)+sdslen(s) <= PROTO_REPLY_CHUNK_BYTES) {
-            tail = sdscatsds(tail,s);
-            listNodeValue(ln) = tail;
-            c->reply_bytes += sdslen(s);
-            sdsfree(s);
-        } else {
-            listAddNodeTail(c->reply,s);
-            c->reply_bytes += sdslen(s);
-        }
-    }
-    asyncCloseClientOnOutputBufferLimitReached(c);
-}
-
 void _addReplyStringToList(client *c, const char *s, size_t len) {
     if (c->flags & CLIENT_CLOSE_AFTER_REPLY) return;
 
-    if (listLength(c->reply) == 0) {
-        sds node = sdsnewlen(s,len);
-        listAddNodeTail(c->reply,node);
-        c->reply_bytes += len;
-    } else {
-        listNode *ln = listLast(c->reply);
-        sds tail = listNodeValue(ln);
-
-        /* Append to this object when possible. If tail == NULL it was
-         * set via addDeferredMultiBulkLength(). */
-        if (tail && sdslen(tail)+len <= PROTO_REPLY_CHUNK_BYTES) {
-            tail = sdscatlen(tail,s,len);
-            listNodeValue(ln) = tail;
-            c->reply_bytes += len;
-        } else {
-            sds node = sdsnewlen(s,len);
-            listAddNodeTail(c->reply,node);
-            c->reply_bytes += len;
-        }
+    listNode *ln = listLast(c->reply);
+    clientReplyBlock *tail = ln? listNodeValue(ln): NULL;
+
+    /* Note that 'tail' may be NULL even if we have a tail node, because when
+     * addDeferredMultiBulkLength() is used, it sets a dummy node to NULL just
+     * to fill it later, when the size of the bulk length is set. */
+
+    /* Append to tail string when possible. */
+    if (tail) {
+        /* Copy the part we can fit into the tail, and leave the rest for a
+         * new node */
+        size_t avail = tail->size - tail->used;
+        size_t copy = avail >= len? len: avail;
+        memcpy(tail->buf + tail->used, s, copy);
+        tail->used += copy;
+        s += copy;
+        len -= copy;
+    }
+    if (len) {
+        /* Create a new node, make sure it is allocated to at
+         * least PROTO_REPLY_CHUNK_BYTES */
+        size_t size = len < PROTO_REPLY_CHUNK_BYTES? PROTO_REPLY_CHUNK_BYTES: len;
+        tail = zmalloc(size + sizeof(clientReplyBlock));
+        /* take over the allocation's internal fragmentation */
+        tail->size = zmalloc_usable(tail) - sizeof(clientReplyBlock);
+        tail->used = len;
+        memcpy(tail->buf, s, len);
+        listAddNodeTail(c->reply, tail);
+        c->reply_bytes += tail->size;
     }
     asyncCloseClientOnOutputBufferLimitReached(c);
 }
@@ -308,54 +293,37 @@ void _addReplyStringToList(client *c, const char *s, size_t len) {
  * The following functions are the ones that commands implementations will call.
  * -------------------------------------------------------------------------- */
 
+/* Add the object 'obj' string representation to the client output buffer. */
 void addReply(client *c, robj *obj) {
     if (prepareClientToWrite(c) != C_OK) return;
 
-    /* This is an important place where we can avoid copy-on-write
-     * when there is a saving child running, avoiding touching the
-     * refcount field of the object if it's not needed.
-     *
-     * If the encoding is RAW and there is room in the static buffer
-     * we'll be able to send the object to the client without
-     * messing with its page.
-     */
     if (sdsEncodedObject(obj)) {
         if (_addReplyToBuffer(c,obj->ptr,sdslen(obj->ptr)) != C_OK)
-            _addReplyObjectToList(c,obj);
+            _addReplyStringToList(c,obj->ptr,sdslen(obj->ptr));
     } else if (obj->encoding == OBJ_ENCODING_INT) {
-        /* Optimization: if there is room in the static buffer for 32 bytes
-         * (more than the max chars a 64 bit integer can take as string) we
-         * avoid decoding the object and go for the lower level approach. */
-        if (listLength(c->reply) == 0 && (sizeof(c->buf) - c->bufpos) >= 32) {
-            char buf[32];
-            int len;
-
-            len = ll2string(buf,sizeof(buf),(long)obj->ptr);
-            if (_addReplyToBuffer(c,buf,len) == C_OK)
-                return;
-            /* else... continue with the normal code path, but should never
-             * happen actually since we verified there is room. */
-        }
-        obj = getDecodedObject(obj);
-        if (_addReplyToBuffer(c,obj->ptr,sdslen(obj->ptr)) != C_OK)
-            _addReplyObjectToList(c,obj);
-        decrRefCount(obj);
+        /* For integer encoded strings we just convert the value into a
+         * string using our optimized function, and attach the resulting
+         * string to the output buffer. */
+        char buf[32];
+        size_t len = ll2string(buf,sizeof(buf),(long)obj->ptr);
+        if (_addReplyToBuffer(c,buf,len) != C_OK)
+            _addReplyStringToList(c,buf,len);
     } else {
         serverPanic("Wrong obj->encoding in addReply()");
     }
 }
 
+/* Add the SDS 's' string to the client output buffer, as a side effect
+ * the SDS string is freed. */
 void addReplySds(client *c, sds s) {
     if (prepareClientToWrite(c) != C_OK) {
         /* The caller expects the sds to be free'd. */
         sdsfree(s);
         return;
     }
-    if (_addReplyToBuffer(c,s,sdslen(s)) == C_OK) {
-        sdsfree(s);
-    } else {
-        /* This method free's the sds when it is no longer needed. */
-        _addReplySdsToList(c,s);
-    }
+    if (_addReplyToBuffer(c,s,sdslen(s)) != C_OK)
+        _addReplyStringToList(c,s,sdslen(s));
+    sdsfree(s);
 }
 
 /* This low level function just adds whatever protocol you send it to the
@@ -372,17 +340,44 @@ void addReplyString(client *c, const char *s, size_t len) {
         _addReplyStringToList(c,s,len);
 }
 
+/* Low level function called by the addReplyError...() functions.
+ * It emits the protocol for a Redis error, in the form:
+ *
+ *     -ERRORCODE Error Message<CR><LF>
+ *
+ * If the error code is already passed in the string 's', the error
+ * code provided is used, otherwise the string "-ERR " for the generic
+ * error code is automatically added. */
 void addReplyErrorLength(client *c, const char *s, size_t len) {
-    addReplyString(c,"-ERR ",5);
+    /* If the string already starts with "-..." then the error code
+     * is provided by the caller. Otherwise we use "-ERR". */
+    if (!len || s[0] != '-') addReplyString(c,"-ERR ",5);
     addReplyString(c,s,len);
     addReplyString(c,"\r\n",2);
+
+    /* Sometimes it could be normal that a slave replies to a master with
+     * an error and this function gets called. Actually the error will never
+     * be sent because addReply*() against master clients has no effect...
+     * A notable example is:
+     *
+     *    EVAL 'redis.call("incr",KEYS[1]); redis.call("nonexisting")' 1 x
+     *
+     * Where the master must propagate the first change even if the second
+     * will produce an error. However it is useful to log such events since
+     * they are rare and may hint at errors in a script or a bug in Redis. */
     if (c->flags & (CLIENT_MASTER|CLIENT_SLAVE)) {
-        char* to = c->flags & CLIENT_MASTER? "master": "slave";
-        char* from = c->flags & CLIENT_MASTER? "slave": "master";
+        char* to = c->flags & CLIENT_MASTER? "master": "replica";
+        char* from = c->flags & CLIENT_MASTER? "replica": "master";
        char *cmdname = c->lastcmd ? c->lastcmd->name : "<unknown>";
        serverLog(LL_WARNING,"== CRITICAL == This %s is sending an error "
                             "to its %s: '%s' after processing the command "
                             "'%s'", from, to, s, cmdname);
+        /* Here we want to panic because when a master is sending an
+         * error to some slave in the context of replication, this can
+         * only create some kind of offset or data desynchronization. Better
+         * to catch it ASAP and crash instead of continuing. */
+        if (c->flags & CLIENT_SLAVE)
+            serverPanic("Continuing is unsafe: replication protocol violation.");
     }
 }
 
@@ -439,26 +434,41 @@ void *addDeferredMultiBulkLength(client *c) {
 /* Populate the length object and try gluing it to the next chunk. */
 void setDeferredMultiBulkLength(client *c, void *node, long length) {
     listNode *ln = (listNode*)node;
-    sds len, next;
+    clientReplyBlock *next;
+    char lenstr[128];
+    size_t lenstr_len = sprintf(lenstr, "*%ld\r\n", length);
 
     /* Abort when *node is NULL: when the client should not accept writes
      * we return NULL in addDeferredMultiBulkLength() */
     if (node == NULL) return;
-
-    len = sdscatprintf(sdsnewlen("*",1),"%ld\r\n",length);
-    listNodeValue(ln) = len;
-    c->reply_bytes += sdslen(len);
-    if (ln->next != NULL) {
-        next = listNodeValue(ln->next);
-
-        /* Only glue when the next node is non-NULL (an sds in this case) */
-        if (next != NULL) {
-            len = sdscatsds(len,next);
-            listDelNode(c->reply,ln->next);
-            listNodeValue(ln) = len;
-            /* No need to update c->reply_bytes: we are just moving the same
-             * amount of bytes from one node to another. */
-        }
+    serverAssert(!listNodeValue(ln));
+
+    /* Normally we fill this dummy NULL node, added by addDeferredMultiBulkLength(),
+     * with a new buffer structure containing the protocol needed to specify
+     * the length of the array following. However sometimes when there is
+     * little memory to move, we may instead remove this NULL node, and prefix
+     * our protocol in the node immediately after it, in order to save a
+     * write(2) syscall later. Conditions needed to do it:
+     *
+     * - The next node is non-NULL,
+     * - It has enough room already allocated
+     * - And not too large (avoid large memmove) */
+    if (ln->next != NULL && (next = listNodeValue(ln->next)) &&
+        next->size - next->used >= lenstr_len &&
+        next->used < PROTO_REPLY_CHUNK_BYTES * 4) {
+        memmove(next->buf + lenstr_len, next->buf, next->used);
+        memcpy(next->buf, lenstr, lenstr_len);
+        next->used += lenstr_len;
+        listDelNode(c->reply,ln);
+    } else {
+        /* Create a new node */
+        clientReplyBlock *buf = zmalloc(lenstr_len + sizeof(clientReplyBlock));
+        /* Take over the allocation's internal fragmentation */
+        buf->size = zmalloc_usable(buf) - sizeof(clientReplyBlock);
+        buf->used = lenstr_len;
+        memcpy(buf->buf, lenstr, lenstr_len);
+        listNodeValue(ln) = buf;
+        c->reply_bytes += buf->size;
     }
     asyncCloseClientOnOutputBufferLimitReached(c);
 }
@@ -574,7 +584,7 @@ void addReplyBulkSds(client *c, sds s)  {
     addReply(c,shared.crlf);
 }
 
-/* Add a C nul term string as bulk reply */
+/* Add a C null term string as bulk reply */
 void addReplyBulkCString(client *c, const char *s) {
     if (s == NULL) {
         addReply(c,shared.nullbulk);
@@ -592,6 +602,38 @@ void addReplyBulkLongLong(client *c, long long ll) {
     addReplyBulkCBuffer(c,buf,len);
 }
 
+/* Add an array of C strings as status replies with a heading.
+ * This function is typically invoked from commands that support
+ * subcommands in response to the 'help' subcommand. The help array
+ * is terminated by a NULL sentinel.
+/* Add an array of C strings as status replies with a heading.
+ * This function is typically invoked by commands that support
+ * subcommands in response to the 'help' subcommand. The help array
+ * is terminated by a NULL sentinel. */
+void addReplyHelp(client *c, const char **help) {
+    sds cmd = sdsnew((char*) c->argv[0]->ptr);
+    void *blenp = addDeferredMultiBulkLength(c);
+    int blen = 0;
+
+    sdstoupper(cmd);
+    addReplyStatusFormat(c,
+        "%s <subcommand> arg arg ... arg. Subcommands are:",cmd);
+    sdsfree(cmd);
+
+    while (help[blen]) addReplyStatus(c,help[blen++]);
+
+    blen++;  /* Account for the header line(s). */
+    setDeferredMultiBulkLength(c,blenp,blen);
+}
+
+/* Add a suggestive error reply.
+ * This function is typically invoked by commands that support
+ * subcommands in response to an unknown subcommand or argument error. */
+void addReplySubcommandSyntaxError(client *c) {
+    sds cmd = sdsnew((char*) c->argv[0]->ptr);
+    sdstoupper(cmd);
+    addReplyErrorFormat(c,
+        "Unknown subcommand or wrong number of arguments for '%s'. Try %s HELP.",
+        (char*)c->argv[1]->ptr,cmd);
+    sdsfree(cmd);
+}
+
 /* Copy 'src' client output buffers into 'dst' client output buffers.
  * The function takes care of freeing the old output buffers of the
  * destination client. */
@@ -752,9 +794,12 @@ void unlinkClient(client *c) {
      * fd is already set to -1. */
     if (c->fd != -1) {
         /* Remove from the list of active clients. */
-        ln = listSearchKey(server.clients,c);
-        serverAssert(ln != NULL);
-        listDelNode(server.clients,ln);
+        if (c->client_list_node) {
+            uint64_t id = htonu64(c->id);
+            raxRemove(server.clients_index,(unsigned char*)&id,sizeof(id),NULL);
+            listDelNode(server.clients,c->client_list_node);
+            c->client_list_node = NULL;
+        }
 
         /* Unregister async I/O handlers and close the socket. */
         aeDeleteFileEvent(server.el,c->fd,AE_READABLE);
@@ -784,6 +829,13 @@ void unlinkClient(client *c) {
 void freeClient(client *c) {
     listNode *ln;
 
+    /* If a client is protected, yet we need to free it right now, make sure
+     * to at least use asynchronous freeing. */
+    if (c->flags & CLIENT_PROTECTED) {
+        freeClientAsync(c);
+        return;
+    }
+
     /* If it is our master that's beging disconnected we should make sure
      * to cache the state to try a partial resynchronization later.
      *
@@ -793,8 +845,7 @@ void freeClient(client *c) {
         serverLog(LL_WARNING,"Connection with master lost.");
         if (!(c->flags & (CLIENT_CLOSE_AFTER_REPLY|
                           CLIENT_CLOSE_ASAP|
-                          CLIENT_BLOCKED|
-                          CLIENT_UNBLOCKED)))
+                          CLIENT_BLOCKED)))
         {
             replicationCacheMaster(c);
             return;
@@ -803,7 +854,7 @@ void freeClient(client *c) {
 
     /* Log link disconnection with slave */
     if ((c->flags & CLIENT_SLAVE) && !(c->flags & CLIENT_MONITOR)) {
-        serverLog(LL_WARNING,"Connection with slave %s lost.",
+        serverLog(LL_WARNING,"Connection with replica %s lost.",
             replicationGetSlaveName(c));
     }
 
@@ -896,12 +947,21 @@ void freeClientsInAsyncFreeQueue(void) {
     }
 }
 
+/* Return a client by ID, or NULL if the client ID is not in the set
+ * of registered clients. Note that "fake clients", created with -1 as FD,
+ * are not registered clients. */
+client *lookupClientByID(uint64_t id) {
+    id = htonu64(id);
+    client *c = raxFind(server.clients_index,(unsigned char*)&id,sizeof(id));
+    return (c == raxNotFound) ? NULL : c;
+}
+
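[Editor's note: unlinkClient() and lookupClientByID() above key the new server.clients_index radix tree by the client ID converted with htonu64(). The sketch below shows why: storing the 64-bit id in big-endian byte order makes memcmp() key order match numeric order on any host, which is what a radix tree needs for ordered iteration. The helper name is hypothetical; this is not the patch's code.]

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Encode a 64 bit id big-endian, most significant byte first, the
     * same normalization htonu64() performs before the id is used as a
     * radix tree key. */
    static void u64_to_key(uint64_t id, unsigned char key[8]) {
        for (int i = 0; i < 8; i++)
            key[i] = (unsigned char)(id >> (56 - 8*i));
    }

    int main(void) {
        unsigned char a[8], b[8];
        u64_to_key(1, a);
        u64_to_key(256, b);
        /* memcmp order now matches numeric order: 1 < 256, so < 0. */
        printf("memcmp: %d\n", memcmp(a, b, 8));
        return 0;
    }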
 /* Write data in output buffers to client. Return C_OK if the client
  * is still valid after the call, C_ERR if it was freed. */
 int writeToClient(int fd, client *c, int handler_installed) {
     ssize_t nwritten = 0, totwritten = 0;
     size_t objlen;
-    sds o;
+    clientReplyBlock *o;
 
     while(clientHasPendingReplies(c)) {
         if (c->bufpos > 0) {
@@ -918,23 +978,24 @@ int writeToClient(int fd, client *c, int handler_installed) {
             }
         } else {
             o = listNodeValue(listFirst(c->reply));
-            objlen = sdslen(o);
+            objlen = o->used;
 
             if (objlen == 0) {
+                c->reply_bytes -= o->size;
                 listDelNode(c->reply,listFirst(c->reply));
                 continue;
             }
 
-            nwritten = write(fd, o + c->sentlen, objlen - c->sentlen);
+            nwritten = write(fd, o->buf + c->sentlen, objlen - c->sentlen);
             if (nwritten <= 0) break;
             c->sentlen += nwritten;
             totwritten += nwritten;
 
             /* If we fully sent the object on head go to the next one */
             if (c->sentlen == objlen) {
+                c->reply_bytes -= o->size;
                 listDelNode(c->reply,listFirst(c->reply));
                 c->sentlen = 0;
-                c->reply_bytes -= objlen;
                 /* If there are no longer objects in the list, we expect
                  * the count of reply bytes to be exactly zero. */
                 if (listLength(c->reply) == 0)
@@ -1011,6 +1072,10 @@ int handleClientsWithPendingWrites(void) {
         c->flags &= ~CLIENT_PENDING_WRITE;
         listDelNode(server.clients_pending_write,ln);
 
+        /* If a client is protected, don't do anything,
+         * that may trigger write error or recreate handler. */
+        if (c->flags & CLIENT_PROTECTED) continue;
+
         /* Try to write buffers to the client socket. */
         if (writeToClient(c->fd,c,0) == C_ERR) continue;
 
@@ -1062,6 +1127,34 @@ void resetClient(client *c) {
     }
 }
 
+/* This function is used when we want to re-enter the event loop but there
+ * is the risk that the client we are dealing with will be freed in some
+ * way. This happens for instance in:
+ *
+ * * DEBUG RELOAD and similar.
+ * * When a Lua script is in -BUSY state.
+ *
+ * So the function will protect the client by doing two things:
+ *
+ * 1) It removes the file events. This way it is not possible that an
+ *    error is signaled on the socket, freeing the client.
+ * 2) Moreover it makes sure that if the client is freed in a different code
+ *    path, it is not really released, but only marked for later release. */
+void protectClient(client *c) {
+    c->flags |= CLIENT_PROTECTED;
+    aeDeleteFileEvent(server.el,c->fd,AE_READABLE);
+    aeDeleteFileEvent(server.el,c->fd,AE_WRITABLE);
+}
+
+/* This will undo the client protection done by protectClient() */
+void unprotectClient(client *c) {
+    if (c->flags & CLIENT_PROTECTED) {
+        c->flags &= ~CLIENT_PROTECTED;
+        aeCreateFileEvent(server.el,c->fd,AE_READABLE,readQueryFromClient,c);
+        if (clientHasPendingReplies(c)) clientInstallWriteHandler(c);
+    }
+}
+
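[Editor's note: a compact, self-contained illustration of the CLIENT_PROTECTED contract introduced above: while the flag is set, a free request only marks the connection for later release, exactly as freeClient() now falls back to freeClientAsync(). Toy types and names; nothing here is taken from the real client struct.]

    #include <stdio.h>
    #include <stdlib.h>

    #define FLAG_PROTECTED  (1<<0)
    #define FLAG_CLOSE_ASAP (1<<1)

    typedef struct { int flags; } conn;

    /* Refuse to release a protected connection; only mark it. */
    static void free_conn(conn *c) {
        if (c->flags & FLAG_PROTECTED) {
            c->flags |= FLAG_CLOSE_ASAP;   /* defer: free later, safely */
            return;
        }
        free(c);
    }

    int main(void) {
        conn *c = calloc(1, sizeof(*c));
        c->flags |= FLAG_PROTECTED;        /* protectClient() equivalent */
        free_conn(c);                      /* survives: only marked */
        printf("still alive, flags=%d\n", c->flags);
        c->flags &= ~FLAG_PROTECTED;       /* unprotectClient() equivalent */
        free_conn(c);                      /* now actually freed */
        return 0;
    }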
 /* Like processMultibulkBuffer(), but for the inline protocol instead of RESP,
  * this function consumes the client query buffer and creates a command ready
  * to be executed inside the client structure. Returns C_OK if the command
@@ -1076,29 +1169,29 @@ int processInlineBuffer(client *c) {
     size_t querylen;
 
     /* Search for end of line */
-    newline = strchr(c->querybuf,'\n');
+    newline = strchr(c->querybuf+c->qb_pos,'\n');
 
     /* Nothing to do without a \r\n */
     if (newline == NULL) {
-        if (sdslen(c->querybuf) > PROTO_INLINE_MAX_SIZE) {
+        if (sdslen(c->querybuf)-c->qb_pos > PROTO_INLINE_MAX_SIZE) {
             addReplyError(c,"Protocol error: too big inline request");
-            setProtocolError("too big inline request",c,0);
+            setProtocolError("too big inline request",c);
         }
         return C_ERR;
     }
 
     /* Handle the \r\n case. */
-    if (newline && newline != c->querybuf && *(newline-1) == '\r')
+    if (newline && newline != c->querybuf+c->qb_pos && *(newline-1) == '\r')
         newline--, linefeed_chars++;
 
     /* Split the input buffer up to the \r\n */
-    querylen = newline-(c->querybuf);
-    aux = sdsnewlen(c->querybuf,querylen);
+    querylen = newline-(c->querybuf+c->qb_pos);
+    aux = sdsnewlen(c->querybuf+c->qb_pos,querylen);
     argv = sdssplitargs(aux,&argc);
     sdsfree(aux);
     if (argv == NULL) {
         addReplyError(c,"Protocol error: unbalanced quotes in request");
-        setProtocolError("unbalanced quotes in inline request",c,0);
+        setProtocolError("unbalanced quotes in inline request",c);
         return C_ERR;
     }
 
@@ -1108,8 +1201,8 @@ int processInlineBuffer(client *c) {
     if (querylen == 0 && c->flags & CLIENT_SLAVE)
         c->repl_ack_time = server.unixtime;
 
-    /* Leave data after the first line of the query in the buffer */
-    sdsrange(c->querybuf,querylen+linefeed_chars,-1);
+    /* Move querybuffer position to the next query in the buffer. */
+    c->qb_pos += querylen+linefeed_chars;
 
     /* Setup argv array on client structure */
     if (argc) {
@@ -1126,25 +1219,23 @@ int processInlineBuffer(client *c) {
             sdsfree(argv[j]);
         }
     }
-#ifndef __ANDROID__
-    zfree(argv);  // This line causes error on the VSD231(2014 model) for some reasons
-#endif
+    zfree(argv);
     return C_OK;
 }
 
-/* Helper function. Trims query buffer to make the function that processes
- * multi bulk requests idempotent. */
+/* Helper function. Record protocol error details in server log,
+ * and set the client as CLIENT_CLOSE_AFTER_REPLY. */
 #define PROTO_DUMP_LEN 128
-static void setProtocolError(const char *errstr, client *c, long pos) {
+static void setProtocolError(const char *errstr, client *c) {
     if (server.verbosity <= LL_VERBOSE) {
         sds client = catClientInfoString(sdsempty(),c);
 
         /* Sample some protocol to given an idea about what was inside. */
         char buf[256];
-        if (sdslen(c->querybuf) < PROTO_DUMP_LEN) {
-            snprintf(buf,sizeof(buf),"Query buffer during protocol error: '%s'", c->querybuf);
+        if (sdslen(c->querybuf)-c->qb_pos < PROTO_DUMP_LEN) {
+            snprintf(buf,sizeof(buf),"Query buffer during protocol error: '%s'", c->querybuf+c->qb_pos);
         } else {
-            snprintf(buf,sizeof(buf),"Query buffer during protocol error: '%.*s' (... more %zu bytes ...) '%.*s'", PROTO_DUMP_LEN/2, c->querybuf, sdslen(c->querybuf)-PROTO_DUMP_LEN, PROTO_DUMP_LEN/2, c->querybuf+sdslen(c->querybuf)-PROTO_DUMP_LEN/2);
+            snprintf(buf,sizeof(buf),"Query buffer during protocol error: '%.*s' (... more %zu bytes ...) '%.*s'", PROTO_DUMP_LEN/2, c->querybuf+c->qb_pos, sdslen(c->querybuf)-c->qb_pos-PROTO_DUMP_LEN, PROTO_DUMP_LEN/2, c->querybuf+sdslen(c->querybuf)-PROTO_DUMP_LEN/2);
         }
 
         /* Remove non printable chars. */
@@ -1160,7 +1251,6 @@ static void setProtocolError(const char *errstr, client *c, long pos) {
         sdsfree(client);
     }
     c->flags |= CLIENT_CLOSE_AFTER_REPLY;
-    sdsrange(c->querybuf,pos,-1);
 }
 
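[Editor's note: the hunks above and below replace the old per-command sdsrange() trimming with a read offset, c->qb_pos, that is advanced while parsing and applied once at the end of processInputBuffer(). A minimal sketch of that pattern on a plain C buffer, illustrative only:]

    #include <stdio.h>
    #include <string.h>

    int main(void) {
        char buf[] = "PING\r\nPING\r\nPIN";   /* last command incomplete */
        size_t pos = 0, len = strlen(buf);

        for (;;) {
            char *nl = memchr(buf + pos, '\n', len - pos);
            if (nl == NULL) break;            /* wait for more input */
            printf("cmd: %.*s\n", (int)(nl - (buf + pos) - 1), buf + pos);
            pos = (size_t)(nl - buf) + 1;     /* just advance the offset */
        }
        /* Single trim at the end instead of one memmove per command. */
        memmove(buf, buf + pos, len - pos + 1);
        printf("leftover: %s\n", buf);
        return 0;
    }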
 /* Process the query buffer for client 'c', setting up the client argument
@@ -1176,7 +1266,6 @@ static void setProtocolError(const char *errstr, client *c, long pos) {
  * to be '*'. Otherwise for inline commands processInlineBuffer() is called. */
 int processMultibulkBuffer(client *c) {
     char *newline = NULL;
-    long pos = 0;
     int ok;
     long long ll;
 
@@ -1185,34 +1274,32 @@ int processMultibulkBuffer(client *c) {
         serverAssertWithInfo(c,NULL,c->argc == 0);
 
         /* Multi bulk length cannot be read without a \r\n */
-        newline = strchr(c->querybuf,'\r');
+        newline = strchr(c->querybuf+c->qb_pos,'\r');
         if (newline == NULL) {
-            if (sdslen(c->querybuf) > PROTO_INLINE_MAX_SIZE) {
+            if (sdslen(c->querybuf)-c->qb_pos > PROTO_INLINE_MAX_SIZE) {
                 addReplyError(c,"Protocol error: too big mbulk count string");
-                setProtocolError("too big mbulk count string",c,0);
+                setProtocolError("too big mbulk count string",c);
             }
             return C_ERR;
         }
 
         /* Buffer should also contain \n */
-        if (newline-(c->querybuf) > ((signed)sdslen(c->querybuf)-2))
+        if (newline-(c->querybuf+c->qb_pos) > (ssize_t)(sdslen(c->querybuf)-c->qb_pos-2))
             return C_ERR;
 
         /* We know for sure there is a whole line since newline != NULL,
         * so go ahead and find out the multi bulk length. */
-        serverAssertWithInfo(c,NULL,c->querybuf[0] == '*');
-        ok = string2ll(c->querybuf+1,newline-(c->querybuf+1),&ll);
+        serverAssertWithInfo(c,NULL,c->querybuf[c->qb_pos] == '*');
+        ok = string2ll(c->querybuf+1+c->qb_pos,newline-(c->querybuf+1+c->qb_pos),&ll);
         if (!ok || ll > 1024*1024) {
             addReplyError(c,"Protocol error: invalid multibulk length");
-            setProtocolError("invalid mbulk count",c,pos);
+            setProtocolError("invalid mbulk count",c);
             return C_ERR;
         }
 
-        pos = (newline-c->querybuf)+2;
-        if (ll <= 0) {
-            sdsrange(c->querybuf,pos,-1);
-            return C_OK;
-        }
+        c->qb_pos = (newline-c->querybuf)+2;
+
+        if (ll <= 0) return C_OK;
 
         c->multibulklen = ll;
 
@@ -1225,64 +1312,67 @@ int processMultibulkBuffer(client *c) {
     while(c->multibulklen) {
         /* Read bulk length if unknown */
         if (c->bulklen == -1) {
-            newline = strchr(c->querybuf+pos,'\r');
+            newline = strchr(c->querybuf+c->qb_pos,'\r');
             if (newline == NULL) {
-                if (sdslen(c->querybuf) > PROTO_INLINE_MAX_SIZE) {
+                if (sdslen(c->querybuf)-c->qb_pos > PROTO_INLINE_MAX_SIZE) {
                     addReplyError(c,
                         "Protocol error: too big bulk count string");
-                    setProtocolError("too big bulk count string",c,0);
+                    setProtocolError("too big bulk count string",c);
                     return C_ERR;
                 }
                 break;
             }
 
             /* Buffer should also contain \n */
-            if (newline-(c->querybuf) > ((signed)sdslen(c->querybuf)-2))
+            if (newline-(c->querybuf+c->qb_pos) > (ssize_t)(sdslen(c->querybuf)-c->qb_pos-2))
                 break;
 
-            if (c->querybuf[pos] != '$') {
+            if (c->querybuf[c->qb_pos] != '$') {
                 addReplyErrorFormat(c,
                     "Protocol error: expected '$', got '%c'",
-                    c->querybuf[pos]);
-                setProtocolError("expected $ but got something else",c,pos);
+                    c->querybuf[c->qb_pos]);
+                setProtocolError("expected $ but got something else",c);
                 return C_ERR;
             }
 
-            ok = string2ll(c->querybuf+pos+1,newline-(c->querybuf+pos+1),&ll);
+            ok = string2ll(c->querybuf+c->qb_pos+1,newline-(c->querybuf+c->qb_pos+1),&ll);
             if (!ok || ll < 0 || ll > server.proto_max_bulk_len) {
                 addReplyError(c,"Protocol error: invalid bulk length");
-                setProtocolError("invalid bulk length",c,pos);
+                setProtocolError("invalid bulk length",c);
                 return C_ERR;
             }
 
-            pos += newline-(c->querybuf+pos)+2;
+            c->qb_pos = newline-c->querybuf+2;
             if (ll >= PROTO_MBULK_BIG_ARG) {
-                size_t qblen;
-
                 /* If we are going to read a large object from network
                  * try to make it likely that it will start at c->querybuf
                  * boundary so that we can optimize object creation
-                 * avoiding a large copy of data. */
-                sdsrange(c->querybuf,pos,-1);
-                pos = 0;
-                qblen = sdslen(c->querybuf);
-                /* Hint the sds library about the amount of bytes this string is
-                 * going to contain. */
-                if (qblen < (size_t)ll+2)
-                    c->querybuf = sdsMakeRoomFor(c->querybuf,ll+2-qblen);
+                 * avoiding a large copy of data.
+                 *
+                 * But only when the data we have not parsed is less than
+                 * or equal to ll+2. If the data length is greater than
+                 * ll+2, trimming querybuf is just a waste of time, because
+                 * at this time the querybuf contains not only our bulk. */
+                if (sdslen(c->querybuf)-c->qb_pos <= (size_t)ll+2) {
+                    sdsrange(c->querybuf,c->qb_pos,-1);
+                    c->qb_pos = 0;
+                    /* Hint the sds library about the amount of bytes this string is
+                     * going to contain. */
+                    c->querybuf = sdsMakeRoomFor(c->querybuf,ll+2);
+                }
             }
             c->bulklen = ll;
         }
 
         /* Read bulk argument */
-        if (sdslen(c->querybuf)-pos < (size_t)(c->bulklen+2)) {
+        if (sdslen(c->querybuf)-c->qb_pos < (size_t)(c->bulklen+2)) {
             /* Not enough data (+2 == trailing \r\n) */
             break;
         } else {
             /* Optimization: if the buffer contains JUST our bulk element
              * instead of creating a new object by *copying* the sds we
              * just use the current sds string. */
-            if (pos == 0 &&
+            if (c->qb_pos == 0 &&
                 c->bulklen >= PROTO_MBULK_BIG_ARG &&
                 sdslen(c->querybuf) == (size_t)(c->bulklen+2))
             {
@@ -1290,22 +1380,18 @@ int processMultibulkBuffer(client *c) {
                 sdsIncrLen(c->querybuf,-2); /* remove CRLF */
                 /* Assume that if we saw a fat argument we'll see another one
                  * likely... */
-                c->querybuf = sdsnewlen(NULL,c->bulklen+2);
+                c->querybuf = sdsnewlen(SDS_NOINIT,c->bulklen+2);
                 sdsclear(c->querybuf);
-                pos = 0;
             } else {
                 c->argv[c->argc++] =
-                    createStringObject(c->querybuf+pos,c->bulklen);
-                pos += c->bulklen+2;
+                    createStringObject(c->querybuf+c->qb_pos,c->bulklen);
+                c->qb_pos += c->bulklen+2;
             }
             c->bulklen = -1;
             c->multibulklen--;
         }
     }
 
-    /* Trim to pos */
-    if (pos) sdsrange(c->querybuf,pos,-1);
-
     /* We're done when c->multibulk == 0 */
     if (c->multibulklen == 0) return C_OK;
 
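[Editor's note: the processMultibulkBuffer() changes above preserve the zero-copy path for big bulk arguments: when querybuf holds exactly one large argument, the buffer itself becomes the argument and a fresh buffer is allocated, instead of copying the payload. A toy sketch of that ownership transfer, using plain malloc'd strings in place of sds; all names here are hypothetical:]

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void) {
        size_t bulklen = 5;
        char *querybuf = malloc(bulklen + 3);
        memcpy(querybuf, "hello\r\n", bulklen + 3); /* payload + CRLF + NUL */

        /* Buffer is exactly our bulk: steal it instead of copying it. */
        char *arg = querybuf;
        arg[bulklen] = '\0';              /* chop CRLF, like sdsIncrLen(-2) */
        querybuf = malloc(bulklen + 2);   /* fresh buffer for the next read */
        querybuf[0] = '\0';

        printf("arg: %s (payload was never memcpy'd)\n", arg);
        free(arg);
        free(querybuf);
        return 0;
    }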
@@ -1319,14 +1405,21 @@ int processMultibulkBuffer(client *c) {
  * pending query buffer, already representing a full command, to process. */
 void processInputBuffer(client *c) {
     server.current_client = c;
+
     /* Keep processing while there is something in the input buffer */
-    while(sdslen(c->querybuf)) {
+    while(c->qb_pos < sdslen(c->querybuf)) {
         /* Return if clients are paused. */
         if (!(c->flags & CLIENT_SLAVE) && clientsArePaused()) break;
 
         /* Immediately abort if the client is in the middle of something. */
         if (c->flags & CLIENT_BLOCKED) break;
 
+        /* Don't process input from the master while there is a busy script
+         * condition on the slave. We want just to accumulate the replication
+         * stream (instead of replying -BUSY like we do with other clients) and
+         * later resume the processing. */
+        if (server.lua_timedout && c->flags & CLIENT_MASTER) break;
+
         /* CLIENT_CLOSE_AFTER_REPLY closes the connection once the reply is
          * written to the client. Make sure to not let the reply grow after
          * this flag has been set (i.e. don't process more commands).
@@ -1336,7 +1429,7 @@ void processInputBuffer(client *c) {
 
         /* Determine request type when unknown. */
         if (!c->reqtype) {
-            if (c->querybuf[0] == '*') {
+            if (c->querybuf[c->qb_pos] == '*') {
                 c->reqtype = PROTO_REQ_MULTIBULK;
             } else {
                 c->reqtype = PROTO_REQ_INLINE;
@@ -1359,7 +1452,7 @@ void processInputBuffer(client *c) {
             if (processCommand(c) == C_OK) {
                 if (c->flags & CLIENT_MASTER && !(c->flags & CLIENT_MULTI)) {
                     /* Update the applied replication offset of our master. */
-                    c->reploff = c->read_reploff - sdslen(c->querybuf);
+                    c->reploff = c->read_reploff - sdslen(c->querybuf) + c->qb_pos;
                 }
 
                 /* Don't reset the client structure for clients blocked in a
@@ -1375,9 +1468,35 @@ void processInputBuffer(client *c) {
             if (server.current_client == NULL) break;
         }
     }
+
+    /* Trim to pos */
+    if (c->qb_pos) {
+        sdsrange(c->querybuf,c->qb_pos,-1);
+        c->qb_pos = 0;
+    }
+
     server.current_client = NULL;
 }
 
+/* This is a wrapper for processInputBuffer that also handles forwarding
+ * the replication stream to the sub-slaves, in case the client 'c'
+ * is flagged as master. Usually you want to call this instead of the
+ * raw processInputBuffer(). */
+void processInputBufferAndReplicate(client *c) {
+    if (!(c->flags & CLIENT_MASTER)) {
+        processInputBuffer(c);
+    } else {
+        size_t prev_offset = c->reploff;
+        processInputBuffer(c);
+        size_t applied = c->reploff - prev_offset;
+        if (applied) {
+            replicationFeedSlavesFromMasterStream(server.slaves,
+                    c->pending_querybuf, applied);
+            sdsrange(c->pending_querybuf,applied,-1);
+        }
+    }
+}
+
 void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) {
     client *c = (client*) privdata;
     int nread, readlen;
@@ -1397,7 +1516,9 @@ void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) {
     {
         ssize_t remaining = (size_t)(c->bulklen+2)-sdslen(c->querybuf);
 
-        if (remaining < readlen) readlen = remaining;
+        /* Note that the 'remaining' variable may be zero in some edge case,
+         * for example once we resume a blocked client after CLIENT PAUSE. */
+        if (remaining > 0 && remaining < readlen) readlen = remaining;
     }
 
     qblen = sdslen(c->querybuf);
@@ -1445,18 +1566,7 @@ void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) {
      * was actually applied to the master state: this quantity, and its
     * corresponding part of the replication stream, will be propagated to
      * the sub-slaves and to the replication backlog. */
-    if (!(c->flags & CLIENT_MASTER)) {
-        processInputBuffer(c);
-    } else {
-        size_t prev_offset = c->reploff;
-        processInputBuffer(c);
-        size_t applied = c->reploff - prev_offset;
-        if (applied) {
-            replicationFeedSlavesFromMasterStream(server.slaves,
-                    c->pending_querybuf, applied);
-            sdsrange(c->pending_querybuf,applied,-1);
-        }
-    }
+    processInputBufferAndReplicate(c);
 }
 
 void getClientsMaxBuffers(unsigned long *longest_output_list,
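[Editor's note: processInputBufferAndReplicate() above forwards to sub-slaves only the portion of the master stream that was actually applied (the reploff delta), then drops exactly that prefix from pending_querybuf. A self-contained sketch of that bookkeeping, with made-up values standing in for the client fields:]

    #include <stdio.h>
    #include <string.h>

    int main(void) {
        char pending[64] = "SET k v\r\nINCR k\r\n";
        size_t prev_offset = 1000;        /* c->reploff before parsing */
        size_t reploff = 1000 + 9;        /* only "SET k v\r\n" applied */

        size_t applied = reploff - prev_offset;
        if (applied) {
            /* Feed exactly the applied bytes downstream... */
            printf("forward %zu bytes: %.*s", applied, (int)applied, pending);
            /* ...and drop the same prefix, like sdsrange() does. */
            memmove(pending, pending + applied, strlen(pending) - applied + 1);
        }
        printf("still pending: %s", pending);
        return 0;
    }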
@@ -1527,6 +1637,7 @@ sds catClientInfoString(sds s, client *client) {
         *p++ = 'S';
     }
     if (client->flags & CLIENT_MASTER) *p++ = 'M';
+    if (client->flags & CLIENT_PUBSUB) *p++ = 'P';
     if (client->flags & CLIENT_MULTI) *p++ = 'x';
     if (client->flags & CLIENT_BLOCKED) *p++ = 'b';
     if (client->flags & CLIENT_DIRTY_CAS) *p++ = 'd';
@@ -1565,15 +1676,16 @@ sds catClientInfoString(sds s, client *client) {
         client->lastcmd ? client->lastcmd->name : "NULL");
 }
 
-sds getAllClientsInfoString(void) {
+sds getAllClientsInfoString(int type) {
     listNode *ln;
     listIter li;
     client *client;
-    sds o = sdsnewlen(NULL,200*listLength(server.clients));
+    sds o = sdsnewlen(SDS_NOINIT,200*listLength(server.clients));
     sdsclear(o);
     listRewind(server.clients,&li);
     while ((ln = listNext(&li)) != NULL) {
         client = listNodeValue(ln);
+        if (type != -1 && getClientType(client) != type) continue;
         o = catClientInfoString(o,client);
         o = sdscatlen(o,"\n",1);
     }
@@ -1585,9 +1697,42 @@ void clientCommand(client *c) {
     listIter li;
     client *client;
 
-    if (!strcasecmp(c->argv[1]->ptr,"list") && c->argc == 2) {
+    if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"help")) {
+        const char *help[] = {
+"id                     -- Return the ID of the current connection.",
+"getname                -- Return the name of the current connection.",
+"kill <ip:port>         -- Kill connection made from <ip:port>.",
+"kill