From f46fdb7d22f31871be9748550b68218056d53f6e Mon Sep 17 00:00:00 2001 From: Harihara Kadayam Date: Thu, 26 Oct 2023 08:35:49 -0700 Subject: [PATCH] Definition of bitmap_blk_allocator --- conanfile.py | 2 +- src/include/homestore/homestore.hpp | 3 +- src/lib/blkalloc/CMakeLists.txt | 2 +- src/lib/blkalloc/append_blk_allocator.cpp | 15 ++ src/lib/blkalloc/append_blk_allocator.h | 6 + ...allocator.cpp => bitmap_blk_allocator.cpp} | 0 src/lib/blkalloc/bitmap_blk_allocator.h | 123 +++++++++++ src/lib/blkalloc/blk_allocator.h | 194 +++--------------- src/lib/blkalloc/fixed_blk_allocator.cpp | 33 +-- src/lib/blkalloc/fixed_blk_allocator.h | 50 +++++ src/lib/blkalloc/varsize_blk_allocator.cpp | 34 +-- src/lib/blkalloc/varsize_blk_allocator.h | 20 +- src/lib/device/virtual_dev.cpp | 43 +--- src/lib/device/virtual_dev.hpp | 2 - src/lib/homestore.cpp | 12 +- src/lib/meta/meta_blk_service.cpp | 1 - src/tests/test_blkalloc.cpp | 11 +- 17 files changed, 269 insertions(+), 282 deletions(-) rename src/lib/blkalloc/{blk_allocator.cpp => bitmap_blk_allocator.cpp} (100%) create mode 100644 src/lib/blkalloc/bitmap_blk_allocator.h create mode 100644 src/lib/blkalloc/fixed_blk_allocator.h diff --git a/conanfile.py b/conanfile.py index 7d4a73a3a..a2c858e77 100644 --- a/conanfile.py +++ b/conanfile.py @@ -5,7 +5,7 @@ class HomestoreConan(ConanFile): name = "homestore" - version = "4.5.8" + version = "4.5.9" homepage = "https://github.com/eBay/Homestore" description = "HomeStore Storage Engine" diff --git a/src/include/homestore/homestore.hpp b/src/include/homestore/homestore.hpp index c27594a87..3d099872d 100644 --- a/src/include/homestore/homestore.hpp +++ b/src/include/homestore/homestore.hpp @@ -124,7 +124,6 @@ class HomeStore { HS_SERVICE m_services; // Services homestore is starting with hs_before_services_starting_cb_t m_before_services_starting_cb{nullptr}; - bool m_init_done{false}; public: @@ -154,6 +153,7 @@ class HomeStore { // cap_attrs get_system_capacity() const; // Need to move this to homeblks/homeobj bool is_first_time_boot() const; + bool is_initializing() const { return !m_init_done; } // Getters bool has_index_service() const; @@ -174,7 +174,6 @@ class HomeStore { private: void init_cache(); - void init_done(); shared< VirtualDev > create_vdev_cb(const vdev_info& vinfo, bool load_existing); uint64_t pct_to_size(float pct, HSDevType dev_type) const; void do_start(); diff --git a/src/lib/blkalloc/CMakeLists.txt b/src/lib/blkalloc/CMakeLists.txt index 54137942f..1e18e629d 100644 --- a/src/lib/blkalloc/CMakeLists.txt +++ b/src/lib/blkalloc/CMakeLists.txt @@ -6,7 +6,7 @@ include_directories (BEFORE .) 
add_library(hs_blkalloc OBJECT) target_sources(hs_blkalloc PRIVATE blk.cpp - blk_allocator.cpp + bitmap_blk_allocator.cpp fixed_blk_allocator.cpp varsize_blk_allocator.cpp blk_cache_queue.cpp diff --git a/src/lib/blkalloc/append_blk_allocator.cpp b/src/lib/blkalloc/append_blk_allocator.cpp index 6505eca49..471b21442 100644 --- a/src/lib/blkalloc/append_blk_allocator.cpp +++ b/src/lib/blkalloc/append_blk_allocator.cpp @@ -119,6 +119,20 @@ BlkAllocStatus AppendBlkAllocator::alloc(blk_count_t nblks, const blk_alloc_hint return BlkAllocStatus::SUCCESS; } +BlkAllocStatus AppendBlkAllocator::alloc_on_disk(BlkId const&) { + DEBUG_ASSERT(false, "alloc_on_disk called on non-persisted allocator"); + return BlkAllocStatus::SUCCESS; +} + +void AppendBlkAllocator::free_on_disk(BlkId const&) { + DEBUG_ASSERT(false, "free_on_disk called on non-persisted allocator"); +} + +bool AppendBlkAllocator::is_blk_alloced_on_disk(BlkId const&, bool) const { + DEBUG_ASSERT(false, "is_blk_alloced_on_disk called on non-persisted allocator"); + return false; +} + // // cp_flush doesn't need CPGuard as it is triggered by CPMgr which already handles the reference check; // @@ -183,4 +197,5 @@ blk_num_t AppendBlkAllocator::get_freeable_nblks() const { return m_freeable_nbl blk_num_t AppendBlkAllocator::get_defrag_nblks() const { return get_freeable_nblks() - available_blks(); } +nlohmann::json AppendBlkAllocator::get_status(int log_level) const { return nlohmann::json{}; } } // namespace homestore diff --git a/src/lib/blkalloc/append_blk_allocator.h b/src/lib/blkalloc/append_blk_allocator.h index 1818ab8e8..63ddd527b 100644 --- a/src/lib/blkalloc/append_blk_allocator.h +++ b/src/lib/blkalloc/append_blk_allocator.h @@ -79,6 +79,10 @@ class AppendBlkAllocator : public BlkAllocator { BlkAllocStatus alloc(blk_count_t nblks, blk_alloc_hints const& hints, BlkId& out_blkid) override; void free(BlkId const& b) override; + BlkAllocStatus alloc_on_disk(BlkId const& in_bid) override; + void free_on_disk(BlkId const& b) override; + bool is_blk_alloced_on_disk(BlkId const& b, bool use_lock = false) const override; + /** * @brief : the number of available blocks that can be allocated by the AppendBlkAllocator. * @return : the number of available blocks. @@ -120,6 +124,8 @@ class AppendBlkAllocator : public BlkAllocator { void cp_flush(CP* cp) override; + nlohmann::json get_status(int log_level) const override; + private: std::string get_name() const; void on_meta_blk_found(const sisl::byte_view& buf, void* meta_cookie); diff --git a/src/lib/blkalloc/blk_allocator.cpp b/src/lib/blkalloc/bitmap_blk_allocator.cpp similarity index 100% rename from src/lib/blkalloc/blk_allocator.cpp rename to src/lib/blkalloc/bitmap_blk_allocator.cpp diff --git a/src/lib/blkalloc/bitmap_blk_allocator.h b/src/lib/blkalloc/bitmap_blk_allocator.h new file mode 100644 index 000000000..a9063dd36 --- /dev/null +++ b/src/lib/blkalloc/bitmap_blk_allocator.h @@ -0,0 +1,123 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. 
See the License for the * specific language governing permissions and limitations under the License. * + *********************************************************************************/ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include "common/homestore_config.hpp" +#include "common/homestore_assert.hpp" + +#include "blk_allocator.h" + +namespace homestore { + +class BlkAllocPortion { +private: + mutable std::mutex m_blk_lock; + blk_num_t m_portion_num; + blk_temp_t m_temperature; + blk_num_t m_available_blocks; + +public: + BlkAllocPortion(blk_temp_t temp = default_temperature()) : m_temperature(temp) {} + ~BlkAllocPortion() = default; + BlkAllocPortion(BlkAllocPortion const&) = delete; + BlkAllocPortion(BlkAllocPortion&&) noexcept = delete; + BlkAllocPortion& operator=(BlkAllocPortion const&) = delete; + BlkAllocPortion& operator=(BlkAllocPortion&&) noexcept = delete; + + auto portion_auto_lock() const { return std::scoped_lock< std::mutex >(m_blk_lock); } + blk_num_t get_portion_num() const { return m_portion_num; } + blk_num_t get_available_blocks() const { return m_available_blocks; } + blk_temp_t temperature() const { return m_temperature; } + + void set_portion_num(blk_num_t portion_num) { m_portion_num = portion_num; } + void set_temperature(const blk_temp_t temp) { m_temperature = temp; } + static constexpr blk_temp_t default_temperature() { return 1; } +}; + +class CP; +class BitmapBlkAllocator : public BlkAllocator { +public: + BitmapBlkAllocator(BlkAllocConfig const& cfg, bool is_fresh, chunk_num_t id = 0); + BitmapBlkAllocator(BitmapBlkAllocator const&) = delete; + BitmapBlkAllocator(BitmapBlkAllocator&&) noexcept = delete; + BitmapBlkAllocator& operator=(BitmapBlkAllocator const&) = delete; + BitmapBlkAllocator& operator=(BitmapBlkAllocator&&) noexcept = delete; + virtual ~BitmapBlkAllocator() = default; + + virtual void load() = 0; + BlkAllocStatus alloc_on_disk(BlkId const& in_bid) override; + void free_on_disk(BlkId const& b) override; + bool is_blk_alloced_on_disk(BlkId const& b, bool use_lock = false) const override; + void cp_flush(CP* cp) override; + + blk_num_t get_num_portions() const { return (m_num_blks - 1) / m_blks_per_portion + 1; } + blk_num_t get_blks_per_portion() const { return m_blks_per_portion; } + + BlkAllocPortion& get_blk_portion(blk_num_t portion_num) { + HS_DBG_ASSERT_LT(portion_num, get_num_portions(), "Portion num is not in range"); + return m_blk_portions[portion_num]; + } + + blk_num_t blknum_to_portion_num(const blk_num_t blknum) const { return blknum / m_blks_per_portion; } + BlkAllocPortion& blknum_to_portion(blk_num_t blknum) { return m_blk_portions[blknum_to_portion_num(blknum)]; } + BlkAllocPortion const& blknum_to_portion_const(blk_num_t blknum) const { + return m_blk_portions[blknum_to_portion_num(blknum)]; + } + + sisl::Bitset const* get_disk_bitmap() const { return is_persistent() ? m_disk_bm.get() : nullptr; } + + /* Get status */ + nlohmann::json get_status(int log_level) const override; + +private: + void do_init(); + sisl::ThreadVector< BlkId >* get_alloc_blk_list(); + void on_meta_blk_found(void* mblk_cookie, sisl::byte_view const& buf, size_t size); + + // Acquire the underlying bitmap buffer; while the caller holds it, all new allocations + // are captured in a separate list and then pushed into the buffer once it is released. + // NOTE: THIS IS A NON-THREAD-SAFE METHOD.
Caller is expected to ensure synchronization between multiple + // acquires/releases + sisl::byte_array acquire_underlying_buffer(); + void release_underlying_buffer(); + +protected: + blk_num_t m_blks_per_portion; + +private: + sisl::ThreadVector< BlkId >* m_alloc_blkid_list{nullptr}; + std::unique_ptr< BlkAllocPortion[] > m_blk_portions; + std::unique_ptr< sisl::Bitset > m_disk_bm{nullptr}; + std::atomic< bool > m_is_disk_bm_dirty{true}; // initially disk_bm treated as dirty + void* m_meta_blk_cookie{nullptr}; +}; +} // namespace homestore \ No newline at end of file diff --git a/src/lib/blkalloc/blk_allocator.h b/src/lib/blkalloc/blk_allocator.h index fb75bc0f4..8f63aa0f8 100644 --- a/src/lib/blkalloc/blk_allocator.h +++ b/src/lib/blkalloc/blk_allocator.h @@ -1,7 +1,6 @@ /********************************************************************************* * Modifications Copyright 2017-2019 eBay Inc. * - * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at @@ -13,8 +12,7 @@ * specific language governing permissions and limitations under the License. * *********************************************************************************/ -#ifndef ALLOCATOR_H -#define ALLOCATOR_H +#pragma once #include #include @@ -65,37 +63,28 @@ struct BlkAllocConfig { const uint32_t m_align_size; const blk_num_t m_capacity; const blk_num_t m_blks_per_portion; + const bool m_persistent{false}; const std::string m_unique_name; - bool m_auto_recovery{false}; - bool m_realtime_bm_on{false}; // only specifically turn off in BlkAlloc Test; public: - BlkAllocConfig(uint32_t blk_size, uint32_t align_size, uint64_t size, const std::string& name = "", - bool realtime_bm_on = true) : + BlkAllocConfig(uint32_t blk_size, uint32_t align_size, uint64_t size, bool persistent, + const std::string& name = "") : m_blk_size{blk_size}, m_align_size{align_size}, m_capacity{static_cast< blk_num_t >(size / blk_size)}, m_blks_per_portion{std::min(HS_DYNAMIC_CONFIG(blkallocator.num_blks_per_portion), m_capacity)}, - m_unique_name{name} { -#ifdef _PRERELEASE - // for pre-release build, take it from input which is defaulted to true; - m_realtime_bm_on = realtime_bm_on; -#else - // for release build, take it from dynamic config which is defaulted to false - m_realtime_bm_on = HS_DYNAMIC_CONFIG(blkallocator.realtime_bitmap_on); -#endif - } + m_persistent{persistent}, + m_unique_name{name} {} BlkAllocConfig(BlkAllocConfig const&) = default; BlkAllocConfig(BlkAllocConfig&&) noexcept = delete; BlkAllocConfig& operator=(BlkAllocConfig const&) = default; BlkAllocConfig& operator=(BlkAllocConfig&&) noexcept = delete; virtual ~BlkAllocConfig() = default; - void set_auto_recovery(bool is_auto_recovery) { m_auto_recovery = is_auto_recovery; } virtual std::string to_string() const { - return fmt::format("BlkSize={} TotalBlks={} BlksPerPortion={} auto_recovery={}", in_bytes(m_blk_size), - in_bytes(m_capacity), m_blks_per_portion, m_auto_recovery); + return fmt::format("BlkSize={} TotalBlks={} BlksPerPortion={} persistent={}", in_bytes(m_blk_size), + in_bytes(m_capacity), m_blks_per_portion, m_persistent); } }; @@ -108,34 +97,6 @@ VENUM(BlkOpStatus, uint8_t, ENUM(BlkAllocatorState, uint8_t, INIT, WAITING, SWEEP_SCHEDULED, SWEEPING, EXITING, DONE); -class BlkAllocPortion { -private: - mutable std::mutex m_blk_lock; - blk_num_t m_portion_num; - blk_temp_t m_temperature; - blk_num_t m_available_blocks; - -public: - 
BlkAllocPortion(blk_temp_t temp = default_temperature()) : m_temperature(temp) {} - ~BlkAllocPortion() = default; - BlkAllocPortion(BlkAllocPortion const&) = delete; - BlkAllocPortion(BlkAllocPortion&&) noexcept = delete; - BlkAllocPortion& operator=(BlkAllocPortion const&) = delete; - BlkAllocPortion& operator=(BlkAllocPortion&&) noexcept = delete; - - auto portion_auto_lock() const { return std::scoped_lock< std::mutex >(m_blk_lock); } - blk_num_t get_portion_num() const { return m_portion_num; } - blk_num_t get_available_blocks() const { return m_available_blocks; } - blk_temp_t temperature() const { return m_temperature; } - - void set_portion_num(blk_num_t portion_num) { m_portion_num = portion_num; } - void set_available_blocks(const blk_num_t available_blocks) { m_available_blocks = available_blocks; } - blk_num_t decrease_available_blocks(const blk_num_t count) { return (m_available_blocks -= count); } - blk_num_t increase_available_blocks(const blk_num_t count) { return (m_available_blocks += count); } - void set_temperature(const blk_temp_t temp) { m_temperature = temp; } - static constexpr blk_temp_t default_temperature() { return 1; } -}; - /* We have the following design requirement it is used in auto recovery mode * - Free BlkIDs should not be re allocated until its free status is persisted on disk. Reasons :- * - It helps is reconstructing btree in crash as it depends on old blkid to read the data @@ -175,7 +136,13 @@ class BlkAllocPortion { class CP; class BlkAllocator { public: - BlkAllocator(BlkAllocConfig const& cfg, chunk_num_t id = 0); + BlkAllocator(BlkAllocConfig const& cfg, chunk_num_t id = 0) : + m_name{cfg.m_unique_name}, + m_blk_size{cfg.m_blk_size}, + m_align_size{cfg.m_align_size}, + m_num_blks{cfg.m_capacity}, + m_chunk_id{id}, + m_is_persistent{cfg.m_persistent} {} BlkAllocator(BlkAllocator const&) = delete; BlkAllocator(BlkAllocator&&) noexcept = delete; BlkAllocator& operator=(BlkAllocator const&) = delete; @@ -184,151 +151,40 @@ class BlkAllocator { virtual BlkAllocStatus alloc_contiguous(BlkId& bid) = 0; virtual BlkAllocStatus alloc(blk_count_t nblks, blk_alloc_hints const& hints, BlkId& out_blkid) = 0; + virtual BlkAllocStatus alloc_on_disk(BlkId const& bid) = 0; + virtual void free(BlkId const& id) = 0; + virtual void free_on_disk(BlkId const& bid) = 0; + virtual blk_num_t available_blks() const = 0; virtual blk_num_t get_used_blks() const = 0; virtual bool is_blk_alloced(BlkId const& b, bool use_lock = false) const = 0; - virtual std::string to_string() const = 0; + virtual bool is_blk_alloced_on_disk(BlkId const& b, bool use_lock = false) const = 0; - virtual void cp_flush(CP* cp); // TODO: it needs to be a pure virtual function after bitmap blkallocator is derived - // from base BlkAllocator; - - sisl::Bitset* get_disk_bm_mutable() { - set_disk_bm_dirty(); - return m_disk_bm.get(); - } - const sisl::Bitset* get_disk_bm_const() const { return m_disk_bm.get(); }; - sisl::Bitset* get_realtime_bm() { return m_realtime_bm.get(); } - const sisl::Bitset* get_realtime_bm() const { return m_realtime_bm.get(); } - - bool need_flush_dirty_bm() const { return is_disk_bm_dirty; } - - void set_disk_bm(std::unique_ptr< sisl::Bitset > recovered_bm); - BlkAllocPortion& get_blk_portion(blk_num_t portion_num) { - HS_DBG_ASSERT_LT(portion_num, get_num_portions(), "Portion num is not in range"); - return m_blk_portions[portion_num]; - } - - virtual void inited(); + virtual std::string to_string() const = 0; + virtual void cp_flush(CP* cp) = 0; void 
incr_alloced_blk_count(blk_count_t nblks) { m_alloced_blk_count.fetch_add(nblks, std::memory_order_relaxed); } - void decr_alloced_blk_count(blk_count_t nblks) { m_alloced_blk_count.fetch_sub(nblks, std::memory_order_relaxed); } - int64_t get_alloced_blk_count() const { return m_alloced_blk_count.load(std::memory_order_acquire); } - /* It is used during recovery in both mode :- auto recovery and manual recovery - * It is also used in normal IO during auto recovery mode. - */ - - BlkAllocStatus alloc_on_disk(BlkId const& in_bid); - - BlkAllocStatus alloc_on_realtime(BlkId const& b); - - bool is_blk_alloced_on_disk(BlkId const& b, bool use_lock = false) const; - - // - // Caller should consume the return value and print context when return false; - // - [[nodiscard]] bool free_on_realtime(BlkId const& b); - - void free_on_disk(BlkId const& b); - - /* CP start is called when all its consumers have purged their free lists and now want to persist the - * disk bitmap. - */ - // sisl::byte_array cp_start([[maybe_unused]] const std::shared_ptr< blkalloc_cp >& id); - - // void cp_done(); - uint32_t get_align_size() const { return m_align_size; } blk_num_t get_total_blks() const { return m_num_blks; } - blk_num_t get_blks_per_portion() const { return m_blks_per_portion; } - blk_num_t get_num_portions() const { return (m_num_blks - 1) / m_blks_per_portion + 1; } const std::string& get_name() const { return m_name; } - bool auto_recovery_on() const { return m_auto_recovery; } + bool is_persistent() const { return m_is_persistent; } uint32_t get_blk_size() const { return m_blk_size; } - blk_num_t blknum_to_portion_num(const blk_num_t blknum) const { return blknum / m_blks_per_portion; } - BlkAllocPortion& blknum_to_portion(blk_num_t blknum) { return m_blk_portions[blknum_to_portion_num(blknum)]; } - BlkAllocPortion const& blknum_to_portion_const(blk_num_t blknum) const { - return m_blk_portions[blknum_to_portion_num(blknum)]; - } - - void create_debug_bm(); - void update_debug_bm(BlkId const& bid); - bool verify_debug_bm(bool free_debug_bm); - /* Get status */ - nlohmann::json get_status(int log_level) const; - - bool realtime_bm_on() const { return (m_realtime_bm_on && m_auto_recovery); } - void reset_disk_bm_dirty() { is_disk_bm_dirty = false; } - -private: - sisl::Bitset* get_debug_bm() { return m_debug_bm.get(); } - sisl::ThreadVector< BlkId >* get_alloc_blk_list(); - void set_disk_bm_dirty() { is_disk_bm_dirty = true; } - - // Acquire the underlying bitmap buffer and while the caller has acquired, all the new allocations - // will be captured in a separate list and then pushes into buffer once released. - // NOTE: THIS IS NON-THREAD SAFE METHOD. 
Caller is expected to ensure synchronization between multiple - // acquires/releases - sisl::byte_array acquire_underlying_buffer(); - void release_underlying_buffer(); + virtual nlohmann::json get_status(int log_level) const = 0; protected: const std::string m_name; const uint32_t m_blk_size; const uint32_t m_align_size; const blk_num_t m_num_blks; - blk_num_t m_blks_per_portion; - const bool m_auto_recovery{false}; - const bool m_realtime_bm_on{false}; // only specifically turn off in BlkAlloc Test; - bool m_inited{false}; const chunk_num_t m_chunk_id; - -private: - sisl::ThreadVector< BlkId >* m_alloc_blkid_list{nullptr}; - std::unique_ptr< BlkAllocPortion[] > m_blk_portions; - std::unique_ptr< sisl::Bitset > m_disk_bm{nullptr}; - std::unique_ptr< sisl::Bitset > m_debug_bm{ - nullptr}; // it is used only for debugging during boot or when HS is in restricted mode - std::unique_ptr< sisl::Bitset > m_realtime_bm{ - nullptr}; // it is used only for debugging to keep track of allocated/free blkids in real time + const bool m_is_persistent; std::atomic< int64_t > m_alloced_blk_count{0}; - std::atomic< bool > is_disk_bm_dirty{true}; // initially disk_bm treated as dirty -}; - -/* FixedBlkAllocator is a fast allocator where it allocates only 1 size block and ALL free blocks are cached instead - * of selectively caching few blks which are free. Thus there is no sweeping of bitmap or other to refill the cache. - * It does not support temperature of blocks and allocates simply on first come first serve basis - */ -class FixedBlkAllocator : public BlkAllocator { -public: - FixedBlkAllocator(BlkAllocConfig const& cfg, bool init, chunk_num_t chunk_id); - FixedBlkAllocator(FixedBlkAllocator const&) = delete; - FixedBlkAllocator(FixedBlkAllocator&&) noexcept = delete; - FixedBlkAllocator& operator=(FixedBlkAllocator const&) = delete; - FixedBlkAllocator& operator=(FixedBlkAllocator&&) noexcept = delete; - ~FixedBlkAllocator() override = default; - - BlkAllocStatus alloc_contiguous(BlkId& bid) override; - BlkAllocStatus alloc(blk_count_t nblks, blk_alloc_hints const& hints, BlkId& out_blkid) override; - void free(BlkId const& b) override; - void inited() override; - - blk_num_t available_blks() const override; - blk_num_t get_used_blks() const override; - bool is_blk_alloced(BlkId const& in_bid, bool use_lock = false) const override; - std::string to_string() const override; - -private: - blk_num_t init_portion(BlkAllocPortion& portion, blk_num_t start_blk_num); - -private: - folly::MPMCQueue< BlkId > m_blk_q; }; } // namespace homestore -#endif diff --git a/src/lib/blkalloc/fixed_blk_allocator.cpp b/src/lib/blkalloc/fixed_blk_allocator.cpp index d922edf03..3b1776d7c 100644 --- a/src/lib/blkalloc/fixed_blk_allocator.cpp +++ b/src/lib/blkalloc/fixed_blk_allocator.cpp @@ -17,22 +17,21 @@ #include #include "common/homestore_assert.hpp" -#include "blk_allocator.h" +#include "fixed_blk_allocator.h" namespace homestore { -FixedBlkAllocator::FixedBlkAllocator(BlkAllocConfig const& cfg, bool init, chunk_num_t chunk_id) : - BlkAllocator(cfg, chunk_id), m_blk_q{get_total_blks()} { - LOGINFO("total blks: {}", get_total_blks()); - if (init) { inited(); } +FixedBlkAllocator::FixedBlkAllocator(BlkAllocConfig const& cfg, bool is_fresh, chunk_num_t chunk_id) : + BitmapBlkAllocator(cfg, is_fresh, chunk_id), m_blk_q{get_total_blks()} { + LOGINFO("FixedBlkAllocator total blks: {}", get_total_blks()); + + if (is_fresh || !is_persistent()) { load(); } } -void FixedBlkAllocator::inited() { +void FixedBlkAllocator::load() { 
blk_num_t blk_num{0}; - while (blk_num < get_total_blks()) { blk_num = init_portion(blknum_to_portion(blk_num), blk_num); } - BlkAllocator::inited(); } blk_num_t FixedBlkAllocator::init_portion(BlkAllocPortion& portion, blk_num_t start_blk_num) { @@ -43,7 +42,7 @@ blk_num_t FixedBlkAllocator::init_portion(BlkAllocPortion& portion, blk_num_t st BlkAllocPortion& cur_portion = blknum_to_portion(blk_num); if (portion.get_portion_num() != cur_portion.get_portion_num()) break; - if (!get_disk_bm_const()->is_bits_set(blk_num, 1)) { + if (!is_persistent() || get_disk_bitmap()->is_bits_reset(blk_num, 1)) { const auto pushed = m_blk_q.write(BlkId{blk_num, 1, m_chunk_id}); HS_DBG_ASSERT_EQ(pushed, true, "Expected to be able to push the blk on fixed capacity Q"); } @@ -64,24 +63,14 @@ BlkAllocStatus FixedBlkAllocator::alloc_contiguous(BlkId& out_blkid) { #ifdef _PRERELEASE if (iomgr_flip::instance()->test_flip("fixed_blkalloc_no_blks")) { return BlkAllocStatus::SPACE_FULL; } #endif - const auto ret = m_blk_q.read(out_blkid); - if (ret) { - // update real time bitmap; - if (realtime_bm_on()) { alloc_on_realtime(out_blkid); } - return BlkAllocStatus::SUCCESS; - } else { - return BlkAllocStatus::SPACE_FULL; - } + return m_blk_q.read(out_blkid) ? BlkAllocStatus::SUCCESS : BlkAllocStatus::SPACE_FULL; } void FixedBlkAllocator::free(BlkId const& b) { HS_DBG_ASSERT_EQ(b.blk_count(), 1, "Multiple blk free for FixedBlkAllocator? allocated by different allocator?"); - // No need to set in cache if it is not recovered. When recovery is complete we copy the disk_bm to cache bm. - if (m_inited) { - const auto pushed = m_blk_q.write(b); - HS_DBG_ASSERT_EQ(pushed, true, "Expected to be able to push the blk on fixed capacity Q"); - } + const auto pushed = m_blk_q.write(b); + HS_DBG_ASSERT_EQ(pushed, true, "Expected to be able to push the blk on fixed capacity Q"); } blk_num_t FixedBlkAllocator::available_blks() const { return m_blk_q.sizeGuess(); } diff --git a/src/lib/blkalloc/fixed_blk_allocator.h b/src/lib/blkalloc/fixed_blk_allocator.h new file mode 100644 index 000000000..a4def4eb3 --- /dev/null +++ b/src/lib/blkalloc/fixed_blk_allocator.h @@ -0,0 +1,50 @@ +/********************************************************************************* + * Modifications Copyright 2017-2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * + *********************************************************************************/ +#pragma once + +#include "bitmap_blk_allocator.h" + +namespace homestore { +/* FixedBlkAllocator is a fast allocator that allocates blocks of a single size, and ALL free blocks are cached instead + * of selectively caching a few of the free blks. Thus there is no sweeping of the bitmap or anything else to refill the cache.
+ * It does not support block temperature and simply allocates on a first-come, first-served basis + */ +class FixedBlkAllocator : public BitmapBlkAllocator { +public: + FixedBlkAllocator(BlkAllocConfig const& cfg, bool is_fresh, chunk_num_t chunk_id); + FixedBlkAllocator(FixedBlkAllocator const&) = delete; + FixedBlkAllocator(FixedBlkAllocator&&) noexcept = delete; + FixedBlkAllocator& operator=(FixedBlkAllocator const&) = delete; + FixedBlkAllocator& operator=(FixedBlkAllocator&&) noexcept = delete; + virtual ~FixedBlkAllocator() = default; + + void load() override; + + BlkAllocStatus alloc_contiguous(BlkId& bid) override; + BlkAllocStatus alloc(blk_count_t nblks, blk_alloc_hints const& hints, BlkId& out_blkid) override; + void free(BlkId const& b) override; + + blk_num_t available_blks() const override; + blk_num_t get_used_blks() const override; + bool is_blk_alloced(BlkId const& in_bid, bool use_lock = false) const override; + std::string to_string() const override; + +private: + blk_num_t init_portion(BlkAllocPortion& portion, blk_num_t start_blk_num); + +private: + folly::MPMCQueue< BlkId > m_blk_q; +}; +} \ No newline at end of file diff --git a/src/lib/blkalloc/varsize_blk_allocator.cpp b/src/lib/blkalloc/varsize_blk_allocator.cpp index 34d2e6dab..33856b28f 100644 --- a/src/lib/blkalloc/varsize_blk_allocator.cpp +++ b/src/lib/blkalloc/varsize_blk_allocator.cpp @@ -50,8 +50,8 @@ std::condition_variable VarsizeBlkAllocator::s_sweeper_cv; std::queue< VarsizeBlkAllocator* > VarsizeBlkAllocator::s_sweeper_queue; std::unordered_set< VarsizeBlkAllocator* > VarsizeBlkAllocator::s_block_allocators; -VarsizeBlkAllocator::VarsizeBlkAllocator(VarsizeBlkAllocConfig const& cfg, bool init, chunk_num_t chunk_id) : - BlkAllocator{cfg, chunk_id}, +VarsizeBlkAllocator::VarsizeBlkAllocator(VarsizeBlkAllocConfig const& cfg, bool is_fresh, chunk_num_t chunk_id) : + BitmapBlkAllocator{cfg, is_fresh, chunk_id}, m_state{BlkAllocatorState::INIT}, m_cfg{cfg}, m_rand_portion_num_generator{0, s_cast< blk_count_t >(get_num_portions() - 1)}, @@ -84,8 +84,7 @@ VarsizeBlkAllocator::VarsizeBlkAllocator(VarsizeBlkAllocConfig const& cfg, bool LOGINFO("m_fb_cache total free blks: {}", m_fb_cache->total_free_blks()); } - // Start a thread which will do sweeping job of free segments - if (init) { inited(); } + if (is_fresh || !is_persistent()) { do_start(); } } VarsizeBlkAllocator::~VarsizeBlkAllocator() { @@ -232,13 +231,15 @@ bool VarsizeBlkAllocator::allocator_state_machine() { return active_state; } -void VarsizeBlkAllocator::inited() { - m_cache_bm->copy(*(get_disk_bm_const())); - BlkAllocator::inited(); +void VarsizeBlkAllocator::load() { + m_cache_bm->copy(*get_disk_bitmap()); BLKALLOC_LOG(INFO, "VarSizeBlkAllocator initialized loading bitmap of size={} used blks={} from persistent storage", in_bytes(m_cache_bm->size()), get_alloced_blk_count()); + do_start(); +} +void VarsizeBlkAllocator::do_start() { // if use slabs then add to sweeper threads queue if (m_cfg.m_use_slabs) { { @@ -367,10 +368,7 @@ void VarsizeBlkAllocator::fill_cache_in_portion(blk_num_t portion_num, blk_cache fill_session.session_id, portion_num, b.start_bit, nblks_added, get_alloced_blk_count()); // Set the bitmap indicating the blocks are allocated - if (nblks_added > 0) { - m_cache_bm->set_bits(b.start_bit, nblks_added); - if (portion.decrease_available_blocks(nblks_added) == 0) break; - } + if (nblks_added > 0) { m_cache_bm->set_bits(b.start_bit, nblks_added); } cur_blk_id = b.start_bit + b.nbits; } } @@ -440,9 +438,6 @@ BlkAllocStatus
VarsizeBlkAllocator::alloc(blk_count_t nblks, blk_alloc_hints con if ((status == BlkAllocStatus::SUCCESS) || (status == BlkAllocStatus::PARTIAL)) { incr_alloced_blk_count(num_allocated); - // update real time bitmap - if (realtime_bm_on()) { alloc_on_realtime(out_mbid); } - #ifdef _PRERELEASE alloc_sanity_check(num_allocated, hints, out_mbid); #endif @@ -616,7 +611,6 @@ blk_count_t VarsizeBlkAllocator::alloc_blks_direct(blk_count_t nblks, blk_alloc_ // Set the bitmap indicating the blocks are allocated m_cache_bm->set_bits(b.start_bit, b.nbits); - if (portion.decrease_available_blocks(b.nbits) == 0) break; cur_blk_id = b.start_bit + b.nbits; } } @@ -633,11 +627,6 @@ blk_count_t VarsizeBlkAllocator::alloc_blks_direct(blk_count_t nblks, blk_alloc_ } void VarsizeBlkAllocator::free(BlkId const& bid) { - if (!m_inited) { - BLKALLOC_LOG(DEBUG, "Free not required for blk num = {}", bid.blk_num()); - return; - } - blk_count_t n_freed = (m_cfg.m_use_slabs && (bid.blk_count() <= m_cfg.highest_slab_blks_count())) ? free_blks_slab(r_cast< MultiBlkId const& >(bid)) : free_blks_direct(r_cast< MultiBlkId const& >(bid)); @@ -684,7 +673,6 @@ blk_count_t VarsizeBlkAllocator::free_blks_direct(MultiBlkId const& bid) { "Expected end bit to be smaller than portion end bit"); BLKALLOC_REL_ASSERT(m_cache_bm->is_bits_set(b.blk_num(), b.blk_count()), "Expected bits to be set"); m_cache_bm->reset_bits(b.blk_num(), b.blk_count()); - portion.increase_available_blocks(b.blk_count()); } BLKALLOC_LOG(TRACE, "Freeing directly to portion={} blkid={} set_bits_count={}", blknum_to_portion_num(b.blk_num()), b.to_string(), get_alloced_blk_count()); @@ -704,8 +692,6 @@ blk_count_t VarsizeBlkAllocator::free_blks_direct(MultiBlkId const& bid) { } bool VarsizeBlkAllocator::is_blk_alloced(BlkId const& bid, bool use_lock) const { - if (!m_inited) { return true; } - auto check_bits_set = [this](BlkId const& b, bool use_lock) { if (use_lock) { BlkAllocPortion const& portion = blknum_to_portion_const(b.blk_num()); @@ -745,7 +731,7 @@ void VarsizeBlkAllocator::alloc_sanity_check(blk_count_t nblks, blk_alloc_hints BLKALLOC_REL_ASSERT(m_cache_bm->is_bits_set(b->blk_num(), b->blk_count()), "Expected blkid={} to be already set in cache bitmap", b->to_string()); - if (get_disk_bm_const()) { + if (is_persistent()) { BLKALLOC_REL_ASSERT(!is_blk_alloced_on_disk(*b), "Expected blkid={} to be already free in disk bitmap", b->to_string()); } diff --git a/src/lib/blkalloc/varsize_blk_allocator.h b/src/lib/blkalloc/varsize_blk_allocator.h index 7e23597fd..09f13240a 100644 --- a/src/lib/blkalloc/varsize_blk_allocator.h +++ b/src/lib/blkalloc/varsize_blk_allocator.h @@ -33,7 +33,7 @@ #include #include -#include "blk_allocator.h" +#include "bitmap_blk_allocator.h" #include "blk_cache.h" #include "common/homestore_assert.hpp" #include "common/homestore_config.hpp" @@ -51,12 +51,12 @@ struct VarsizeBlkAllocConfig : public BlkAllocConfig { const bool m_use_slabs{true}; // use sweeping thread pool with slabs in variable size block allocator public: - VarsizeBlkAllocConfig() : VarsizeBlkAllocConfig{0, 0, 0, 0, ""} {} - VarsizeBlkAllocConfig(std::string const& name) : VarsizeBlkAllocConfig{0, 0, 0, 0, name} {} + VarsizeBlkAllocConfig() : VarsizeBlkAllocConfig{0, 0, 0, 0, false, ""} {} + VarsizeBlkAllocConfig(std::string const& name) : VarsizeBlkAllocConfig{0, 0, 0, 0, false, name} {} - VarsizeBlkAllocConfig(uint32_t blk_size, uint32_t ppage_sz, uint32_t align_sz, uint64_t size, - std::string const& name, bool realtime_bm_on = true, bool use_slabs = true) : - 
BlkAllocConfig{blk_size, align_sz, size, name, realtime_bm_on}, + VarsizeBlkAllocConfig(uint32_t blk_size, uint32_t ppage_sz, uint32_t align_sz, uint64_t size, bool persistent, + std::string const& name, bool use_slabs = true) : + BlkAllocConfig{blk_size, align_sz, size, persistent, name}, m_phys_page_size{ppage_sz}, m_nsegments{HS_DYNAMIC_CONFIG(blkallocator.max_segments)}, m_blks_per_temp_group{m_capacity / HS_DYNAMIC_CONFIG(blkallocator.num_blk_temperatures)}, @@ -199,21 +199,22 @@ class BlkAllocMetrics : public sisl::MetricsGroup { * 3. Caching of available blocks instead of scanning during allocation. * */ -class VarsizeBlkAllocator : public BlkAllocator { +class VarsizeBlkAllocator : public BitmapBlkAllocator { public: VarsizeBlkAllocator(VarsizeBlkAllocConfig const& cfg, bool init, chunk_num_t chunk_id); VarsizeBlkAllocator(VarsizeBlkAllocator const&) = delete; VarsizeBlkAllocator(VarsizeBlkAllocator&&) noexcept = delete; VarsizeBlkAllocator& operator=(VarsizeBlkAllocator const&) = delete; VarsizeBlkAllocator& operator=(VarsizeBlkAllocator&&) noexcept = delete; - virtual ~VarsizeBlkAllocator() override; + virtual ~VarsizeBlkAllocator(); + + void load() override; BlkAllocStatus alloc_contiguous(BlkId& bid) override; BlkAllocStatus alloc_contiguous(blk_count_t nblks, blk_alloc_hints const& hints, BlkId& out_blkid); BlkAllocStatus alloc(blk_count_t nblks, blk_alloc_hints const& hints, BlkId& out_blkid) override; BlkAllocStatus alloc(blk_count_t nblks, blk_alloc_hints const& hints, std::vector< BlkId >& out_blkids); void free(BlkId const& blk_id) override; - void inited() override; blk_num_t available_blks() const override; blk_num_t get_used_blks() const override; @@ -261,6 +262,7 @@ class VarsizeBlkAllocator : public BlkAllocator { private: static void sweeper_thread(size_t thread_num); bool allocator_state_machine(); + void do_start(); blk_count_t alloc_blks_slab(blk_count_t nblks, blk_alloc_hints const& hints, MultiBlkId& out_blkid); blk_count_t alloc_blks_direct(blk_count_t nblks, blk_alloc_hints const& hints, MultiBlkId& out_blkids); diff --git a/src/lib/device/virtual_dev.cpp b/src/lib/device/virtual_dev.cpp index 906bba9f1..04a0d3930 100644 --- a/src/lib/device/virtual_dev.cpp +++ b/src/lib/device/virtual_dev.cpp @@ -44,6 +44,7 @@ #include "blkalloc/varsize_blk_allocator.h" #include "device/round_robin_chunk_selector.h" #include "blkalloc/append_blk_allocator.h" +#include "blkalloc/fixed_blk_allocator.h" SISL_LOGGING_DECL(device) @@ -54,8 +55,8 @@ static std::shared_ptr< BlkAllocator > create_blk_allocator(blk_allocator_type_t bool is_auto_recovery, uint32_t unique_id, bool is_init) { switch (btype) { case blk_allocator_type_t::fixed: { - BlkAllocConfig cfg{vblock_size, align_sz, size, std::string{"fixed_chunk_"} + std::to_string(unique_id)}; - cfg.set_auto_recovery(is_auto_recovery); + BlkAllocConfig cfg{vblock_size, align_sz, size, is_auto_recovery, + std::string{"fixed_chunk_"} + std::to_string(unique_id)}; return std::make_shared< FixedBlkAllocator >(cfg, is_init, unique_id); } case blk_allocator_type_t::varsize: { @@ -63,16 +64,15 @@ static std::shared_ptr< BlkAllocator > create_blk_allocator(blk_allocator_type_t ppage_sz, align_sz, size, + is_auto_recovery, std::string("varsize_chunk_") + std::to_string(unique_id), - true /* realtime_bitmap */, is_data_drive_hdd() ? 
false : true /* use_slabs */}; // HS_DBG_ASSERT_EQ((size % MIN_DATA_CHUNK_SIZE(ppage_sz)), 0); - cfg.set_auto_recovery(is_auto_recovery); return std::make_shared< VarsizeBlkAllocator >(cfg, is_init, unique_id); } case blk_allocator_type_t::append: { - BlkAllocConfig cfg{vblock_size, align_sz, size, std::string("append_chunk_") + std::to_string(unique_id)}; - cfg.set_auto_recovery(is_auto_recovery); + BlkAllocConfig cfg{vblock_size, align_sz, size, false, + std::string("append_chunk_") + std::to_string(unique_id)}; return std::make_shared< AppendBlkAllocator >(cfg, is_init, unique_id); } case blk_allocator_type_t::none: @@ -141,10 +141,6 @@ folly::Future< std::error_code > VirtualDev::async_format() { }); } -/*std::shared_ptr< blkalloc_cp > VirtualDev::attach_prepare_cp(const std::shared_ptr< blkalloc_cp >& cur_ba_cp) { - return (Chunk::attach_prepare_cp(cur_ba_cp)); -}*/ - bool VirtualDev::is_blk_alloced(BlkId const& blkid) const { return m_dmgr.get_chunk(blkid.chunk_num())->blk_allocator()->is_blk_alloced(blkid); } @@ -248,12 +244,6 @@ BlkAllocStatus VirtualDev::alloc_blks_from_chunk(blk_count_t nblks, blk_alloc_hi #endif auto status = chunk->blk_allocator_mutable()->alloc(nblks, hints, out_blkid); if ((status == BlkAllocStatus::PARTIAL) && (!hints.partial_alloc_ok)) { - // free partial result - auto it = out_blkid.iterate(); - while (auto const b = it.next()) { - auto const ret = chunk->blk_allocator_mutable()->free_on_realtime(*b); - HS_REL_ASSERT(ret, "failed to free on realtime"); - } chunk->blk_allocator_mutable()->free(out_blkid); out_blkid = MultiBlkId{}; status = BlkAllocStatus::FAILED; @@ -262,11 +252,6 @@ BlkAllocStatus VirtualDev::alloc_blks_from_chunk(blk_count_t nblks, blk_alloc_hi return status; } -/*bool VirtualDev::free_on_realtime(BlkId const& b) { - Chunk* chunk = m_dmgr.get_chunk_mutable(b.chunk_num()); - return chunk->blk_allocator_mutable()->free_on_realtime(b); -}*/ - void VirtualDev::free_blk(BlkId const& b) { if (b.is_multi()) { MultiBlkId const& mb = r_cast< MultiBlkId const& >(b); @@ -278,13 +263,6 @@ void VirtualDev::free_blk(BlkId const& b) { } } -void VirtualDev::recovery_done() { - DEBUG_ASSERT_EQ(m_auto_recovery, false, "recovery done (manual recovery completion) called on auto recovery vdev"); - for (auto& chunk : m_all_chunks) { - chunk->blk_allocator_mutable()->inited(); - } -} - uint64_t VirtualDev::get_len(const iovec* iov, int iovcnt) { uint64_t len{0}; for (int i{0}; i < iovcnt; ++i) { @@ -507,15 +485,6 @@ uint64_t VirtualDev::used_size() const { std::vector< shared< Chunk > > VirtualDev::get_chunks() const { return m_all_chunks; } -/*void VirtualDev::blkalloc_cp_start(const std::shared_ptr< blkalloc_cp >& ba_cp) { - for (size_t i{0}; i < m_primary_pdev_chunks_list.size(); ++i) { - for (size_t chunk_indx{0}; chunk_indx < m_primary_pdev_chunks_list[i].chunks_in_pdev.size(); ++chunk_indx) { - auto* chunk = m_primary_pdev_chunks_list[i].chunks_in_pdev[chunk_indx]; - chunk->cp_start(ba_cp); - } - } -}*/ - /* Get status for all chunks */ nlohmann::json VirtualDev::get_status(int log_level) const { nlohmann::json j; diff --git a/src/lib/device/virtual_dev.hpp b/src/lib/device/virtual_dev.hpp index 79cb95478..0fcb9f709 100644 --- a/src/lib/device/virtual_dev.hpp +++ b/src/lib/device/virtual_dev.hpp @@ -251,8 +251,6 @@ class VirtualDev { /// @brief Submit the batch of IOs previously queued as part of async read/write APIs. 
void submit_batch(); - virtual void recovery_done(); - ////////////////////// Checkpointing related methods /////////////////////////// /// @brief /// diff --git a/src/lib/homestore.cpp b/src/lib/homestore.cpp index e75bb2fbf..5c8444783 100644 --- a/src/lib/homestore.cpp +++ b/src/lib/homestore.cpp @@ -220,22 +220,20 @@ void HomeStore::do_start() { // to start log store if (has_log_service() && inp_params.auto_recovery) { m_log_service->start(is_first_time_boot() /* format */); } - init_done(); + m_init_done = true; } -void HomeStore::init_done() { m_init_done = true; } - void HomeStore::shutdown() { - if (!m_init_done) { + if (!m_init_done) { LOGWARN("Homestore shutdown is called before init is completed"); - return; + return; } - + LOGINFO("Homestore shutdown is started"); if (has_index_service()) { m_index_service->stop(); -// m_index_service.reset(); + // m_index_service.reset(); } if (has_log_service()) { diff --git a/src/lib/meta/meta_blk_service.cpp b/src/lib/meta/meta_blk_service.cpp index 510033dfa..02ba4b836 100644 --- a/src/lib/meta/meta_blk_service.cpp +++ b/src/lib/meta/meta_blk_service.cpp @@ -100,7 +100,6 @@ void MetaBlkService::start(bool need_format) { } else { load_ssb(); scan_meta_blks(); - m_sb_vdev->recovery_done(); } recover(); } diff --git a/src/tests/test_blkalloc.cpp b/src/tests/test_blkalloc.cpp index 2fdf2f90c..0d6204022 100644 --- a/src/tests/test_blkalloc.cpp +++ b/src/tests/test_blkalloc.cpp @@ -35,10 +35,10 @@ #include #include -#include "blkalloc/blk_allocator.h" #include "blkalloc/blk_cache.h" #include "common/homestore_assert.hpp" #include "common/homestore_config.hpp" +#include "blkalloc/fixed_blk_allocator.h" #include "blkalloc/varsize_blk_allocator.h" SISL_LOGGING_INIT(HOMESTORE_LOG_MODS) @@ -368,9 +368,8 @@ struct BlkAllocatorTest { struct FixedBlkAllocatorTest : public ::testing::Test, BlkAllocatorTest { std::unique_ptr< FixedBlkAllocator > m_allocator; FixedBlkAllocatorTest() : BlkAllocatorTest() { - BlkAllocConfig fixed_cfg{4096, 4096, static_cast< uint64_t >(m_total_count) * 4096, "", false}; + BlkAllocConfig fixed_cfg{4096, 4096, static_cast< uint64_t >(m_total_count) * 4096, false}; m_allocator = std::make_unique< FixedBlkAllocator >(fixed_cfg, true, 0); - HS_REL_ASSERT_EQ(m_allocator->realtime_bm_on(), false); } FixedBlkAllocatorTest(const FixedBlkAllocatorTest&) = delete; FixedBlkAllocatorTest(FixedBlkAllocatorTest&&) noexcept = delete; @@ -427,11 +426,9 @@ struct VarsizeBlkAllocatorTest : public ::testing::Test, BlkAllocatorTest { virtual void TearDown() override{}; void create_allocator(const bool use_slabs = true) { - VarsizeBlkAllocConfig cfg{4096, 4096, 4096u, static_cast< uint64_t >(m_total_count) * 4096, - "", false, use_slabs}; - cfg.set_auto_recovery(true); + VarsizeBlkAllocConfig cfg{4096, 4096, 4096u, static_cast< uint64_t >(m_total_count) * 4096, + false, "", use_slabs}; m_allocator = std::make_unique< VarsizeBlkAllocator >(cfg, true, 0); - HS_REL_ASSERT_EQ(m_allocator->realtime_bm_on(), false); } [[nodiscard]] bool alloc_rand_blk(const BlkAllocStatus exp_status, const bool is_contiguous,
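
Usage sketch (not part of the patch): the snippet below illustrates how the refactored constructors introduced above fit together, i.e. a BlkAllocConfig built with the new explicit `persistent` flag and handed to a FixedBlkAllocator. It is a minimal sketch assuming homestore's public types are available; the block size, capacity, chunk id, and allocator name are hypothetical example values.

    #include "blkalloc/fixed_blk_allocator.h"

    using namespace homestore;

    void example_fixed_alloc() {
        // New BlkAllocConfig signature: (blk_size, align_size, size, persistent, name).
        // The `persistent` flag replaces the old cfg.set_auto_recovery(...) /
        // realtime-bitmap knobs; persistent=false means no on-disk bitmap is kept.
        BlkAllocConfig cfg{4096 /* blk_size */, 4096 /* align_size */,
                           1000ull * 4096 /* size in bytes => 1000 blks */,
                           false /* persistent */, "example_fixed_chunk" /* hypothetical */};

        // is_fresh=true (or a non-persistent config) causes the constructor to call
        // load(), which seeds the free-block queue with every block in the chunk.
        FixedBlkAllocator allocator{cfg, true /* is_fresh */, 0 /* chunk_id, hypothetical */};

        BlkId bid;
        if (allocator.alloc_contiguous(bid) == BlkAllocStatus::SUCCESS) {
            // ... use the block, then return it to the free queue.
            allocator.free(bid);
        }
    }

Since this config is non-persistent, the alloc_on_disk()/free_on_disk() paths are never exercised; a bitmap-backed chunk would instead pass persistent=true and defer load() until its on-disk bitmap has been recovered.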