From 72d8c3ad2bae0da2bfd8fb133fa07734680a0234 Mon Sep 17 00:00:00 2001
From: Harihara Kadayam
Date: Fri, 10 May 2024 14:56:10 -0700
Subject: [PATCH] Addressed review comments and combined free_on_disk() into free()
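free() now subsumes free_on_disk(): FixedBlkAllocator and
VarsizeBlkAllocator call the (now protected, non-virtual)
BitmapBlkAllocator::free_on_disk() themselves when is_persistent() is
true, AppendBlkAllocator folds the on-disk bookkeeping into its own
free(), and the virtual is dropped from the BlkAllocator interface.
As a result, VirtualDev callers shrink from

    allocator->free(b);
    if (m_auto_recovery) { allocator->free_on_disk(b); }

to just

    allocator->free(b);

with the on-disk free persisted on the next cp flush.
AppendBlkAllocator::reserve_on_disk() also now asserts that the blkid
is allocated instead of taking a cp guard.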
---
 src/lib/blkalloc/append_blk_allocator.cpp  | 9 +++++----
 src/lib/blkalloc/append_blk_allocator.h    | 7 +++----
 src/lib/blkalloc/bitmap_blk_allocator.h    | 4 +++-
 src/lib/blkalloc/blk_allocator.h           | 6 ++----
 src/lib/blkalloc/fixed_blk_allocator.cpp   | 2 ++
 src/lib/blkalloc/varsize_blk_allocator.cpp | 2 ++
 src/lib/device/virtual_dev.cpp             | 2 --
 7 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/src/lib/blkalloc/append_blk_allocator.cpp b/src/lib/blkalloc/append_blk_allocator.cpp
index 43255f3a4..b000f3ec5 100644
--- a/src/lib/blkalloc/append_blk_allocator.cpp
+++ b/src/lib/blkalloc/append_blk_allocator.cpp
@@ -88,7 +88,8 @@ BlkAllocStatus AppendBlkAllocator::alloc(blk_count_t nblks, const blk_alloc_hint
 
 // Reserve on disk will update the commit_offset with the new_offset, if its above the current commit_offset.
 BlkAllocStatus AppendBlkAllocator::reserve_on_disk(BlkId const& blkid) {
-    auto cpg = hs()->cp_mgr().cp_guard();
+    HS_DBG_ASSERT(is_blk_alloced(blkid), "Trying to reserve on disk for unallocated blkid={}", blkid);
+
     auto new_offset = blkid.blk_num() + blkid.blk_count();
     auto cur_offset = m_commit_offset.load();
     bool modified{true};
@@ -132,7 +133,6 @@ void AppendBlkAllocator::cp_flush(CP* cp) {
 // 2. if the blk being freed happens to be last block, move last_append_offset backwards accordingly;
 //
 void AppendBlkAllocator::free(const BlkId& bid) {
-    // If we are freeing the last block, just move the offset back
     // If we are freeing the last block, just move the offset back
     blk_num_t cur_last_offset = m_last_append_offset.load();
     auto const input_last_offset = bid.blk_num() + bid.blk_count();
@@ -151,11 +151,12 @@ void AppendBlkAllocator::free(const BlkId& bid) {
     if (freeing_in_middle) {
         // Freeing something in the middle, increment the count
         m_freeable_nblks.fetch_add(bid.blk_count());
+    } else {
+        m_commit_offset.store(m_last_append_offset.load());
     }
+    m_is_dirty.store(true);
 }
 
-void AppendBlkAllocator::free_on_disk(BlkId const&) { m_is_dirty.store(true); }
-
 bool AppendBlkAllocator::is_blk_alloced(const BlkId& in_bid, bool) const {
     // blk_num starts from 0;
     return in_bid.blk_num() < get_used_blks();
diff --git a/src/lib/blkalloc/append_blk_allocator.h b/src/lib/blkalloc/append_blk_allocator.h
index e9b5a8e91..06713e4b5 100644
--- a/src/lib/blkalloc/append_blk_allocator.h
+++ b/src/lib/blkalloc/append_blk_allocator.h
@@ -80,7 +80,6 @@ class AppendBlkAllocator : public BlkAllocator {
 
     BlkAllocStatus reserve_on_disk(BlkId const& in_bid) override;
     BlkAllocStatus reserve_on_cache(BlkId const& b) override;
-    void free_on_disk(BlkId const& b) override;
     bool is_blk_alloced_on_disk(BlkId const& b, bool use_lock = false) const override;
 
     /**
@@ -118,9 +117,9 @@ class AppendBlkAllocator : public BlkAllocator {
     void on_meta_blk_found(const sisl::byte_view& buf, void* meta_cookie);
 
 private:
-    std::atomic< blk_num_t > m_last_append_offset{0}; // last appended offset in blocks;
-    std::atomic< blk_num_t > m_freeable_nblks{0};
-    std::atomic< blk_num_t > m_commit_offset{0}; // commit offset in on-disk version
+    std::atomic< blk_num_t > m_last_append_offset{0}; // last appended offset in blks, in memory
+    std::atomic< blk_num_t > m_freeable_nblks{0};     // count of blks freed in the middle (both on-disk and in-memory)
+    std::atomic< blk_num_t > m_commit_offset{0};      // offset in on-disk version
     std::atomic< bool > m_is_dirty{false};
     AppendBlkAllocMetrics m_metrics;
     superblk< append_blk_sb_t > m_sb; // only cp will be writing to this disk
diff --git a/src/lib/blkalloc/bitmap_blk_allocator.h b/src/lib/blkalloc/bitmap_blk_allocator.h
index af5d65974..03ba713d8 100644
--- a/src/lib/blkalloc/bitmap_blk_allocator.h
+++ b/src/lib/blkalloc/bitmap_blk_allocator.h
@@ -73,7 +73,6 @@ class BitmapBlkAllocator : public BlkAllocator {
     virtual void load() = 0;
 
     BlkAllocStatus reserve_on_disk(BlkId const& in_bid) override;
-    void free_on_disk(BlkId const& b) override;
     bool is_blk_alloced_on_disk(BlkId const& b, bool use_lock = false) const override;
 
     void cp_flush(CP* cp) override;
@@ -100,6 +99,9 @@ class BitmapBlkAllocator : public BlkAllocator {
     void decr_alloced_blk_count(blk_count_t nblks) { m_alloced_blk_count.fetch_sub(nblks, std::memory_order_relaxed); }
     int64_t get_alloced_blk_count() const { return m_alloced_blk_count.load(std::memory_order_acquire); }
 
+protected:
+    void free_on_disk(BlkId const& b);
+
 private:
     void do_init();
     sisl::ThreadVector< MultiBlkId >* get_alloc_blk_list();
diff --git a/src/lib/blkalloc/blk_allocator.h b/src/lib/blkalloc/blk_allocator.h
index 8c36e6910..c95935c77 100644
--- a/src/lib/blkalloc/blk_allocator.h
+++ b/src/lib/blkalloc/blk_allocator.h
@@ -129,9 +129,8 @@ ENUM(BlkAllocatorState, uint8_t, INIT, WAITING, SWEEP_SCHEDULED, SWEEPING, EXITI
 //
 // Free:
 // Freeing the blocks is done in a slightly different way, where blkallocator free will free the blk from cache version.
-// Then the virtual dev will call free_on_disk() which will free from the on-disk version and expected to persist them.
-// Since there is no additional phase of uncommitted free_blk. Free blks are always committed entries. Blkallocator free
-// is idempotent.
+// The same call also frees the on-disk version, which is persisted on the next cp. In other words, free blks are
+// always committed entries. Blkallocator free is idempotent.
 //
 class CP;
 class BlkAllocator {
@@ -155,7 +154,6 @@ class BlkAllocator {
     virtual BlkAllocStatus reserve_on_cache(BlkId const& bid) = 0;
 
     virtual void free(BlkId const& id) = 0;
-    virtual void free_on_disk(BlkId const& bid) = 0;
     virtual blk_num_t available_blks() const = 0;
     virtual blk_num_t get_defrag_nblks() const = 0;
 
diff --git a/src/lib/blkalloc/fixed_blk_allocator.cpp b/src/lib/blkalloc/fixed_blk_allocator.cpp
index 5315d61b1..1e2af3257 100644
--- a/src/lib/blkalloc/fixed_blk_allocator.cpp
+++ b/src/lib/blkalloc/fixed_blk_allocator.cpp
@@ -99,6 +99,8 @@ void FixedBlkAllocator::free(BlkId const& b) {
     const auto pushed = m_free_blk_q.write(b.blk_num());
     HS_DBG_ASSERT_EQ(pushed, true, "Expected to be able to push the blk on fixed capacity Q");
+
+    if (is_persistent()) { free_on_disk(b); }
 }
 
 blk_num_t FixedBlkAllocator::available_blks() const { return m_free_blk_q.sizeGuess(); }
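The pattern applied above to FixedBlkAllocator (and below to VarsizeBlkAllocator), in sketch form. ToyBitmapAllocator and its helper bodies are made up for illustration; only the is_persistent()/free_on_disk() pairing comes from this patch:

    #include <cstdint>

    using blk_num_t = uint32_t;
    struct BlkId { blk_num_t blk_num; blk_num_t blk_count; }; // stand-in for homestore's BlkId

    class ToyBitmapAllocator {
    public:
        void free(BlkId const& b) {
            clear_cache_bits(b);                      // free the in-memory (cache) version
            if (is_persistent()) { free_on_disk(b); } // fold in the on-disk free; flushed on next cp
        }
    protected:
        // Now a protected helper rather than a public virtual on BlkAllocator,
        // so only the allocator's own free() path can reach it.
        void free_on_disk(BlkId const& b) { clear_disk_bits(b); }
    private:
        bool is_persistent() const { return true; } // assumed accessor
        void clear_cache_bits(BlkId const&) {}      // placeholder for cache-bitmap clear
        void clear_disk_bits(BlkId const&) {}       // placeholder for disk-bitmap clear
    };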
diff --git a/src/lib/blkalloc/varsize_blk_allocator.cpp b/src/lib/blkalloc/varsize_blk_allocator.cpp
index b14007b7a..f62917b8c 100644
--- a/src/lib/blkalloc/varsize_blk_allocator.cpp
+++ b/src/lib/blkalloc/varsize_blk_allocator.cpp
@@ -651,6 +651,8 @@ void VarsizeBlkAllocator::free(BlkId const& bid) {
     blk_count_t n_freed = (m_cfg.m_use_slabs && (bid.blk_count() <= m_cfg.highest_slab_blks_count()))
                               ? free_blks_slab(r_cast< MultiBlkId const& >(bid))
                               : free_blks_direct(r_cast< MultiBlkId const& >(bid));
+
+    if (is_persistent()) { free_on_disk(bid); }
     decr_alloced_blk_count(n_freed);
     BLKALLOC_LOG(TRACE, "Freed blk_num={}", bid.to_string());
 }
diff --git a/src/lib/device/virtual_dev.cpp b/src/lib/device/virtual_dev.cpp
index 116c49f6c..c653d470a 100644
--- a/src/lib/device/virtual_dev.cpp
+++ b/src/lib/device/virtual_dev.cpp
@@ -287,7 +287,6 @@ void VirtualDev::free_blk(BlkId const& bid, VDevCPContext* vctx) {
     } else {
         BlkAllocator* allocator = m_dmgr.get_chunk_mutable(b.chunk_num())->blk_allocator_mutable();
         allocator->free(b);
-        if (m_auto_recovery) { allocator->free_on_disk(b); }
     }
 };
 
@@ -637,7 +636,6 @@ void VirtualDev::cp_flush(VDevCPContext* v_cp_ctx) {
     // allocation on the new CP dirty collection session which is ongoing
     for (auto const& b : v_cp_ctx->m_free_blkid_list) {
         BlkAllocator* allocator = m_dmgr.get_chunk_mutable(b.chunk_num())->blk_allocator_mutable();
-        if (m_auto_recovery) { allocator->free_on_disk(b); }
         allocator->free(b);
     }
 }
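For readers following the AppendBlkAllocator changes above, a toy single-threaded model of the new free() bookkeeping. The types and the non-atomic structure are simplified stand-ins, not homestore code; only the tail-vs-middle split and the fields mirrored here come from the patch:

    #include <cstdint>

    using blk_num_t = uint32_t;
    struct BlkId { blk_num_t blk_num; blk_num_t blk_count; }; // simplified stand-in

    struct AppendFreeModel {
        blk_num_t last_append_offset{100}; // in-memory append tail (in blks)
        blk_num_t commit_offset{100};      // on-disk version of the tail
        blk_num_t freeable_nblks{0};       // blks freed in the middle
        bool is_dirty{false};

        void free(BlkId const& bid) {
            if (bid.blk_num + bid.blk_count == last_append_offset) {
                // Freeing the last blks: roll the tail back and publish it,
                // so the on-disk offset shrinks on the next cp flush.
                last_append_offset = bid.blk_num;
                commit_offset = last_append_offset;
            } else {
                // Freeing in the middle: only account for the hole.
                freeable_nblks += bid.blk_count;
            }
            is_dirty = true; // cp_flush() persists the superblock when dirty
        }
    };

    // Usage: free({90, 10}) rolls the tail back to 90; free({10, 5}) just
    // bumps freeable_nblks to 5. Both mark the allocator dirty.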