Skip to content

Commit

Permalink
Persist the bitmap of allocated blocks during CP, and move to the C++20 standard.
Browse files Browse the repository at this point in the history
Without this commit, homestore 4.x doesn't persist the bitmap of allocated blks during CP.
This commit handles that gap. On top of that, the free blk id collection is moved to the
VirtualDev layer, so that both the index and data services share a common blk id collection.
This blk id collection is essential so that all free blk ids are moved to the next CP after
the current CP is flushed.

Changed the build to the C++20 standard, which surfaced a few compilation errors; those are fixed as well.
  • Loading branch information
hkadayam committed Oct 31, 2023
1 parent b07ac80 commit 3b545df
Show file tree
Hide file tree
Showing 24 changed files with 148 additions and 172 deletions.
3 changes: 2 additions & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ set_property(GLOBAL PROPERTY USE_FOLDERS ON) # turn on folder hierarchies

include (cmake/Flags.cmake)

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD 20)
enable_testing()

if(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/conanbuildinfo.cmake)
Expand Down Expand Up @@ -107,6 +107,7 @@ else()
endif()
endif()

add_flags("-g")
add_subdirectory(src)

# build info
Expand Down
2 changes: 1 addition & 1 deletion conanfile.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

class HomestoreConan(ConanFile):
name = "homestore"
version = "4.5.9"
version = "4.5.10"

homepage = "https://github.com/eBay/Homestore"
description = "HomeStore Storage Engine"
Expand Down
7 changes: 3 additions & 4 deletions src/include/homestore/checkpoint/cp_mgr.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -56,15 +56,14 @@ VENUM(cp_consumer_t, uint8_t,

struct CP;
class CPContext {
private:
cp_id_t m_cp_id;
protected:
CP* m_cp;
folly::Promise< bool > m_flush_comp;

public:
CPContext(cp_id_t id) : m_cp_id{id} {}
cp_id_t id() const { return m_cp_id; }
CPContext(CP* cp) : m_cp{cp} {}
CP* cp() { return m_cp; }
cp_id_t id() const;
void complete(bool status) { m_flush_comp.setValue(status); }
folly::Future< bool > get_future() { return m_flush_comp.getFuture(); }

Expand Down
14 changes: 3 additions & 11 deletions src/lib/blkalloc/append_blk_allocator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -119,19 +119,11 @@ BlkAllocStatus AppendBlkAllocator::alloc(blk_count_t nblks, const blk_alloc_hint
return BlkAllocStatus::SUCCESS;
}

BlkAllocStatus AppendBlkAllocator::alloc_on_disk(BlkId const&) {
DEBUG_ASSERT(false, "alloc_on_disk called on non-persisted allocator");
return BlkAllocStatus::SUCCESS;
}
BlkAllocStatus AppendBlkAllocator::alloc_on_disk(BlkId const&) { return BlkAllocStatus::SUCCESS; }

void AppendBlkAllocator::free_on_disk(BlkId const&) {
DEBUG_ASSERT(false, "free_on_disk called on non-persisted allocator");
}
void AppendBlkAllocator::free_on_disk(BlkId const&) {}

bool AppendBlkAllocator::is_blk_alloced_on_disk(BlkId const&, bool) const {
DEBUG_ASSERT(false, "is_blk_alloced_on_disk called on non-persisted allocator");
return false;
}
bool AppendBlkAllocator::is_blk_alloced_on_disk(BlkId const&, bool) const { return false; }

//
// cp_flush doesn't need CPGuard as it is triggered by CPMgr which already handles the reference check;
Expand Down
64 changes: 35 additions & 29 deletions src/lib/blkalloc/blk_cache.h
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,24 @@ struct blk_cache_refill_status {

void mark_refill_done() { slab_refilled_count = slab_required_count; }
};
} // namespace homestore

namespace fmt {
template <>
struct formatter< homestore::blk_cache_refill_status > {
template < typename ParseContext >
constexpr auto parse(ParseContext& ctx) {
return ctx.begin();
}

template < typename FormatContext >
auto format(const homestore::blk_cache_refill_status& s, FormatContext& ctx) {
return format_to(ctx.out(), "{}/{}", s.slab_refilled_count, s.slab_required_count);
}
};
} // namespace fmt

namespace homestore {
struct blk_cache_fill_session {
uint64_t session_id;
std::vector< blk_cache_refill_status > slab_requirements; // A slot for each slab about count of required/refilled
Expand Down Expand Up @@ -231,7 +248,24 @@ struct SlabCacheConfig {
}
std::string get_name() const { return m_name; }
};
} // namespace homestore

namespace fmt {
template <>
struct formatter< homestore::SlabCacheConfig > {
template < typename ParseContext >
constexpr auto parse(ParseContext& ctx) {
return ctx.begin();
}

template < typename FormatContext >
auto format(const homestore::SlabCacheConfig& s, FormatContext& ctx) {
return format_to(ctx.out(), "{}", s.to_string());
}
};
} // namespace fmt

namespace homestore {
class FreeBlkCache {
public:
FreeBlkCache() = default;
Expand Down Expand Up @@ -281,32 +315,4 @@ class FreeBlkCache {
return nblks_to_round_down_slab_tbl[nblks];
}
};
} // namespace homestore

namespace fmt {
template <>
struct formatter< homestore::blk_cache_refill_status > {
template < typename ParseContext >
constexpr auto parse(ParseContext& ctx) {
return ctx.begin();
}

template < typename FormatContext >
auto format(const homestore::blk_cache_refill_status& s, FormatContext& ctx) {
return format_to(ctx.out(), "{}/{}", s.slab_refilled_count, s.slab_required_count);
}
};

template <>
struct formatter< homestore::SlabCacheConfig > {
template < typename ParseContext >
constexpr auto parse(ParseContext& ctx) {
return ctx.begin();
}

template < typename FormatContext >
auto format(const homestore::SlabCacheConfig& s, FormatContext& ctx) {
return format_to(ctx.out(), "{}", s.to_string());
}
};
} // namespace fmt
} // namespace homestore
5 changes: 4 additions & 1 deletion src/lib/blkdata_svc/blkdata_service.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,10 @@ folly::Future< std::error_code > BlkDataService::async_free_blk(MultiBlkId const
auto f = promise.getFuture();

m_blk_read_tracker->wait_on(bids, [this, bids, p = std::move(promise)]() mutable {
m_vdev->free_blk(bids);
{
auto cpg = hs()->cp_mgr().cp_guard();
m_vdev->free_blk(bids, s_cast< VDevCPContext* >(cpg.context(cp_consumer_t::BLK_DATA_SVC)));
}
p.setValue(std::error_code{});
});
return f;
Expand Down
6 changes: 3 additions & 3 deletions src/lib/blkdata_svc/data_svc_cp.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,21 +22,21 @@ namespace homestore {
DataSvcCPCallbacks::DataSvcCPCallbacks(shared< VirtualDev > vdev) : m_vdev{vdev} {}

std::unique_ptr< CPContext > DataSvcCPCallbacks::on_switchover_cp(CP* cur_cp, CP* new_cp) {
return m_vdev->create_cp_context(new_cp->id());
return m_vdev->create_cp_context(new_cp);
}

folly::Future< bool > DataSvcCPCallbacks::cp_flush(CP* cp) {
// Pick a CP Manager blocking IO fiber to execute the cp flush of vdev
// iomanager.run_on_forget(hs()->cp_mgr().pick_blocking_io_fiber(), [this, cp]() {
auto cp_ctx = s_cast< VDevCPContext* >(cp->context(cp_consumer_t::BLK_DATA_SVC));
m_vdev->cp_flush(cp); // this is a blocking io call
m_vdev->cp_flush(cp_ctx); // this is a blocking io call
cp_ctx->complete(true);
//});

return folly::makeFuture< bool >(true);
}

void DataSvcCPCallbacks::cp_cleanup(CP* cp) { m_vdev->cp_cleanup(cp); }
void DataSvcCPCallbacks::cp_cleanup(CP* cp) {}

int DataSvcCPCallbacks::cp_progress_percent() { return m_vdev->cp_progress_percent(); }

Expand Down
2 changes: 2 additions & 0 deletions src/lib/checkpoint/cp_mgr.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -405,4 +405,6 @@ void CPWatchdog::cp_watchdog_timer() {
}
}

cp_id_t CPContext::id() const { return m_cp->id(); }

} // namespace homestore
2 changes: 0 additions & 2 deletions src/lib/device/chunk.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,6 @@ namespace homestore {
Chunk::Chunk(PhysicalDev* pdev, const chunk_info& cinfo, uint32_t chunk_slot) :
m_chunk_info{cinfo}, m_pdev{pdev}, m_chunk_slot{chunk_slot}, m_stream_id{pdev->chunk_to_stream_id(cinfo)} {}

void Chunk::cp_flush(CP* cp) { blk_allocator_mutable()->cp_flush(cp); }

std::string Chunk::to_string() const {
return fmt::format("chunk_id={}, vdev_id={}, start_offset={}, size={}, end_of_chunk={}, slot_num_in_pdev={} "
"pdev_ordinal={} vdev_ordinal={} stream_id={}",
Expand Down
2 changes: 0 additions & 2 deletions src/lib/device/chunk.h
Original file line number Diff line number Diff line change
Expand Up @@ -38,8 +38,6 @@ class Chunk {
Chunk& operator=(Chunk&&) noexcept = delete;
virtual ~Chunk() = default;

void cp_flush(CP* cp);

/////////////// Pointer Getters ////////////////////
const PhysicalDev* physical_dev() const { return m_pdev; }
PhysicalDev* physical_dev_mutable() { return m_pdev; };
Expand Down
1 change: 0 additions & 1 deletion src/lib/device/device_manager.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,6 @@ DeviceManager::DeviceManager(const std::vector< dev_info >& devs, vdev_create_cb
if (is_hdd(dev_info.dev_name)) {
HomeStoreStaticConfig::instance().hdd_drive_present = true;
found_hdd_dev = true;
LOGINFO("HDD device found: {}");
break;
}
}
Expand Down
53 changes: 34 additions & 19 deletions src/lib/device/virtual_dev.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -200,7 +200,7 @@ BlkAllocStatus VirtualDev::alloc_blks(blk_count_t nblks, blk_alloc_hints const&
}

if ((status != BlkAllocStatus::SUCCESS) && !((status == BlkAllocStatus::PARTIAL) && hints.partial_alloc_ok)) {
LOGERROR("nblks={} failed to alloc after trying to alloc on every chunks {} and devices {}.", nblks);
LOGERROR("nblks={} failed to alloc after trying to alloc on every chunks and devices", nblks);
COUNTER_INCREMENT(m_metrics, vdev_num_alloc_failure, 1);
}

Expand Down Expand Up @@ -252,14 +252,25 @@ BlkAllocStatus VirtualDev::alloc_blks_from_chunk(blk_count_t nblks, blk_alloc_hi
return status;
}

void VirtualDev::free_blk(BlkId const& b) {
if (b.is_multi()) {
MultiBlkId const& mb = r_cast< MultiBlkId const& >(b);
Chunk* chunk = m_dmgr.get_chunk_mutable(mb.chunk_num());
chunk->blk_allocator_mutable()->free(mb);
void VirtualDev::free_blk(BlkId const& bid, VDevCPContext* vctx) {
auto do_free_action = [this](auto const& b, VDevCPContext* vctx) {
if (vctx) {
vctx->m_free_blkid_list.push_back(b);
} else {
BlkAllocator* allocator = m_dmgr.get_chunk_mutable(b.chunk_num())->blk_allocator_mutable();
if (m_auto_recovery) { allocator->free_on_disk(b); }
allocator->free(b);
}
};

if (bid.is_multi()) {
MultiBlkId const& mbid = r_cast< MultiBlkId const& >(bid);
auto it = mbid.iterate();
while (auto const b = it.next()) {
do_free_action(*b, vctx);
}
} else {
Chunk* chunk = m_dmgr.get_chunk_mutable(b.chunk_num());
chunk->blk_allocator_mutable()->free(b);
do_free_action(bid, vctx);
}
}

Expand Down Expand Up @@ -541,25 +552,29 @@ void VirtualDev::update_vdev_private(const sisl::blob& private_data) {
}

///////////////////////// VirtualDev Checkpoint methods /////////////////////////////
VDevCPContext::VDevCPContext(CP* cp) : CPContext(cp) {}

VDevCPContext::VDevCPContext(cp_id_t cp_id) : CPContext(cp_id) {}
std::unique_ptr< CPContext > VirtualDev::create_cp_context(CP* cp) { return std::make_unique< VDevCPContext >(cp); }

std::unique_ptr< CPContext > VirtualDev::create_cp_context(cp_id_t cp_id) {
return std::make_unique< VDevCPContext >(cp_id);
}
void VirtualDev::cp_flush(VDevCPContext* v_cp_ctx) {
CP* cp = v_cp_ctx->cp();

void VirtualDev::cp_flush(CP* cp) {
// pass down cp so that underlying componnents can get their customized CP context if needed;
m_chunk_selector->foreach_chunks([this, cp](cshared< Chunk >& chunk) { chunk->cp_flush(cp); });
// pass down cp so that underlying components can get their customized CP context if needed;
m_chunk_selector->foreach_chunks(
[this, cp](cshared< Chunk >& chunk) { chunk->blk_allocator_mutable()->cp_flush(cp); });

// All of the blkids which were captured in the current vdev cp context will now be freed and hence available for
// allocation on the new CP dirty collection session which is ongoing
for (auto const& b : v_cp_ctx->m_free_blkid_list) {
BlkAllocator* allocator = m_dmgr.get_chunk_mutable(b.chunk_num())->blk_allocator_mutable();
if (m_auto_recovery) { allocator->free_on_disk(b); }
allocator->free(b);
}
}

// sync-ops during cp_flush, so return 100;
int VirtualDev::cp_progress_percent() { return 100; }

void VirtualDev::cp_cleanup(CP*) {
// no-op;
}

///////////////////////// VirtualDev Private Methods /////////////////////////////
uint64_t VirtualDev::to_dev_offset(BlkId const& b, Chunk** chunk) const {
*chunk = m_dmgr.get_chunk_mutable(b.chunk_num());
Expand Down
15 changes: 9 additions & 6 deletions src/lib/device/virtual_dev.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
#include <sisl/utility/obj_life_counter.hpp>
#include <sisl/utility/atomic_counter.hpp>
#include <sisl/utility/enum.hpp>
#include <sisl/fds/concurrent_insert_vector.hpp>

#include <homestore/checkpoint/cp_mgr.hpp>
#include <homestore/homestore_decl.hpp>
Expand Down Expand Up @@ -79,6 +80,7 @@ struct blkalloc_cp;
class VirtualDev;
ENUM(vdev_event_t, uint8_t, SIZE_THRESHOLD_REACHED, VDEV_ERRORED_OUT);
using vdev_event_cb_t = std::function< void(VirtualDev&, vdev_event_t, const std::string&) >;
class VDevCPContext;

class VirtualDev {
protected:
Expand Down Expand Up @@ -150,7 +152,7 @@ class VirtualDev {
/// @return Allocation Status
virtual BlkAllocStatus commit_blk(BlkId const& blkid);

virtual void free_blk(BlkId const& b);
virtual void free_blk(BlkId const& b, VDevCPContext* vctx = nullptr);

/////////////////////// Write API related methods /////////////////////////////
/// @brief Asynchornously write the buffer to the device on a given blkid
Expand Down Expand Up @@ -255,14 +257,12 @@ class VirtualDev {
/// @brief
///
/// @param cp
void cp_flush(CP* cp);

void cp_cleanup(CP* cp);
void cp_flush(VDevCPContext* v_cp_ctx);

/// @brief : percentage CP has been progressed, this api is normally used for cp watchdog;
int cp_progress_percent();

std::unique_ptr< CPContext > create_cp_context(cp_id_t cp_id);
std::unique_ptr< CPContext > create_cp_context(CP* cp);

////////////////////////// Standard Getters ///////////////////////////////
virtual uint64_t available_blks() const;
Expand Down Expand Up @@ -295,7 +295,10 @@ class VirtualDev {
// place holder for future needs in which components underlying virtualdev needs cp flush context;
class VDevCPContext : public CPContext {
public:
VDevCPContext(cp_id_t cp_id);
sisl::ConcurrentInsertVector< BlkId > m_free_blkid_list;

public:
VDevCPContext(CP* cp);
virtual ~VDevCPContext() = default;
};

Expand Down
2 changes: 1 addition & 1 deletion src/lib/index/index_cp.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ namespace homestore {
IndexCPCallbacks::IndexCPCallbacks(IndexWBCache* wb_cache) : m_wb_cache{wb_cache} {}

std::unique_ptr< CPContext > IndexCPCallbacks::on_switchover_cp(CP* cur_cp, CP* new_cp) {
return m_wb_cache->create_cp_context(new_cp->id());
return std::make_unique< IndexCPContext >(new_cp);
}

folly::Future< bool > IndexCPCallbacks::cp_flush(CP* cp) {
Expand Down
Loading

0 comments on commit 3b545df

Please sign in to comment.