From 391168223297f672a03fd95e1ddef810535b005a Mon Sep 17 00:00:00 2001 From: Jie Yao Date: Tue, 7 Nov 2023 10:54:52 +0800 Subject: [PATCH] recover metablk in subtype order (#200) Signed-off-by: Jie Yao --- src/include/homestore/meta_service.hpp | 10 +- src/lib/blkdata_svc/blk_read_tracker.hpp | 2 +- src/lib/common/homestore_utils.cpp | 36 +++++ src/lib/common/homestore_utils.hpp | 8 ++ src/lib/homestore.cpp | 8 +- src/lib/meta/meta_blk_service.cpp | 165 ++++++++++++--------- src/lib/meta/meta_sb.hpp | 1 + src/tests/test_meta_blk_mgr.cpp | 176 ++++++++++++++++++++++- 8 files changed, 324 insertions(+), 82 deletions(-) diff --git a/src/include/homestore/meta_service.hpp b/src/include/homestore/meta_service.hpp index cb2c65e25..1cbcf1a3e 100644 --- a/src/include/homestore/meta_service.hpp +++ b/src/include/homestore/meta_service.hpp @@ -18,11 +18,13 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include @@ -46,10 +48,12 @@ struct vdev_info; // new blk found subsystem callback typedef std::function< void(meta_blk* mblk, sisl::byte_view buf, size_t size) > meta_blk_found_cb_t; typedef std::string meta_sub_type; +typedef std::vector< meta_sub_type > meta_subtype_vec_t; typedef std::function< void(bool success) > meta_blk_recover_comp_cb_t; // recover complete subsystem callbacks; typedef std::map< uint64_t, meta_blk* > meta_blk_map_t; // blkid to meta_blk map; typedef std::map< uint64_t, meta_blk_ovf_hdr* > ovf_hdr_map_t; // ovf_blkid to ovf_blk_hdr map; typedef std::map< meta_sub_type, MetaSubRegInfo > client_info_map_t; // client information map; +typedef std::unordered_map< meta_sub_type, std::vector< meta_sub_type > > subtype_graph_t; class MetablkMetrics : public sisl::MetricsGroupWrapper { public: @@ -87,6 +91,7 @@ class MetaBlkService { MetablkMetrics m_metrics; bool m_inited{false}; std::unique_ptr< meta_vdev_context > m_meta_vdev_context; + subtype_graph_t m_dep_topo_graph; public: MetaBlkService(const char* name = "MetaBlkStore"); @@ -121,7 +126,7 @@ class MetaBlkService { * @param cb : subsystem cb */ void register_handler(meta_sub_type type, const meta_blk_found_cb_t& cb, const meta_blk_recover_comp_cb_t& comp_cb, - bool do_crc = true); + bool do_crc = true, std::optional< meta_subtype_vec_t > deps = std::nullopt); /** * @brief @@ -357,6 +362,9 @@ class MetaBlkService { bool scan_and_load_meta_blks(meta_blk_map_t& meta_blks, ovf_hdr_map_t& ovf_blk_hdrs, BlkId* last_mblk_id, client_info_map_t& sub_info); + void recover_meta_block(meta_blk* meta_block); + void recover_meta_sub_type(bool do_comp_cb, const meta_sub_type&); + public: bool verify_metablk_store(); diff --git a/src/lib/blkdata_svc/blk_read_tracker.hpp b/src/lib/blkdata_svc/blk_read_tracker.hpp index ec62d77c4..771cb4f2d 100644 --- a/src/lib/blkdata_svc/blk_read_tracker.hpp +++ b/src/lib/blkdata_svc/blk_read_tracker.hpp @@ -143,7 +143,7 @@ class BlkReadTracker { /** * @brief : decrease the reference count of the BlkId by 1 in this read tracker. * If the ref count drops to zero, it means no read is pending on this blkid and if there is a waiter on this blkid, - * callback should be trigggered and all entries associated with this blkid (there could be more than one + * callback should be triggered and all entries associated with this blkid (there could be more than one * sub_ranges) should be removed. 
      *
      * @param blkid : blkid that is being dereferneced;
diff --git a/src/lib/common/homestore_utils.cpp b/src/lib/common/homestore_utils.cpp
index 43deb808b..34c814fb9 100644
--- a/src/lib/common/homestore_utils.cpp
+++ b/src/lib/common/homestore_utils.cpp
@@ -74,5 +74,41 @@ sisl::byte_array hs_utils::extract_byte_array(const sisl::byte_view& b, const bo
     return (is_aligned_needed) ? b.extract(alignment) : b.extract(0);
 };
 
+bool hs_utils::topological_sort(std::unordered_map< std::string, std::vector< std::string > >& DAG,
+                                std::vector< std::string >& ordered_entries) {
+    std::unordered_map< std::string, int > in_degree;
+    std::queue< std::string > q;
+
+    // Calculate in-degree of each vertex
+    for (const auto& [vertex, edges] : DAG) {
+        // make sure every vertex is present in the in_degree map;
+        // if the vertex is not there yet, operator[] inserts it with in-degree 0.
+        in_degree[vertex];
+        for (const auto& edge : edges) {
+            in_degree[edge]++;
+        }
+    }
+
+    // Add vertices with in-degree 0 to the queue
+    for (const auto& [vertex, degree] : in_degree) {
+        if (degree == 0) q.push(vertex);
+    }
+
+    // Process vertices in the queue
+    while (!q.empty()) {
+        const auto vertex = q.front();
+        q.pop();
+        ordered_entries.push_back(vertex);
+
+        for (const auto& edge : DAG[vertex]) {
+            in_degree[edge]--;
+            if (in_degree[edge] == 0) { q.push(edge); }
+        }
+    }
+
+    // Check for cycle
+    return ordered_entries.size() != DAG.size();
+}
+
 size_t hs_utils::m_btree_mempool_size;
 } // namespace homestore
diff --git a/src/lib/common/homestore_utils.hpp b/src/lib/common/homestore_utils.hpp
index 604ea673e..7de531806 100644
--- a/src/lib/common/homestore_utils.hpp
+++ b/src/lib/common/homestore_utils.hpp
@@ -43,5 +43,13 @@ class hs_utils {
     static sisl::byte_array make_byte_array(const uint64_t size, const bool is_aligned_needed, const sisl::buftag tag,
                                             const size_t alignment);
     static uuid_t gen_random_uuid();
+
+    /**
+     * @brief given a DAG, build the partial-order (topological) sequence of its vertices.
+     *
+     * @return true if the DAG has a cycle, or false if not.
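+     *
+     * Example (illustrative): for DAG = { "B" -> {"A"}, "C" -> {"A"} }, a valid
+     * ordered_entries result is { "B", "C", "A" } (dependencies first), and the
+     * function returns false since the graph has no cycle.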
+ */ + static bool topological_sort(std::unordered_map< std::string, std::vector< std::string > >& DAG, + std::vector< std::string >& ordered_entries); }; } // namespace homestore diff --git a/src/lib/homestore.cpp b/src/lib/homestore.cpp index e75bb2fbf..e8606e719 100644 --- a/src/lib/homestore.cpp +++ b/src/lib/homestore.cpp @@ -226,16 +226,16 @@ void HomeStore::do_start() { void HomeStore::init_done() { m_init_done = true; } void HomeStore::shutdown() { - if (!m_init_done) { + if (!m_init_done) { LOGWARN("Homestore shutdown is called before init is completed"); - return; + return; } - + LOGINFO("Homestore shutdown is started"); if (has_index_service()) { m_index_service->stop(); -// m_index_service.reset(); + // m_index_service.reset(); } if (has_log_service()) { diff --git a/src/lib/meta/meta_blk_service.cpp b/src/lib/meta/meta_blk_service.cpp index 510033dfa..ead1ca2b1 100644 --- a/src/lib/meta/meta_blk_service.cpp +++ b/src/lib/meta/meta_blk_service.cpp @@ -351,6 +351,7 @@ void MetaBlkService::deregister_handler(meta_sub_type type) { const auto it = m_sub_info.find(type); if (it != std::end(m_sub_info)) { m_sub_info.erase(it); + m_dep_topo_graph.erase(type); HS_LOG(INFO, metablk, "[type={}] deregistered Successfully", type); } else { HS_LOG(INFO, metablk, "[type={}] not found in registered list, no-op", type); @@ -358,7 +359,8 @@ void MetaBlkService::deregister_handler(meta_sub_type type) { } void MetaBlkService::register_handler(meta_sub_type type, const meta_blk_found_cb_t& cb, - const meta_blk_recover_comp_cb_t& comp_cb, bool do_crc) { + const meta_blk_recover_comp_cb_t& comp_cb, bool do_crc, + std::optional< meta_subtype_vec_t > deps) { std::lock_guard< decltype(m_meta_mtx) > lk(m_meta_mtx); HS_REL_ASSERT_LT(type.length(), MAX_SUBSYS_TYPE_LEN, "type len: {} should not exceed len: {}", type.length(), MAX_SUBSYS_TYPE_LEN); @@ -375,7 +377,13 @@ void MetaBlkService::register_handler(meta_sub_type type, const meta_blk_found_c m_sub_info[type].cb = cb; m_sub_info[type].comp_cb = comp_cb; m_sub_info[type].do_crc = do_crc ? 
1 : 0; - HS_LOG(INFO, metablk, "[type={}] registered with do_crc: {}", type, do_crc); + if (deps.has_value()) { + m_sub_info[type].has_deps = true; + for (auto const& x : deps.value()) { + m_sub_info[x].has_deps = true; + m_dep_topo_graph[x].push_back(type); + } + } } void MetaBlkService::add_sub_sb(meta_sub_type type, const uint8_t* context_data, uint64_t sz, void*& cookie) { @@ -1074,84 +1082,103 @@ sisl::byte_array MetaBlkService::read_sub_sb_internal(const meta_blk* mblk) cons void MetaBlkService::recover(bool do_comp_cb) { // for each registered subsystem, look up in cache for their meta blks; std::lock_guard< decltype(m_shutdown_mtx) > lg{m_shutdown_mtx}; - for (auto& m : m_meta_blks) { - auto* mblk = m.second; - auto buf = read_sub_sb_internal(mblk); - - // found a meta blk and callback to sub system; - const auto itr = m_sub_info.find(mblk->hdr.h.type); - if (itr != std::end(m_sub_info)) { - // if subsystem registered crc protection, verify crc before sending to subsystem; - if (itr->second.do_crc) { - const auto crc = crc32_ieee(init_crc32, s_cast< const uint8_t* >(buf->bytes), mblk->hdr.h.context_sz); - - HS_REL_ASSERT_EQ(crc, uint32_cast(mblk->hdr.h.crc), - "[type={}], CRC mismatch: {}/{}, on mblk bid: {}, context_sz: {}", mblk->hdr.h.type, - crc, uint32_cast(mblk->hdr.h.crc), mblk->hdr.h.bid.to_string(), - uint64_cast(mblk->hdr.h.context_sz)); - } else { - HS_LOG(DEBUG, metablk, "[type={}] meta blk found with bypassing crc.", mblk->hdr.h.type); - } + meta_subtype_vec_t ordered_subtypes; - // send the callbck; - auto& cb = itr->second.cb; - if (cb) { // cb could be nullptr because client want to get its superblock via read api; - // decompress if necessary - if (mblk->hdr.h.compressed) { - // HS_DBG_ASSERT_GE(mblk->hdr.h.context_sz, META_BLK_CONTEXT_SZ); - // TO DO: Might need to address alignment based on data or fast type - auto decompressed_buf{hs_utils::make_byte_array(mblk->hdr.h.src_context_sz, true /* aligned */, - sisl::buftag::compression, align_size())}; - size_t decompressed_size = mblk->hdr.h.src_context_sz; - const auto ret{sisl::Compress::decompress(r_cast< const char* >(buf->bytes), - r_cast< char* >(decompressed_buf->bytes), - mblk->hdr.h.compressed_sz, &decompressed_size)}; - if (ret != 0) { - LOGERROR("[type={}], negative result: {} from decompress trying to decompress the " - "data. compressed_sz: {}, src_context_sz: {}", - mblk->hdr.h.type, ret, uint64_cast(mblk->hdr.h.compressed_sz), - uint64_cast(mblk->hdr.h.src_context_sz)); - HS_REL_ASSERT(false, "failed to decompress"); - } else { - // decompressed_size must equal to input sz before compress - HS_REL_ASSERT_EQ(uint64_cast(mblk->hdr.h.src_context_sz), - uint64_cast(decompressed_size)); /* since decompressed_size is >=0 it - is safe to cast to uint64_t */ - HS_LOG(DEBUG, metablk, - "[type={}] Successfully decompressed, compressed_sz: {}, src_context_sz: {}, " - "decompressed_size: {}", - mblk->hdr.h.type, uint64_cast(mblk->hdr.h.compressed_sz), - uint64_cast(mblk->hdr.h.src_context_sz), decompressed_size); - } + if (hs_utils::topological_sort(m_dep_topo_graph, ordered_subtypes)) { + throw homestore::homestore_exception( + "MetaBlkService has circular dependency, please check the dependency graph", homestore_error::init_failed); + } + + // all the subsystems are divided into two parts. + // for subsystems in ordered_subtypes, we need to recover in order. 
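+    // For example (illustrative): if subtype "A" is registered with deps {"B", "C"}, the graph
+    // holds edges B -> A and C -> A, so ordered_subtypes lists B and C before A; A's meta blks
+    // (and its completion callback) are only replayed after all of its dependencies are recovered.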
+ for (const auto& subtype : ordered_subtypes) { + recover_meta_sub_type(do_comp_cb, subtype); + } + + // TODO: for independent subsystems, we can use concurrent recovery if necessary. + for (auto const& x : m_sub_info) { + auto& reg_info = x.second; + if (!reg_info.has_deps) { recover_meta_sub_type(do_comp_cb, x.first); } + } +} + +void MetaBlkService::recover_meta_sub_type(bool do_comp_cb, const meta_sub_type& sub_type) { + for (const auto& m : m_sub_info[sub_type].meta_bids) { + auto mblk = m_meta_blks[m]; + recover_meta_block(mblk); + } + + if (do_comp_cb && m_sub_info[sub_type].comp_cb) { + m_sub_info[sub_type].comp_cb(true); + HS_LOG(DEBUG, metablk, "[type={}] completion callback sent.", sub_type); + } +} + +void MetaBlkService::recover_meta_block(meta_blk* mblk) { + auto buf = read_sub_sb_internal(mblk); + // found a meta blk and callback to sub system; + const auto itr = m_sub_info.find(mblk->hdr.h.type); + if (itr != std::end(m_sub_info)) { + // if subsystem registered crc protection, verify crc before sending to subsystem; + if (itr->second.do_crc) { + const auto crc = crc32_ieee(init_crc32, s_cast< const uint8_t* >(buf->bytes), mblk->hdr.h.context_sz); + + HS_REL_ASSERT_EQ(crc, uint32_cast(mblk->hdr.h.crc), + "[type={}], CRC mismatch: {}/{}, on mblk bid: {}, context_sz: {}", mblk->hdr.h.type, crc, + uint32_cast(mblk->hdr.h.crc), mblk->hdr.h.bid.to_string(), + uint64_cast(mblk->hdr.h.context_sz)); + } else { + HS_LOG(DEBUG, metablk, "[type={}] meta blk found with bypassing crc.", mblk->hdr.h.type); + } - cb(mblk, decompressed_buf, mblk->hdr.h.src_context_sz); + // send the callbck; + auto& cb = itr->second.cb; + if (cb) { // cb could be nullptr because client want to get its superblock via read api; + // decompress if necessary + if (mblk->hdr.h.compressed) { + // HS_DBG_ASSERT_GE(mblk->hdr.h.context_sz, META_BLK_CONTEXT_SZ); + // TO DO: Might need to address alignment based on data or fast type + auto decompressed_buf{hs_utils::make_byte_array(mblk->hdr.h.src_context_sz, true /* aligned */, + sisl::buftag::compression, align_size())}; + size_t decompressed_size = mblk->hdr.h.src_context_sz; + const auto ret{sisl::Compress::decompress(r_cast< const char* >(buf->bytes), + r_cast< char* >(decompressed_buf->bytes), + mblk->hdr.h.compressed_sz, &decompressed_size)}; + if (ret != 0) { + LOGERROR("[type={}], negative result: {} from decompress trying to decompress the " + "data. 
compressed_sz: {}, src_context_sz: {}", + mblk->hdr.h.type, ret, uint64_cast(mblk->hdr.h.compressed_sz), + uint64_cast(mblk->hdr.h.src_context_sz)); + HS_REL_ASSERT(false, "failed to decompress"); } else { - // There is use case that cb could be nullptr because client want to get its superblock via - // read api; - cb(mblk, buf, mblk->hdr.h.context_sz); + // decompressed_size must equal to input sz before compress + HS_REL_ASSERT_EQ(uint64_cast(mblk->hdr.h.src_context_sz), + uint64_cast(decompressed_size)); /* since decompressed_size is >=0 it + is safe to cast to uint64_t */ + HS_LOG(DEBUG, metablk, + "[type={}] Successfully decompressed, compressed_sz: {}, src_context_sz: {}, " + "decompressed_size: {}", + mblk->hdr.h.type, uint64_cast(mblk->hdr.h.compressed_sz), + uint64_cast(mblk->hdr.h.src_context_sz), decompressed_size); } - HS_LOG(DEBUG, metablk, "[type={}] meta blk sent with size: {}.", mblk->hdr.h.type, - uint64_cast(mblk->hdr.h.context_sz)); + cb(mblk, decompressed_buf, mblk->hdr.h.src_context_sz); + } else { + // There is use case that cb could be nullptr because client want to get its superblock via + // read api; + cb(mblk, buf, mblk->hdr.h.context_sz); } - } else { - HS_LOG(DEBUG, metablk, "[type={}], unregistered client found. "); + + HS_LOG(DEBUG, metablk, "[type={}] meta blk sent with size: {}.", mblk->hdr.h.type, + uint64_cast(mblk->hdr.h.context_sz)); + } + } else { + HS_LOG(DEBUG, metablk, "[type={}], unregistered client found. "); #if 0 // should never arrive here since we do assert on type before write to disk; HS_LOG_ASSERT( false, "[type={}] not registered for mblk found on disk. Skip this meta blk. ", mblk->hdr.h.type); #endif - } - } - - if (do_comp_cb) { - // for each registered subsystem, do recovery complete callback; - for (auto& sub : m_sub_info) { - if (sub.second.comp_cb) { - sub.second.comp_cb(true); - HS_LOG(DEBUG, metablk, "[type={}] completion callback sent.", sub.first); - } - } } } diff --git a/src/lib/meta/meta_sb.hpp b/src/lib/meta/meta_sb.hpp index 9eda9b017..e392d614d 100644 --- a/src/lib/meta/meta_sb.hpp +++ b/src/lib/meta/meta_sb.hpp @@ -91,6 +91,7 @@ struct MetaSubRegInfo { std::set< uint64_t > meta_bids; // meta blk id meta_blk_found_cb_t cb{nullptr}; meta_blk_recover_comp_cb_t comp_cb{nullptr}; + bool has_deps{false}; }; // meta blk super block put as 1st block in the block chain; diff --git a/src/tests/test_meta_blk_mgr.cpp b/src/tests/test_meta_blk_mgr.cpp index 1e4c211f0..ea1445c43 100644 --- a/src/tests/test_meta_blk_mgr.cpp +++ b/src/tests/test_meta_blk_mgr.cpp @@ -27,7 +27,6 @@ #include #include #include - #include #include #include @@ -86,6 +85,9 @@ class VMetaBlkMgrTest : public ::testing::Test { std::string mtype; Clock::time_point m_start_time; + std::vector< meta_sub_type > actual_cb_order; + std::vector< meta_sub_type > actual_on_complete_cb_order; + std::vector< void* > cookies; VMetaBlkMgrTest() = default; VMetaBlkMgrTest(const VMetaBlkMgrTest&) = delete; @@ -365,7 +367,7 @@ class VMetaBlkMgrTest : public ::testing::Test { iomanager.iobuf_free(buf); } else { if (unaligned_addr) { - delete[] (buf - unaligned_shift); + delete[](buf - unaligned_shift); } else { delete[] buf; } @@ -460,6 +462,16 @@ class VMetaBlkMgrTest : public ::testing::Test { m_mbm->recover(false); } + void recover_with_on_complete() { + // TODO: This scan_blks and recover should be replaced with actual TestHelper::start_homestore with restart + // on. 
That way, we don't need to simulate all these calls here + // do recover and callbacks will be triggered; + m_cb_blks.clear(); + hs()->cp_mgr().shutdown(); + hs()->cp_mgr().start(false /* first_time_boot */); + m_mbm->recover(true); + } + void validate() { // verify received blks via callbaks are all good; verify_cb_blks(); @@ -553,6 +565,92 @@ class VMetaBlkMgrTest : public ::testing::Test { [this](bool success) { HS_DBG_ASSERT_EQ(success, true); }); } + void register_client_inlcuding_dependencies() { + m_mbm = &(meta_service()); + m_total_wrt_sz = m_mbm->used_size(); + + HS_REL_ASSERT_EQ(m_mbm->total_size() - m_total_wrt_sz, m_mbm->available_blks() * m_mbm->block_size()); + + m_mbm->deregister_handler(mtype); + + /* + we have a DAG to simulate dependencies like this: + A + / \ + B C + / \ \ + D E F + */ + + // register with dependencies + m_mbm->register_handler( + "A", + [this](meta_blk* mblk, sisl::byte_view buf, size_t size) { + meta_sub_type subType(mblk->hdr.h.type); + actual_cb_order.push_back(subType); + }, + [this](bool success) { actual_on_complete_cb_order.push_back("A"); }, false, + std::optional< meta_subtype_vec_t >({"B", "C"})); + + m_mbm->register_handler( + "B", + [this](meta_blk* mblk, sisl::byte_view buf, size_t size) { + meta_sub_type subType(mblk->hdr.h.type); + actual_cb_order.push_back(subType); + }, + [this](bool success) { actual_on_complete_cb_order.push_back("B"); }, false, + std::optional< meta_subtype_vec_t >({"D", "E"})); + + m_mbm->register_handler( + "C", + [this](meta_blk* mblk, sisl::byte_view buf, size_t size) { + meta_sub_type subType(mblk->hdr.h.type); + actual_cb_order.push_back(subType); + }, + [this](bool success) { actual_on_complete_cb_order.push_back("C"); }, false, + std::optional< meta_subtype_vec_t >({"F"})); + + m_mbm->register_handler( + "D", + [this](meta_blk* mblk, sisl::byte_view buf, size_t size) { + meta_sub_type subType(mblk->hdr.h.type); + actual_cb_order.push_back(subType); + }, + [this](bool success) { actual_on_complete_cb_order.push_back("D"); }, false); + m_mbm->register_handler( + "E", + [this](meta_blk* mblk, sisl::byte_view buf, size_t size) { + meta_sub_type subType(mblk->hdr.h.type); + actual_cb_order.push_back(subType); + }, + [this](bool success) { actual_on_complete_cb_order.push_back("E"); }, false); + m_mbm->register_handler( + "F", + [this](meta_blk* mblk, sisl::byte_view buf, size_t size) { + meta_sub_type subType(mblk->hdr.h.type); + actual_cb_order.push_back(subType); + }, + [this](bool success) { actual_on_complete_cb_order.push_back("F"); }, false); + } + + void deregister_client_inlcuding_dependencies() { + m_mbm->deregister_handler("A"); + m_mbm->deregister_handler("B"); + m_mbm->deregister_handler("C"); + m_mbm->deregister_handler("D"); + m_mbm->deregister_handler("E"); + m_mbm->deregister_handler("F"); + m_mbm->register_handler( + mtype, + [this](meta_blk* mblk, sisl::byte_view buf, size_t size) { + if (mblk) { + std::unique_lock< std::mutex > lg{m_mtx}; + m_cb_blks[mblk->hdr.h.bid.to_integer()] = std::string{r_cast< const char* >(buf.bytes()), size}; + } + }, + [this](bool success) { HS_DBG_ASSERT_EQ(success, true); }); + } + #ifdef _PRERELEASE void set_flip_point(const std::string flip_name) { flip::FlipCondition null_cond; @@ -614,6 +712,70 @@ TEST_F(VMetaBlkMgrTest, single_read_test) { this->shutdown(); } +TEST_F(VMetaBlkMgrTest, random_dependency_test) { + reset_counters(); + m_start_time = Clock::now(); + this->register_client_inlcuding_dependencies(); + + // add sub super block out of order + uint8_t* 
buf = iomanager.iobuf_alloc(512, 1); + void* cookie{nullptr}; + for (int i = 0; i < 10; i++) { + m_mbm->add_sub_sb("E", buf, 1, cookie); + cookies.push_back(cookie); + m_mbm->add_sub_sb("B", buf, 1, cookie); + cookies.push_back(cookie); + m_mbm->add_sub_sb("A", buf, 1, cookie); + cookies.push_back(cookie); + m_mbm->add_sub_sb("F", buf, 1, cookie); + cookies.push_back(cookie); + m_mbm->add_sub_sb("C", buf, 1, cookie); + cookies.push_back(cookie); + m_mbm->add_sub_sb("D", buf, 1, cookie); + cookies.push_back(cookie); + } + + iomanager.iobuf_free(buf); + + // simulate reboot case that MetaBlkMgr will scan the disk for all the metablks that were written; + this->scan_blks(); + + this->recover_with_on_complete(); + + std::unordered_map< meta_sub_type, int > actual_first_cb_order_map; + std::unordered_map< meta_sub_type, int > actual_last_cb_order_map; + + // verify the order of callback + for (long unsigned int i = 0; i < actual_cb_order.size(); i++) { + meta_sub_type subType = actual_cb_order[i]; + actual_last_cb_order_map[subType] = i; + if (actual_first_cb_order_map.find(subType) == actual_first_cb_order_map.end()) { + actual_first_cb_order_map[subType] = i; + } + } + + EXPECT_TRUE(actual_last_cb_order_map["B"] < actual_first_cb_order_map["A"]); + EXPECT_TRUE(actual_last_cb_order_map["C"] < actual_first_cb_order_map["A"]); + EXPECT_TRUE(actual_last_cb_order_map["D"] < actual_first_cb_order_map["B"]); + EXPECT_TRUE(actual_last_cb_order_map["E"] < actual_first_cb_order_map["B"]); + EXPECT_TRUE(actual_last_cb_order_map["F"] < actual_first_cb_order_map["C"]); + + actual_first_cb_order_map.clear(); + + for (long unsigned int i = 0; i < actual_on_complete_cb_order.size(); i++) { + actual_first_cb_order_map[actual_on_complete_cb_order[i]] = i; + } + EXPECT_TRUE(actual_first_cb_order_map["B"] < actual_first_cb_order_map["A"]); + EXPECT_TRUE(actual_first_cb_order_map["C"] < actual_first_cb_order_map["A"]); + EXPECT_TRUE(actual_first_cb_order_map["D"] < actual_first_cb_order_map["B"]); + EXPECT_TRUE(actual_first_cb_order_map["E"] < actual_first_cb_order_map["B"]); + EXPECT_TRUE(actual_first_cb_order_map["F"] < actual_first_cb_order_map["C"]); + + this->deregister_client_inlcuding_dependencies(); + + this->shutdown(); +} + // 1. randome write, update, remove; // 2. recovery test and verify callback context data matches; TEST_F(VMetaBlkMgrTest, random_load_test) { @@ -637,8 +799,8 @@ TEST_F(VMetaBlkMgrTest, random_load_test) { #ifdef _PRERELEASE // release build doens't have flip point // // 1. Turn on flip to simulate fix is not there; -// 2. Write compressed then uncompressed to reproduce the issue which ends up writing bad data (hdr size mismatch) to -// disk, Change dynamic setting to skip hdr size check, because we've introduced bad data. +// 2. Write compressed then uncompressed to reproduce the issue which ends up writing bad data (hdr size mismatch) +// to disk, Change dynamic setting to skip hdr size check, because we've introduced bad data. // 3. Do a recover, verify no crash or assert should happen (need the code change to recover a bad data during // scan_meta_blks) and data can be fixed during recovery and send back to consumer; // 4. 
After recovery everything should be fine; @@ -677,8 +839,8 @@ TEST_F(VMetaBlkMgrTest, RecoveryFromBadData) { this->validate(); - // up to this point, we can not use the cached meta blk to keep doing update because the mblk in memory copy inside - // metablkstore are all freed; + // up to this point, we can not use the cached meta blk to keep doing update because the mblk in memory copy + // inside metablkstore are all freed; this->shutdown(); } @@ -731,7 +893,7 @@ SISL_OPTION_GROUP( (bitmap, "", "bitmap", "bitmap test", ::cxxopts::value< bool >()->default_value("false"), "true or false")); int main(int argc, char* argv[]) { - ::testing::GTEST_FLAG(filter) = "*random_load_test*"; + ::testing::GTEST_FLAG(filter) = "*random*"; ::testing::InitGoogleTest(&argc, argv); SISL_OPTIONS_LOAD(argc, argv, logging, test_meta_blk_mgr, iomgr, test_common_setup); sisl::logging::SetLogger("test_meta_blk_mgr");