Skip to content

Commit

Permalink
Remove group_id & fix comments
Browse files Browse the repository at this point in the history
  • Loading branch information
koujl committed Nov 27, 2024
1 parent 0d4333a commit 8d3cfb0
Show file tree
Hide file tree
Showing 4 changed files with 14 additions and 18 deletions.
5 changes: 2 additions & 3 deletions src/lib/homestore_backend/hs_homeobject.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -329,8 +329,7 @@ class HSHomeObject : public HomeObjectImpl {
constexpr static shard_id_t invalid_shard_id = 0;
constexpr static shard_id_t shard_list_end_marker = ULLONG_MAX;

SnapshotReceiveHandler(HSHomeObject& home_obj, homestore::group_id_t group_id,
shared< homestore::ReplDev > repl_dev);
SnapshotReceiveHandler(HSHomeObject& home_obj, shared< homestore::ReplDev > repl_dev);

void process_pg_snapshot_data(ResyncPGMetaData const& pg_meta);
int process_shard_snapshot_data(ResyncShardMetaData const& shard_meta);
Expand All @@ -349,12 +348,12 @@ class HSHomeObject : public HomeObjectImpl {
std::vector< shard_id_t > shard_list;
const int64_t snp_lsn;
const pg_id_t pg_id;
shared< BlobIndexTable > index_table;

SnapshotContext(int64_t lsn, pg_id_t pg_id) : snp_lsn{lsn}, pg_id{pg_id} {}
};

HSHomeObject& home_obj_;
const homestore::group_id_t group_id_;
const shared< homestore::ReplDev > repl_dev_;

std::unique_ptr< SnapshotContext > ctx_;
Expand Down
4 changes: 1 addition & 3 deletions src/lib/homestore_backend/replication_state_machine.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -269,9 +269,7 @@ void ReplicationStateMachine::write_snapshot_data(std::shared_ptr< homestore::sn
RELEASE_ASSERT(context != nullptr, "Context null");
RELEASE_ASSERT(snp_data != nullptr, "Snapshot data null");
auto r_dev = repl_dev();
if (!m_snp_rcv_handler) {
std::make_unique< HSHomeObject::SnapshotReceiveHandler >(*home_object_, r_dev->group_id(), r_dev);
}
if (!m_snp_rcv_handler) { std::make_unique< HSHomeObject::SnapshotReceiveHandler >(*home_object_, r_dev); }

auto s = dynamic_pointer_cast< homestore::nuraft_snapshot_context >(context)->nuraft_snapshot();
auto obj_id = objId(static_cast< snp_obj_id_t >(snp_data->offset));
Expand Down
15 changes: 7 additions & 8 deletions src/lib/homestore_backend/snapshot_receive_handler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,9 @@
#include <homestore/blkdata_service.hpp>

namespace homeobject {
HSHomeObject::SnapshotReceiveHandler::SnapshotReceiveHandler(HSHomeObject& home_obj, homestore::group_id_t group_id,
HSHomeObject::SnapshotReceiveHandler::SnapshotReceiveHandler(HSHomeObject& home_obj,
shared< homestore::ReplDev > repl_dev) :
home_obj_(home_obj), group_id_(group_id), repl_dev_(std::move(repl_dev)) {}
home_obj_(home_obj), repl_dev_(std::move(repl_dev)) {}

void HSHomeObject::SnapshotReceiveHandler::process_pg_snapshot_data(ResyncPGMetaData const& pg_meta) {
LOGI("process_pg_snapshot_data pg_id:{}", pg_meta.pg_id());
Expand Down Expand Up @@ -103,16 +103,15 @@ int HSHomeObject::SnapshotReceiveHandler::process_blobs_snapshot_data(ResyncBlob
continue;
}

// Check duplication to avoid reprocessing
shared< BlobIndexTable > index_table;
{
// Check duplication to avoid reprocessing. This may happen on resent blob batches.
if (!ctx_->index_table) {
std::shared_lock lock_guard(home_obj_._pg_lock);
auto iter = home_obj_._pg_map.find(ctx_->pg_id);
RELEASE_ASSERT(iter != home_obj_._pg_map.end(), "PG not found");
index_table = dynamic_cast< HS_PG* >(iter->second.get())->index_table_;
ctx_->index_table = dynamic_cast< HS_PG* >(iter->second.get())->index_table_;
}
RELEASE_ASSERT(index_table != nullptr, "Index table instance null");
if (home_obj_.get_blob_from_index_table(index_table, ctx_->shard_cursor, blob->blob_id())) {
RELEASE_ASSERT(ctx_->index_table != nullptr, "Index table instance null");
if (home_obj_.get_blob_from_index_table(ctx_->index_table, ctx_->shard_cursor, blob->blob_id())) {
LOGD("Skip already persisted blob_id:{}", blob->blob_id());
continue;
}
Expand Down
8 changes: 4 additions & 4 deletions src/lib/homestore_backend/tests/hs_blob_tests.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -288,6 +288,7 @@ TEST_F(HomeObjectFixture, PGBlobIterator) {
}

TEST_F(HomeObjectFixture, SnapshotReceiveHandler) {
// TODO: add flips to test corrupted data
// MUST TEST WITH replica=1
constexpr uint64_t snp_lsn = 1;
constexpr uint64_t num_shards_per_pg = 3;
Expand All @@ -302,8 +303,7 @@ TEST_F(HomeObjectFixture, SnapshotReceiveHandler) {
auto r_dev = homestore::HomeStore::instance()->repl_service().get_repl_dev(stats.replica_set_uuid);
ASSERT_TRUE(r_dev.hasValue());

auto handler = std::make_unique< homeobject::HSHomeObject::SnapshotReceiveHandler >(
*_obj_inst, stats.replica_set_uuid, r_dev.value());
auto handler = std::make_unique< homeobject::HSHomeObject::SnapshotReceiveHandler >(*_obj_inst, r_dev.value());
handler->reset_context(snp_lsn, pg_id);

// Step 1: Test write pg meta - cannot test full logic since the PG already exists
Expand Down Expand Up @@ -345,8 +345,8 @@ TEST_F(HomeObjectFixture, SnapshotReceiveHandler) {
shard.lsn = snp_lsn;

auto shard_entry =
CreateResyncShardMetaData(builder, shard.id, pg_id, uint8_t(shard.state), shard.lsn, shard.created_time,
shard.last_modified_time, shard.total_capacity_bytes, 0);
CreateResyncShardMetaData(builder, shard.id, pg_id, static_cast< uint8_t >(shard.state), shard.lsn,
shard.created_time, shard.last_modified_time, shard.total_capacity_bytes, 0);
builder.Finish(shard_entry);
auto shard_meta = GetResyncShardMetaData(builder.GetBufferPointer());
builder.Reset();
Expand Down

0 comments on commit 8d3cfb0

Please sign in to comment.