Skip to content

Commit

Permalink
adapt raft repl dev (#132)
Browse files Browse the repository at this point in the history
 this PR using server_side instead of solo when initialize homestore to adpat raft repl dev. what is more:

 1 since raft repl dev does not support recovery, the PR disable recovery related test cases.
 2 since we need three instances when testing real 3-replica raft cases and that  needs extra works, this PR just using raft group with only one replica to run all the test cases. we can  it back to 3 replicas after we implement real 3-replica test framework
  • Loading branch information
JacksonYao287 authored Jan 18, 2024
1 parent 53a412c commit c3a1459
Show file tree
Hide file tree
Showing 10 changed files with 34 additions and 19 deletions.
3 changes: 1 addition & 2 deletions conanfile.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,7 @@

class HomeObjectConan(ConanFile):
name = "homeobject"
version = "1.0.3"

version = "1.0.4"
homepage = "https://github.com/eBay/HomeObject"
description = "Blob Store built on HomeReplication"
topics = ("ebay")
Expand Down
5 changes: 4 additions & 1 deletion src/lib/homestore_backend/hs_homeobject.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
#include "hs_hmobj_cp.hpp"
#include "replication_state_machine.hpp"

const string uri_prefix{"http://"};

namespace homeobject {

Expand Down Expand Up @@ -63,6 +64,8 @@ class HSReplApplication : public homestore::ReplApplication {

std::pair< std::string, uint16_t > lookup_peer(homestore::replica_id_t uuid) const override {
auto endpoint = _ho_application->lookup_peer(uuid);
// for folly::uri to parse correctly, we need to add "http://" prefix
endpoint = uri_prefix + endpoint;
std::pair< std::string, uint16_t > host_port;
try {
folly::Uri uri(endpoint);
Expand Down Expand Up @@ -108,7 +111,7 @@ void HSHomeObject::init_homestore() {

chunk_selector_ = std::make_shared< HeapChunkSelector >();
using namespace homestore;
auto repl_app = std::make_shared< HSReplApplication >(repl_impl_type::solo, false, this, app);
auto repl_app = std::make_shared< HSReplApplication >(repl_impl_type::server_side, false, this, app);
bool need_format = HomeStore::instance()
->with_index_service(std::make_unique< BlobIndexServiceCallbacks >(this))
.with_repl_data_service(repl_app, chunk_selector_)
Expand Down
3 changes: 2 additions & 1 deletion src/lib/homestore_backend/hs_pg_manager.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,8 @@ PGManager::NullAsyncResult HSHomeObject::_create_pg(PGInfo&& pg_info, std::set<
pg_info.replica_set_uuid = boost::uuids::random_generator()();
return hs_repl_service()
.create_repl_dev(pg_info.replica_set_uuid, peers)
.deferValue([this, pg_info = std::move(pg_info)](auto&& v) mutable -> PGManager::NullResult {
.via(executor_)
.thenValue([this, pg_info = std::move(pg_info)](auto&& v) mutable -> PGManager::NullResult {
if (v.hasError()) { return folly::makeUnexpected(toPgError(v.error())); }

// TODO create index table during create shard.
Expand Down
9 changes: 6 additions & 3 deletions src/lib/homestore_backend/tests/homeobj_cp_tests.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -35,10 +35,13 @@ TEST_F(HomeObjectFixture, HSHomeObjectCPTestBasic) {
// Step-3: trigger a cp;
trigger_cp(true /* wait */);

_obj_inst.reset();
// TODO:enable this after we have recovery ability for raft repl dev
/*
_obj_inst.reset();
// Step-4: re-create the homeobject and pg infos and shard infos will be recover automatically.
_obj_inst = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app));
// Step-4: re-create the homeobject and pg infos and shard infos will be recover automatically.
_obj_inst = homeobject::init_homeobject(std::weak_ptr< homeobject::HomeObjectApplication >(app));
*/
ho = dynamic_cast< homeobject::HSHomeObject* >(_obj_inst.get());

EXPECT_TRUE(ho->_pg_map.size() == 1);
Expand Down
7 changes: 6 additions & 1 deletion src/lib/homestore_backend/tests/homeobj_fixture.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -39,11 +39,16 @@ class HomeObjectFixture : public ::testing::Test {
void create_pg(pg_id_t pg_id) {
auto info = homeobject::PGInfo(pg_id);
auto peer1 = _obj_inst->our_uuid();
info.members.insert(homeobject::PGMember{peer1, "peer1", 1});

// TODO:: add the following back when we have 3-replica raft test framework
/*
auto peer2 = boost::uuids::random_generator()();
auto peer3 = boost::uuids::random_generator()();
info.members.insert(homeobject::PGMember{peer1, "peer1", 1});
info.members.insert(homeobject::PGMember{peer2, "peer2", 0});
info.members.insert(homeobject::PGMember{peer3, "peer3", 0});
*/

auto p = _obj_inst->pg_manager()->create_pg(std::move(info)).get();
ASSERT_TRUE(!!p);
}
Expand Down
9 changes: 5 additions & 4 deletions src/lib/homestore_backend/tests/hs_blob_tests.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ TEST_F(HomeObjectFixture, BasicPutGetDelBlobWRestart) {
trigger_cp(true /* wait */);

// Restart homeobject
restart();
// restart();

// Verify all get blobs after restart
verify_get_blob(blob_map);
Expand Down Expand Up @@ -101,7 +101,7 @@ TEST_F(HomeObjectFixture, BasicPutGetDelBlobWRestart) {
trigger_cp(true /* wait */);

// Restart homeobject
restart();
// restart();

// After restart, for all deleted blobs, get should fail
for (const auto& [id, blob] : blob_map) {
Expand Down Expand Up @@ -147,7 +147,7 @@ TEST_F(HomeObjectFixture, SealShardWithRestart) {
LOGINFO("Put blob {}", b.error());

// Restart homeobject
restart();
// restart();

// Verify shard is sealed.
s = _obj_inst->shard_manager()->get_shard(shard_id).get();
Expand Down Expand Up @@ -211,7 +211,8 @@ TEST_F(HomeObjectFixture, PGStatsTest) {
EXPECT_EQ(pg_stats.id, pg_id);
EXPECT_EQ(pg_stats.total_shards, 2);
EXPECT_EQ(pg_stats.open_shards, 1);
EXPECT_EQ(pg_stats.num_members, 3);
// TODO: EXPECT_EQ(pg_stats.num_members, 1) after having real 3-replica repl dev in test
EXPECT_EQ(pg_stats.num_members, 1);

auto stats = _obj_inst->get_stats();
LOGINFO("HomeObj stats: {}", stats.to_string());
Expand Down
11 changes: 7 additions & 4 deletions src/lib/homestore_backend/tests/hs_shard_tests.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -40,12 +40,12 @@ TEST_F(TestFixture, CreateMultiShards) {
TEST_F(TestFixture, CreateMultiShardsOnMultiPG) {
// create another PG;
auto peer1 = homeobj_->our_uuid();
auto peer2 = boost::uuids::random_generator()();
// auto peer2 = boost::uuids::random_generator()();

auto new_pg_id = static_cast< homeobject::pg_id_t >(_pg_id + 1);
auto info = homeobject::PGInfo(_pg_id + 1);
info.members.insert(homeobject::PGMember{peer1, "peer1", 1});
info.members.insert(homeobject::PGMember{peer2, "peer2", 0});
// info.members.insert(homeobject::PGMember{peer2, "peer2", 0});
EXPECT_TRUE(homeobj_->pg_manager()->create_pg(std::move(info)).get());

std::vector< homeobject::pg_id_t > pgs{_pg_id, new_pg_id};
Expand Down Expand Up @@ -146,6 +146,8 @@ class ShardManagerTestingRecovery : public ::testing::Test {
std::shared_ptr< FixtureApp > app;
};

// TODO: enable the following test case after we fix raft repl dev recovery issue.
/*
TEST_F(ShardManagerTestingRecovery, ShardManagerRecovery) {
// prepare the env first;
auto app_with_recovery = dp_cast< FixtureAppWithRecovery >(app);
Expand All @@ -166,7 +168,7 @@ TEST_F(ShardManagerTestingRecovery, ShardManagerRecovery) {
auto info = homeobject::PGInfo(_pg_id);
info.members.insert(homeobject::PGMember{_peer1, "peer1", 1});
info.members.insert(homeobject::PGMember{_peer2, "peer2", 0});
// info.members.insert(homeobject::PGMember{_peer2, "peer2", 0});
EXPECT_TRUE(_home_object->pg_manager()->create_pg(std::move(info)).get());
// create one shard;
auto e = _home_object->shard_manager()->create_shard(_pg_id, Mi).get();
Expand Down Expand Up @@ -256,7 +258,7 @@ TEST_F(ShardManagerTestingRecovery, SealedShardRecovery) {
auto info = homeobject::PGInfo(_pg_id);
info.members.insert(homeobject::PGMember{_peer1, "peer1", 1});
info.members.insert(homeobject::PGMember{_peer2, "peer2", 0});
// info.members.insert(homeobject::PGMember{_peer2, "peer2", 0});
EXPECT_TRUE(_home_object->pg_manager()->create_pg(std::move(info)).get());
// create one shard;
auto e = _home_object->shard_manager()->create_shard(_pg_id, Mi).get();
Expand Down Expand Up @@ -301,3 +303,4 @@ TEST_F(ShardManagerTestingRecovery, SealedShardRecovery) {
_home_object.reset();
std::filesystem::remove(fpath);
}
*/
2 changes: 1 addition & 1 deletion src/lib/tests/PGManagerTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ TEST_F(TestFixture, CreatePgNotMember) {
TEST_F(TestFixture, CreateDuplicatePg) {
auto info = PGInfo(_pg_id);
info.members.insert(PGMember{boost::uuids::random_generator()(), "peer3", 6});
info.members.insert(PGMember{boost::uuids::random_generator()(), "peer4", 2});
// info.members.insert(PGMember{boost::uuids::random_generator()(), "peer4", 2});
EXPECT_EQ(homeobj_->pg_manager()->create_pg(std::move(info)).get().error(), PGError::INVALID_ARG);
}

Expand Down
2 changes: 1 addition & 1 deletion src/lib/tests/fixture_app.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ void TestFixture::SetUp() {

auto info = homeobject::PGInfo(_pg_id);
info.members.insert(homeobject::PGMember{_peer1, "peer1", 1});
info.members.insert(homeobject::PGMember{_peer2, "peer2", 0});
// info.members.insert(homeobject::PGMember{_peer2, "peer2", 0});

LOGDEBUG("Setup Pg");
EXPECT_TRUE(homeobj_->pg_manager()->create_pg(std::move(info)).get());
Expand Down
2 changes: 1 addition & 1 deletion src/lib/tests/fixture_app.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ class FixtureApp : public homeobject::HomeObjectApplication {
}

homeobject::peer_id_t discover_svcid(std::optional< homeobject::peer_id_t > const& p) const override;
std::string lookup_peer(homeobject::peer_id_t const&) const override { return "test_fixture.com"; }
std::string lookup_peer(homeobject::peer_id_t const&) const override { return "127.0.0.1:4000"; }
};

class TestFixture : public ::testing::Test {
Expand Down

0 comments on commit c3a1459

Please sign in to comment.