Updated tests to use Helper #454

Merged
9 commits, merged Jun 24, 2024
2 changes: 1 addition & 1 deletion conanfile.py
@@ -5,7 +5,7 @@

class HomestoreConan(ConanFile):
name = "homestore"
version = "6.4.20"
version = "6.4.21"
homepage = "https://github.com/eBay/Homestore"
description = "HomeStore Storage Engine"
topics = ("ebay", "nublox")
131 changes: 77 additions & 54 deletions src/tests/btree_helpers/btree_test_helper.hpp
@@ -100,60 +100,63 @@ struct BtreeTestHelper {
}
#endif
void preload(uint32_t preload_size) {
if (preload_size) {
const auto n_fibers = std::min(preload_size, (uint32_t)m_fibers.size());
const auto chunk_size = preload_size / n_fibers;
const auto last_chunk_size = preload_size % chunk_size ?: chunk_size;
auto test_count = n_fibers;

for (std::size_t i = 0; i < n_fibers; ++i) {
const auto start_range = i * chunk_size;
const auto end_range = start_range + ((i == n_fibers - 1) ? last_chunk_size : chunk_size);
auto fiber_id = i;
iomanager.run_on_forget(
m_fibers[i], [this, start_range, end_range, &test_count, fiber_id, preload_size]() {
double progress_interval =
(double)(end_range - start_range) / 20; // 5% of the total number of iterations
double progress_thresh = progress_interval; // threshold for progress interval
double elapsed_time, progress_percent, last_progress_time = 0;
auto m_start_time = Clock::now();

for (uint32_t i = start_range; i < end_range; i++) {
put(i, btree_put_type::INSERT);
if (fiber_id == 0) {
elapsed_time = get_elapsed_time_sec(m_start_time);
progress_percent = (double)(i - start_range) / (end_range - start_range) * 100;

// check progress every 5% of the total number of iterations or every 30 seconds
bool print_time = false;
if (i >= progress_thresh) {
progress_thresh += progress_interval;
print_time = true;
}
if (elapsed_time - last_progress_time > 30) {
last_progress_time = elapsed_time;
print_time = true;
}
if (print_time) {
LOGINFO("Progress: iterations completed ({:.2f}%)- Elapsed time: {:.0f} seconds- "
"populated entries: {} ({:.2f}%)",
progress_percent, elapsed_time, m_shadow_map.size(),
m_shadow_map.size() * 100.0 / preload_size);
}
}
if (preload_size == 0) {
LOGINFO("Preload Skipped");
return;
}

const auto n_fibers = std::min(preload_size, (uint32_t)m_fibers.size());
const auto chunk_size = preload_size / n_fibers;
const auto last_chunk_size = preload_size % chunk_size ?: chunk_size;
auto test_count = n_fibers;

for (std::size_t i = 0; i < n_fibers; ++i) {
const auto start_range = i * chunk_size;
const auto end_range = start_range + ((i == n_fibers - 1) ? last_chunk_size : chunk_size) - 1;
auto fiber_id = i;
iomanager.run_on_forget(m_fibers[i], [this, start_range, end_range, &test_count, fiber_id, preload_size]() {
double progress_interval =
(double)(end_range - start_range) / 20; // 5% of the total number of iterations
double progress_thresh = progress_interval; // threshold for progress interval
double elapsed_time, progress_percent, last_progress_time = 0;
auto m_start_time = Clock::now();

for (uint32_t i = start_range; i < end_range; i++) {
put(i, btree_put_type::INSERT);
if (fiber_id == 0) {
elapsed_time = get_elapsed_time_sec(m_start_time);
progress_percent = (double)(i - start_range) / (end_range - start_range) * 100;

// check progress every 5% of the total number of iterations or every 30 seconds
bool print_time = false;
if (i >= progress_thresh) {
progress_thresh += progress_interval;
print_time = true;
}
if (elapsed_time - last_progress_time > 30) {
last_progress_time = elapsed_time;
print_time = true;
}
                        if (print_time) {
                            LOGINFO("Progress: iterations completed ({:.2f}%)- Elapsed time: {:.0f} seconds- "
                                    "populated entries: {} ({:.2f}%)",
                                    progress_percent, elapsed_time, m_shadow_map.size(),
                                    m_shadow_map.size() * 100.0 / preload_size);
                        }
                    }
                }
                {
                    std::unique_lock lg(m_test_done_mtx);
                    if (--test_count == 0) { m_test_done_cv.notify_one(); }
                }
            });
        }

        {
            std::unique_lock< std::mutex > lk(m_test_done_mtx);
            m_test_done_cv.wait(lk, [&]() { return test_count == 0; });
        }

LOGINFO("Preload Done");
}
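
For readers skimming this hunk: the per-fiber ranges come from plain integer arithmetic (chunk_size = preload_size / n_fibers, and the GNU "?:" shorthand falls back to a full chunk when the remainder is zero). The standalone sketch below reproduces that split with an assumed preload_size and fiber count; it is illustration only, not part of the change.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    uint32_t const preload_size = 1000;                    // assumed value, for illustration only
    uint32_t const n_fibers = std::min(preload_size, 4u);  // assume 4 io fibers are available
    uint32_t const chunk_size = preload_size / n_fibers;
    // Same intent as the GNU "?:" shorthand in the helper: use the remainder if
    // it is non-zero, otherwise a full chunk.
    uint32_t const rem = preload_size % chunk_size;
    uint32_t const last_chunk_size = (rem != 0) ? rem : chunk_size;

    for (uint32_t i = 0; i < n_fibers; ++i) {
        uint32_t const start_range = i * chunk_size;
        uint32_t const end_range = start_range + ((i == n_fibers - 1) ? last_chunk_size : chunk_size) - 1;
        std::printf("fiber %u preloads keys [%u, %u)\n", i, start_range, end_range);
    }
    return 0;
}

Running it prints the half-open range each fiber iterates, which makes it easy to see how the split behaves when n_fibers does not divide preload_size evenly.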

@@ -171,6 +174,18 @@ struct BtreeTestHelper {
do_put(start_k, btree_put_type::INSERT, V::generate_rand());
}

void force_upsert(uint64_t k) {
auto existing_v = std::make_unique< V >();
K key = K{k};
V value = V::generate_rand();
auto sreq = BtreeSinglePutRequest{&key, &value, btree_put_type::UPSERT, existing_v.get()};
sreq.enable_route_tracing();

auto const ret = m_bt->put(sreq);
ASSERT_EQ(ret, btree_status_t::success) << "Upsert key=" << k << " failed with error=" << enum_name(ret);
m_shadow_map.force_put(k, value);
}
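
A hypothetical call site, shown only to illustrate the contract (the key values are assumed, not taken from any test in this PR): unlike a strict INSERT through put(), force_upsert() succeeds whether or not the key already exists, and mirrors the new value into the shadow map via force_put().

// Sketch only; "helper" is an instance of this BtreeTestHelper specialization.
helper.put(10, btree_put_type::INSERT); // strict insert: would fail if key 10 already existed
helper.force_upsert(10);                // overwrites key 10 with a fresh random value
helper.force_upsert(11);                // behaves like a plain insert for a brand-new key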

void range_put(uint32_t start_k, uint32_t end_k, V const& value, bool update) {
K start_key = K{start_k};
K end_key = K{end_k};
@@ -346,7 +361,16 @@ struct BtreeTestHelper {
}

void multi_op_execute(const std::vector< std::pair< std::string, int > >& op_list, bool skip_preload = false) {
if (!skip_preload) { preload(SISL_OPTIONS["preload_size"].as< uint32_t >()); }
if (!skip_preload) {
auto preload_size = SISL_OPTIONS["preload_size"].as< uint32_t >();
auto const num_entries = SISL_OPTIONS["num_entries"].as< uint32_t >();
if (preload_size > num_entries / 2) {
LOGWARN("Preload size={} is more than half of num_entries, setting preload_size to {}", preload_size,
num_entries / 2);
preload_size = num_entries / 2;
}
preload(preload_size);
}
run_in_parallel(op_list);
}
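
For context, a hypothetical invocation (operation names and weights are assumed): each entry in op_list pairs an operation name with its relative weight, matching what build_op_list() below produces, and the clamp added above keeps the preload from covering more than half of num_entries.

// Sketch only; assumes "put" and "remove" are operation names the runner understands.
std::vector< std::pair< std::string, int > > op_list{{"put", 50}, {"remove", 50}};
helper.multi_op_execute(op_list);                          // preload (clamped), then run ops in parallel
helper.multi_op_execute(op_list, true /* skip_preload */); // reuse already-populated data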

@@ -388,13 +412,12 @@ struct BtreeTestHelper {
K key = K{k};
auto sreq = BtreeSinglePutRequest{&key, &value, put_type, existing_v.get()};
sreq.enable_route_tracing();
// bool done = (m_bt->put(sreq) == btree_status_t::success);
bool done = expect_success ? (m_bt->put(sreq) == btree_status_t::success)
: m_bt->put(sreq) == btree_status_t::put_failed;

if (put_type == btree_put_type::INSERT) {
ASSERT_EQ(done, !m_shadow_map.exists(key));
} else {
} else if (put_type == btree_put_type::UPDATE) {
ASSERT_EQ(done, m_shadow_map.exists(key));
}
if (expect_success) { m_shadow_map.put_and_check(key, value, *existing_v, done); }
@@ -488,7 +511,7 @@ struct BtreeTestHelper {
LOGINFO("ALL parallel jobs joined");
}

std::vector< std::pair< std::string, int > > build_op_list(std::vector< std::string >& input_ops) {
std::vector< std::pair< std::string, int > > build_op_list(std::vector< std::string > const& input_ops) {
std::vector< std::pair< std::string, int > > ops;
int total = std::accumulate(input_ops.begin(), input_ops.end(), 0, [](int sum, const auto& str) {
std::vector< std::string > tokens;
50 changes: 45 additions & 5 deletions src/tests/btree_helpers/shadow_map.hpp
@@ -8,11 +8,12 @@ class ShadowMap {
private:
std::map< K, V > m_map;
RangeScheduler m_range_scheduler;
uint32_t m_max_keys;
using mutex = iomgr::FiberManagerLib::shared_mutex;
mutex m_mutex;

public:
ShadowMap(uint32_t num_keys) : m_range_scheduler(num_keys) {}
ShadowMap(uint32_t num_keys) : m_range_scheduler(num_keys), m_max_keys{num_keys} {}

void put_and_check(const K& key, const V& val, const V& old_val, bool expected_success) {
std::lock_guard lock{m_mutex};
@@ -24,6 +25,12 @@ class ShadowMap {
m_range_scheduler.put_key(key.key());
}

void force_put(const K& key, const V& val) {
std::lock_guard lock{m_mutex};
m_map.insert_or_assign(key, val);
m_range_scheduler.put_key(key.key());
}

void range_upsert(uint64_t start_k, uint32_t count, const V& val) {
std::lock_guard lock{m_mutex};
for (uint32_t i{0}; i < count; ++i) {
@@ -58,6 +65,8 @@ class ShadowMap {
return std::pair(start_it->first, it->first);
}

uint32_t max_keys() const { return m_max_keys; }

bool exists(const K& key) const {
std::lock_guard lock{m_mutex};
return m_map.find(key) != m_map.end();
@@ -128,6 +137,38 @@ class ShadowMap {
m_range_scheduler.remove_keys(start_key.key(), end_key.key());
}

std::vector< std::pair< K, bool > > diff(ShadowMap< K, V > const& other) {
auto it1 = m_map.begin();
auto it2 = other.m_map.begin();
std::vector< std::pair< K, bool > > ret_diff;

while ((it1 != m_map.end()) && (it2 != m_map.end())) {
auto const x = it1->first.compare(it2->first);
if (x == 0) {
++it1;
++it2;
} else if (x < 0) {
                // Key present only in this map: record it as an addition
ret_diff.emplace_back(it1->first, true /* addition */);
++it1;
} else {
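                // Key present only in the other map: record it with addition == false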
ret_diff.emplace_back(it2->first, false /* addition */);
++it2;
}
}

while (it1 != m_map.end()) {
ret_diff.emplace_back(it1->first, true /* addition */);
++it1;
}

while (it2 != other.m_map.end()) {
            ret_diff.emplace_back(it2->first, false /* addition */);
++it2;
}
return ret_diff;
}
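
A short, hypothetical usage sketch (the key/value types and the way the maps get populated are assumed): entries flagged true exist only in the map diff() is called on, entries flagged false exist only in the map passed as the argument.

// Sketch only; K::to_string() is assumed to exist, as used elsewhere in this file.
ShadowMap< K, V > before{1000};
ShadowMap< K, V > after{1000};
// ... populate both maps ...
auto changes = after.diff(before);
for (auto const& [key, added] : changes) {
    LOGINFO("key={} is {}", key.to_string(), added ? "only in 'after'" : "only in 'before'");
}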

mutex& guard() { return m_mutex; }
std::map< K, V >& map() { return m_map; }
const std::map< K, V >& map_const() const { return m_map; }
@@ -144,12 +185,11 @@ class ShadowMap {
const int key_width = 20;

// Format the key-value pairs and insert them into the result string
ss << std::left << std::setw(key_width) << "KEY"
<< " "
<< "VaLUE" << '\n';
ss << std::left << std::setw(key_width) << "KEY" << " " << "VaLUE" << '\n';
foreach ([&](const auto& key, const auto& value) {
ss << std::left << std::setw(key_width) << key.to_string() << " " << value.to_string() << '\n';
});
})
;
result = ss.str();
return result;
}
35 changes: 17 additions & 18 deletions src/tests/index_btree_benchmark.cpp
@@ -29,7 +29,7 @@

using namespace homestore;

#define INDEX_BETREE_BENCHMARK(BTREE_TYPE) \
#define INDEX_BTREE_BENCHMARK(BTREE_TYPE) \
BENCHMARK(run_benchmark< BTREE_TYPE >) \
->Setup(BM_Setup< BTREE_TYPE >) \
->Teardown(BM_Teardown< BTREE_TYPE >) \
@@ -38,12 +38,9 @@ using namespace homestore;
->Name(#BTREE_TYPE);

// this is used to split the setup and teardown from the benchmark to get a more accurate result
void* globle_helper{nullptr};

#define GET_BENCHMARK_HELPER(BTREE_TYPE) static_cast< IndexBtreeBenchmark< BTREE_TYPE >* >(globle_helper)
void* g_btree_helper{nullptr};

SISL_LOGGING_INIT(HOMESTORE_LOG_MODS)
std::vector< std::string > test_common::HSTestHelper::s_dev_names;
SISL_OPTIONS_ENABLE(logging, index_btree_benchmark, iomgr, test_common_setup)

SISL_OPTION_GROUP(index_btree_benchmark,
@@ -69,8 +66,8 @@ struct IndexBtreeBenchmark : public BtreeTestHelper< TestType > {
~IndexBtreeBenchmark() { TearDown(); }

void SetUp() {
test_common::HSTestHelper::start_homestore(
"index_btree_benchmark", {{HS_SERVICE::META, {.size_pct = 10.0}}, {HS_SERVICE::INDEX, {.size_pct = 70.0}}});
m_helper.start_homestore("index_btree_benchmark",
{{HS_SERVICE::META, {.size_pct = 10.0}}, {HS_SERVICE::INDEX, {.size_pct = 70.0}}});

this->m_cfg = BtreeConfig(hs()->index_service().node_size());
this->m_is_multi_threaded = true;
@@ -87,30 +84,32 @@

void TearDown() {
BtreeTestHelper< TestType >::TearDown();
test_common::HSTestHelper::shutdown_homestore();
m_helper.shutdown_homestore();
}

void run_benchmark() { this->run_in_parallel(m_op_list); }

private:
test_common::HSTestHelper m_helper;
std::vector< std::pair< std::string, int > > m_op_list;
};

template < class BenchmarkType >
void BM_Setup(const benchmark::State& state) {
globle_helper = new IndexBtreeBenchmark< BenchmarkType >();
auto helper = GET_BENCHMARK_HELPER(BenchmarkType);
g_btree_helper = new IndexBtreeBenchmark< BenchmarkType >();
auto helper = s_cast< IndexBtreeBenchmark< BenchmarkType >* >(g_btree_helper);
helper->preload(SISL_OPTIONS["preload_size"].as< uint32_t >());
}

template < class BenchmarkType >
void BM_Teardown(const benchmark::State& state) {
delete GET_BENCHMARK_HELPER(BenchmarkType);
auto helper = s_cast< IndexBtreeBenchmark< BenchmarkType >* >(g_btree_helper);
delete helper;
}

template < class BenchmarkType >
void add_custom_counter(benchmark::State& state) {
auto helper = GET_BENCHMARK_HELPER(BenchmarkType);
auto helper = s_cast< IndexBtreeBenchmark< BenchmarkType >* >(g_btree_helper);
auto totol_ops = helper->get_op_num();
state.counters["thread_num"] = SISL_OPTIONS["num_threads"].as< uint32_t >();
state.counters["fiber_num"] = SISL_OPTIONS["num_fibers"].as< uint32_t >();
Expand All @@ -122,18 +121,18 @@ void add_custom_counter(benchmark::State& state) {

template < class BenchmarkType >
void run_benchmark(benchmark::State& state) {
auto helper = GET_BENCHMARK_HELPER(BenchmarkType);
auto helper = s_cast< IndexBtreeBenchmark< BenchmarkType >* >(g_btree_helper);
for (auto _ : state) {
helper->run_benchmark();
}
add_custom_counter< BenchmarkType >(state);
}

INDEX_BETREE_BENCHMARK(FixedLenBtree)
INDEX_BETREE_BENCHMARK(VarKeySizeBtree)
INDEX_BETREE_BENCHMARK(VarValueSizeBtree)
INDEX_BETREE_BENCHMARK(VarObjSizeBtree)
//INDEX_BETREE_BENCHMARK(PrefixIntervalBtree)
INDEX_BTREE_BENCHMARK(FixedLenBtree)
INDEX_BTREE_BENCHMARK(VarKeySizeBtree)
INDEX_BTREE_BENCHMARK(VarValueSizeBtree)
INDEX_BTREE_BENCHMARK(VarObjSizeBtree)
// INDEX_BTREE_BENCHMARK(PrefixIntervalBtree)
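
For reference, an approximate expansion of one of these registrations. The chained calls hidden behind the collapsed hunk above are omitted, so this is a sketch rather than the exact macro output.

// INDEX_BTREE_BENCHMARK(FixedLenBtree) expands to roughly:
BENCHMARK(run_benchmark< FixedLenBtree >)
    ->Setup(BM_Setup< FixedLenBtree >)
    ->Teardown(BM_Teardown< FixedLenBtree >)
    ->Name("FixedLenBtree");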

int main(int argc, char** argv) {
SISL_OPTIONS_LOAD(argc, argv, logging, index_btree_benchmark, iomgr, test_common_setup);