diff --git a/.jenkins/Dockerfile b/.jenkins/Dockerfile index cc3556eea..b09e03467 100644 --- a/.jenkins/Dockerfile +++ b/.jenkins/Dockerfile @@ -15,8 +15,12 @@ RUN set -eux; \ COPY test_index_btree /usr/local/bin/test_index_btree COPY test_meta_blk_mgr /usr/local/bin/test_meta_blk_mgr COPY test_log_store /usr/local/bin/test_log_store -COPY btree_test.py /usr/local/bin/scripts/btree_test.py +COPY test_raft_repl_dev /usr/local/bin/test_raft_repl_dev +COPY test_solo_repl_dev /usr/local/bin/test_solo_repl_dev +COPY index_test.py /usr/local/bin/scripts/index_test.py COPY log_meta_test.py /usr/local/bin/scripts/log_meta_test.py +COPY long_running.py /usr/local/bin/scripts/long_running.py + EXPOSE 5000 # ########## ####### ############ diff --git a/.jenkins/jenkinsfile_nightly b/.jenkins/jenkinsfile_nightly index d00b7b722..1ee47ff38 100644 --- a/.jenkins/jenkinsfile_nightly +++ b/.jenkins/jenkinsfile_nightly @@ -44,8 +44,11 @@ pipeline { sh "find ${CONAN_USER_HOME} -type f -wholename '*bin/test_index_btree' -exec cp {} .jenkins/test_index_btree \\;" sh "find ${CONAN_USER_HOME} -type f -wholename '*bin/test_meta_blk_mgr' -exec cp {} .jenkins/test_meta_blk_mgr \\;" sh "find ${CONAN_USER_HOME} -type f -wholename '*bin/test_log_store' -exec cp {} .jenkins/test_log_store \\;" - sh "find ${CONAN_USER_HOME} -type f -wholename '*bin/scripts/btree_test.py' -exec install -Dm755 {} .jenkins/btree_test.py \\; " + sh "find ${CONAN_USER_HOME} -type f -wholename '*bin/test_raft_repl_dev' -exec cp {} .jenkins/test_raft_repl_dev \\;" + sh "find ${CONAN_USER_HOME} -type f -wholename '*bin/test_solo_repl_dev' -exec cp {} .jenkins/test_solo_repl_dev \\;" + sh "find ${CONAN_USER_HOME} -type f -wholename '*bin/scripts/index_test.py' -exec install -Dm755 {} .jenkins/index_test.py \\; " sh "find ${CONAN_USER_HOME} -type f -wholename '*bin/scripts/log_meta_test.py' -exec install -Dm755 {} .jenkins/log_meta_test.py \\; " + sh "find ${CONAN_USER_HOME} -type f -wholename 
'*bin/scripts/long_running.py' -exec install -Dm755 {} .jenkins/long_running.py \\; " } post { failure { diff --git a/conanfile.py b/conanfile.py index af88ccdcb..cb10618b9 100644 --- a/conanfile.py +++ b/conanfile.py @@ -5,7 +5,7 @@ class HomestoreConan(ConanFile): name = "homestore" - version = "6.4.9" + version = "6.4.11" homepage = "https://github.com/eBay/Homestore" description = "HomeStore Storage Engine" diff --git a/src/lib/meta/meta_blk_service.cpp b/src/lib/meta/meta_blk_service.cpp index 3946ea518..24fa8553c 100644 --- a/src/lib/meta/meta_blk_service.cpp +++ b/src/lib/meta/meta_blk_service.cpp @@ -196,9 +196,14 @@ void MetaBlkService::format_ssb() { // m_meta_lock should be while calling this function; void MetaBlkService::write_ssb() { // write current ovf blk to disk; - try { - m_sb_vdev->sync_write((const char*)m_ssb, block_size(), m_ssb->bid); - } catch (std::exception& e) { HS_REL_ASSERT(false, "exception happen during write {}", e.what()); } + auto error = m_sb_vdev->sync_write((const char*)m_ssb, block_size(), m_ssb->bid); + if (error.value()) { + // the offset and buffer length is printed in the error messages of iomgr. + // buf address here is to show whether the buffer is aligned or not. 
+ // TODO: handle this error properly + HS_REL_ASSERT(false, "error happens during write ssb: {}, buf address: {}", error.value(), + (const char*)m_ssb); + } LOGINFO("Successfully write m_ssb to disk: {}", m_ssb->to_string()); @@ -436,9 +441,14 @@ void MetaBlkService::write_ovf_blk_to_disk(meta_blk_ovf_hdr* ovf_hdr, const uint HS_DBG_ASSERT_LE(ovf_hdr->h.context_sz + offset, sz); // write current ovf blk to disk; - try { - m_sb_vdev->sync_write((const char*)ovf_hdr, block_size(), ovf_hdr->h.bid); - } catch (std::exception& e) { HS_REL_ASSERT(false, "exception happen during write {}", e.what()); } + auto error = m_sb_vdev->sync_write((const char*)ovf_hdr, block_size(), ovf_hdr->h.bid); + if (error.value()) { + // the offset and buffer length is printed in the error messages of iomgr. + // buf address here is to show whether the buffer is aligned or not. + // TODO: handle this error properly + HS_REL_ASSERT(false, "error happens during write: {}, buf address: {}", error.value(), + (const char*)ovf_hdr); + } // NOTE: The start write pointer which is context data pointer plus offset must be dma boundary aligned // TO DO: Might need to differentiate based on data or fast type @@ -488,9 +498,14 @@ void MetaBlkService::write_ovf_blk_to_disk(meta_blk_ovf_hdr* ovf_hdr, const uint size_written += (ovf_hdr->h.context_sz - size_written); } - try { - m_sb_vdev->sync_write(r_cast< const char* >(cur_ptr), cur_size, data_bid[i]); - } catch (std::exception& e) { HS_REL_ASSERT(false, "exception happen during write {}", e.what()); } + auto error = m_sb_vdev->sync_write(r_cast< const char* >(cur_ptr), cur_size, data_bid[i]); + if (error.value()) { + // the offset and buffer length is printed in the error messages of iomgr. + // buf address here is to show whether the buffer is aligned or not. 
+ // TODO: handle this error properly + HS_REL_ASSERT(false, "error happens during write: {}, buf address: {}", error.value(), + r_cast< const char* >(cur_ptr)); + } } if (data_buf) { hs_utils::iobuf_free(data_buf, sisl::buftag::metablk); } @@ -501,9 +516,14 @@ void MetaBlkService::write_ovf_blk_to_disk(meta_blk_ovf_hdr* ovf_hdr, const uint void MetaBlkService::write_meta_blk_to_disk(meta_blk* mblk) { // write current ovf blk to disk; - try { - m_sb_vdev->sync_write((const char*)mblk, block_size(), mblk->hdr.h.bid); - } catch (std::exception& e) { HS_REL_ASSERT(false, "exception happen during write {}", e.what()); } + auto error = m_sb_vdev->sync_write((const char*)mblk, block_size(), mblk->hdr.h.bid); + if (error.value()) { + // the offset and buffer length is printed in the error messages of iomgr. + // buf address here is to show whether the buffer is aligned or not. + // TODO: handle this error properly + HS_REL_ASSERT(false, "error happens during write_meta_blk_to_disk: {}, buf address: {}", error.value(), + (const char*)mblk); + } } // @@ -1122,11 +1142,8 @@ void MetaBlkService::recover_meta_block(meta_blk* mblk) { // if subsystem registered crc protection, verify crc before sending to subsystem; if (itr->second.do_crc) { const auto crc = crc32_ieee(init_crc32, buf->cbytes(), mblk->hdr.h.context_sz); - - HS_REL_ASSERT_EQ(crc, uint32_cast(mblk->hdr.h.crc), - "[type={}], CRC mismatch: {}/{}, on mblk bid: {}, context_sz: {}", mblk->hdr.h.type, crc, - uint32_cast(mblk->hdr.h.crc), mblk->hdr.h.bid.to_string(), - uint64_cast(mblk->hdr.h.context_sz)); + HS_REL_ASSERT_EQ(crc, uint32_cast(mblk->hdr.h.crc), "CRC mismatch: {}/{}, meta_blk details: {}", crc, + uint32_cast(mblk->hdr.h.crc), mblk->hdr.h.to_string()); } else { HS_LOG(DEBUG, metablk, "[type={}] meta blk found with bypassing crc.", mblk->hdr.h.type); } diff --git a/src/lib/meta/meta_sb.hpp b/src/lib/meta/meta_sb.hpp index e392d614d..e3d08faea 100644 --- a/src/lib/meta/meta_sb.hpp +++ 
b/src/lib/meta/meta_sb.hpp @@ -134,10 +134,11 @@ struct meta_blk_hdr_s { uint8_t pad[7]; std::string to_string() const { - return fmt::format("type: {}, version: {}, magic: {}, crc: {}, next_bid: {}, prev_bid: {}, ovf_bid: {}, " - "self_bid: {}, compressed: {}", - type, version, magic, crc, next_bid.to_string(), prev_bid.to_string(), ovf_bid.to_string(), - bid.to_string(), compressed); + return fmt::format( + "magic: {}, type: {}, version: {}, gen_cnt: {}, crc: {}, next_bid: {}, prev_bid: {}, " + "ovf_bid: {}, self_bid: {}, context_sz: {}, compressed_sz: {}, src_context_sz : {}, compressed: {} ", + magic, type, version, gen_cnt, crc, next_bid.to_string(), prev_bid.to_string(), ovf_bid.to_string(), + bid.to_string(), context_sz, compressed_sz, src_context_sz, compressed); } }; #pragma pack() diff --git a/src/tests/test_scripts/CMakeLists.txt b/src/tests/test_scripts/CMakeLists.txt index aafa6842d..9159ca57f 100644 --- a/src/tests/test_scripts/CMakeLists.txt +++ b/src/tests/test_scripts/CMakeLists.txt @@ -1,8 +1,9 @@ file(COPY vol_test.py DESTINATION ${CMAKE_BINARY_DIR}/bin/scripts) file(COPY home_blk_flip.py DESTINATION ${CMAKE_BINARY_DIR}/bin/scripts) file(COPY home_blk_test.py DESTINATION ${CMAKE_BINARY_DIR}/bin/scripts) -file(COPY btree_test.py DESTINATION ${CMAKE_BINARY_DIR}/bin/scripts) +file(COPY index_test.py DESTINATION ${CMAKE_BINARY_DIR}/bin/scripts) file(COPY log_meta_test.py DESTINATION ${CMAKE_BINARY_DIR}/bin/scripts) +file(COPY long_running.py DESTINATION ${CMAKE_BINARY_DIR}/bin/scripts) #add_test(NAME TestVolRecovery COMMAND ${CMAKE_BINARY_DIR}/bin/scripts/vol_test.py --test_suits=recovery --dirpath=${CMAKE_BINARY_DIR}/bin/) #SET_TESTS_PROPERTIES(TestVolRecovery PROPERTIES DEPENDS TestVol) diff --git a/src/tests/test_scripts/btree_test.py b/src/tests/test_scripts/btree_test.py deleted file mode 100755 index 1ffd41d52..000000000 --- a/src/tests/test_scripts/btree_test.py +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/env python3 -## @file btree_test.py - 
-import subprocess -import os -import sys -import getopt -import sys -import time -import random - -sys.stdout.flush() -import requests - -opts, args = getopt.getopt(sys.argv[1:], 'tdlme:', - ['test_suits=', 'dirpath=', 'op_list=', 'log_mods=', 'threads=', 'fibers=', 'preload_size=', - 'op_list=', 'num_entries=', 'num_iters=', 'dev_list=', 'run_time=']) -test_suits = "" -dirpath = "./" -op_list = "" -log_mods = "" -threads = " --num_threads=2" -fibers = " --num_fibers=2" -preload_size = " --preload_size=262144" # 256K -num_entries = " --num_entries=2097152" # 2M -num_iters = " --num_iters=100000000" -run_time = " --run_time=14400" # 4 hours -dev_list = "" - -for opt, arg in opts: - if opt in ('-t', '--test_suits'): - test_suits = arg - print("testing suits (%s)" % arg) - if opt in ('-d', '--dirpath'): - dirpath = arg - print("dir path (%s)" % arg) - if opt in ('-l', '--op_list'): - # --op_list='query:20 put:20 remove:20 range_update:20 range_remove=10' - op_list = arg - print("op list (%s)" % arg) - if opt in ('-m', '--log_mods'): - log_mods = arg - print("log_mods (%s)" % arg) - if opt in ('-f', '--fibers'): - fibers = " --num_fibers=" + arg - print("number of fibers per thread (%s)" % arg) - if opt in ('-p', '--preload_size'): - preload_size = " --preload_size=" + arg - print("preload_size = (%s)" % arg) - if opt in ('-t', '--threads'): - threads = " --num_threads=" + arg - print("number of threads (%s)" % arg) - if opt in ('-n', '--num_entries'): - num_entries = " --num_entries=" + arg - print("number of entries (%s)" % arg) - if opt in ('-i', '--num_iters'): - num_iters = " --num_iters=" + arg - print("number of iterations (%s)" % arg) - if opt in ('-r', '--run_time'): - run_time = " --run_time=" + arg - print("total run time (%s)" % arg) - if opt in ('-v', '--dev_list'): - dev_list = arg - print(("device list (%s)") % (arg)) - -operations = "" -if bool(op_list and op_list.strip()): - operations = ''.join([f' --operation_list={op}' for op in op_list.split()]) - 
-addln_opts = ' ' -if bool(dev_list and dev_list.strip()): - addln_opts += ' --device_list ' - addln_opts += dev_list - -btree_options = num_entries + num_iters + preload_size + fibers + threads + operations + addln_opts -class TestFailedError(Exception): - pass - -def long_runnig_index(type=0): - print("normal test started with (%s)" % (btree_options+ " " + run_time)) - # " --operation_list=query:20 --operation_list=put:20 --operation_list=remove:20" - cmd_opts = "--gtest_filter=BtreeConcurrentTest/" + str(type) +".ConcurrentAllOps --gtest_break_on_failure " + btree_options + " "+log_mods + run_time - subprocess.check_call(dirpath + "test_index_btree " + cmd_opts, stderr=subprocess.STDOUT, shell=True) - print("Long running test completed") - -def function_normal(runtime, cleanup_after_shutdown=False, init_device=False, type=0): - normal_options = "--gtest_filter=BtreeConcurrentTest/" + str(type) +".ConcurrentAllOps --gtest_break_on_failure " + btree_options + " " + log_mods + " --run_time " + str(runtime) - cmd_opts = normal_options + " --cleanup_after_shutdown=" + str(cleanup_after_shutdown) + " --init_device=" + str(init_device) - print("normal test started with (%s)" % cmd_opts) - try: - subprocess.check_call(dirpath + "test_index_btree " + - cmd_opts, stderr=subprocess.STDOUT, shell=True) - except subprocess.CalledProcessError as e: - print("UT failed: {}".format(e)) - raise TestFailedError("UT failed for type {}".format(type)) - -def function_crash(runtime, cleanup_after_shutdown=False, init_device=False, type=0): - normal_options =" --gtest_filter=BtreeConcurrentTest/" + str(type) +".ConcurrentAllOps --gtest_break_on_failure " + btree_options + " "+log_mods +" --enable_crash" - cmd_opts = normal_options +" --cleanup_after_shutdown=" + str(cleanup_after_shutdown) + " --init_device="+str(init_device) +" --run_time " + str(runtime) - subprocess.check_call(dirpath + "test_index_btree " + cmd_opts, stderr=subprocess.STDOUT, shell=True) - -def 
long_running_clean_shutdown(type=0): - normal_run_time = 1 * 1200 # 20 minutes - try: - function_normal(normal_run_time, False, True, type) - for i in range(1,5): - function_normal(normal_run_time, False, False, type) - print("Iteration {} completed successfully".format(i)) - print("All iterations completed successfully for type {}".format(type)) - except TestFailedError as e: - print("Test failed: {}".format(e)) - raise - -def crash_recovery_framework(): - total_run_time = 30 * 3600 - normal_run_time = 10 * 60 - crash_run_time = 10 * 60 - crash_execution_frequency = 0 - - function_normal(normal_run_time, False, True) - elapsed_time = normal_run_time - - while elapsed_time <= total_run_time: - start_time = time.time() - p = random.randint(0, 100) # some distribution - if p < crash_execution_frequency: - function_crash(crash_run_time, False, False) - else: - function_normal(min(normal_run_time, total_run_time - elapsed_time), False, False) - end_time = time.time() - elapsed_time += end_time - start_time - function_normal(0, True, False) #cleanup after shutdown - print("crash recovery test completed") - -def test_index_btree(): - while True: - try: - #TODO enable for other types when fix is available for varlen node types. 
- for type in range(1): - long_running_clean_shutdown(type) - print("long_running_clean_shutdown completed successfully for type {}".format(type)) - except: - print("Test failed: {}".format(e)) - break - - # wait for 1 minute before running again - time.sleep(60) - -def nightly(): - long_runnig_index(0) - long_running_clean_shutdown(0) - # long_running_clean_shutdown() - # crash_recovery_framework() - -# The name of the method to be called is the var test_suits -eval(f"{test_suits}()") diff --git a/src/tests/test_scripts/index_test.py b/src/tests/test_scripts/index_test.py new file mode 100755 index 000000000..fe31f9d23 --- /dev/null +++ b/src/tests/test_scripts/index_test.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python3 +## @file index_test.py +import subprocess +import sys +import getopt +import argparse + + +class TestFailedError(Exception): + pass + + +def run_test(options, type): + cmd_opts = f"--gtest_filter=BtreeConcurrentTest/{type}.ConcurrentAllOps --gtest_break_on_failure --cleanup_after_shutdown={options['cleanup_after_shutdown']} --init_device={options['init_device']} --preload_size={options['preload_size']} {options['log_mods']} --run_time={options['run_time']} --num_iters={options['num_iters']} --num_entries={options['num_entries']} --num_threads={options['threads']} --num_fibers={options['fibers']} {options['dev_list']} {options['op_list']}" + # print(f"Running test with options: {cmd_opts}") + try: + subprocess.check_call(f"{options['dirpath']}test_index_btree {cmd_opts}", stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + print(f"Test failed: {e}") + raise TestFailedError(f"Test failed for type {type}") + print("Test completed") + + +def parse_arguments(): + # Create the parser + parser = argparse.ArgumentParser(description='Parse command line options.') + + # Add arguments with default values + parser.add_argument('--test_suits', help='Test suits to run', default='') + parser.add_argument('--dirpath', help='Directory 
path', default='bin/') + parser.add_argument('--op_list', help='List of operations', default='') + parser.add_argument('--log_mods', help='Log modules', default='') + parser.add_argument('--threads', help='Number of threads', type=int, default=2) + parser.add_argument('--fibers', help='Number of fibers', type=int, default=2) + parser.add_argument('--preload_size', help='Preload size', type=int, default=262144) # 256K + parser.add_argument('--num_entries', help='Number of entries', type=int, default=2097152) # 2M + parser.add_argument('--num_iters', help='Number of iterations', type=int, default=100000000) + parser.add_argument('--run_time', help='Run time in seconds', type=int, default=14400) # 4 hours + parser.add_argument('--dev_list', help='Device list', default='') + parser.add_argument('--cleanup_after_shutdown', help='Cleanup after shutdown', type=bool, default=False) + parser.add_argument('--init_device', help='Initialize device', type=bool, default=True) + + # Parse the known arguments and ignore any unknown arguments + args, unknown = parser.parse_known_args() + + if args.op_list: + args.op_list = ''.join([f' --operation_list={op}' for op in args.op_list.split()]) + if args.dev_list: + args.dev_list = f' --device_list={args.dev_list}' + + options = vars(args) + + return options + + +def long_runnig_index(options, type=0): + print("Long running test started") + print(f"options: {options}") + run_test(options, type) + print("Long running test completed") + + +def long_running_clean_shutdown(options, type=0): + print("Long running clean shutdown started") + options['run_time'] = int(options['run_time']) // 10 # 20 minutes + + try: + run_test(options, type) + options['init_device'] = False + print("Iteration 0 (aka init) completed successfully") + for i in range(1, 10): + run_test(options, type) + print("Iteration {} clean shutdown completed successfully".format(i)) + except TestFailedError as e: + print(f"Test failed: {e}") + raise + print("Long running clean 
shutdown completed") + + +def main(): + options = parse_arguments() + test_suite_name = options['test_suits'] + try: + # Retrieve the function based on the name provided in options['test_suits'] + test_suite_function = globals().get(test_suite_name) + if callable(test_suite_function): + print(f"Running {test_suite_name} with options: {options}") + test_suite_function(options) + else: + print(f"Test suite '{test_suite_name}' is not a callable function.") + except KeyError: + print(f"Test suite '{test_suite_name}' not found.") + + +def long_running(*args): + options = parse_arguments() + long_runnig_index(options) + long_running_clean_shutdown(options) + + +if __name__ == "__main__": + main() diff --git a/src/tests/test_scripts/log_meta_test.py b/src/tests/test_scripts/log_meta_test.py index 765680e31..f6f15a4f4 100755 --- a/src/tests/test_scripts/log_meta_test.py +++ b/src/tests/test_scripts/log_meta_test.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -## @file vol_test.py +## @file log_meta_test.py import subprocess import os @@ -7,91 +7,124 @@ import getopt import sys from multiprocessing import Process + sys.stdout.flush() from time import sleep -import requests +import argparse from threading import Thread -opts,args = getopt.getopt(sys.argv[1:], 'tdlme:', ['test_suits=', 'dirpath=', 'dev_list=', 'log_mods=', 'emulate_hdd=', 'http_port='] ) -test_suits = "" -dirpath = "./" -dev_list = "" -log_mods = "" -http_port = "" - -for opt,arg in opts: - if opt in ('-t', '--test_suits'): - test_suits = arg - print(("testing suits (%s)") % (arg)) - if opt in ('-d', '--dirpath'): - dirpath = arg - print(("dir path (%s)") % (arg)) - if opt in ('-l', '--dev_list'): - dev_list = arg - print(("device list (%s)") % (arg)) - if opt in ('-m', '--log_mods'): - log_mods = arg - print(("log_mods (%s)") % (arg)) - if opt in ('-p', '--http_port'): - http_port = " --http_port " + arg - print(("http_port (%s)") % (arg)) - -addln_opts = ' ' -if bool(dev_list and dev_list.strip()): - addln_opts 
+= ' --device_list ' - addln_opts += dev_list - -if bool(log_mods and log_mods.strip()): - addln_opts += ' --log_mods ' - addln_opts += log_mods - -addln_opts += ' '.join(map(str, args)) - -print("addln_opts: " + addln_opts) - - -def meta_svc_nightly(): + +def parse_arguments(): + # Create the parser + parser = argparse.ArgumentParser(description='Process command line options.') + + # Define the expected command-line arguments + parser.add_argument('-t', '--test_suits', help='Test suits to run', default='') + parser.add_argument('-d', '--dirpath', help='Directory path', default='bin/') + parser.add_argument('-l', '--dev_list', help='Device list', default='') + parser.add_argument('-m', '--log_mods', help='Log modules', default='') + parser.add_argument('-e', '--emulate_hdd', help='Emulate HDD', default='') + parser.add_argument('-p', '--http_port', help='HTTP port', default=5000) + + # Parse the known arguments and ignore any unknown arguments + args, unknown = parser.parse_known_args() + + # Print the values if they are provided + if args.test_suits: + print(f"testing suits ({args.test_suits})") + if args.dirpath: + print(f"dir path ({args.dirpath})") + if args.dev_list: + print(f"device list ({args.dev_list})") + if args.log_mods: + print(f"log_mods ({args.log_mods})") + if args.http_port: + print(f"http_port ({args.http_port})") + + # Construct additional options string + addln_opts = '' + if args.dev_list: + addln_opts += f' --device_list {args.dev_list}' + if args.log_mods: + addln_opts += f' --log_mods {args.log_mods}' + if args.http_port: + addln_opts += f' --http_port {args.http_port}' + if args.dev_list: + args.dev_list = f' --device_list={args.dev_list}' + # Return the parsed arguments and additional options + return args, addln_opts + + +def meta_nightly(options, addln_opts): print("meta blk store test started") cmd_opts = "--gtest_filter=VMetaBlkMgrTest.CompressionBackoff" - subprocess.check_call(dirpath + "test_meta_blk_mgr " + cmd_opts + http_port + 
addln_opts, stderr=subprocess.STDOUT, shell=True) - + subprocess.check_call(options.dirpath + "test_meta_blk_mgr " + cmd_opts + addln_opts, stderr=subprocess.STDOUT, + shell=True) + cmd_opts = "--gtest_filter=VMetaBlkMgrTest.RecoveryFromBadData" - subprocess.check_call(dirpath + "test_meta_blk_mgr " + cmd_opts + http_port + addln_opts, stderr=subprocess.STDOUT, shell=True) + subprocess.check_call(options.dirpath + "test_meta_blk_mgr " + cmd_opts + addln_opts, stderr=subprocess.STDOUT, + shell=True) cmd_opts = "--gtest_filter=VMetaBlkMgrTest.min_drive_size_test" - subprocess.check_call(dirpath + "test_meta_blk_mgr " + cmd_opts + http_port + addln_opts, stderr=subprocess.STDOUT, shell=True) + subprocess.check_call(options.dirpath + "test_meta_blk_mgr " + cmd_opts + addln_opts, stderr=subprocess.STDOUT, + shell=True) cmd_opts = "--gtest_filter=VMetaBlkMgrTest.single_read_test" - subprocess.check_call(dirpath + "test_meta_blk_mgr " + cmd_opts + http_port + addln_opts, stderr=subprocess.STDOUT, shell=True) - + subprocess.check_call(options.dirpath + "test_meta_blk_mgr " + cmd_opts + addln_opts, stderr=subprocess.STDOUT, + shell=True) + cmd_opts = "--run_time=7200 --num_io=1000000" - subprocess.check_call(dirpath + "test_meta_blk_mgr " + cmd_opts + http_port + addln_opts, stderr=subprocess.STDOUT, shell=True) - + subprocess.check_call(options.dirpath + "test_meta_blk_mgr " + cmd_opts + addln_opts, stderr=subprocess.STDOUT, + shell=True) + cmd_opts = "--min_write_size=65536 --max_write_size=2097152 --run_time=14400 --num_io=1000000" - subprocess.check_call(dirpath + "test_meta_blk_mgr " + cmd_opts + http_port + addln_opts, stderr=subprocess.STDOUT, shell=True) - + subprocess.check_call(options.dirpath + "test_meta_blk_mgr " + cmd_opts + addln_opts, stderr=subprocess.STDOUT, + shell=True) + cmd_opts = "--min_write_size=10485760 --max_write_size=104857600 --bitmap=1" - subprocess.check_call(dirpath + "test_meta_blk_mgr " + cmd_opts + http_port + addln_opts, 
stderr=subprocess.STDOUT, shell=True) - - cmd_opts = "--gtest_filter=VMetaBlkMgrTest.write_to_full_test" # write to file instead of real disk to save time; - subprocess.check_call(dirpath + "test_meta_blk_mgr " + cmd_opts + http_port + addln_opts, stderr=subprocess.STDOUT, shell=True) + subprocess.check_call(options.dirpath + "test_meta_blk_mgr " + cmd_opts + addln_opts, stderr=subprocess.STDOUT, + shell=True) + + cmd_opts = "--gtest_filter=VMetaBlkMgrTest.write_to_full_test" # write to file instead of real disk to save time; + subprocess.check_call(options.dirpath + "test_meta_blk_mgr " + cmd_opts + addln_opts, stderr=subprocess.STDOUT, + shell=True) print("meta blk store test completed") -def logstore_nightly(): - print("log store test started") +def logstore_nightly(options, addln_opts): + print("log store test started") cmd_opts = "--iterations=10" - subprocess.check_call(dirpath + "test_log_store " + cmd_opts + http_port, stderr=subprocess.STDOUT, shell=True) + subprocess.check_call(options.dirpath + "test_log_store " + cmd_opts + addln_opts, stderr=subprocess.STDOUT, shell=True) print("log store test completed") -def nightly(): - logstore_nightly() - sleep(5) - meta_svce_nightly() - sleep(5) +def logstore_long_running(*args): + options, addln_opts = parse_arguments() + print(f"Running logstore_long_running with options: {options} and additional options: {addln_opts}") + logstore_nightly(options, addln_opts) + + +def meta_long_running(*args): + options, addln_opts = parse_arguments() + print(f"Running meta_long_running with options: {options} and additional options: {addln_opts}") + meta_nightly(options, addln_opts) + + +def main(): + options, addln_opts = parse_arguments() + test_suite_name = options.test_suits + try: + # Retrieve the function based on the name provided in options.test_suits + test_suite_function = globals().get(test_suite_name) + if callable(test_suite_function): + print(f"Running {test_suite_name} with options: {options}") + 
test_suite_function(options, addln_opts) + else: + print(f"Test suite '{test_suite_name}' is not a callable function.") + except KeyError: + print(f"Test suite '{test_suite_name}' not found.") -# The name of the method to be called is the var test_suits -eval(f"{test_suits}()") +if __name__ == "__main__": + main() diff --git a/src/tests/test_scripts/long_running.py b/src/tests/test_scripts/long_running.py new file mode 100755 index 000000000..cf14b7980 --- /dev/null +++ b/src/tests/test_scripts/long_running.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 +## @file long_running.py +# + +import sys +import argparse +import index_test +import log_meta_test + + +def parse_arguments(): + parser = argparse.ArgumentParser(description='Run long-running tests.') + parser.add_argument('-t', '--test_suits', help='Test suits to run', default='') + args, service_args = parser.parse_known_args() + return args, service_args + + +def index_long_running(*args): + index_test.long_running(*args) + + +def logstore_long_running(*args): + log_meta_test.logstore_long_running(*args) + + +def meta_long_running(*args): + log_meta_test.meta_long_running(*args) + +# def data_long_running(*args): +# data.long_running(*args) + + +def main(): + args, service_args = parse_arguments() + + # Check if the test_suits argument is provided and is valid + if args.test_suits: + if args.test_suits == 'index_long_running': + index_long_running(service_args) + elif args.test_suits == 'logstore_long_running': + logstore_long_running(service_args) + elif args.test_suits == 'meta_long_running': + meta_long_running(service_args) + # elif args.test_suits == 'data_long_running': + # data_long_running(service_args) + else: + print(f"Unknown test suite: {args.test_suits}") + sys.exit(1) + else: + print("No test suite specified. Use the --test_suits option.") + sys.exit(1) + + +if __name__ == "__main__": + main() \ No newline at end of file