From 8cb66374a7523b70adebde63d0af9553c0071ea0 Mon Sep 17 00:00:00 2001
From: Joe Naegele
Date: Mon, 25 Nov 2024 21:14:17 +0000
Subject: [PATCH] Remove unused integration test and config files

---
 core/CMakeLists.txt                                   |   5 -
 core/config/distributed_default.xml                   | 111 ---
 core/config/distributed_generic_default.xml           | 110 ---
 core/config/distributed_image_default.xml             |  91 --
 gadgets/T1/CMakeLists.txt                             |   2 +-
 gadgets/T1/config/MOLLI_T1_Moco_istore.xml            | 219 -----
 gadgets/mri_core/CMakeLists.txt                       |   2 -
 gadgets/mri_core/config/default_short.xml             |  74 --
 gadgets/mri_core/config/isalive.xml                   |   6 -
 test/integration/mem_watch.py                         | 507 -----
 test/integration/run_gadgetron_test.py                | 838 ------
 test/integration/run_tests.py                         | 341 ---
 test/integration/stats_to_junit.py                    |  30 -
 test/integration/temp/README                          |   1 -
 test/integration/temp/all_chains.txt                  | 127 ---
 .../integration/temp/check_untested_chains.py         |  37 -
 .../temp/definitely_untested_gadgets.txt              |  44 -
 test/integration/temp/list_tested_gadgets.py          |  42 -
 test/integration/temp/tested_chains.txt               |  55 --
 test/integration/temp/tested_gadgets.txt              |  74 --
 test/integration/temp/untested_chains.txt             |  75 --
 21 files changed, 1 insertion(+), 2790 deletions(-)
 delete mode 100644 core/config/distributed_default.xml
 delete mode 100644 core/config/distributed_generic_default.xml
 delete mode 100644 core/config/distributed_image_default.xml
 delete mode 100644 gadgets/T1/config/MOLLI_T1_Moco_istore.xml
 delete mode 100644 gadgets/mri_core/config/default_short.xml
 delete mode 100644 gadgets/mri_core/config/isalive.xml
 delete mode 100755 test/integration/mem_watch.py
 delete mode 100755 test/integration/run_gadgetron_test.py
 delete mode 100755 test/integration/run_tests.py
 delete mode 100644 test/integration/stats_to_junit.py
 delete mode 100644 test/integration/temp/README
 delete mode 100644 test/integration/temp/all_chains.txt
 delete mode 100644 test/integration/temp/check_untested_chains.py
 delete mode 100644 test/integration/temp/definitely_untested_gadgets.txt
 delete mode 100644 test/integration/temp/list_tested_gadgets.py
 delete mode 100644 test/integration/temp/tested_chains.txt
 delete mode 100644 test/integration/temp/tested_gadgets.txt
 delete mode 100644 test/integration/temp/untested_chains.txt

diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt
index f72176e48..78e08e9ab 100644
--- a/core/CMakeLists.txt
+++ b/core/CMakeLists.txt
@@ -55,10 +55,5 @@ install(FILES
         io/primitives.h
         io/primitives.hpp
         DESTINATION ${GADGETRON_INSTALL_INCLUDE_PATH}/io COMPONENT main)
 
-install(FILES
-        config/distributed_default.xml
-        config/distributed_generic_default.xml
-        config/distributed_image_default.xml
-        DESTINATION ${GADGETRON_INSTALL_CONFIG_PATH} COMPONENT main)
 set(GADGETRON_BUILD_RPATH "${CMAKE_CURRENT_BINARY_DIR};${GADGETRON_BUILD_RPATH}" PARENT_SCOPE)

diff --git a/core/config/distributed_default.xml b/core/config/distributed_default.xml
deleted file mode 100644
index fbe24c0e5..000000000
--- a/core/config/distributed_default.xml
+++ /dev/null
@@ -1,111 +0,0 @@
 [Deleted 111-line version-2 chain configuration. Readers: MRIImageReader and GadgetIsmrmrdAcquisitionMessageReader (gadgetron_mricore). Writers: MRIImageWriter and GadgetIsmrmrdAcquisitionMessageWriter (gadgetron_mricore). A Distribute node (AcquisitionDistributor, gadgetron_core_distributed) precedes RemoveROOversamplingGadget -> AcquisitionAccumulateTriggerGadget (trigger_dimension=repetition, sorting_dimension=slice) -> BucketToBufferGadget (split_slices=true) -> SimpleReconGadget -> ImageArraySplitGadget -> ExtractGadget, followed by ImageSortGadget (sorting_dimension=repetition) and ImageIndexGadget.]

diff --git a/core/config/distributed_generic_default.xml b/core/config/distributed_generic_default.xml
deleted file mode 100644
index d1c3df0fd..000000000
--- a/core/config/distributed_generic_default.xml
+++ /dev/null
@@ -1,110 +0,0 @@
 [Deleted 110-line version-2 chain configuration. Local stream: RemoveROOversamplingGadget -> AcquisitionAccumulateTriggerGadget (trigger_dimension=repetition, sorting_dimension=slice) -> BucketToBufferGadget (split_slices=true). Readers: MRIImageReader (gadgetron_mricore) and BufferReader (gadgetron_core_readers). Writers: BufferWriter (gadgetron_core_writers) and MRIImageWriter (gadgetron_mricore). A Distribute node (BufferDistributor, gadgetron_core_distributed) precedes SimpleReconGadget -> ImageArraySplitGadget -> ExtractGadget, followed by ImageSortGadget (sorting_dimension=repetition) and ImageIndexGadget.]

diff --git a/core/config/distributed_image_default.xml b/core/config/distributed_image_default.xml
deleted file mode 100644
index 0767dd304..000000000
--- a/core/config/distributed_image_default.xml
+++ /dev/null
@@ -1,91 +0,0 @@
 [Deleted 91-line version-2 chain configuration: RemoveROOversamplingGadget -> AcquisitionAccumulateTriggerGadget (trigger_dimension=repetition, sorting_dimension=slice) -> BucketToBufferGadget (split_slices=true) -> SimpleReconGadget -> ImageArraySplitGadget, with MRIImageReader/MRIImageWriter (gadgetron_mricore) and a distributed image stage running ComplexToFloatGadget and ImageSortGadget (sorting_dimension=repetition).]

diff --git a/gadgets/T1/CMakeLists.txt b/gadgets/T1/CMakeLists.txt
index 9f4d13caf..dd678c769 100644
--- a/gadgets/T1/CMakeLists.txt
+++ b/gadgets/T1/CMakeLists.txt
@@ -1,5 +1,5 @@
-set(gadgetron_t1_config_files config/MOLLI_T1_Moco.xml config/MOLLI_T1_Moco_istore.xml)
+set(gadgetron_t1_config_files config/MOLLI_T1_Moco.xml)
 
 source_group(config FILES ${gadgetron_t1_config_files})
 
 
diff --git a/gadgets/T1/config/MOLLI_T1_Moco_istore.xml b/gadgets/T1/config/MOLLI_T1_Moco_istore.xml
deleted file mode 100644
index e80ac816b..000000000
--- a/gadgets/T1/config/MOLLI_T1_Moco_istore.xml
+++ /dev/null
@@ -1,219 +0,0 @@
 [Deleted 219-line version-2 chain configuration for MOLLI T1 Moco reconstruction: IsmrmrdDumpGadget (file_prefix=T1_MOLLI) -> NoiseAdjustGadget -> AsymmetricEchoAdjustROGadget -> RemoveROOversamplingGadget -> AcquisitionAccumulateTriggerGadget (trigger_dimension=slice) -> BucketToBufferGadget (N_dimension=set, S_dimension=contrast, split_slices=false, ignore_segment=true) -> GenericReconCartesianReferencePrepGadget -> GenericReconEigenChannelGadget (upstream coil compression, threshold 0.002) -> GenericReconCartesianGrappaGadget (Inati coil maps, gfactor output disabled) -> GenericReconPartialFourierHandlingPOCSGadget (6 iterations, threshold 0.01, transit bands 24/16) -> GenericReconKSpaceFilteringGadget (Gaussian RO/E1/E2 filters, sigma 1.5, width 0.15) -> GenericReconFieldOfViewAdjustmentGadget -> GenericReconImageArrayScalingGadget (constant scaling factor 10.0, dedicated 100.0, intensity range 64-4095) -> T1MocoGadget (gadgetron_t1) -> ImageArraySplitGadget -> ComplexToFloatGadget -> FloatToUShortGadget (max_intensity=32767, min_intensity=0).]

diff --git a/gadgets/mri_core/CMakeLists.txt b/gadgets/mri_core/CMakeLists.txt
index 3da67313c..da5e5c690 100644
--- a/gadgets/mri_core/CMakeLists.txt
+++ b/gadgets/mri_core/CMakeLists.txt
@@ -85,10 +85,8 @@ set(gadgetron_mricore_src_files
 
 set(gadgetron_mricore_config_files
     config/default.xml
-    config/default_short.xml
     config/default_optimized.xml
     config/default_measurement_dependencies.xml
-    config/isalive.xml
     )
 
 set(gadgetron_mricore_generic_recon_gadgets_header_files
     generic_recon_gadgets/GenericReconBase.h

diff --git a/gadgets/mri_core/config/default_short.xml b/gadgets/mri_core/config/default_short.xml
deleted file mode 100644
index 66d628b5f..000000000
--- a/gadgets/mri_core/config/default_short.xml
+++ /dev/null
@@ -1,74 +0,0 @@
 [Deleted 74-line chain configuration: RemoveROOversamplingGadget -> AcquisitionAccumulateTriggerGadget (trigger_dimension=repetition, sorting_dimension=slice) -> BucketToBufferGadget (split_slices=true) -> SimpleReconGadget -> ImageArraySplitGadget -> ExtractGadget -> AutoScaleGadget -> FloatToUShortGadget.]

diff --git a/gadgets/mri_core/config/isalive.xml b/gadgets/mri_core/config/isalive.xml
deleted file mode 100644
index 46766bcd4..000000000
--- a/gadgets/mri_core/config/isalive.xml
+++ /dev/null
@@ -1,6 +0,0 @@
 [Deleted 6-line minimal configuration containing no gadget chain.]

diff --git a/test/integration/mem_watch.py b/test/integration/mem_watch.py
deleted file mode 100755
index ca549fe3f..000000000
--- a/test/integration/mem_watch.py
+++ /dev/null
@@ -1,507 +0,0 @@
 [Deleted 507-line helper script that watched a running Gadgetron process for memory leaks while integration tests ran: it resolved the Gadgetron pid from its listening port via ss, snapshotted /proc/<pid>/status, /proc/<pid>/maps and pmap before and after each test case, classified memory mappings into heap, stack, text and other regions (using GADGETRON_HEAP_SIZE, GADGETRON_STACK_SIZE and GADGETRON_PAGE_SIZE defaults to recognize unlabelled heaps and stacks), invoked run_gadgetron_test.py for each test, and printed colored per-test and summary reports of virtual memory deltas.]
diff --git a/test/integration/run_gadgetron_test.py b/test/integration/run_gadgetron_test.py
deleted file mode 100755
index 257d33b02..000000000
--- a/test/integration/run_gadgetron_test.py
+++ /dev/null
@@ -1,838 +0,0 @@
 [Deleted 838-line single-case integration test driver. It parsed a test-case file with configparser (defaults: IsmrmrdParameterMap_Siemens.xml/.xsl, value and scale comparison thresholds of 0.01, node_port_base 9050, dataset/reference groups named "dataset"), prepared input either by copying prepared ISMRMRD data or converting Siemens raw data with siemens_to_ismrmrd, started an mrd-storage-server, a Gadgetron instance and optional distributed worker instances (or targeted an external host), then sent data either through gadgetron_ismrmrd_client or through a streaming bash pipeline (input adapter | ismrmrd_to_mrd | gadgetron --from_stream -c <config> | mrd_to_ismrmrd | output adapter). It validated output images against reference files (relative norm and scale thresholds), compared image headers field by field with per-attribute equality/approximation rules, compared saved datasets (headers, acquisitions, waveforms, images), required Gadgetron stdout logs to be empty, and wrote per-test timing and status to stats.json. The module-level comment noted that os.environ is captured before importing h5py, because importing h5py on Windows alters the environment in a way that breaks spawned gadgetron child processes.]

diff --git a/test/integration/run_tests.py b/test/integration/run_tests.py
deleted file mode 100755
index 3dc796d37..000000000
--- a/test/integration/run_tests.py
+++ /dev/null
@@ -1,341 +0,0 @@
 [Deleted 341-line test-suite driver. It queried Gadgetron capabilities (gadgetron --info, or gadgetron_ismrmrd_client -q -Q gadgetron::info for an external instance), read each case's tags and requirements (Python/Julia/MATLAB support, system memory, CUDA support, GPU memory), skipped cases whose requirements or tag filters were not satisfied, ran run_gadgetron_test.py per case with an optional timeout and optional log echo on failure, and reported passed/failed/skipped counts, total processing time, and an optional per-test CSV of stats.]
{} tests skipped.".format(len(passed), len(failed), len(skipped))) - print("Total processing time: {:.2f} seconds.".format(sum(stat['processing_time'] for stat in stats))) - - sys.exit(bool(failed)) - - -if __name__ == '__main__': - main() diff --git a/test/integration/stats_to_junit.py b/test/integration/stats_to_junit.py deleted file mode 100644 index 939edf5f0..000000000 --- a/test/integration/stats_to_junit.py +++ /dev/null @@ -1,30 +0,0 @@ -import argparse -import pathlib -import csv -from junitparser import TestCase, TestSuite, JUnitXml, Skipped, Failure - -def convert_csv_to_junit(csv_filename, junit_filename): - suite = TestSuite('Gadgetron Integration') - with open(csv_filename) as csv_file: - statsreader = csv.DictReader(csv_file) - for row in statsreader: - case = TestCase(name=row['test'],time=row['processing_time']) - if row['status'] != "Passed": - case.result = [Failure()] - suite.add_testcase(case) - xml = JUnitXml() - xml.add_testsuite(suite) - xml.write(str(junit_filename)) - - -def main(): - parser = argparse.ArgumentParser(description='Converts Gadgetron stats to jUNIT xml', - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('-i', '--input', type=pathlib.Path, help='Input CSV file') - parser.add_argument('-o', '--output', type=pathlib.Path, help='Output junit xml') - args = parser.parse_args() - - convert_csv_to_junit(args.input,args.output) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/test/integration/temp/README b/test/integration/temp/README deleted file mode 100644 index a951b83c5..000000000 --- a/test/integration/temp/README +++ /dev/null @@ -1 +0,0 @@ -TODO Joe: Remove this directory once finished upgrading to mrd2 \ No newline at end of file diff --git a/test/integration/temp/all_chains.txt b/test/integration/temp/all_chains.txt deleted file mode 100644 index 30bfca9cf..000000000 --- a/test/integration/temp/all_chains.txt +++ /dev/null @@ -1,127 +0,0 @@ -gadgets/bart/BART_Recon_cloud_Standard.xml -gadgets/bart/BART_Recon_cloud.xml -gadgets/bart/BART_Recon.xml -gadgets/cmr/config/BinningCine/CMR_2DT_RTCine_KspaceBinning_Cloud.xml -gadgets/cmr/config/BinningCine/CMR_2DT_RTCine_KspaceBinning_MultiSeries_Cloud.xml -gadgets/cmr/config/BinningCine/CMR_2DT_RTCine_KspaceBinning_MultiSeries.xml -gadgets/cmr/config/BinningCine/CMR_2DT_RTCine_KspaceBinning.xml -gadgets/cmr/config/LandmarkDetection/CMR_Image_Chain_RTCine_LAX_AI.xml -gadgets/cmr/config/LandmarkDetection/CMR_RTCine_LAX_AI.xml -gadgets/cmr/config/LandmarkDetection/CMR_RTCine_Recon.xml -gadgets/cmr/config/LandmarkDetection/stream_image_array.xml -gadgets/cmr/config/Mapping/CMR_2DT_T1Mapping_SASHA.xml -gadgets/dicom/dicom.xml -gadgets/epi/epi_gtplus_grappa.xml -gadgets/epi/epi.xml -gadgets/examples/config/external_connect_example.xml -gadgets/examples/config/external_equivalent_example.xml -gadgets/examples/config/external_example.xml -gadgets/examples/config/external_julia_acquisition_example.xml -gadgets/examples/config/external_matlab_acquisition_example.xml -gadgets/examples/config/external_matlab_bucket_example.xml -gadgets/examples/config/external_matlab_buffer_example.xml -gadgets/examples/config/external_matlab_tiny_example.xml -gadgets/examples/config/external_python_acquisition_example.xml -gadgets/examples/config/external_python_bucket_example.xml -gadgets/examples/config/external_python_buffer_example.xml -gadgets/examples/config/parallel_bypass_example.xml -gadgets/examples/config/stream_complex_to_float.xml 
-gadgets/examples/config/stream_float_to_short.xml -gadgets/examples/config/stream_image_array_scaling.xml -gadgets/examples/config/stream_image_array_split.xml -gadgets/fatwater/config/Generic_Cartesian_Grappa_FatWater.xml -gadgets/grappa/config/grappa_cpu.xml -gadgets/grappa/config/grappa_float_cpu.xml -gadgets/grappa/config/grappa_float.xml -gadgets/grappa/config/grappa_unoptimized_float.xml -gadgets/grappa/config/grappa_unoptimized.xml -gadgets/grappa/config/grappa.xml -gadgets/hyper/CMRT3D.xml -gadgets/hyper/CMRT.xml -gadgets/hyper/FS-CSI.xml -gadgets/hyper/NFFT2D.xml -gadgets/interventional_mri/grappa_device_cpu.xml -gadgets/interventional_mri/grappa_device.xml -gadgets/moco/config/cpureg_cartesian_averaging.xml -gadgets/moco/config/gpureg_cartesian_averaging.xml -gadgets/mri_core/config/default_measurement_dependencies_ismrmrd_storage.xml -gadgets/mri_core/config/default_measurement_dependencies.xml -gadgets/mri_core/config/default_optimized.xml -gadgets/mri_core/config/default_short.xml -gadgets/mri_core/config/default.xml -gadgets/mri_core/config/gtquery.xml -gadgets/mri_core/config/isalive.xml -gadgets/mri_core/config/ismrmrd_dump.xml -gadgets/mri_core/config/NoiseSummary.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_FFT.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_AI.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_Cine_Denoise.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_Complex.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_EPI_AVE.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_EPI.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_ImageArray.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_RealTimeCine_Cloud.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_RealTimeCine.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_SNR.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_T2W.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Image_Chain_FFT.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_NonLinear_Spirit_RealTimeCine_Cloud.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_NonLinear_Spirit_RealTimeCine.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_RandomSampling_NonLinear_Spirit_RealTimeCine_Cloud.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_RandomSampling_NonLinear_Spirit_RealTimeCine.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Spirit_RealTimeCine.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Spirit_SASHA.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Spirit.xml -gadgets/mri_noncartesian/config/Generic_CPU_Gridding_Recon.xml -gadgets/mri_noncartesian/config/Generic_Spiral_Flag.xml -gadgets/mri_noncartesian/config/Generic_Spiral_SNR.xml -gadgets/mri_noncartesian/config/Generic_Spiral.xml -gadgets/plplot/Generic_Cartesian_Grappa_SNR_CoilQA.xml -gadgets/pmri/config/generic_gpu_ktsense_singleshot.xml -gadgets/pmri/config/generic_gpusense_cg_singleshot.xml -gadgets/pmri/config/generic_gpusense_cg.xml -gadgets/pmri/config/generic_gpusense_nlcg_singleshot.xml -gadgets/pmri/config/generic_gpusense_sb_singleshot.xml 
-gadgets/python/config/Generic_Cartesian_Grappa_RealTimeCine_Python.xml
-gadgets/python/legacy/config/pseudoreplica.xml
-gadgets/python/legacy/config/python_buckets.xml
-gadgets/python/legacy/config/python_image_array_recon.xml
-gadgets/python/legacy/config/python_passthrough.xml
-gadgets/python/legacy/config/python_short.xml
-gadgets/radial/config/fixed_radial_mode0_gpu_ktsense.xml
-gadgets/radial/config/fixed_radial_mode0_gpusense_cg_unoptimized.xml
-gadgets/radial/config/fixed_radial_mode0_gpusense_cg.xml
-gadgets/radial/config/fixed_radial_mode0_gpusense_sb_unoptimized.xml
-gadgets/radial/config/fixed_radial_mode0_gpusense_sb.xml
-gadgets/radial/config/fixed_radial_mode0_realtime.xml
-gadgets/radial/config/fixed_radial_mode1_gpu_ktsense.xml
-gadgets/radial/config/fixed_radial_mode1_gpusense_cg_unoptimized.xml
-gadgets/radial/config/fixed_radial_mode1_gpusense_cg.xml
-gadgets/radial/config/fixed_radial_mode1_gpusense_sb_unoptimized.xml
-gadgets/radial/config/fixed_radial_mode1_gpusense_sb.xml
-gadgets/radial/config/fixed_radial_mode1_realtime.xml
-gadgets/radial/config/golden_radial_mode2_gpu_ktsense.xml
-gadgets/radial/config/golden_radial_mode2_gpusense_cg_unoptimized.xml
-gadgets/radial/config/golden_radial_mode2_gpusense_cg.xml
-gadgets/radial/config/golden_radial_mode2_gpusense_nlcg_unoptimized.xml
-gadgets/radial/config/golden_radial_mode2_gpusense_nlcg.xml
-gadgets/radial/config/golden_radial_mode2_gpusense_sb_unoptimized.xml
-gadgets/radial/config/golden_radial_mode2_gpusense_sb.xml
-gadgets/radial/config/golden_radial_mode2_os_realtime.xml
-gadgets/radial/config/golden_radial_mode2_realtime.xml
-gadgets/radial/config/golden_radial_mode3_gpusense_cg.xml
-gadgets/radial/config/golden_radial_mode3_gpusense_sb.xml
-gadgets/radial/config/golden_radial_mode3_os_realtime.xml
-gadgets/radial/config/spirit.xml
-gadgets/spiral/config/deblurring_recon_acctrig.xml
-gadgets/spiral/config/spiral_flow_generic_gpusense_cg.xml
-gadgets/spiral/config/spiral_flow_generic_gpusense_sb.xml
-gadgets/spiral/config/spiral_flow_gpusense_cg_ecg.xml
-gadgets/spiral/config/spiral_flow_gpusense_cg_unoptimized.xml
-gadgets/spiral/config/spiral_flow_gpusense_cg.xml
-gadgets/spiral/config/spiral_flow_gpusense_sb_unoptimized.xml
-gadgets/spiral/config/spiral_flow_gpusense_sb.xml
-gadgets/spiral/config/spiral_interactive.xml
-gadgets/T1/config/MOLLI_T1_Moco_istore.xml
-gadgets/T1/config/MOLLI_T1_Moco.xml
diff --git a/test/integration/temp/check_untested_chains.py b/test/integration/temp/check_untested_chains.py
deleted file mode 100644
index f570042fb..000000000
--- a/test/integration/temp/check_untested_chains.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import os
-import re
-import subprocess
-
-
-classnames_in_untested_chains = []
-with open("untested_chains.txt", "r") as f:
-    for line in f:
-        path = line.strip()
-        chain_name = os.path.basename(path)
-
-        if subprocess.call(["grep", "-r", chain_name, "./cases/"]) == 0:
-            print(f"Found chain {chain_name} in test cases")
-
-        path = os.path.join("..", "..", path)
-        with open(path, "r") as ch:
-            for line in ch:
-                if "<classname>" in line:
-                    classname = line.strip()
-                    match = re.search(r'<classname>(.+)</classname>', line)
-                    if match:
-                        classname = match.group(1)
-                        classnames_in_untested_chains.append(classname)
-
-tested_classnames = []
-with open("tested_gadgets.txt", "r") as f:
-    for line in f:
-        tested_classnames.append(line.strip())
-
-
-definitely_untested_classnames = set(classnames_in_untested_chains) - set(tested_classnames)
-definitely_untested_classnames = sorted(definitely_untested_classnames)
-
-print("Writing definitely untested gadgets to definitely_untested_gadgets.txt")
-with open("definitely_untested_gadgets.txt", "w") as f:
-    for classname in definitely_untested_classnames:
-        f.write(classname + "\n")
\ No newline at end of file
diff --git a/test/integration/temp/definitely_untested_gadgets.txt b/test/integration/temp/definitely_untested_gadgets.txt
deleted file mode 100644
index 1c3d0c7a4..000000000
--- a/test/integration/temp/definitely_untested_gadgets.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-BartGadget
-CMRT3DGadget
-CMRTGadget
-CPUGriddingReconGadget
-CSIGadget
-CartesianToGenericGadget
-CollectGadget
-DependencyQueryGadget
-DependencyQueryWriter
-DeviceChannelSplitterGadgetUSHORT
-DicomFinishGadget
-DicomImageWriter
-FatWaterGadget
-FlagTriggerGadget
-GenericReconCartesianFFTGadget
-GenericReconCartesianGrappaAIGadget
-GrappaGadget
-GrappaUnmixingGadget
-GriddingReconGadget
-GtPlusAccumulatorWorkOrderTriggerGadget
-GtPlusRecon2DTGadget
-ImageAccumulatorGadget
-ImageFFTGadget
-ImageFinishGadgetCPLX
-ImageFinishGadgetFLOAT
-IsmrmrdAcquisitionDistributeGadget
-MRIImageWriterCPLX
-MRIImageWriterFLOAT
-MRIImageWriterUSHORT
-NFFT2DGadget
-NoiseAdjustGadget_unoptimized
-NoiseCovariancePlottingGadget
-NoiseSummaryGadget
-PseudoReplicatorGadget
-SpiralToGenericGadget
-T1MocoGadget
-cpuRegistrationAveragingGadget2D
-gpuCSICoilEstimationGadget
-gpuCgSpiritGadget
-gpuGenericSensePrepGadget
-gpuLALMSenseGadget
-gpuNlcgSenseGadget
-gpuRadialSpiritPrepGadget
-gpuRegistrationAveragingGadget2D
diff --git a/test/integration/temp/list_tested_gadgets.py b/test/integration/temp/list_tested_gadgets.py
deleted file mode 100644
index ef884f637..000000000
--- a/test/integration/temp/list_tested_gadgets.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os
-import re
-import glob
-import subprocess
-
-grep = subprocess.check_output(["grep", "-r", "configuration=", "./cases"])
-grep = str(grep, "utf-8")
-tested_chain_names = []
-for line in grep.splitlines():
-    cfg, xml = line.split(":configuration=")
-    tested_chain_names.append(xml)
-
-
-tested_chain_paths = []
-for chain in tested_chain_names:
-    path = subprocess.check_output(["find", "../..", "-name", chain])
-    path = str(path, "utf-8").splitlines()
-    assert(len(path) == 1)
-    tested_chain_paths.append(path[0])
-
-classnames = []
-for path in tested_chain_paths:
-    with open(path) as f:
-        for line in f:
-            match = re.search(r'<classname>(.+)</classname>', line)
-            if match:
-                classnames.append(match.group(1))
-
-unique_chains = sorted(set(chain[6:] for chain in tested_chain_paths))
-unique_classnames = sorted(set(classnames))
-
-chains_file = "tested_chains.txt"
-print(f"Writing tested chains to {chains_file}")
-with open("tested_chains.txt", "w") as f:
-    for chain in unique_chains:
-        f.write(chain + "\n")
-
-gadgets_file = "tested_gadgets.txt"
-print(f"Writing tested Gadgets to {gadgets_file}")
-with open("tested_gadgets.txt", "w") as f:
-    for classname in unique_classnames:
-        f.write(classname + "\n")
\ No newline at end of file
diff --git a/test/integration/temp/tested_chains.txt b/test/integration/temp/tested_chains.txt
deleted file mode 100644
index db9562855..000000000
--- a/test/integration/temp/tested_chains.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-core/config/distributed_default.xml
-core/config/distributed_generic_default.xml
-core/config/distributed_image_default.xml
-gadgets/cmr/config/BinningCine/CMR_2DT_RTCine_KspaceBinning.xml
-gadgets/cmr/config/BinningCine/CMR_2DT_RTCine_KspaceBinning_Cloud.xml
-gadgets/cmr/config/LandmarkDetection/CMR_Image_Chain_RTCine_LAX_AI.xml
-gadgets/cmr/config/LandmarkDetection/CMR_RTCine_LAX_AI.xml -gadgets/cmr/config/LandmarkDetection/CMR_RTCine_Recon.xml -gadgets/cmr/config/LandmarkDetection/stream_image_array.xml -gadgets/cmr/config/Mapping/CMR_2DT_T1Mapping_SASHA.xml -gadgets/epi/epi.xml -gadgets/examples/config/external_equivalent_example.xml -gadgets/examples/config/external_julia_acquisition_example.xml -gadgets/examples/config/external_matlab_acquisition_example.xml -gadgets/examples/config/external_matlab_bucket_example.xml -gadgets/examples/config/external_matlab_buffer_example.xml -gadgets/examples/config/external_matlab_tiny_example.xml -gadgets/examples/config/external_python_acquisition_example.xml -gadgets/examples/config/external_python_bucket_example.xml -gadgets/examples/config/external_python_buffer_example.xml -gadgets/examples/config/parallel_bypass_example.xml -gadgets/examples/config/stream_complex_to_float.xml -gadgets/examples/config/stream_float_to_short.xml -gadgets/examples/config/stream_image_array_scaling.xml -gadgets/examples/config/stream_image_array_split.xml -gadgets/grappa/config/grappa_float.xml -gadgets/grappa/config/grappa_float_cpu.xml -gadgets/mri_core/config/default.xml -gadgets/mri_core/config/default_measurement_dependencies.xml -gadgets/mri_core/config/default_optimized.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_Cine_Denoise.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_Complex.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_EPI_AVE.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_ImageArray.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_SNR.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_T2W.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_NonLinear_Spirit_RealTimeCine.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_RandomSampling_NonLinear_Spirit_RealTimeCine.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Spirit_SASHA.xml -gadgets/python/config/Generic_Cartesian_Grappa_RealTimeCine_Python.xml -gadgets/python/legacy/config/python_buckets.xml -gadgets/python/legacy/config/python_image_array_recon.xml -gadgets/python/legacy/config/python_passthrough.xml -gadgets/python/legacy/config/python_short.xml -gadgets/radial/config/fixed_radial_mode1_gpu_ktsense.xml -gadgets/radial/config/fixed_radial_mode1_gpusense_cg.xml -gadgets/radial/config/fixed_radial_mode1_realtime.xml -gadgets/radial/config/golden_radial_mode2_gpu_ktsense.xml -gadgets/radial/config/golden_radial_mode2_gpusense_cg.xml -gadgets/radial/config/golden_radial_mode2_gpusense_sb.xml -gadgets/radial/config/golden_radial_mode2_realtime.xml -gadgets/spiral/config/deblurring_recon_acctrig.xml -gadgets/spiral/config/spiral_flow_gpusense_cg.xml -gadgets/spiral/config/spiral_flow_gpusense_sb.xml diff --git a/test/integration/temp/tested_gadgets.txt b/test/integration/temp/tested_gadgets.txt deleted file mode 100644 index 6266da41e..000000000 --- a/test/integration/temp/tested_gadgets.txt +++ /dev/null @@ -1,74 +0,0 @@ -AcquisitionAccumulateTriggerGadget -AcquisitionDistributor -AcquisitionFanout -AcquisitionReader -AsymmetricEchoAdjustROGadget -AugmentImageMetadataGadget -AutoScaleGadget -BucketToBufferGadget -BufferDistributor -BufferReader -BufferWriter -CmrCartesianKSpaceBinningCineGadget -CmrParametricT1SRMappingGadget 
-CmrRealTimeLAXCineAIAnalysisGadget -CoilReductionGadget -CombineGadget -ComplexToFloatGadget -DenoiseGadget -EPICorrGadget -EPIReconXGadget -ExtractGadget -FFTGadget -FFTXGadget -FloatToUShortGadget -FlowPhaseSubtractionGadget -GadgetIsmrmrdAcquisitionMessageReader -GadgetIsmrmrdAcquisitionMessageWriter -GadgetIsmrmrdWaveformMessageReader -GenericReconCartesianGrappaGadget -GenericReconCartesianNonLinearSpirit2DTGadget -GenericReconCartesianReferencePrepGadget -GenericReconCartesianSpiritGadget -GenericReconEigenChannelGadget -GenericReconFieldOfViewAdjustmentGadget -GenericReconImageArrayScalingGadget -GenericReconKSpaceFilteringGadget -GenericReconNoiseStdMapComputingGadget -GenericReconPartialFourierHandlingFilterGadget -GenericReconPartialFourierHandlingPOCSGadget -ImageAccumulator -ImageArraySplitGadget -ImageFanout -ImageFinishGadget -ImageIndexGadget -ImageInverter -ImageLayerer -ImageSortGadget -ImageWriter -ImageWriterGadgetCPLX -ImageWriterGadgetFLOAT -IsmrmrdDumpGadget -IsmrmrdImageArrayReader -IsmrmrdImageArrayWriter -MRIImageReader -MRIImageWriter -MaxwellCorrectionGadget -NoiseAdjustGadget -OneEncodingGadget -PCACoilGadget -PhysioInterpolationGadget -PythonGadget -RemoveROOversamplingGadget -SimpleReconGadget -SliceAccumulator -Unmixing -WaveformReader -cpuWeightsCalculator -gpuCgKtSenseGadget -gpuCgSenseGadget -gpuRadialSensePrepGadget -gpuSbSenseGadget -gpuSpiralDeblurGadget -gpuSpiralSensePrepGadget -gpuWeightsCalculator diff --git a/test/integration/temp/untested_chains.txt b/test/integration/temp/untested_chains.txt deleted file mode 100644 index 168e01f4f..000000000 --- a/test/integration/temp/untested_chains.txt +++ /dev/null @@ -1,75 +0,0 @@ -gadgets/bart/BART_Recon_cloud_Standard.xml -gadgets/bart/BART_Recon_cloud.xml -gadgets/bart/BART_Recon.xml -gadgets/cmr/config/BinningCine/CMR_2DT_RTCine_KspaceBinning_MultiSeries_Cloud.xml -gadgets/cmr/config/BinningCine/CMR_2DT_RTCine_KspaceBinning_MultiSeries.xml -gadgets/dicom/dicom.xml -gadgets/epi/epi_gtplus_grappa.xml -gadgets/examples/config/external_connect_example.xml -gadgets/examples/config/external_example.xml -gadgets/fatwater/config/Generic_Cartesian_Grappa_FatWater.xml -gadgets/grappa/config/grappa_cpu.xml -gadgets/grappa/config/grappa_unoptimized_float.xml -gadgets/grappa/config/grappa_unoptimized.xml -gadgets/grappa/config/grappa.xml -gadgets/hyper/CMRT3D.xml -gadgets/hyper/CMRT.xml -gadgets/hyper/FS-CSI.xml -gadgets/hyper/NFFT2D.xml -gadgets/interventional_mri/grappa_device_cpu.xml -gadgets/interventional_mri/grappa_device.xml -gadgets/moco/config/cpureg_cartesian_averaging.xml -gadgets/moco/config/gpureg_cartesian_averaging.xml -gadgets/mri_core/config/default_measurement_dependencies_ismrmrd_storage.xml -gadgets/mri_core/config/default_short.xml -gadgets/mri_core/config/gtquery.xml -gadgets/mri_core/config/isalive.xml -gadgets/mri_core/config/ismrmrd_dump.xml -gadgets/mri_core/config/NoiseSummary.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_FFT.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_AI.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_EPI.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_RealTimeCine_Cloud.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Grappa_RealTimeCine.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Image_Chain_FFT.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_NonLinear_Spirit_RealTimeCine_Cloud.xml 
-gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_RandomSampling_NonLinear_Spirit_RealTimeCine_Cloud.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Spirit_RealTimeCine.xml -gadgets/mri_core/generic_recon_gadgets/config/Generic_Cartesian_Spirit.xml -gadgets/mri_noncartesian/config/Generic_CPU_Gridding_Recon.xml -gadgets/mri_noncartesian/config/Generic_Spiral_Flag.xml -gadgets/mri_noncartesian/config/Generic_Spiral_SNR.xml -gadgets/mri_noncartesian/config/Generic_Spiral.xml -gadgets/plplot/Generic_Cartesian_Grappa_SNR_CoilQA.xml -gadgets/pmri/config/generic_gpu_ktsense_singleshot.xml -gadgets/pmri/config/generic_gpusense_cg_singleshot.xml -gadgets/pmri/config/generic_gpusense_cg.xml -gadgets/pmri/config/generic_gpusense_nlcg_singleshot.xml -gadgets/pmri/config/generic_gpusense_sb_singleshot.xml -gadgets/python/legacy/config/pseudoreplica.xml -gadgets/radial/config/fixed_radial_mode0_gpu_ktsense.xml -gadgets/radial/config/fixed_radial_mode0_gpusense_cg_unoptimized.xml -gadgets/radial/config/fixed_radial_mode0_gpusense_cg.xml -gadgets/radial/config/fixed_radial_mode0_gpusense_sb_unoptimized.xml -gadgets/radial/config/fixed_radial_mode0_gpusense_sb.xml -gadgets/radial/config/fixed_radial_mode0_realtime.xml -gadgets/radial/config/fixed_radial_mode1_gpusense_cg_unoptimized.xml -gadgets/radial/config/fixed_radial_mode1_gpusense_sb_unoptimized.xml -gadgets/radial/config/fixed_radial_mode1_gpusense_sb.xml -gadgets/radial/config/golden_radial_mode2_gpusense_cg_unoptimized.xml -gadgets/radial/config/golden_radial_mode2_gpusense_nlcg_unoptimized.xml -gadgets/radial/config/golden_radial_mode2_gpusense_nlcg.xml -gadgets/radial/config/golden_radial_mode2_gpusense_sb_unoptimized.xml -gadgets/radial/config/golden_radial_mode2_os_realtime.xml -gadgets/radial/config/golden_radial_mode3_gpusense_cg.xml -gadgets/radial/config/golden_radial_mode3_gpusense_sb.xml -gadgets/radial/config/golden_radial_mode3_os_realtime.xml -gadgets/radial/config/spirit.xml -gadgets/spiral/config/spiral_flow_generic_gpusense_cg.xml -gadgets/spiral/config/spiral_flow_generic_gpusense_sb.xml -gadgets/spiral/config/spiral_flow_gpusense_cg_ecg.xml -gadgets/spiral/config/spiral_flow_gpusense_cg_unoptimized.xml -gadgets/spiral/config/spiral_flow_gpusense_sb_unoptimized.xml -gadgets/spiral/config/spiral_interactive.xml -gadgets/T1/config/MOLLI_T1_Moco_istore.xml -gadgets/T1/config/MOLLI_T1_Moco.xml