diff --git a/src/BenchmarkManager.py b/src/BenchmarkManager.py index 4cb31cea..817be578 100644 --- a/src/BenchmarkManager.py +++ b/src/BenchmarkManager.py @@ -144,7 +144,7 @@ def _create_store_dir(self, store_dir: str = None, tag: str = None) -> None: f"{datetime.today().strftime('%Y-%m-%d-%H-%M-%S')}" Path(self.store_dir).mkdir(parents=True, exist_ok=True) self._set_logger() - + def _resume_store_dir(self, store_dir) -> None: self.store_dir = store_dir self._set_logger() @@ -177,7 +177,7 @@ def orchestrate_benchmark(self, benchmark_config_manager: ConfigManager, app_mod self.interrupted_results_path = interrupted_results_path if interrupted_results_path and not store_dir: self._resume_store_dir(os.path.dirname(interrupted_results_path)) - else: + else: self._create_store_dir(store_dir, tag=benchmark_config_manager.get_config()["application"]["name"].lower()) benchmark_config_manager.save(self.store_dir) benchmark_config_manager.load_config(app_modules) @@ -196,7 +196,7 @@ def orchestrate_benchmark(self, benchmark_config_manager: ConfigManager, app_mod results = self._collect_all_results() self._save_as_json(results) - def run_benchmark(self, benchmark_backlog: list, repetitions: int): + def run_benchmark(self, benchmark_backlog: list, repetitions: int): # pylint: disable=R0915 """ Goes through the benchmark backlog, which contains all the benchmarks to execute. 
@@ -247,7 +247,8 @@ def run_benchmark(self, benchmark_backlog: list, repetitions: int): git_revision_number, git_uncommitted_changes, i, repetitions) self.application.metrics.set_module_config(backlog_item["config"]) - instruction, problem, preprocessing_time = preprocess(self.application, None, backlog_item["config"], + instruction, problem, preprocessing_time = preprocess(self.application, None, + backlog_item["config"], store_dir=path, rep_count=i, previous_job_info=job_info) self.application.metrics.set_preprocessing_time(preprocessing_time) @@ -331,13 +332,14 @@ def run_benchmark(self, benchmark_backlog: list, repetitions: int): rel_path = Path(self.store_dir).relative_to(os.getcwd()) except ValueError: rel_path = self.store_dir - logging.info(f"====== There are interrupted jobs. You may resume them by running QUARK with") + logging.info("====== There are interrupted jobs. You may resume them by running QUARK with") logging.info(f"====== --resume-dir={rel_path}") logging.info(80*"=") logging.info("") - def traverse_config(self, module: dict, input_data: any, path: str, rep_count: int, previous_job_info: dict = None) -> (any, BenchmarkRecord): + def traverse_config(self, module: dict, input_data: any, path: str, rep_count: int, previous_job_info: + dict = None) -> (any, BenchmarkRecord): """ Executes a benchmark by traversing down the initialized config recursively until it reaches the end. Then traverses up again. Once it reaches the root/application, a benchmark run is finished. 
@@ -357,14 +359,14 @@ def traverse_config(self, module: dict, input_data: any, path: str, rep_count: i # Only the value of the dict is needed (dict has only one key) module = module[next(iter(module))] module_instance: Core = module["instance"] - + submodule_job_info = None if previous_job_info and previous_job_info.get("submodule"): assert module['name'] == previous_job_info["submodule"]["module_name"], \ f"asyncronous job info given, but no information about module {module['name']} stored in it" #TODO!! if 'submodule' in previous_job_info and previous_job_info['submodule']: submodule_job_info = previous_job_info['submodule'] - + module_instance.metrics.set_module_config(module["config"]) instruction, module_instance.preprocessed_input, preprocessing_time\ = preprocess(module_instance, input_data, @@ -378,7 +380,6 @@ def traverse_config(self, module: dict, input_data: any, path: str, rep_count: i benchmark_record = self.benchmark_record_template.copy() postprocessing_time = 0.0 if instruction == Instruction.PROCEED: - # Check if end of the chain is reached if not module["submodule"]: # If we reach the end of the chain we create the benchmark record, fill it and then pass it up diff --git a/src/main.py b/src/main.py index 14b48327..de5445a4 100644 --- a/src/main.py +++ b/src/main.py @@ -198,7 +198,8 @@ def handle_benchmark_run(args: argparse.Namespace) -> None: logging.info("Selected config is:") config_manager.print() else: - interrupted_results_path = None if args.resume_dir is None else os.path.join(args.resume_dir, "results.json") + interrupted_results_path = None if args.resume_dir is None else os.path.join(args.resume_dir, + "results.json") benchmark_manager.orchestrate_benchmark(config_manager, app_modules, interrupted_results_path=interrupted_results_path) comm.Barrier() diff --git a/src/modules/applications/optimization/TSP/mappings/ISING.py b/src/modules/applications/optimization/TSP/mappings/ISING.py index 3f69bdbb..2fd6a663 100644 --- 
a/src/modules/applications/optimization/TSP/mappings/ISING.py +++ b/src/modules/applications/optimization/TSP/mappings/ISING.py @@ -346,17 +346,17 @@ def _map_qiskit(graph: nx.Graph, config: Config) -> (dict, float): return {"J": j_matrix, "t": t_matrix}, end_time_measurement(start) - def reverse_map(self, solution: dict) -> (dict, float): + def reverse_map(self, solution: any) -> (dict, float): """ Maps the solution back to the representation needed by the TSP class for validation/evaluation. - :param solution: dictionary containing the solution - :type solution: dict + :param solution: list or array containing the solution + :type solution: any :return: solution mapped accordingly, time it took to map it :rtype: tuple(dict, float) """ start = start_time_measurement() - if np.any(solution == "-1"): # ising model output from Braket QAOA + if -1 in solution: # ising model output from Braket QAOA solution = self._convert_ising_to_qubo(solution) elif self.config["mapping"] == "pyqubo" or self.config["mapping"] == "ocean": logging.debug("Flip bits in solutions to unify different mappings") diff --git a/src/modules/solvers/QAOA.py b/src/modules/solvers/QAOA.py index 15a5e9cd..d30ae5bf 100644 --- a/src/modules/solvers/QAOA.py +++ b/src/modules/solvers/QAOA.py @@ -151,6 +151,11 @@ def run(self, mapped_problem: any, device_wrapper: any, config: Config, **kwargs """ j = mapped_problem['J'] + if np.any(np.iscomplex(j)): + logging.warning("The problem matrix of the QAOA solver contains imaginary numbers." 
+ "This may lead to an error later in the run.") + else: + j = np.real(j) # set up the problem n_qubits = j.shape[0] diff --git a/src/modules/solvers/QiskitQAOA.py b/src/modules/solvers/QiskitQAOA.py index 8fbd1774..fb8b1b35 100644 --- a/src/modules/solvers/QiskitQAOA.py +++ b/src/modules/solvers/QiskitQAOA.py @@ -91,7 +91,10 @@ def get_parameter_options(self) -> dict: }, "iterations": { # number measurements to make on circuit "values": [1, 5, 10, 20, 50, 75], - "description": "How many iterations do you need? Warning: When using the IBM Eagle Device you should only choose a lower number of iterations, since a high number would lead to a waiting time that could take up to mulitple days!" + "description": "How many iterations do you need? Warning: When using\ + the IBM Eagle Device you should only choose a lower number of\ + iterations, since a high number would lead to a waiting time that\ + could take up to mulitple days!" }, "depth": { "values": [2, 3, 4, 5, 10, 20], @@ -103,7 +106,9 @@ def get_parameter_options(self) -> dict: }, "optimizer": { "values": ["POWELL", "SPSA", "COBYLA"], - "description": "Which Qiskit solver should be used? Warning: When using the IBM Eagle Device you should not use the SPSA optimizer, since it is not suited for only one evaluation!" + "description": "Which Qiskit solver should be used? Warning: When\ + using the IBM Eagle Device you should not use the SPSA optimizer,\ + since it is not suited for only one evaluation!" } } @@ -115,7 +120,9 @@ def get_parameter_options(self) -> dict: }, "iterations": { # number measurements to make on circuit "values": [1, 5, 10, 20, 50, 75], - "description": "How many iterations do you need? Warning: When using the IBM Eagle Device you should only choose a lower number of iterations, since a high number would lead to a waiting time that could take up to mulitple days!" + "description": "How many iterations do you need? 
Warning: When using the IBM Eagle Device you\ + should only choose a lower number of iterations, since a high number would lead to a waiting \ + time that could take up to multiple days!" }, "depth": { "values": [2, 3, 4, 5, 10, 20], @@ -127,7 +134,8 @@ def get_parameter_options(self) -> dict: }, "optimizer": { "values": ["POWELL", "SPSA", "COBYLA"], - "description": "Which Qiskit solver should be used? Warning: When using the IBM Eagle Device you should not use the SPSA optimizer for a low number of iterations!" + "description": "Which Qiskit solver should be used? Warning: When using the IBM Eagle Device\ + you should not use the SPSA optimizer for a low number of iterations!" } } @@ -191,7 +199,8 @@ def run(self, mapped_problem: any, device_wrapper: any, config: Config, **kwargs if config["optimizer"] == "COBYLA": optimizer = COBYLA(maxiter=config["iterations"]) elif config["optimizer"] == "POWELL": - optimizer = POWELL(maxiter=config["iterations"], maxfev=config["iterations"] if device_wrapper.device == 'ibm_eagle' else None) + optimizer = POWELL(maxiter=config["iterations"], maxfev=config["iterations"] if + device_wrapper.device == 'ibm_eagle' else None) elif config["optimizer"] == "SPSA": optimizer = SPSA(maxiter=config["iterations"]) if config["method"] == "vqe":