Bugfix issue #49 (#117)

Merged 7 commits on Mar 27, 2024
Changes from all commits
19 changes: 10 additions & 9 deletions src/BenchmarkManager.py
@@ -144,7 +144,7 @@ def _create_store_dir(self, store_dir: str = None, tag: str = None) -> None:
f"{datetime.today().strftime('%Y-%m-%d-%H-%M-%S')}"
Path(self.store_dir).mkdir(parents=True, exist_ok=True)
self._set_logger()

def _resume_store_dir(self, store_dir) -> None:
self.store_dir = store_dir
self._set_logger()
@@ -177,7 +177,7 @@ def orchestrate_benchmark(self, benchmark_config_manager: ConfigManager, app_mod
self.interrupted_results_path = interrupted_results_path
if interrupted_results_path and not store_dir:
self._resume_store_dir(os.path.dirname(interrupted_results_path))
else:
else:
self._create_store_dir(store_dir, tag=benchmark_config_manager.get_config()["application"]["name"].lower())
benchmark_config_manager.save(self.store_dir)
benchmark_config_manager.load_config(app_modules)
@@ -196,7 +196,7 @@ def orchestrate_benchmark(self, benchmark_config_manager: ConfigManager, app_mod
results = self._collect_all_results()
self._save_as_json(results)

def run_benchmark(self, benchmark_backlog: list, repetitions: int):
def run_benchmark(self, benchmark_backlog: list, repetitions: int): # pylint: disable=R0915
"""
Goes through the benchmark backlog, which contains all the benchmarks to execute.

@@ -247,7 +247,8 @@ def run_benchmark(self, benchmark_backlog: list, repetitions: int):
git_revision_number, git_uncommitted_changes,
i, repetitions)
self.application.metrics.set_module_config(backlog_item["config"])
instruction, problem, preprocessing_time = preprocess(self.application, None, backlog_item["config"],
instruction, problem, preprocessing_time = preprocess(self.application, None,
backlog_item["config"],
store_dir=path, rep_count=i,
previous_job_info=job_info)
self.application.metrics.set_preprocessing_time(preprocessing_time)
@@ -331,13 +332,14 @@ def run_benchmark(self, benchmark_backlog: list, repetitions: int):
rel_path = Path(self.store_dir).relative_to(os.getcwd())
except ValueError:
rel_path = self.store_dir
logging.info(f"====== There are interrupted jobs. You may resume them by running QUARK with")
logging.info("====== There are interrupted jobs. You may resume them by running QUARK with")
logging.info(f"====== --resume-dir={rel_path}")
logging.info(80*"=")
logging.info("")


def traverse_config(self, module: dict, input_data: any, path: str, rep_count: int, previous_job_info: dict = None) -> (any, BenchmarkRecord):
def traverse_config(self, module: dict, input_data: any, path: str, rep_count: int, previous_job_info:
dict = None) -> (any, BenchmarkRecord):
"""
Executes a benchmark by traversing down the initialized config recursively until it reaches the end. Then
traverses up again. Once it reaches the root/application, a benchmark run is finished.
@@ -357,14 +359,14 @@ def traverse_config(self, module: dict, input_data: any, path: str, rep_count: i
# Only the value of the dict is needed (dict has only one key)
module = module[next(iter(module))]
module_instance: Core = module["instance"]

submodule_job_info = None
if previous_job_info and previous_job_info.get("submodule"):
assert module['name'] == previous_job_info["submodule"]["module_name"], \
f"asyncronous job info given, but no information about module {module['name']} stored in it" #TODO!!
if 'submodule' in previous_job_info and previous_job_info['submodule']:
submodule_job_info = previous_job_info['submodule']

module_instance.metrics.set_module_config(module["config"])
instruction, module_instance.preprocessed_input, preprocessing_time\
= preprocess(module_instance, input_data,
@@ -378,7 +380,6 @@ def traverse_config(self, module: dict, input_data: any, path: str, rep_count: i
benchmark_record = self.benchmark_record_template.copy()
postprocessing_time = 0.0
if instruction == Instruction.PROCEED:

# Check if end of the chain is reached
if not module["submodule"]:
# If we reach the end of the chain we create the benchmark record, fill it and then pass it up
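Taken together, the BenchmarkManager changes let an interrupted run be resumed into its original store directory instead of always creating a fresh timestamped one. Below is a minimal sketch of that branching with simplified, hypothetical stand-ins (the real methods also configure logging and save the benchmark config):

import os
from datetime import datetime
from pathlib import Path

class StoreDirSketch:
    # Hypothetical stand-in for BenchmarkManager's store-dir handling.

    def _create_store_dir(self, store_dir: str = None, tag: str = None) -> None:
        # Fresh runs get a new timestamped directory.
        base = store_dir if store_dir else os.getcwd()
        name = f"{tag}-" if tag else ""
        self.store_dir = os.path.join(
            base, f"{name}{datetime.today().strftime('%Y-%m-%d-%H-%M-%S')}")
        Path(self.store_dir).mkdir(parents=True, exist_ok=True)

    def _resume_store_dir(self, store_dir) -> None:
        # Resumed runs reuse the directory of the interrupted run.
        self.store_dir = store_dir

    def orchestrate(self, store_dir: str = None,
                    interrupted_results_path: str = None) -> None:
        if interrupted_results_path and not store_dir:
            # results.json lives inside the old store dir, so its parent
            # directory is the directory to resume into.
            self._resume_store_dir(os.path.dirname(interrupted_results_path))
        else:
            self._create_store_dir(store_dir, tag="tsp")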
3 changes: 2 additions & 1 deletion src/main.py
@@ -198,7 +198,8 @@ def handle_benchmark_run(args: argparse.Namespace) -> None:
logging.info("Selected config is:")
config_manager.print()
else:
interrupted_results_path = None if args.resume_dir is None else os.path.join(args.resume_dir, "results.json")
interrupted_results_path = None if args.resume_dir is None else os.path.join(args.resume_dir,
"results.json")
benchmark_manager.orchestrate_benchmark(config_manager, app_modules,
interrupted_results_path=interrupted_results_path)
comm.Barrier()
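The main.py change only rewraps the long conditional; functionally, a --resume-dir argument is translated into the path of the interrupted run's results.json. A small self-contained sketch (the directory name is made up for illustration):

import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument("--resume-dir", default=None,
                    help="store directory of an interrupted QUARK run")
args = parser.parse_args(["--resume-dir", "benchmark_runs/tsp-2024-03-27-12-00-00"])

# None means a fresh run; otherwise resume from the stored results.json.
interrupted_results_path = None if args.resume_dir is None \
    else os.path.join(args.resume_dir, "results.json")
print(interrupted_results_path)
# benchmark_runs/tsp-2024-03-27-12-00-00/results.json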
8 changes: 4 additions & 4 deletions src/modules/applications/optimization/TSP/mappings/ISING.py
@@ -346,17 +346,17 @@ def _map_qiskit(graph: nx.Graph, config: Config) -> (dict, float):

return {"J": j_matrix, "t": t_matrix}, end_time_measurement(start)

def reverse_map(self, solution: dict) -> (dict, float):
def reverse_map(self, solution: any) -> (dict, float):
"""
Maps the solution back to the representation needed by the TSP class for validation/evaluation.

:param solution: dictionary containing the solution
:type solution: dict
:param solution: list or array containing the solution
:type solution: any
:return: solution mapped accordingly, time it took to map it
:rtype: tuple(dict, float)
"""
start = start_time_measurement()
if np.any(solution == "-1"): # ising model output from Braket QAOA
if -1 in solution: # ising model output from Braket QAOA
solution = self._convert_ising_to_qubo(solution)
elif self.config["mapping"] == "pyqubo" or self.config["mapping"] == "ocean":
logging.debug("Flip bits in solutions to unify different mappings")
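The reverse_map fix widens the Braket QAOA detection: solutions may arrive as lists or numpy arrays of Ising spins, so the membership test -1 in solution replaces the old string comparison np.any(solution == "-1"), which only matched string entries. A sketch of the idea with a hypothetical stand-in for _convert_ising_to_qubo (the real conversion may differ):

import numpy as np

def convert_ising_to_qubo(solution):
    # Hypothetical: map Ising spins {-1, +1} to QUBO bits {0, 1}.
    arr = np.asarray(solution)
    return np.where(arr == -1, 0, arr)

braket_output = np.array([1, -1, -1, 1])  # Ising-style spins from Braket QAOA
if -1 in braket_output:  # works for lists and numpy arrays alike
    braket_output = convert_ising_to_qubo(braket_output)
print(braket_output)  # [1 0 0 1]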
5 changes: 5 additions & 0 deletions src/modules/solvers/QAOA.py
@@ -151,6 +151,11 @@ def run(self, mapped_problem: any, device_wrapper: any, config: Config, **kwargs
"""

j = mapped_problem['J']
if np.any(np.iscomplex(j)):
logging.warning("The problem matrix of the QAOA solver contains imaginary numbers."
"This may lead to an error later in the run.")
else:
j = np.real(j)

# set up the problem
n_qubits = j.shape[0]
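The new guard in QAOA.run distinguishes a genuinely complex problem matrix (warn, because later steps may fail) from a real-valued matrix that merely has a complex dtype (safe to cast down with np.real). A standalone numpy illustration of that distinction:

import logging
import numpy as np

j = np.array([[0.0 + 0.0j, 1.5 + 0.0j],
              [1.5 + 0.0j, 0.0 + 0.0j]])  # complex dtype, purely real values

if np.any(np.iscomplex(j)):
    # np.iscomplex flags only entries with a nonzero imaginary part.
    logging.warning("The problem matrix of the QAOA solver contains imaginary numbers. "
                    "This may lead to an error later in the run.")
else:
    j = np.real(j)  # drop the zero imaginary parts
print(j.dtype)  # float64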
19 changes: 14 additions & 5 deletions src/modules/solvers/QiskitQAOA.py
@@ -91,7 +91,10 @@ def get_parameter_options(self) -> dict:
},
"iterations": { # number measurements to make on circuit
"values": [1, 5, 10, 20, 50, 75],
"description": "How many iterations do you need? Warning: When using the IBM Eagle Device you should only choose a lower number of iterations, since a high number would lead to a waiting time that could take up to mulitple days!"
"description": "How many iterations do you need? Warning: When using\
the IBM Eagle Device you should only choose a lower number of\
iterations, since a high number would lead to a waiting time that\
could take up to mulitple days!"
},
"depth": {
"values": [2, 3, 4, 5, 10, 20],
@@ -103,7 +106,9 @@ def get_parameter_options(self) -> dict:
},
"optimizer": {
"values": ["POWELL", "SPSA", "COBYLA"],
"description": "Which Qiskit solver should be used? Warning: When using the IBM Eagle Device you should not use the SPSA optimizer, since it is not suited for only one evaluation!"
"description": "Which Qiskit solver should be used? Warning: When\
using the IBM Eagle Device you should not use the SPSA optimizer,\
since it is not suited for only one evaluation!"
}
}

@@ -115,7 +120,9 @@ def get_parameter_options(self) -> dict:
},
"iterations": { # number measurements to make on circuit
"values": [1, 5, 10, 20, 50, 75],
"description": "How many iterations do you need? Warning: When using the IBM Eagle Device you should only choose a lower number of iterations, since a high number would lead to a waiting time that could take up to mulitple days!"
"description": "How many iterations do you need? Warning: When using the IBM Eagle Device you\
should only choose a lower number of iterations, since a high number would lead to a waiting \
ime that could take up to mulitple days!"
},
"depth": {
"values": [2, 3, 4, 5, 10, 20],
@@ -127,7 +134,8 @@ def get_parameter_options(self) -> dict:
},
"optimizer": {
"values": ["POWELL", "SPSA", "COBYLA"],
"description": "Which Qiskit solver should be used? Warning: When using the IBM Eagle Device you should not use the SPSA optimizer for a low number of iterations!"
"description": "Which Qiskit solver should be used? Warning: When using the IBM Eagle Device\
you should not use the SPSA optimizer for a low number of iterations!"
}
}

@@ -191,7 +199,8 @@ def run(self, mapped_problem: any, device_wrapper: any, config: Config, **kwargs
if config["optimizer"] == "COBYLA":
optimizer = COBYLA(maxiter=config["iterations"])
elif config["optimizer"] == "POWELL":
optimizer = POWELL(maxiter=config["iterations"], maxfev=config["iterations"] if device_wrapper.device == 'ibm_eagle' else None)
optimizer = POWELL(maxiter=config["iterations"], maxfev=config["iterations"] if
device_wrapper.device == 'ibm_eagle' else None)
elif config["optimizer"] == "SPSA":
optimizer = SPSA(maxiter=config["iterations"])
if config["method"] == "vqe":
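The reworked POWELL branch additionally caps the number of function evaluations (maxfev) on the IBM Eagle device, so a single optimizer run cannot queue for days. A sketch of the dispatch, assuming the optimizer classes from qiskit_algorithms (the import path varies across Qiskit versions):

from qiskit_algorithms.optimizers import COBYLA, POWELL, SPSA

config = {"optimizer": "POWELL", "iterations": 20}  # illustrative values
device_name = "ibm_eagle"  # stand-in for device_wrapper.device

if config["optimizer"] == "COBYLA":
    optimizer = COBYLA(maxiter=config["iterations"])
elif config["optimizer"] == "POWELL":
    # Cap maxfev only on the Eagle device; None keeps POWELL's default.
    optimizer = POWELL(maxiter=config["iterations"],
                       maxfev=config["iterations"] if device_name == "ibm_eagle" else None)
elif config["optimizer"] == "SPSA":
    optimizer = SPSA(maxiter=config["iterations"])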