From ea51ba4c8d99b432addd0eb3bac729cf1c56abba Mon Sep 17 00:00:00 2001 From: gonfeco Date: Thu, 7 Sep 2023 15:25:46 +0200 Subject: [PATCH] Updated BTC_04_PH --- tnbs/BTC_04_PH/PH/btc_ph.py | 50 +++- tnbs/BTC_04_PH/PH/notebooks/01_Ansatzes.ipynb | 95 +++++++- .../PH/notebooks/03_ParentHamiltonian.ipynb | 203 ++++++++++++++++ tnbs/BTC_04_PH/my_benchmark_execution.py | 217 ++++++++++++++---- tnbs/BTC_04_PH/my_benchmark_info.py | 80 ++++++- tnbs/BTC_04_PH/my_benchmark_summary.py | 82 ++++--- tnbs/BTC_04_PH/my_environment_info.py | 8 +- tnbs/BTC_04_PH/neasqc_benchmark.py | 9 +- 8 files changed, 648 insertions(+), 96 deletions(-) create mode 100644 tnbs/BTC_04_PH/PH/notebooks/03_ParentHamiltonian.ipynb diff --git a/tnbs/BTC_04_PH/PH/btc_ph.py b/tnbs/BTC_04_PH/PH/btc_ph.py index a279e90..3c84e50 100644 --- a/tnbs/BTC_04_PH/PH/btc_ph.py +++ b/tnbs/BTC_04_PH/PH/btc_ph.py @@ -7,6 +7,7 @@ import time import uuid import sys +import os import numpy as np import pandas as pd from qat.core import Observable, Term @@ -24,6 +25,35 @@ logger = logging.getLogger('__name__') +def create_folder(folder_name): + """ + Check if folder exist. If not the function creates it + + Parameters + ---------- + + folder_name : str + Name of the folder + + Returns + ---------- + + folder_name : str + Name of the folder + """ + + # Check if the folder already exists + if not os.path.exists(folder_name): + # If it does not exist, create the folder + os.mkdir(folder_name) + print(f"Folder '{folder_name}' created.") + else: + print(f"Folder '{folder_name}' already exists.") + if folder_name.endswith('/') != True: + folder_name = folder_name + "/" + return folder_name + + def get_qpu(qpu=None): """ Function for selecting solver. @@ -32,7 +62,8 @@ def get_qpu(qpu=None): ---------- qpu : str - * qlmass: for trying to use QLM as a Service connection to CESGA QLM + * qlmass: for trying to use QLM as a Service connection + to CESGA QLM * python: for using PyLinalg simulator. * c: for using CLinalg simulator * mps: for using mps @@ -141,13 +172,15 @@ def ph_btc(**kwargs): quantum_time = tock - tick_q elapsed_time = tock - tick text = ['gse', 'elapsed_time', 'quantum_time'] - res = pd.DataFrame( + pdf_info = pd.DataFrame(kwargs, index=[0]) + result = pd.DataFrame( [gse, elapsed_time, quantum_time], index=text ).T + result = pd.concat([pdf_info, result], axis =1 ) if save: - res.to_csv(folder + filename_base+'_result.csv', sep=';') - return res + result.to_csv(folder + filename_base+'_result.csv', sep=';') + return result if __name__ == "__main__": import argparse @@ -207,7 +240,10 @@ def ph_btc(**kwargs): args = parser.parse_args() print(args) dict_ph = vars(args) - dict_ph.update({"qpu_ansatz": get_qpu(dict_ph['qpu_ansatz'])}) - dict_ph.update({"qpu_ph": get_qpu(dict_ph['qpu_ph'])}) + dict_ph.update({"qpu_ansatz": get_qpu(dict_ph["qpu_ansatz"])}) + dict_ph.update({"qpu_ph": get_qpu(dict_ph["qpu_ph"])}) print(dict_ph) - ph_btc(**dict_ph) + if dict_ph["save"]: + dict_ph.update({"folder": create_folder(dict_ph["folder"])}) + result = ph_btc(**dict_ph) + print(result) diff --git a/tnbs/BTC_04_PH/PH/notebooks/01_Ansatzes.ipynb b/tnbs/BTC_04_PH/PH/notebooks/01_Ansatzes.ipynb index db911fc..1ab7315 100644 --- a/tnbs/BTC_04_PH/PH/notebooks/01_Ansatzes.ipynb +++ b/tnbs/BTC_04_PH/PH/notebooks/01_Ansatzes.ipynb @@ -433,6 +433,16 @@ "lda_circ = make_ldca_circ(nqubit, depth)" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "acfdd011", + "metadata": {}, + "outputs": [], + "source": [ + "make_ldca_circ?" 
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -466,7 +476,7 @@ "metadata": {}, "outputs": [], "source": [ - "hwe_circ = make_general_hwe_circ(nqubit, n_cycles=3)" + "hwe_circ = make_general_hwe_circ(nqubit, n_cycles=1)" ] }, { @@ -494,6 +504,89 @@ "solv_hwe.run()\n", "solv_hwe.state" ] + }, + { + "cell_type": "markdown", + "id": "5ce2d533", + "metadata": {}, + "source": [ + "### 1.4 Ansatz selector\n", + "\n", + "In order to simplify the ansatz selection a function called **ansatz_selector** was built. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17a0fb89", + "metadata": {}, + "outputs": [], + "source": [ + "from PH.ansatzes import ansatz_selector" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8aa75dc0", + "metadata": {}, + "outputs": [], + "source": [ + "conf_dict = {\n", + " 'nqubits' : 8,\n", + " 'depth' : 3\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "37f06d87", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "circuit = ansatz_selector('simple01', **conf_dict)\n", + "%qatdisplay circuit --svg" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "34780806", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "circuit = ansatz_selector('simple02', **conf_dict)\n", + "%qatdisplay circuit --svg" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "efbcae81", + "metadata": {}, + "outputs": [], + "source": [ + "circuit = ansatz_selector('lda', **conf_dict)\n", + "%qatdisplay circuit --svg" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fa897cb6", + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "circuit = ansatz_selector('hwe', **conf_dict)\n", + "%qatdisplay circuit --svg" + ] } ], "metadata": { diff --git a/tnbs/BTC_04_PH/PH/notebooks/03_ParentHamiltonian.ipynb b/tnbs/BTC_04_PH/PH/notebooks/03_ParentHamiltonian.ipynb new file mode 100644 index 0000000..828e188 --- /dev/null +++ b/tnbs/BTC_04_PH/PH/notebooks/03_ParentHamiltonian.ipynb @@ -0,0 +1,203 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "65171bc2", + "metadata": {}, + "source": [ + "# Complete PH\n", + "\n", + "Here we document the code for executing a complete parent hamiltonian BTC." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "833398ed",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import logging\n",
+    "logging.basicConfig(\n",
+    "    format='%(asctime)s-%(levelname)s: %(message)s',\n",
+    "    datefmt='%m/%d/%Y %I:%M:%S %p',\n",
+    "    level=logging.INFO\n",
+    "    #level=logging.DEBUG\n",
+    ")\n",
+    "logger = logging.getLogger('__name__')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "dc8da478",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import pandas as pd\n",
+    "import time"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "721e3dc7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import sys\n",
+    "sys.path.append(\"../../\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c96e5f5a",
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "# myQLM qpus\n",
+    "from qat.qpus import PyLinalg, CLinalg\n",
+    "qpu_c = CLinalg()\n",
+    "qpu_p = PyLinalg()\n",
+    "# QLM qpus\n",
+    "from qlmaas.qpus import LinAlg, MPS\n",
+    "qpu_qaass = LinAlg()\n",
+    "qpu_mps = MPS(lnnize=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "6c0538eb",
+   "metadata": {},
+   "source": [
+    "## Module PH.btc_ph\n",
+    "\n",
+    "This module provides the function **ph_btc**, which implements the complete workflow of a **BTC** of the **PH kernel**. Its input is a dictionary that configures the case to run. The main keys are:\n",
+    "\n",
+    "* ansatz: the ansatz to use (restricted to simple01, simple02, lda or hwe).\n",
+    "* nqubits: number of qubits for the ansatz\n",
+    "* depth: ansatz depth\n",
+    "* qpu_ansatz: Atos myQLM QPU used for solving the ansatz\n",
+    "* qpu_ph: Atos myQLM QPU used for solving the complete PH problem\n",
+    "* nb_shots: number of shots\n",
+    "* save: whether to save intermediate data (like the parameters of the ansatz or the complete Pauli decomposition of the Hamiltonian)\n",
+    "* folder: folder for saving the intermediate files\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f3d04c9f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from PH.btc_ph import ph_btc, get_qpu, create_folder"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "cd1308c0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ansatz = 'simple02'\n",
+    "nqubits = 5\n",
+    "depth = 3\n",
+    "qpu_ansatz = \"mps\"\n",
+    "qpu_ph = \"c\"\n",
+    "\n",
+    "folder_name = \"ansatz_{}_nqubits_{}_depth_{}_qpu_ansatz_{}_qpu_ph_{}\".format(\n",
+    "    ansatz, nqubits, depth, qpu_ansatz, qpu_ph)\n",
+    "\n",
+    "ph_configuration = {\n",
+    "    'ansatz': ansatz,\n",
+    "    'nqubits': nqubits,\n",
+    "    'depth': depth,\n",
+    "    'qpu_ansatz': get_qpu(qpu_ansatz),\n",
+    "    'qpu_ph': get_qpu(qpu_ph),\n",
+    "    'nb_shots': 0,\n",
+    "    'save': True,\n",
+    "    'folder': create_folder(folder_name),\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "759dec17",
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "result = ph_btc(**ph_configuration)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4dd97c68",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "result"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "eeb4ff8b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ansatz = 'hwe'\n",
+    "nqubits = 7\n",
+    "depth = 3\n",
+    "qpu_ansatz = \"mps\"\n",
+    "qpu_ph = \"qlmass\"\n",
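+    "# This configuration is a sketch: \"qlmass\" and \"mps\" select\n",
+    "# QLM-as-a-Service QPUs, so running this cell assumes a working\n",
+    "# qlmaas connection to a remote QLM; with plain myQLM only \"c\"\n",
+    "# or \"python\" are available (see get_qpu above).\n",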
"\n", + "folder_name = \"ansatz_{}_nqubits_{}_depth_{}_qpu_ansatz_{}_qpu_ph_{}\".format(\n", + " ansatz, nqubits, depth, qpu_ansatz, qpu_ph)\n", + " \n", + "ph_configuration = {\n", + " 'ansatz': ansatz, \n", + " 'nqubits': nqubits,\n", + " 'depth': depth,\n", + " 'qpu_ansatz' : get_qpu(qpu_ansatz),\n", + " 'qpu_ph' : get_qpu(qpu_ph),\n", + " 'nb_shots' : 0,\n", + " 'save': True,\n", + " 'folder': create_folder(folder_name),\n", + "}\n", + "result = ph_btc(**ph_configuration)\n", + "print(result)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tnbs/BTC_04_PH/my_benchmark_execution.py b/tnbs/BTC_04_PH/my_benchmark_execution.py index 22ec66f..0fcc0b5 100644 --- a/tnbs/BTC_04_PH/my_benchmark_execution.py +++ b/tnbs/BTC_04_PH/my_benchmark_execution.py @@ -6,19 +6,37 @@ import json import copy from datetime import datetime +from copy import deepcopy import pandas as pd -def run_code(n_qbits, repetitions, **kwargs): +def build_iterator(**kwargs): + """ + For building the iterator of the benchmark + """ + import itertools as it + + list4it = [ + kwargs["list_of_qbits"], + kwargs["kernel_configuration"]["depth"] + ] + + iterator = it.product(*list4it) + + return list(iterator) + +def run_code(iterator_step, repetitions, stage_bench, **kwargs): """ For configuration and execution of the benchmark kernel. Parameters ---------- - n_qbits : int - number of qubits used for domain discretization + iterator_step : tuple + tuple with elements from iterator built from build_iterator. repetitions : list - number of repetitions for the integral + number of repetitions for each execution + stage_bench : str + benchmark stage. 
Only: benchmark, pre-benchmark
     kwargs : keyword arguments
         for configuration of the benchmark kernel
@@ -27,22 +45,77 @@
     metrics : pandas DataFrame
         DataFrame with the desired metrics obtained for the integral
        computation
+    save_name : string
+        Desired name for saving the results of the execution
     """
-    if n_qbits is None:
-        raise ValueError("n_qbits CAN NOT BE None")
+
+    from PH.btc_ph import ph_btc, get_qpu, create_folder
+    if stage_bench not in ["benchmark", "pre-benchmark"]:
+        raise ValueError(
+            "Valid values for stage_bench: benchmark or pre-benchmark")
+
     if repetitions is None:
         raise ValueError("samples CAN NOT BE None")
-    #Here the code for configuring and execute the benchmark kernel
-
-    metrics = pd.DataFrame()
-    return metrics
+    kernel_configuration = deepcopy(kwargs.get("kernel_configuration", None))
+    if kernel_configuration is None:
+        raise ValueError("kernel_configuration can not be None")
+    # Configuring kernel
+    kernel_configuration.update({"nqubits": iterator_step[0]})
+    kernel_configuration.update({"depth": iterator_step[1]})
+
+    fold_name = "ansatz_{}_nqubits_{}_depth_{}_qpu_ansatz_{}_qpu_ph_{}".format(
+        kernel_configuration["ansatz"],
+        kernel_configuration["nqubits"],
+        kernel_configuration["depth"],
+        kernel_configuration["qpu_ansatz"],
+        kernel_configuration["qpu_ph"])
+
+    folder_name = kwargs["saving_folder"] + fold_name
+
+    # Load QPU for solving the ansatz
+    kernel_configuration.update(
+        {"qpu_ansatz": get_qpu(kernel_configuration["qpu_ansatz"])})
+    # Load QPU for solving the parent Hamiltonian
+    kernel_configuration.update(
+        {"qpu_ph": get_qpu(kernel_configuration["qpu_ph"])})
+    # Update and create the folder for intermediate storing
+    kernel_configuration.update(
+        {"folder": create_folder(folder_name)})
+
+    # The following keys are not mandatory for executing the kernel
+    del kernel_configuration["gse_error"]
+    del kernel_configuration["time_error"]
+    print(kernel_configuration)
+
+    list_of_metrics = []
+    for i in range(repetitions):
+        metric_step = ph_btc(**kernel_configuration)
+        list_of_metrics.append(metric_step)
+
+    metrics = pd.concat(list_of_metrics)
+    metrics.reset_index(drop=True, inplace=True)
+
+    if stage_bench == "pre-benchmark":
+        # Name for storing Pre-Benchmark results
+        save_name = "pre_benchmark_nq_{}_ansatz_{}_depth_{}.csv".format(
+            iterator_step[0],
+            kernel_configuration["ansatz"],
+            iterator_step[1]
+        )
+    if stage_bench == "benchmark":
+        # Name for storing Benchmark results
+        save_name = kwargs.get("csv_results")
+    return metrics, save_name
 
 def compute_samples(**kwargs):
     """
-    This functions computes the number of executions of the benchmark
-    for assure an error r with a confidence of alpha
+    This function computes the number of executions of the benchmark
+    for ensuring an error r with a confidence level of alpha
 
     Parameters
     ----------
@@ -62,20 +135,44 @@
 
     #Desired Confidence level
     alpha = kwargs.get("alpha", 0.05)
-
+    if alpha is None:
+        alpha = 0.05
+    metrics = kwargs.get("pre_metrics")
+    bench_conf = kwargs.get("kernel_configuration")
 
     #Code for computing the number of samples for getting the desired
     #statistical significance. Depends on benchmark kernel
-    samples_ = pd.Series([100, 100])
-    samples_.name = "samples"
-    #If user wants limit the number of samples
+    from scipy.stats import norm
+    zalpha = norm.ppf(1-(alpha/2)) # 95% of confidence level
+    # Absolute error expected for the Ground State Energy
+    error_gse = bench_conf.get("gse_error", 0.01)
+    if error_gse is None:
+        error_gse = 0.01
+    std_ = metrics[["gse"]].std()
+    samples_gse = (zalpha * std_ / error_gse) ** 2
+
+    # Relative error for the elapsed time
+    time_error = bench_conf.get("time_error", 0.05)
+    if time_error is None:
+        time_error = 0.05
+    mean_time = metrics[["elapsed_time"]].mean()
+    std_time = metrics[["elapsed_time"]].std()
+    samples_time = (zalpha * std_time / (time_error * mean_time)) ** 2
+
+    #The maximum of the two sample counts will be used
+    samples_ = pd.Series(pd.concat([samples_time, samples_gse]).max())
+
+    #Apply lower and upper limits to the number of samples
     #Minimum and Maximum number of samples
-    min_meas = kwargs.get("min_meas", 5)
+    min_meas = kwargs.get("min_meas", None)
+    if min_meas is None:
+        min_meas = 5
     max_meas = kwargs.get("max_meas", None)
     samples_.clip(upper=max_meas, lower=min_meas, inplace=True)
-    return list(samples_)
+    samples_ = samples_.max().astype(int)
+    return samples_
 
 def summarize_results(**kwargs):
     """
@@ -83,13 +180,18 @@
 
     folder = kwargs.get("saving_folder")
-    csv_results = kwargs.get("csv_results")
+    csv_results = folder + kwargs.get("csv_results")
 
     #Code for summarizing the benchmark results. Depends on the
     #kernel of the benchmark
-
-    results = pd.DataFrame()
-
+    pdf = pd.read_csv(csv_results, index_col=0, sep=";")
+    pdf["classic_time"] = pdf["elapsed_time"] - pdf["quantum_time"]
+    pdf.drop(["save", "folder"], axis=1, inplace=True)
+    # The angles are randomly selected: not interesting for aggregation
+    columns = pdf.columns
+    columns = list(columns.drop(
+        ["gse", "elapsed_time", "quantum_time", "classic_time"]))
+    results = pdf.groupby(columns).agg(["mean", "std", "count"])
     return results
 
 class KERNEL_BENCHMARK:
@@ -111,7 +213,9 @@ def __init__(self, **kwargs):
 
         #Benchmark Configuration
         #Repetitions for pre benchmark step
-        self.pre_samples = self.kwargs.get("pre_samples", 10)
+        self.pre_samples = self.kwargs.get("pre_samples", None)
+        if self.pre_samples is None:
+            self.pre_samples = 10
         #Saving pre benchmark step results
         self.pre_save = self.kwargs.get("pre_save", True)
         #For executing or not the benchmark step
@@ -119,9 +223,18 @@
         #Name for saving the pre benchmark step results
         self.save_name = self.kwargs.get("save_name", None)
 
-        #NNumber of qbits
+        #Number of qbits
         self.list_of_qbits = self.kwargs.get("list_of_qbits", [4])
 
+        save_type = self.kwargs.get("save_append", True)
+        if save_type:
+            self.save_type = "a"
+        else:
+            self.save_type = "w"
+
+        #Create the iterator
+        self.iterator = build_iterator(**self.kwargs)
+
         #Configure names for CSV files
         self.saving_folder = self.kwargs.get("saving_folder")
         self.benchmark_times = self.saving_folder + \
@@ -157,7 +270,7 @@ def save(self, save, save_name, input_pdf, save_mode):
             f_pointer,
             mode=save_mode,
             header=f_pointer.tell() == 0,
-            sep=';'
+            sep=";"
         )
 
     def exe(self):
@@ -165,18 +278,18 @@
         Execute complete Benchmark WorkFlow
         """
         start_time = datetime.now().astimezone().isoformat()
-        for n_qbits in self.list_of_qbits:
-            print("n_qbits: {}".format(n_qbits))
+        for step_iterator in self.iterator:
+            # print("n_qbits: {}".format(n_qbits))
             if self.pre_benchmark:
                 print("\t Executing Pre-Benchmark")
                 #Pre benchmark step
-                pre_metrics = run_code(
-                    n_qbits, self.pre_samples, **self.kwargs
+                pre_metrics, pre_save_name = run_code(
+                    step_iterator, self.pre_samples, "pre-benchmark",
+                    **self.kwargs
                 )
                 #For saving pre-benchmark step results
-                pre_save_name = self.saving_folder + \
-                    "pre_benchmark_step_{}.csv".format(n_qbits)
+                pre_save_name = self.saving_folder + pre_save_name
                 self.save(self.pre_save, pre_save_name, pre_metrics, "w")
                 #Using pre benchmark results for computing the number of
                 #repetitions
@@ -185,11 +298,13 @@
             #Compute needed samples for desired
            #statistical significance
             samples_ = compute_samples(**self.kwargs)
+            print("\t Executing Benchmark Step")
             print("\t step samples: {}".format(samples_))
-            metrics = run_code(
-                n_qbits, samples_, **self.kwargs
+            metrics, save_name = run_code(
+                step_iterator, samples_, "benchmark", **self.kwargs
             )
-            self.save(self.save, self.csv_results, metrics, "a")
+            save_name = self.saving_folder + save_name
+            self.save(self.save, save_name, metrics, self.save_type)
         end_time = datetime.now().astimezone().isoformat()
         pdf_times = pd.DataFrame(
             [start_time, end_time],
@@ -202,29 +317,49 @@
 
         results.to_csv(self.summary_results)
 
-
 if __name__ == "__main__":
     import os
     import shutil
 
+    ansatz = "simple02"
+    qpu_ansatz = "mps"
+    qpu_ph = "c"
+    depth = [2, 3]
+
+    kernel_configuration = {
+        "ansatz": ansatz,
+        "depth": depth,
+        "qpu_ansatz": qpu_ansatz,
+        "qpu_ph": qpu_ph,
+        "nb_shots": 0,
+        "save": True,
+        "folder": None,
+        "gse_error": None,
+        "time_error": None
+    }
+
     benchmark_arguments = {
         #Pre benchmark stuff
-        "pre_benchmark": False,
-        "pre_samples": [10, 10],
+        "pre_benchmark": True,
+        "pre_samples": None,
         "pre_save": True,
         #Saving stuff
+        "save_append": True,
         "saving_folder": "./Results/",
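+        # The three CSV names below are plain file names; KERNEL_BENCHMARK
+        # prepends saving_folder to them when building the full paths.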
"benchmark_times": "kernel_times_benchmark.csv", "csv_results": "kernel_benchmark.csv", "summary_results": "kernel_SummaryResults.csv", #Computing Repetitions stuff - "alpha": 0.05, - "min_meas": 5, - "max_meas": 10, + "alpha": None, + "min_meas": None, + "max_meas": None, #List number of qubits tested - "list_of_qbits": [4],#, 6, 8], + "list_of_qbits": [4, 6], } + #Configuration for the benchmark kernel + benchmark_arguments.update({"kernel_configuration": kernel_configuration}) kernel_bench = KERNEL_BENCHMARK(**benchmark_arguments) kernel_bench.exe() diff --git a/tnbs/BTC_04_PH/my_benchmark_info.py b/tnbs/BTC_04_PH/my_benchmark_info.py index 65f83f5..5785462 100644 --- a/tnbs/BTC_04_PH/my_benchmark_info.py +++ b/tnbs/BTC_04_PH/my_benchmark_info.py @@ -3,49 +3,56 @@ report """ +import sys +import platform from collections import OrderedDict from my_benchmark_summary import summarize_results +import pandas as pd def my_benchmark_kernel(**kwargs): """ Name for the benchmark Kernel """ - return "AmplitudeEstimation" + return "ParentHamiltonian" def my_starttime(**kwargs): """ Providing the start time of the benchmark """ - start_time = "2022-12-12T16:46:57.268509+01:00" + times_filename = kwargs.get("times_filename", None) + pdf = pd.read_csv(times_filename, index_col=0) + start_time = pdf["StartTime"][0] return start_time def my_endtime(**kwargs): """ Providing the end time of the benchmark """ - end_time = "2022-12-12T16:46:57.268509+01:00" + times_filename = kwargs.get("times_filename", None) + pdf = pd.read_csv(times_filename, index_col=0) + end_time = pdf["EndTime"][0] return end_time def my_timemethod(**kwargs): """ Providing the method for getting the times """ - time_method = "None" + time_method = "time.time" return time_method def my_programlanguage(**kwargs): """ Getting the programing language used for benchmark """ - program_language = "None" + program_language = platform.python_implementation() return program_language def my_programlanguage_version(**kwargs): """ Getting the version of the programing language used for benchmark """ - language_version = "None" + language_version = platform.python_version() return language_version def my_programlanguage_vendor(**kwargs): @@ -59,10 +66,38 @@ def my_api(**kwargs): """ Collect the information about the used APIs """ - api = OrderedDict() - api["Name"] = "None" - api["Version"] = "None" - list_of_apis = [api] + # api = OrderedDict() + # api["Name"] = "None" + # api["Version"] = "None" + # list_of_apis = [api] + modules = [] + list_of_apis = [] + for module in list(sys.modules): + api = OrderedDict() + module = module.split('.')[0] + if module not in modules: + modules.append(module) + api["Name"] = module + try: + version = sys.modules[module].__version__ + except AttributeError: + #print("NO VERSION: "+str(sys.modules[module])) + try: + if isinstance(sys.modules[module].version, str): + version = sys.modules[module].version + #print("\t Attribute Version"+version) + else: + version = sys.modules[module].version() + #print("\t Methdod Version"+version) + except (AttributeError, TypeError) as error: + #print('\t NO VERSION: '+str(sys.modules[module])) + try: + version = sys.modules[module].VERSION + except AttributeError: + #print('\t\t NO VERSION: '+str(sys.modules[module])) + version = "Unknown" + api["Version"] = str(version) + list_of_apis.append(api) return list_of_apis def my_quantum_compilation(**kwargs): @@ -91,7 +126,24 @@ def my_metadata_info(**kwargs): """ metadata = OrderedDict() - metadata["None"] = None + benchmark_file 
= kwargs.get("benchmark_file", None) + index_columns = [0, 1, 2, 3, 4, 5] + pdf = pd.read_csv(benchmark_file, header=[0, 1], index_col=index_columns) + ansatz = list(set(pdf.index.get_level_values('ansatz'))) + if len(ansatz) != 1: + raise ValueError("Found more than 1 ansatzes") + metadata["AnsatzName"] = ansatz[0] + + qpu_ansatz = list(set(pdf.index.get_level_values('qpu_ansatz'))) + if len(qpu_ansatz) != 1: + raise ValueError("Found more than 1 qpu_ansatzes") + metadata["QPUforAnsatz"] = qpu_ansatz[0] + + qpu_ph = list(set(pdf.index.get_level_values('qpu_ph'))) + if len(qpu_ph) != 1: + raise ValueError("Found more than 1 qpu_phes") + metadata["QPUforPH"] = qpu_ph[0] + metadata["AnsatzDepths"] = list(set(pdf.index.get_level_values('depth'))) return metadata @@ -127,7 +179,11 @@ def my_benchmark_info(**kwargs): ################## Configuration ########################## - configuration = {"None": None} + folder = "Results/" + configuration = { + "times_filename" : folder + "kernel_times_benchmark.csv", + "benchmark_file" : folder + "kernel_SummaryResults.csv", + } ######## Execute Validations ##################################### diff --git a/tnbs/BTC_04_PH/my_benchmark_summary.py b/tnbs/BTC_04_PH/my_benchmark_summary.py index 8283317..9536b21 100644 --- a/tnbs/BTC_04_PH/my_benchmark_summary.py +++ b/tnbs/BTC_04_PH/my_benchmark_summary.py @@ -12,38 +12,60 @@ def summarize_results(**kwargs): the NEASQC jsonschema """ - n_qbits = [4] - #Info with the benchmark results like a csv or a DataFrame - pdf = None - #Metrics needed for reporting. Depend on the benchmark kernel - list_of_metrics = ["MRSE"] + # n_qbits = [4] + # #Info with the benchmark results like a csv or a DataFrame + # pdf = None + # #Metrics needed for reporting. Depend on the benchmark kernel + # list_of_metrics = ["MRSE"] + + import pandas as pd + benchmark_file = kwargs.get("benchmark_file", None) + index_columns = [0, 1, 2, 3, 4, 5] + pdf = pd.read_csv(benchmark_file, header=[0, 1], index_col=index_columns) + pdf.reset_index(inplace=True) + n_qbits = list(set(pdf["nqubits"])) + depth = list(set(pdf["depth"])) + list_of_metrics = ["gse"] results = [] #If several qbits are tested + # For ordering by n_qbits for n_ in n_qbits: - #Fields for benchmark test of a fixed number of qubits - result = OrderedDict() - result["NumberOfQubits"] = n_ - result["QubitPlacement"] = list(range(n_)) - result["QPUs"] = [1] - result["CPUs"] = psutil.Process().cpu_affinity() - result["TotalTime"] = 10.0 - result["SigmaTotalTime"] = 1.0 - result["QuantumTime"] = 9.0 - result["SigmaQuantumTime"] = 0.5 - result["ClassicalTime"] = 1.0 - result["SigmaClassicalTime"] = 0.1 - metrics = [] - #For each fixed number of qbits several metrics can be reported - for metric_name in list_of_metrics: - metric = OrderedDict() - #MANDATORY - metric["Metric"] = metric_name - metric["Value"] = 0.1 - metric["STD"] = 0.001 - metrics.append(metric) - result["Metrics"] = metrics - results.append(result) + for depth_ in depth: + # For ordering by auxiliar qbits + result = OrderedDict() + result["NumberOfQubits"] = n_ + result["QubitPlacement"] = list(range(n_)) + result["QPUs"] = [1] + result["CPUs"] = psutil.Process().cpu_affinity() + #Select the proper data + indice = (pdf["nqubits"] == n_) & (pdf["depth"] == depth_) + step_pdf = pdf[indice] + result["TotalTime"] = step_pdf["elapsed_time"]["mean"].iloc[0] + result["SigmaTotalTime"] = step_pdf["elapsed_time"]["std"].iloc[0] + result["QuantumTime"] = step_pdf["quantum_time"]["mean"].iloc[0] + result["SigmaQuantumTime"] = 
step_pdf["quantum_time"]["std"].iloc[0] + result["ClassicalTime"] = step_pdf["classic_time"]["mean"].iloc[0] + result["SigmaClassicalTime"] = step_pdf["classic_time"]["std"].iloc[0] + + # For identifying the test + # result['AnsatzName'] = step_pdf["ansatz"].iloc[0] + result['AnsatzDepth'] = depth_ + # result['QPUforAnsatz'] = step_pdf["qpu_ansatz"].iloc[0] + # result['QPUforPH'] = step_pdf["qpu_ph"].iloc[0] + result['Shots'] = int(step_pdf['nb_shots'].iloc[0]) + metrics = [] + #For each fixed number of qbits several metrics can be reported + for metric_name in list_of_metrics: + metric = OrderedDict() + #MANDATORY + metric["Metric"] = metric_name + metric["Value"] = step_pdf[metric_name]["mean"].iloc[0] + metric["STD"] = step_pdf[metric_name]["std"].iloc[0] + metric["COUNT"] = int(step_pdf[metric_name]["count"].iloc[0]) + metrics.append(metric) + result["Metrics"] = metrics + results.append(result) return results if __name__ == "__main__": @@ -57,7 +79,9 @@ def summarize_results(**kwargs): ################## Configuring the files ########################## - configuration = {"None" : None} + configuration = { + "benchmark_file" : "Results/kernel_SummaryResults.csv" + } ######## Execute Validations ##################################### diff --git a/tnbs/BTC_04_PH/my_environment_info.py b/tnbs/BTC_04_PH/my_environment_info.py index 45d4d7d..0804c55 100644 --- a/tnbs/BTC_04_PH/my_environment_info.py +++ b/tnbs/BTC_04_PH/my_environment_info.py @@ -11,14 +11,14 @@ def my_organisation(**kwargs): """ Given information about the organisation how uploads the benchmark """ - name = "None" + name = "CESGA" return name def my_machine_name(**kwargs): """ Name of the machine where the benchmark was performed """ - machine_name = "None" + machine_name = platform.node() return machine_name def my_qpu_model(**kwargs): @@ -91,7 +91,7 @@ def my_cpu_model(**kwargs): """ model of the cpu used in the benchmark """ - cpu_model = "None" + cpu_model = platform.processor() return cpu_model def my_frecuency(**kwargs): @@ -100,7 +100,7 @@ def my_frecuency(**kwargs): """ #Use the nominal frequency. Here, it collects the maximum frequency #print(psutil.cpu_freq()) - cpu_frec = 0 + cpu_frec = psutil.cpu_freq().max/1000 return cpu_frec def my_network(**kwargs): diff --git a/tnbs/BTC_04_PH/neasqc_benchmark.py b/tnbs/BTC_04_PH/neasqc_benchmark.py index f928f3a..7c05f94 100644 --- a/tnbs/BTC_04_PH/neasqc_benchmark.py +++ b/tnbs/BTC_04_PH/neasqc_benchmark.py @@ -158,6 +158,7 @@ def exe(self, info): self.set_qpu_cpu_connection(info["QPUCPUConnection"]) self.set_benchmark_info(info["Benchmarks"]) self.validate() + #print(self.report) self.save(info["json_file_name"]) def validate(self): @@ -198,7 +199,11 @@ def save(self, filename): ################## Configuration ########################## - kwargs = {"None": None} + folder = "Results/" + kwargs = { + "times_filename" : folder + "kernel_times_benchmark.csv", + "benchmark_file" : folder + "kernel_SummaryResults.csv", + } benchmark_conf = { "ReportOrganization": my_environment_info.my_organisation( @@ -212,7 +217,7 @@ def save(self, filename): "QPUCPUConnection":my_environment_info.my_QPUCPUConnection( **kwargs), "Benchmarks": my_benchmark_info.my_benchmark_info(**kwargs), - "json_file_name": "./benchmark_report.json" + "json_file_name": folder + "benchmark_report.json" } benchmark = BENCHMARK()