diff --git a/code-postprocessing/bbob_pproc/bestalg.py b/code-postprocessing/bbob_pproc/bestalg.py index 2b6ce786c..973570f24 100644 --- a/code-postprocessing/bbob_pproc/bestalg.py +++ b/code-postprocessing/bbob_pproc/bestalg.py @@ -9,8 +9,8 @@ to access best algorithm data set. The best algorithm data set can be accessed by the - :py:data:`bestalgentries2009` variable. This variable needs to be - initialized by executing functions :py:func:`loadBBOB2009()` + :py:data:`bestAlgorithmEntries` variable. This variable needs to be + initialized by executing functions :py:func:`load_best_algorithm()` This module can also be used generate the best algorithm data set with its generate method. @@ -29,14 +29,9 @@ from . import readalign, pproc from .toolsdivers import print_done from .ppfig import Usage -from . import toolsstats, genericsettings +from . import toolsstats, testbedsettings bestAlgorithmEntries = {} -bestalgentries2009 = {} -bestalgentries2010 = {} -bestalgentries2012 = {} -bestalgentriesever = {} -bestbiobjalgentries2016 = {} algs2009 = ("ALPS", "AMALGAM", "BAYEDA", "BFGS", "Cauchy-EDA", "BIPOP-CMA-ES", "CMA-ESPLUSSEL", "DASA", "DE-PSO", "DIRECT", "EDA-PSO", @@ -343,7 +338,7 @@ def load_best_algorithm(force=False): if not force and bestAlgorithmEntries: return bestAlgorithmEntries - bestAlgorithmFilename = genericsettings.current_testbed.best_algorithm_filename + bestAlgorithmFilename = testbedsettings.current_testbed.best_algorithm_filename # If the file name is not specified then we skip the load. if not bestAlgorithmFilename: @@ -372,155 +367,6 @@ def load_best_algorithm(force=False): return bestAlgorithmEntries -def loadBBOB2009(force=False): - """Assigns :py:data:`bestalgentries2009`. - - This function is needed to set the global variable - :py:data:`bestalgentries2009`. It unpickles file - :file:`bestalgentries2009.pickle.gz` - - :py:data:`bestalgentries2009` is a dictionary accessed by providing - a tuple :py:data:`(dimension, function)`. This returns an instance - of :py:class:`BestAlgSet`. - The data is that of algorithms submitted to BBOB 2009, the list of - which can be found in variable :py:data:`algs2009`. - - """ - global bestalgentries2009 - # global statement necessary to change the variable bestalg.bestalgentries2009 - - if not force and bestalgentries2009: - return - - print "Loading best algorithm data from BBOB-2009...", - sys.stdout.flush() - - bestalgfilepath = os.path.split(__file__)[0] - # picklefilename = os.path.join(bestalgfilepath, 'bestalgentries2009.pickle') - # cocofy(picklefilename) - # fid = open(picklefilename, 'r') - - picklefilename = os.path.join(bestalgfilepath, 'bestalgentries2009.pickle.gz') - fid = gzip.open(picklefilename, 'r') - try: - bestalgentries2009 = pickle.load(fid) - except: - warnings.warn("no best algorithm loaded") - # raise # outcomment to diagnose - bestalgentries2009 = None - fid.close() - print_done() - -def loadBBOB2010(): - """Assigns :py:data:`bestalgentries2010`. - - This function is needed to set the global variable - :py:data:`bestalgentries2010`. It unpickles file - :file:`bestalgentries2010.pickle.gz` - - :py:data:`bestalgentries2010` is a dictionary accessed by providing - a tuple :py:data:`(dimension, function)`. This returns an instance - of :py:class:`BestAlgSet`. - The data is that of algorithms submitted to BBOB 20&0, the list of - which can be found in variable :py:data:`algs2010`. 
- - """ - global bestalgentries2010 - # global statement necessary to change the variable bestalg.bestalgentries2010 - - print "Loading best algorithm data from BBOB-2010...", - bestalgfilepath = os.path.split(__file__)[0] - picklefilename = os.path.join(bestalgfilepath, 'bestalgentries2010.pickle.gz') - # cocofy(picklefilename) - fid = gzip.open(picklefilename, 'r') - bestalgentries2010 = pickle.load(fid) - fid.close() - print " done." - -def loadBBOB2012(): - """Assigns :py:data:`bestalgentries2012`. - - This function is needed to set the global variable - :py:data:`bestalgentries2012`. It unpickles file - :file:`bestalgentries2012.pickle.gz` - - :py:data:`bestalgentries2012` is a dictionary accessed by providing - a tuple :py:data:`(dimension, function)`. This returns an instance - of :py:class:`BestAlgSet`. - The data is that of algorithms submitted to BBOB 20&0, the list of - which can be found in variable :py:data:`algs2012`. - - """ - global bestalgentries2012 - # global statement necessary to change the variable bestalg.bestalgentries2012 - - print "Loading best algorithm data from BBOB-2012...", - bestalgfilepath = os.path.split(__file__)[0] - picklefilename = os.path.join(bestalgfilepath, 'bestalgentries2012.pickle.gz') - # cocofy(picklefilename) - fid = gzip.open(picklefilename, 'r') - bestalgentries2012 = pickle.load(fid) - fid.close() - print " done." - -def loadBBOBever(): - """Assigns :py:data:`bestalgentriesever`. - - This function is needed to set the global variable - :py:data:`bestalgentriesever`. It unpickles file - :file:`bestalgentriesever.pickle.gz` - - :py:data:`bestalgentriesever` is a dictionary accessed by providing - a tuple :py:data:`(dimension, function)`. This returns an instance - of :py:class:`BestAlgSet`. - The data is that of algorithms submitted to BBOB 2009 and 2010, the - list of which is the union in variables :py:data:`algs2009` - and :py:data:`algs2010`. - - """ - global bestalgentriesever - # global statement necessary to change the variable bestalg.bestalgentriesever - - print "Loading best algorithm data from BBOB...", - bestalgfilepath = os.path.split(__file__)[0] - picklefilename = os.path.join(bestalgfilepath, 'bestalgentriesever.pickle.gz') - # cocofy(picklefilename) - fid = gzip.open(picklefilename, 'r') - bestalgentriesever = pickle.load(fid) - fid.close() - print " done." - -def loadBestBiobj2016(): - """Assigns :py:data:`bestbiobjalgentries2016`. - - This function is needed to set the global variable - :py:data:`bestbiobjalgentries2016`. It unpickles file - :file:`bestbiobjalgentries2016.pickle.gz` - - :py:data:`bestbiobjalgentries2016` is a dictionary accessed by providing - a tuple :py:data:`(dimension, function)`. This returns an instance - of :py:class:`BestAlgSet`. 
- - """ - global bestbiobjalgentries2016 - # global statement necessary to change the variable bestalg.bestbiobjalgentries2016 - - if bestbiobjalgentries2016: - return - - print "Loading best bi-objective algorithm data from BBOB-2016...", - sys.stdout.flush() - - bestalgfilepath = os.path.split(__file__)[0] - #picklefilename = os.path.join(bestalgfilepath, 'bestbiobjalgentries2016.pickle.gz') - picklefilename = os.path.join(bestalgfilepath, 'bestbiobjalgentries2016.pickle') - #fid = gzip.open(picklefilename, 'r') - fid = open(picklefilename, 'r') - bestbiobjalgentries2016 = pickle.load(fid) - fid.close() - print_done() - - def usage(): print __doc__ # same as: sys.modules[__name__].__doc__, was: main.__doc__ diff --git a/code-postprocessing/bbob_pproc/comp2/ppfig2.py b/code-postprocessing/bbob_pproc/comp2/ppfig2.py index 2934f2395..cd1de6078 100644 --- a/code-postprocessing/bbob_pproc/comp2/ppfig2.py +++ b/code-postprocessing/bbob_pproc/comp2/ppfig2.py @@ -22,7 +22,7 @@ import numpy as np -from .. import toolsstats, readalign, ppfigparam, genericsettings, toolsdivers +from .. import toolsstats, readalign, ppfigparam, testbedsettings, toolsdivers from ..toolsstats import ranksumtest from ..ppfig import saveFigure, plotUnifLogXMarkers #try: @@ -421,7 +421,7 @@ def main(dsList0, dsList1, minfvalue=1e-8, outputdir='', verbose=True): if func in funInfos.keys(): plt.title(funInfos[func]) - if func in genericsettings.current_testbed.functions_with_legend: + if func in testbedsettings.current_testbed.functions_with_legend: toolsdivers.legend(loc='best') # save diff --git a/code-postprocessing/bbob_pproc/comp2/ppscatter.py b/code-postprocessing/bbob_pproc/comp2/ppscatter.py index 75598727a..af29e993c 100644 --- a/code-postprocessing/bbob_pproc/comp2/ppscatter.py +++ b/code-postprocessing/bbob_pproc/comp2/ppscatter.py @@ -40,7 +40,7 @@ except ImportError: # compatibility matplotlib 0.8 from matplotlib.transforms import blend_xy_sep_transform as blend -from .. import genericsettings, htmldesc, ppfigparam +from .. import genericsettings, htmldesc, ppfigparam, testbedsettings from ..ppfig import saveFigure from .. import toolsdivers from .. import pproc @@ -79,10 +79,10 @@ def prepare_figure_caption(): 40:{\color{magenta}$\Diamond$}. 
""" - if genericsettings.current_testbed.name == genericsettings.testbed_name_bi: + if testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi: # NOTE: no runlength-based targets supported yet caption = caption_start_fixed + caption_finish - elif genericsettings.current_testbed.name == genericsettings.testbed_name_single: + elif testbedsettings.current_testbed.name == testbedsettings.testbed_name_single: if genericsettings.runlength_based_targets: caption = caption_start_rlbased + caption_finish else: @@ -95,10 +95,10 @@ def prepare_figure_caption(): def figure_caption(for_html = False): - targets = genericsettings.current_testbed.ppscatter_target_values + targets = testbedsettings.current_testbed.ppscatter_target_values if for_html: caption = htmldesc.getValue('##bbobppscatterlegend' + - genericsettings.current_testbed.scenario + '##') + testbedsettings.current_testbed.scenario + '##') else: caption = prepare_figure_caption() @@ -167,7 +167,7 @@ def main(dsList0, dsList1, outputdir, verbose=True): dictFunc1 = dsList1.dictByFunc() funcs = set(dictFunc0.keys()) & set(dictFunc1.keys()) - targets = genericsettings.current_testbed.ppscatter_target_values + targets = testbedsettings.current_testbed.ppscatter_target_values if isinstance(targets, pproc.RunlengthBasedTargetValues): linewidth = linewidth_rld_based else: diff --git a/code-postprocessing/bbob_pproc/comp2/pptable2.py b/code-postprocessing/bbob_pproc/comp2/pptable2.py index c4d36257a..f68f7e75a 100644 --- a/code-postprocessing/bbob_pproc/comp2/pptable2.py +++ b/code-postprocessing/bbob_pproc/comp2/pptable2.py @@ -15,7 +15,7 @@ import os, warnings import numpy import matplotlib.pyplot as plt -from .. import genericsettings, bestalg, toolsstats, pproc +from .. import genericsettings, testbedsettings, bestalg, toolsstats, pproc from ..pptex import tableLaTeX, tableLaTeXStar, writeFEvals2, writeFEvalsMaxPrec, writeLabels from ..toolsstats import significancetest @@ -25,7 +25,7 @@ samplesize = genericsettings.simulated_runlength_bootstrap_sample_size def get_table_caption(): - """ Sets table caption, based on the genericsettings.current_testbed + """ Sets table caption, based on the testbedsettings.current_testbed and genericsettings.runlength_based_targets. """ @@ -40,7 +40,7 @@ def get_table_caption(): target, the corresponding best \aRT\ in the first row. The different target \Df-values are shown in the top row. \#succ is the number of trials that reached the (final) target - $\fopt + """ + genericsettings.current_testbed.hardesttargetlatex + r"""$. + $\fopt + """ + testbedsettings.current_testbed.hardesttargetlatex + r"""$. """ table_caption_two2 = r"""% run-length based target, the corresponding best \aRT\ @@ -55,7 +55,7 @@ def get_table_caption(): 90\%-tile of (bootstrapped) runtimes is shown for the different target \Df-values as shown in the top row. \#succ is the number of trials that reached the last target - $\hvref + """ + genericsettings.current_testbed.hardesttargetlatex + r"""$. + $\hvref + """ + testbedsettings.current_testbed.hardesttargetlatex + r"""$. 
""" table_caption_rest = (r"""% The median number of conducted function evaluations is additionally given in @@ -66,14 +66,14 @@ def get_table_caption(): following the $\star$ symbol, with Bonferroni correction of #1.""" + (r"""A $\downarrow$ indicates the same tested against the best algorithm of BBOB-2009.""" - if not (genericsettings.current_testbed.name == genericsettings.testbed_name_bi) + if not (testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi) else "") ) - if genericsettings.current_testbed.name == genericsettings.testbed_name_bi: + if testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi: # NOTE: no runlength-based targets supported yet table_caption = table_caption_bi + table_caption_rest - elif genericsettings.current_testbed.name == genericsettings.testbed_name_single: + elif testbedsettings.current_testbed.name == testbedsettings.testbed_name_single: if genericsettings.runlength_based_targets: table_caption = table_caption_one + table_caption_two2 + table_caption_rest else: @@ -89,7 +89,7 @@ def main(dsList0, dsList1, dimsOfInterest, outputdir, info='', verbose=True): #TODO: method is long, split if possible - testbed = genericsettings.current_testbed + testbed = testbedsettings.current_testbed targetsOfInterest = testbed.pptable2_targetsOfInterest diff --git a/code-postprocessing/bbob_pproc/compall/ppfigs.py b/code-postprocessing/bbob_pproc/compall/ppfigs.py index 50a3063d5..77f876c8a 100644 --- a/code-postprocessing/bbob_pproc/compall/ppfigs.py +++ b/code-postprocessing/bbob_pproc/compall/ppfigs.py @@ -9,6 +9,7 @@ import warnings from pdb import set_trace from .. import toolsdivers, toolsstats, bestalg, pproc, genericsettings, htmldesc, ppfigparam +from .. import testbedsettings from ..ppfig import saveFigure from ..pptex import color_to_latex, marker_to_latex, marker_to_html, writeLabels @@ -65,10 +66,10 @@ def prepare_scaling_figure_caption(): scaling_figure_caption_fixed = scaling_figure_caption_start_fixed + scaling_figure_caption_end scaling_figure_caption_rlbased = scaling_figure_caption_start_rlbased + scaling_figure_caption_end - if genericsettings.current_testbed.name == genericsettings.testbed_name_bi: + if testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi: # NOTE: no runlength-based targets supported yet figure_caption = scaling_figure_caption_fixed - elif genericsettings.current_testbed.name == genericsettings.testbed_name_single: + elif testbedsettings.current_testbed.name == testbedsettings.testbed_name_single: if genericsettings.runlength_based_targets: figure_caption = scaling_figure_caption_rlbased else: @@ -83,11 +84,11 @@ def scaling_figure_caption(for_html = False): if for_html: figure_caption = htmldesc.getValue('##bbobppfigslegend' + - genericsettings.current_testbed.scenario + '##') + testbedsettings.current_testbed.scenario + '##') else: figure_caption = prepare_scaling_figure_caption() - target = genericsettings.current_testbed.ppfigs_ftarget + target = testbedsettings.current_testbed.ppfigs_ftarget target = pproc.TargetValues.cast([target] if numpy.isscalar(target) else target) assert len(target) == 1 @@ -127,10 +128,10 @@ def prepare_ecdfs_figure_caption(): r"with $k\in \{0.5, 1.2, 3, 10, 50\}$. 
" ) - if genericsettings.current_testbed.name == genericsettings.testbed_name_bi: + if testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi: # NOTE: no runlength-based targets supported yet figure_caption = ecdfs_figure_caption_standard - elif genericsettings.current_testbed.name == genericsettings.testbed_name_single: + elif testbedsettings.current_testbed.name == testbedsettings.testbed_name_single: if genericsettings.runlength_based_targets: figure_caption = ecdfs_figure_caption_rlbased + best2009text else: @@ -144,24 +145,24 @@ def prepare_ecdfs_figure_caption(): def ecdfs_figure_caption(for_html = False, dimension = 0): if for_html: - key = '##bbobECDFslegend%s%d##' % (genericsettings.current_testbed.scenario, dimension) + key = '##bbobECDFslegend%s%d##' % (testbedsettings.current_testbed.scenario, dimension) caption = htmldesc.getValue(key) else: caption = prepare_ecdfs_figure_caption() - target = genericsettings.current_testbed.ppfigs_ftarget + target = testbedsettings.current_testbed.ppfigs_ftarget target = pproc.TargetValues.cast([target] if numpy.isscalar(target) else target) assert len(target) == 1 caption = caption.replace('BBOBPPFIGSTARGETRANGE', - str(genericsettings.current_testbed.pprldmany_target_range_latex)) + str(testbedsettings.current_testbed.pprldmany_target_range_latex)) if genericsettings.runlength_based_targets: caption = caption.replace('REFERENCE_ALGORITHM', target.reference_algorithm) caption = caption.replace('REFERENCEALGORITHM', target.reference_algorithm) else: caption = caption.replace('BBOBPPFIGSFTARGET', - str(len(genericsettings.current_testbed.pprldmany_target_values))) + str(len(testbedsettings.current_testbed.pprldmany_target_values))) return caption @@ -170,9 +171,9 @@ def get_ecdfs_single_fcts_caption(): ''' For the moment, only the bi-objective case is covered! ''' s = (r"""Empirical cumulative distribution of simulated (bootstrapped) runtimes in number of objective function evaluations divided by dimension (FEvals/DIM) for the $""" + - str(len(genericsettings.current_testbed.pprldmany_target_values)) + + str(len(testbedsettings.current_testbed.pprldmany_target_values)) + r"$ targets " + - str(genericsettings.current_testbed.pprldmany_target_range_latex) + + str(testbedsettings.current_testbed.pprldmany_target_range_latex) + r" for functions $f_1$ to $f_{16}$ and all dimensions. " ) return s @@ -184,9 +185,9 @@ def get_ecdfs_all_groups_caption(): # r"(FEvals/DIM) for " + s = (r"""Empirical cumulative distribution of simulated (bootstrapped) runtimes, measured in number of objective function evaluations, divided by dimension (FEvals/DIM) for the $""" + - str(len(genericsettings.current_testbed.pprldmany_target_values)) + + str(len(testbedsettings.current_testbed.pprldmany_target_values)) + r"$ targets " + - str(genericsettings.current_testbed.pprldmany_target_range_latex) + + str(testbedsettings.current_testbed.pprldmany_target_range_latex) + r" for all function groups and all dimensions. The aggregation" + r" over all 55 functions is shown in the last plot." 
) @@ -391,7 +392,7 @@ def main(dictAlg, htmlFilePrefix, isBiobjective, sortedAlgs=None, outputdir='ppd """ # target becomes a TargetValues "list" with one element - target = genericsettings.current_testbed.ppfigs_ftarget + target = testbedsettings.current_testbed.ppfigs_ftarget target = pproc.TargetValues.cast([target] if numpy.isscalar(target) else target) latex_commands_filename = os.path.join(outputdir, 'bbob_pproc_commands.tex') assert isinstance(target, pproc.TargetValues) @@ -517,7 +518,7 @@ def main(dictAlg, htmlFilePrefix, isBiobjective, sortedAlgs=None, outputdir='ppd if f in funInfos.keys(): plt.gca().set_title(funInfos[f], fontsize=fontSize) - functions_with_legend = genericsettings.current_testbed.functions_with_legend + functions_with_legend = testbedsettings.current_testbed.functions_with_legend isLegend = False if legend: plotLegend(handles) diff --git a/code-postprocessing/bbob_pproc/compall/ppperfprof.py b/code-postprocessing/bbob_pproc/compall/ppperfprof.py index 0bf80380b..87e69db5f 100755 --- a/code-postprocessing/bbob_pproc/compall/ppperfprof.py +++ b/code-postprocessing/bbob_pproc/compall/ppperfprof.py @@ -32,7 +32,7 @@ # plot the profiles figure() - # bb.compall.ppperfprof.plotmultiple(dsets, dsref=bb.bestalg.bestalgentries2009) + # bb.compall.ppperfprof.plotmultiple(dsets, dsref=bb.bestalg.bestAlgorithmEntries) """ from __future__ import absolute_import diff --git a/code-postprocessing/bbob_pproc/compall/pprldmany.py b/code-postprocessing/bbob_pproc/compall/pprldmany.py index f69e09f2a..b2258e4b8 100644 --- a/code-postprocessing/bbob_pproc/compall/pprldmany.py +++ b/code-postprocessing/bbob_pproc/compall/pprldmany.py @@ -46,7 +46,7 @@ from pdb import set_trace import numpy as np import matplotlib.pyplot as plt -from .. import toolsstats, bestalg, genericsettings +from .. import toolsstats, bestalg, genericsettings, testbedsettings from .. import pproc as pp # import dictAlgByDim, dictAlgByFun from .. import toolsdivers # strip_pathname, str_to_latex from .. 
import pprldistr # plotECDF, beautifyECDF @@ -405,7 +405,7 @@ def plot(dsList, targets=None, craftingeffort=0., **kwargs): """ if targets is None: - targets = genericsettings.current_testbed.pprldmany_target_values + targets = testbedsettings.current_testbed.pprldmany_target_values try: if np.min(targets) >= 1: ValueError('smallest target f-value is not smaller than one, use ``pproc.TargetValues(targets)`` to prevent this error') @@ -612,7 +612,7 @@ def main(dictAlg, isBiobjective, order=None, outputdir='.', info='default', # funcsolved = [set()] * len(targets) # number of functions solved per target xbest2009 = [] maxevalsbest2009 = [] - target_values = genericsettings.current_testbed.pprldmany_target_values + target_values = testbedsettings.current_testbed.pprldmany_target_values dictDimList = pp.dictAlgByDim(dictAlg) dims = sorted(dictDimList) @@ -627,7 +627,7 @@ def main(dictAlg, isBiobjective, order=None, outputdir='.', info='default', # print target_values((f, dim)) for j, t in enumerate(target_values((f, dim))): - # for j, t in enumerate(genericsettings.current_testbed.ecdf_target_values(1e2, f)): + # for j, t in enumerate(testbedsettings.current_testbed.ecdf_target_values(1e2, f)): # funcsolved[j].add(f) for alg in algorithms_with_data: @@ -778,11 +778,11 @@ def algname_to_label(algname, dirname=None): dictFG = pp.dictAlgByFuncGroup(dictAlg) dictKey = dictFG.keys()[0] functionGroups = dictAlg[dictAlg.keys()[0]].getFuncGroups() - text = '%s\n%s, %d-D' % (genericsettings.current_testbed.name, + text = '%s\n%s, %d-D' % (testbedsettings.current_testbed.name, functionGroups[dictKey], dimList[0]) else: - text = '%s - %s' % (genericsettings.current_testbed.name, + text = '%s - %s' % (testbedsettings.current_testbed.name, ppfig.consecutiveNumbers(sorted(dictFunc.keys()), 'f')) if not (plotType == PlotType.DIM): text += ', %d-D' % dimList[0] @@ -799,7 +799,7 @@ def algname_to_label(algname, dirname=None): verticalalignment="top", transform=plt.gca().transAxes, size='small') if len(dictFunc) == 1: plt.title(' '.join((str(dictFunc.keys()[0]), - genericsettings.current_testbed.short_names[dictFunc.keys()[0]]))) + testbedsettings.current_testbed.short_names[dictFunc.keys()[0]]))) a = plt.gca() plt.xlim(xmin=1e-0, xmax=x_limit**annotation_space_end_relative) diff --git a/code-postprocessing/bbob_pproc/compall/pptables.py b/code-postprocessing/bbob_pproc/compall/pptables.py index 160399640..38986fdab 100644 --- a/code-postprocessing/bbob_pproc/compall/pptables.py +++ b/code-postprocessing/bbob_pproc/compall/pptables.py @@ -8,7 +8,7 @@ from pdb import set_trace import warnings import numpy -from .. import genericsettings, bestalg, toolsstats, pproc, ppfigparam +from .. import genericsettings, bestalg, toolsstats, pproc, ppfigparam, testbedsettings from ..pptex import writeFEvals, writeFEvals2, writeFEvalsMaxPrec, tableXLaTeX, numtotext from ..toolsstats import significancetest, significance_all_best_vs_other from ..pproc import DataSetList @@ -23,7 +23,7 @@ """ def get_table_caption(): - """ Sets table caption, based on the genericsettings.current_testbed + """ Sets table caption, based on the testbedsettings.current_testbed and genericsettings.runlength_based_targets. TODO: \hvref and \fopt should be defined via the current_testbed, @@ -41,7 +41,7 @@ def get_table_caption(): target, the corresponding best \aRT\ in the first row. The different target \Df-values are shown in the top row. 
\#succ is the number of trials that reached the (final) target - $\fopt + """ + genericsettings.current_testbed.hardesttargetlatex + r"""$. + $\fopt + """ + testbedsettings.current_testbed.hardesttargetlatex + r"""$. """ table_caption_two2 = r"""% run-length based target, the corresponding best \aRT\ @@ -53,9 +53,9 @@ def get_table_caption(): in number of function evaluations, in #1. For each function, the \aRT\ and, in braces as dispersion measure, the half difference between 10 and 90\%-tile of (bootstrapped) runtimes is shown for the different - target \Df-values as shown in the top row. + target \DI-values as shown in the top row. \#succ is the number of trials that reached the last target - $\hvref + """ + genericsettings.current_testbed.hardesttargetlatex + r"""$. + $\hvref + """ + testbedsettings.current_testbed.hardesttargetlatex + r"""$. """ table_caption_rest = (r"""% The median number of conducted function evaluations is additionally given in @@ -63,17 +63,17 @@ def get_table_caption(): Entries, succeeded by a star, are statistically significantly better (according to the rank-sum test) when compared to all other algorithms of the table, with $p = 0.05$ or $p = 10^{-k}$ when the number $k$ following the star is larger - than 1, with Bonferroni correction by the number of instances. """ + + than 1, with Bonferroni correction of #2. """ + (r"""A $\downarrow$ indicates the same tested against the best algorithm of BBOB-2009.""" - if not (genericsettings.current_testbed.name == genericsettings.testbed_name_bi) + if not (testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi) else "") + r"""Best results are printed in bold. """) - if genericsettings.current_testbed.name == genericsettings.testbed_name_bi: + if testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi: # NOTE: no runlength-based targets supported yet table_caption = table_caption_one_bi + table_caption_rest - elif genericsettings.current_testbed.name == genericsettings.testbed_name_single: + elif testbedsettings.current_testbed.name == testbedsettings.testbed_name_single: if genericsettings.runlength_based_targets: table_caption = table_caption_one + table_caption_two2 + table_caption_rest else: @@ -254,7 +254,7 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi * significance test against best algorithm * table width... - Takes ``pptable_targetsOfInterest`` from genericsetting's Testbed instance + Takes ``pptable_targetsOfInterest`` from testbedsetting's Testbed instance as "input argument" to compute the desired target values. ``pptable_targetsOfInterest`` might be configured via config. 
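Editor's aside (not part of the patch): a minimal sketch of the access pattern this refactoring moves the tables code to, mirroring the call in main() in the hunk below. The helper name targets_for is hypothetical and only for illustration; testbedsettings.current_testbed is assumed to have been initialized via config.config(testbed_name).

from bbob_pproc import testbedsettings

def targets_for(dimension, function):
    # testbed-dependent settings are now read from testbedsettings, not genericsettings
    testbed = testbedsettings.current_testbed
    # as in main() below: the target values may depend on the (function, dimension) pair
    return testbed.pptablemany_targetsOfInterest((function, dimension))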
@@ -264,7 +264,7 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi bestalgentries = bestalg.load_best_algorithm() - testbed = genericsettings.current_testbed + testbed = testbedsettings.current_testbed # Sort data per dimension and function dictData = {} @@ -283,7 +283,7 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi for df in dictData: # Generate one table per df # first update targets for each dimension-function pair if needed: - targetsOfInterest = testbed.pptable_targetsOfInterest((df[1], df[0])) + targetsOfInterest = testbed.pptablemany_targetsOfInterest((df[1], df[0])) if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues): targetf = targetsOfInterest[-1] else: @@ -394,6 +394,7 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi % (2 * len(targetsOfInterest) + 2, header)]) extraeol.append('') + curlineHtml = [] if function_targets_line is True or (function_targets_line and df[1] in function_targets_line): if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues): curline = [r'\#FEs/D'] @@ -404,8 +405,12 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi curlineHtml.append('%s
REPLACE%d\n' % (i, counter)) counter += 1 else: - curline = [r'$\Delta f_\mathrm{opt}$'] - curlineHtml = ['\n\nΔ fopt
REPLACEH\n'] + if (testbed.name == testbedsettings.testbed_name_bi): + curline = [r'$\Df$'] + curlineHtml = ['\n\nΔ HVref
REPLACEH\n'] + else: + curline = [r'$\Delta f_\mathrm{opt}$'] + curlineHtml = ['\n\nΔ fopt
REPLACEH\n'] counter = 1 for t in targetsOfInterest: curline.append(r'\multicolumn{2}{@{\,}X@{\,}}{%s}' @@ -414,16 +419,21 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi counter += 1 # curline.append(r'\multicolumn{2}{@{\,}X@{}|}{%s}' # % writeFEvals2(targetsOfInterest[-1], precision=1, isscientific=True)) - curline.append(r'\multicolumn{2}{@{}l@{}}{\#succ}') + if (testbed.name == testbedsettings.testbed_name_bi): + curline.append(r'\multicolumn{2}{|@{}l@{}}{\begin{rotate}{30}\#succ\end{rotate}}') + else: + curline.append(r'\multicolumn{2}{|@{}l@{}}{\#succ}') curlineHtml.append('#succ
REPLACEF\n\n\n') table.append(curline) - + extraeol.append(r'\hline') # extraeol.append(r'\hline\arrayrulecolor{tableShade}') curline = [r'\aRT{}$_{\text{best}}$'] if with_table_heading else [r'\textbf{f%d}' % df[1]] replaceValue = '\aRT{}best' if with_table_heading else ('f%d' % df[1]) curlineHtml = [item.replace('REPLACEH', replaceValue) for item in curlineHtml] + + if bestalgentries: if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues): # write ftarget:fevals @@ -468,7 +478,7 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi curlineHtml = [item.replace('REPLACEF', replaceValue) for item in curlineHtml] else: # if not bestalgentries - curline.append(r'\multicolumn{%d}{@{}c@{}|}{}' % (2 * (len(targetsOfInterest) + 1))) + curline.append(r'\multicolumn{%d}{@{}c@{}|}{} & ' % (2 * (len(targetsOfInterest)))) for counter in range(1, len(targetsOfInterest) + 1): curlineHtml = [item.replace('REPLACE%d' % counter, ' ') for item in curlineHtml] curlineHtml = [item.replace('REPLACEF', ' ') for item in curlineHtml] @@ -486,7 +496,7 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi #if df == (5, 17): #set_trace() - header = r'\providecommand{\ntables}{7}' + header = r'\providecommand{\ntables}{%d}' % len(testbed.pptablemany_targetsOfInterest) for i, alg in enumerate(algnames): tableHtml.append('\n') #algname, entries, irs, line, line2, succ, runs, testres1alg in zip(algnames, diff --git a/code-postprocessing/bbob_pproc/config.py b/code-postprocessing/bbob_pproc/config.py index fdf3ddec1..8f8977199 100644 --- a/code-postprocessing/bbob_pproc/config.py +++ b/code-postprocessing/bbob_pproc/config.py @@ -19,6 +19,7 @@ import numpy as np import ppfigdim, pptable from . import genericsettings, pproc, pprldistr +from . import testbedsettings as tbs from .comp2 import ppfig2, ppscatter, pptable2 from .compall import ppfigs, pprldmany, pptables @@ -34,13 +35,14 @@ def target_values(is_expensive, dict_max_fun_evals={}, runlength_limit=1e3): genericsettings.runlength_based_targets = False genericsettings.maxevals_fix_display = None -def config(isBiobjective=None): + +def config(testbed_name=None): """called from a high level, e.g. rungeneric, to configure the lower level modules via modifying parameter settings. 
""" - - if isBiobjective is not None: - genericsettings.loadCurrentTestbed(isBiobjective, pproc.TargetValues) + + if testbed_name: + tbs.load_current_testbed(testbed_name, pproc.TargetValues) genericsettings.simulated_runlength_bootstrap_sample_size = (10 + 990 / (1 + 10 * max(0, genericsettings.in_a_hurry))) @@ -48,7 +50,7 @@ def config(isBiobjective=None): # bestAlg for the biobjective case # TODO: once this is solved, make sure that expensive setting is not # available if no bestAlg or other reference algorithm is available - if genericsettings.current_testbed and genericsettings.current_testbed.name == genericsettings.testbed_name_bi: + if tbs.current_testbed and tbs.current_testbed.name == tbs.testbed_name_bi: if (genericsettings.isExpensive in (True, 1) or genericsettings.runlength_based_targets in (True, 1)): warnings.warn('Expensive setting not yet supported with bbob-biobj testbed; using non-expensive setting instead.') @@ -67,11 +69,11 @@ def config(isBiobjective=None): pprldmany.x_limit = genericsettings.maxevals_fix_display # always fixed - if genericsettings.current_testbed: + if tbs.current_testbed: - testbed = genericsettings.current_testbed + testbed = tbs.current_testbed - testbed.scenario = genericsettings.scenario_rlbased + testbed.scenario = tbs.scenario_rlbased # genericsettings (to be used in rungeneric2 while calling pprldistr.comp(...)): testbed.rldValsOfInterest = pproc.RunlengthBasedTargetValues( genericsettings.target_runlengths_in_single_rldistr, @@ -100,7 +102,17 @@ def config(isBiobjective=None): testbed.ppscatter_target_values = pproc.RunlengthBasedTargetValues(np.logspace(np.log10(0.5), np.log10(50), 8)) # pptable: - testbed.pptable_targetsOfInterest = pproc.RunlengthBasedTargetValues(genericsettings.target_runlengths_in_table, + testbed.pptable_targetsOfInterest = pproc.RunlengthBasedTargetValues(testbed.pptable_target_runlengths, + reference_data = reference_data, + force_different_targets_factor=10**-0.2) + + # pptable2: + testbed.pptable2_targetsOfInterest = pproc.RunlengthBasedTargetValues(testbed.pptable2_target_runlengths, + reference_data = reference_data, + force_different_targets_factor=10**-0.2) + + # pptables: + testbed.pptables_targetsOfInterest = pproc.RunlengthBasedTargetValues(testbed.pptables_target_runlengths, reference_data = reference_data, force_different_targets_factor=10**-0.2) # ppfigs @@ -121,18 +133,7 @@ def config(isBiobjective=None): {'color': 'c', 'marker': 'v', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4}, {'color': 'b', 'marker': '.', 'linewidth': 4}, {'color': 'k', 'marker': 'o', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4}, - ] - - - # pptable2: - pptable2.targetsOfInterest = pproc.RunlengthBasedTargetValues(genericsettings.target_runlengths_in_table, - reference_data = reference_data, - force_different_targets_factor=10**-0.2) - - # pptables (for rungenericmany): - pptables.targetsOfInterest = pproc.RunlengthBasedTargetValues(genericsettings.target_runlengths_in_table, - reference_data = reference_data, - force_different_targets_factor=10**-0.2) + ] ppscatter.markersize = 16 @@ -140,9 +141,9 @@ def config(isBiobjective=None): pass # here the default values of the modules apply # pprlmany.x_limit = ...should depend on noisy/noiseless if 11 < 3: # for testing purpose - if genericsettings.current_testbed: + if tbs.current_testbed: # TODO: this case needs to be tested yet: the current problem is that no noisy data are in this folder - genericsettings.current_testbed.pprldmany_target_values = 
pproc.RunlengthBasedTargetValues(10**np.arange(1, 4, 0.2), 'RANDOMSEARCH') + tbs.current_testbed.pprldmany_target_values = pproc.RunlengthBasedTargetValues(10**np.arange(1, 4, 0.2), 'RANDOMSEARCH') pprldmany.fontsize = 20.0 # should depend on the number of data lines down to 10.0 ? diff --git a/code-postprocessing/bbob_pproc/genericsettings.py b/code-postprocessing/bbob_pproc/genericsettings.py index 75b510f20..0e74de6c2 100644 --- a/code-postprocessing/bbob_pproc/genericsettings.py +++ b/code-postprocessing/bbob_pproc/genericsettings.py @@ -13,33 +13,34 @@ import os import warnings import numpy as np + test = False # debug/test flag, set to False for committing the final version if 1 < 3 and test: np.seterr(all='raise') np.seterr(under='ignore') # ignore underflow force_assertions = False # another debug flag for time-consuming assertions -in_a_hurry = 1000 # [0, 1000] lower resolution, no eps, saves 30% time +in_a_hurry = 1000 # [0, 1000] lower resolution, no eps, saves 30% time maxevals_fix_display = None # 3e2 is the expensive setting only used in config, yet to be improved!? runlength_based_targets = 'auto' # 'auto' means automatic choice, otherwise True or False dimensions_to_display = (2, 3, 5, 10, 20, 40) # this could be used to set the dimensions in respective modules -generate_svg_files = True # generate the svg figures -scaling_figures_with_boxes = True +generate_svg_files = True # generate the svg figures +scaling_figures_with_boxes = True # should replace ppfigdim.dimsBBOB, ppfig2.dimensions, ppfigparam.dimsBBOB? # Variables used in the routines defining desired output for BBOB. tabDimsOfInterest = (5, 20) # dimension which are displayed in the tables target_runlengths_in_scaling_figs = [0.5, 1.2, 3, 10, 50] # used in config -target_runlengths_in_table = [0.5, 1.2, 3, 10, 50] # [0.5, 2, 10, 50] # used in config target_runlengths_in_single_rldistr = [0.5, 2, 10, 50] # used in config -target_runlength = 10 # used in ppfigs.main +target_runlength = 10 # used in ppfigs.main xlimit_expensive = 1e3 # used in -tableconstant_target_function_values = (1e1, 1e0, 1e-1, 1e-3, 1e-5, 1e-7) # used as input for pptables.main in rungenericmany +#tableconstant_target_function_values = ( +#1e1, 1e0, 1e-1, 1e-3, 1e-5, 1e-7) # used as input for pptables.main in rungenericmany # tableconstant_target_function_values = (1e3, 1e2, 1e1, 1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-7) # for post-workshop landscape tables -#tabValsOfInterest = (1.0, 1.0e-2, 1.0e-4, 1.0e-6, 1.0e-8) -#tabValsOfInterest = (10, 1.0, 1e-1, 1e-3, 1e-5, 1.0e-8) +# tabValsOfInterest = (1.0, 1.0e-2, 1.0e-4, 1.0e-6, 1.0e-8) +# tabValsOfInterest = (10, 1.0, 1e-1, 1e-3, 1e-5, 1.0e-8) dim_related_markers = ('+', 'v', '*', 'o', 's', 'D', 'x') dim_related_colors = ('c', 'g', 'b', 'k', 'r', 'm', 'k', 'y', 'k', 'c', 'r', 'm') @@ -57,141 +58,139 @@ # summarized_target_function_values = [-1, 3] # easy easy # summarized_target_function_values = (10, 1e0, 1e-1) # all in one figure (means what?) 
-instancesOfInterest2009 = {1:3, 2:3, 3:3, 4:3, 5:3} # 2009 instances -instancesOfInterest2010 = {1:1, 2:1, 3:1, 4:1, 5:1, 6:1, 7:1, 8:1, 9:1, - 10:1, 11:1, 12:1, 13:1, 14:1, 15:1} # 2010 instances -instancesOfInterest2012 = {1:1, 2:1, 3:1, 4:1, 5:1, 21:1, 22:1, 23:1, 24:1, - 25:1, 26:1, 27:1, 28:1, 29:1, 30:1} # 2012 instances -instancesOfInterest2013 = {1:1, 2:1, 3:1, 4:1, 5:1, 31:1, 32:1, 33:1, 34:1, - 35:1, 36:1, 37:1, 38:1, 39:1, 40:1} # 2013 instances -instancesOfInterest2015 = {1:1, 2:1, 3:1, 4:1, 5:1, 41:1, 42:1, 43:1, 44:1, - 45:1, 46:1, 47:1, 48:1, 49:1, 50:1} # 2015 instances -instancesOfInterest2016 = {1:1, 2:1, 3:1, 4:1, 5:1, 51:1, 52:1, 53:1, 54:1, - 55:1, 56:1, 57:1, 58:1, 59:1, 60:1} # 2016 instances -instancesOfInterestBiobj2016 = {1:1, 2:1, 3:1, 4:1, 5:1} # bi-objective 2016 instances -instancesOfInterest = {1:1, 2:1, 3:1, 4:1, 5:1, 41:1, 42:1, 43:1, 44:1, - 45:1, 46:1, 47:1, 48:1, 49:1, 50:1} # 2015 instances; only for consistency checking +instancesOfInterest2009 = {1: 3, 2: 3, 3: 3, 4: 3, 5: 3} # 2009 instances +instancesOfInterest2010 = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1, + 10: 1, 11: 1, 12: 1, 13: 1, 14: 1, 15: 1} # 2010 instances +instancesOfInterest2012 = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 21: 1, 22: 1, 23: 1, 24: 1, + 25: 1, 26: 1, 27: 1, 28: 1, 29: 1, 30: 1} # 2012 instances +instancesOfInterest2013 = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 31: 1, 32: 1, 33: 1, 34: 1, + 35: 1, 36: 1, 37: 1, 38: 1, 39: 1, 40: 1} # 2013 instances +instancesOfInterest2015 = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 41: 1, 42: 1, 43: 1, 44: 1, + 45: 1, 46: 1, 47: 1, 48: 1, 49: 1, 50: 1} # 2015 instances +instancesOfInterest2016 = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 51: 1, 52: 1, 53: 1, 54: 1, + 55: 1, 56: 1, 57: 1, 58: 1, 59: 1, 60: 1} # 2016 instances +instancesOfInterestBiobj2016 = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1} # bi-objective 2016 instances +instancesOfInterest = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 41: 1, 42: 1, 43: 1, 44: 1, + 45: 1, 46: 1, 47: 1, 48: 1, 49: 1, 50: 1} # 2015 instances; only for consistency checking line_styles = [ # used by ppfigs and pprlmany - {'marker': 'o', 'markersize': 31, 'linestyle': '-', 'color': '#000080'}, # 'NavyBlue' - {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': '#ff00ff'}, # 'Magenta' - {'marker': '*', 'markersize': 33, 'linestyle': '-', 'color': '#ffa500'}, # 'Orange' - {'marker': 'v', 'markersize': 28, 'linestyle': '-', 'color': '#6495ed'}, # 'CornflowerBlue' - {'marker': 'h', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, # 'Red' - {'marker': '^', 'markersize': 25, 'linestyle': '-', 'color': '#9acd32'}, # 'YellowGreen' -# {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'g'}, # 'green' avoid green because of -# {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': '#ffd700'}, # 'Goldenrod' seems too light -# {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'k'}, # 'Black' is too close to NavyBlue -# {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': '#d02090'}, # square, 'VioletRed' seems too close to red - {'marker': 'p', 'markersize': 24, 'linestyle': '-', 'color': 'c'}, - {'marker': 'H', 'markersize': 23, 'linestyle': '-', 'color': '#bebebe'}, # 'Gray' - # {'marker': 'o', 'markersize': 23, 'linestyle': '-', 'color': '#ffff00'}, # 'Yellow' - {'marker': '3', 'markersize': 23, 'linestyle': '-', 'color': '#adff2f'}, # 'GreenYellow' - {'marker': '1', 'markersize': 23, 'linestyle': '-', 'color': '#228b22'}, # 'ForestGreen' - {'marker': 'D', 'markersize': 23, 'linestyle': '-', 'color': '#ffc0cb'}, # 
'Lavender' - {'marker': '<', 'markersize': 23, 'linestyle': '-', 'color': '#87ceeb'}, # 'SkyBlue' close to CornflowerBlue - {'marker': 'v', 'markersize': 23, 'linestyle': '--', 'color': '#000080'}, # 'NavyBlue' - {'marker': '*', 'markersize': 23, 'linestyle': '--', 'color': 'r'}, # 'Red' - {'marker': 's', 'markersize': 23, 'linestyle': '--', 'color': '#ffd700'}, # 'Goldenrod' - {'marker': 'd', 'markersize': 23, 'linestyle': '--', 'color': '#d02090'}, # square, 'VioletRed' - {'marker': '^', 'markersize': 23, 'linestyle': '--', 'color': '#6495ed'}, # 'CornflowerBlue' - {'marker': '<', 'markersize': 23, 'linestyle': '--', 'color': '#ffa500'}, # 'Orange' - {'marker': 'h', 'markersize': 23, 'linestyle': '--', 'color': '#ff00ff'}, # 'Magenta' - # {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': 'm'}, # square, magenta - {'marker': 'p', 'markersize': 23, 'linestyle': '--', 'color': '#bebebe'}, # 'Gray' - {'marker': 'H', 'markersize': 23, 'linestyle': '--', 'color': '#87ceeb'}, # 'SkyBlue' - {'marker': '1', 'markersize': 23, 'linestyle': '--', 'color': '#ffc0cb'}, # 'Lavender' - {'marker': '2', 'markersize': 23, 'linestyle': '--', 'color': '#228b22'}, # 'ForestGreen' - {'marker': '4', 'markersize': 23, 'linestyle': '--', 'color': '#32cd32'}, # 'LimeGreen' - {'marker': '3', 'markersize': 23, 'linestyle': '--', 'color': '#9acd32'}, # 'YellowGreen' - {'marker': 'D', 'markersize': 23, 'linestyle': '--', 'color': '#adff2f'}, # 'GreenYellow' - ] + {'marker': 'o', 'markersize': 31, 'linestyle': '-', 'color': '#000080'}, # 'NavyBlue' + {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': '#ff00ff'}, # 'Magenta' + {'marker': '*', 'markersize': 33, 'linestyle': '-', 'color': '#ffa500'}, # 'Orange' + {'marker': 'v', 'markersize': 28, 'linestyle': '-', 'color': '#6495ed'}, # 'CornflowerBlue' + {'marker': 'h', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, # 'Red' + {'marker': '^', 'markersize': 25, 'linestyle': '-', 'color': '#9acd32'}, # 'YellowGreen' + # {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'g'}, # 'green' avoid green because of + # {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': '#ffd700'}, # 'Goldenrod' seems too light + # {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'k'}, # 'Black' is too close to NavyBlue + # {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': '#d02090'}, # square, 'VioletRed' seems too close to red + {'marker': 'p', 'markersize': 24, 'linestyle': '-', 'color': 'c'}, + {'marker': 'H', 'markersize': 23, 'linestyle': '-', 'color': '#bebebe'}, # 'Gray' + # {'marker': 'o', 'markersize': 23, 'linestyle': '-', 'color': '#ffff00'}, # 'Yellow' + {'marker': '3', 'markersize': 23, 'linestyle': '-', 'color': '#adff2f'}, # 'GreenYellow' + {'marker': '1', 'markersize': 23, 'linestyle': '-', 'color': '#228b22'}, # 'ForestGreen' + {'marker': 'D', 'markersize': 23, 'linestyle': '-', 'color': '#ffc0cb'}, # 'Lavender' + {'marker': '<', 'markersize': 23, 'linestyle': '-', 'color': '#87ceeb'}, # 'SkyBlue' close to CornflowerBlue + {'marker': 'v', 'markersize': 23, 'linestyle': '--', 'color': '#000080'}, # 'NavyBlue' + {'marker': '*', 'markersize': 23, 'linestyle': '--', 'color': 'r'}, # 'Red' + {'marker': 's', 'markersize': 23, 'linestyle': '--', 'color': '#ffd700'}, # 'Goldenrod' + {'marker': 'd', 'markersize': 23, 'linestyle': '--', 'color': '#d02090'}, # square, 'VioletRed' + {'marker': '^', 'markersize': 23, 'linestyle': '--', 'color': '#6495ed'}, # 'CornflowerBlue' + {'marker': '<', 'markersize': 23, 'linestyle': 
'--', 'color': '#ffa500'}, # 'Orange' + {'marker': 'h', 'markersize': 23, 'linestyle': '--', 'color': '#ff00ff'}, # 'Magenta' + # {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': 'm'}, # square, magenta + {'marker': 'p', 'markersize': 23, 'linestyle': '--', 'color': '#bebebe'}, # 'Gray' + {'marker': 'H', 'markersize': 23, 'linestyle': '--', 'color': '#87ceeb'}, # 'SkyBlue' + {'marker': '1', 'markersize': 23, 'linestyle': '--', 'color': '#ffc0cb'}, # 'Lavender' + {'marker': '2', 'markersize': 23, 'linestyle': '--', 'color': '#228b22'}, # 'ForestGreen' + {'marker': '4', 'markersize': 23, 'linestyle': '--', 'color': '#32cd32'}, # 'LimeGreen' + {'marker': '3', 'markersize': 23, 'linestyle': '--', 'color': '#9acd32'}, # 'YellowGreen' + {'marker': 'D', 'markersize': 23, 'linestyle': '--', 'color': '#adff2f'}, # 'GreenYellow' +] line_styles_old = [ # used by ppfigs and pprlmany - {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': 'b'}, - {'marker': 'v', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, - {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'c'}, - {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': 'm'}, # square - {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'k'}, - {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': 'y'}, - {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': 'g'}, - {'marker': 's', 'markersize': 24, 'linestyle': '-', 'color': 'b'}, - {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': 'r'}, - {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': 'c'}, - {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': 'm'}, - {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': 'k'}, - {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': 'y'}, - {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': 'g'}, - {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': 'g'}, - {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': 'r'}, - {'marker': 'v', 'markersize': 30, 'linestyle': '-', 'color': 'b'}, - {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'm'}, - {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': 'c'}, # square - {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'y'}, - {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': 'k'}, - {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': 'b'}, - {'marker': 's', 'markersize': 24, 'linestyle': '-', 'color': 'g'}, - {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': 'c'}, - {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': 'r'}, - {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': 'k'}, - {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': 'm'}, - {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': 'g'}, - {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': 'y'}, - {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': 'r'} - ] + {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': 'b'}, + {'marker': 'v', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, + {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'c'}, + {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': 'm'}, # square + {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'k'}, + {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': 'y'}, + {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': 'g'}, + {'marker': 's', 'markersize': 24, 'linestyle': '-', 'color': 'b'}, + 
{'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': 'r'}, + {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': 'c'}, + {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': 'm'}, + {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': 'k'}, + {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': 'y'}, + {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': 'g'}, + {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': 'g'}, + {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': 'r'}, + {'marker': 'v', 'markersize': 30, 'linestyle': '-', 'color': 'b'}, + {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'm'}, + {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': 'c'}, # square + {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'y'}, + {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': 'k'}, + {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': 'b'}, + {'marker': 's', 'markersize': 24, 'linestyle': '-', 'color': 'g'}, + {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': 'c'}, + {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': 'r'}, + {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': 'k'}, + {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': 'm'}, + {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': 'g'}, + {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': 'y'}, + {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': 'r'} +] more_old_line_styles = [ # used by ppfigs and pprlmany - {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': '#000080'}, # 'NavyBlue' - {'marker': 'v', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, # 'Red' - {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': '#ffd700'}, # 'Goldenrod' seems too light - {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': '#d02090'}, # square, 'VioletRed' - {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'k'}, # 'Black' is too close to NavyBlue - {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': '#6495ed'}, # 'CornflowerBlue' - {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': '#ffa500'}, # 'Orange' - {'marker': 'p', 'markersize': 24, 'linestyle': '-', 'color': '#ff00ff'}, # 'Magenta' - {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': '#bebebe'}, # 'Gray' - {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': '#87ceeb'}, # 'SkyBlue' - {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': '#ffc0cb'}, # 'Lavender' - {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': '#228b22'}, # 'ForestGreen' - {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': '#32cd32'}, # 'LimeGreen' - {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': '#9acd32'}, # 'YellowGreen' - {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': '#adff2f'}, # 'GreenYellow' - #{'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': '#ffff00'}, # 'Yellow' - {'marker': 'v', 'markersize': 30, 'linestyle': '--', 'color': '#000080'}, # 'NavyBlue' - {'marker': '*', 'markersize': 31, 'linestyle': '--', 'color': 'r'}, # 'Red' - {'marker': 's', 'markersize': 20, 'linestyle': '--', 'color': '#ffd700'}, # 'Goldenrod' - {'marker': 'd', 'markersize': 27, 'linestyle': '--', 'color': '#d02090'}, # square, 'VioletRed' - {'marker': '^', 'markersize': 26, 'linestyle': '--', 'color': '#6495ed'}, # 'CornflowerBlue' - {'marker': '<', 'markersize': 25, 'linestyle': '--', 'color': '#ffa500'}, 
# 'Orange' - {'marker': 'h', 'markersize': 24, 'linestyle': '--', 'color': '#ff00ff'}, # 'Magenta' - {'marker': 'p', 'markersize': 24, 'linestyle': '--', 'color': '#bebebe'}, # 'Gray' - {'marker': 'H', 'markersize': 24, 'linestyle': '--', 'color': '#87ceeb'}, # 'SkyBlue' - {'marker': '1', 'markersize': 24, 'linestyle': '--', 'color': '#ffc0cb'}, # 'Lavender' - {'marker': '2', 'markersize': 24, 'linestyle': '--', 'color': '#228b22'}, # 'ForestGreen' - {'marker': '4', 'markersize': 24, 'linestyle': '--', 'color': '#32cd32'}, # 'LimeGreen' - {'marker': '3', 'markersize': 24, 'linestyle': '--', 'color': '#9acd32'}, # 'YellowGreen' - {'marker': 'D', 'markersize': 24, 'linestyle': '--', 'color': '#adff2f'}, # 'GreenYellow' - ] - - + {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': '#000080'}, # 'NavyBlue' + {'marker': 'v', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, # 'Red' + {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': '#ffd700'}, # 'Goldenrod' seems too light + {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': '#d02090'}, # square, 'VioletRed' + {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'k'}, # 'Black' is too close to NavyBlue + {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': '#6495ed'}, # 'CornflowerBlue' + {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': '#ffa500'}, # 'Orange' + {'marker': 'p', 'markersize': 24, 'linestyle': '-', 'color': '#ff00ff'}, # 'Magenta' + {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': '#bebebe'}, # 'Gray' + {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': '#87ceeb'}, # 'SkyBlue' + {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': '#ffc0cb'}, # 'Lavender' + {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': '#228b22'}, # 'ForestGreen' + {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': '#32cd32'}, # 'LimeGreen' + {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': '#9acd32'}, # 'YellowGreen' + {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': '#adff2f'}, # 'GreenYellow' + # {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': '#ffff00'}, # 'Yellow' + {'marker': 'v', 'markersize': 30, 'linestyle': '--', 'color': '#000080'}, # 'NavyBlue' + {'marker': '*', 'markersize': 31, 'linestyle': '--', 'color': 'r'}, # 'Red' + {'marker': 's', 'markersize': 20, 'linestyle': '--', 'color': '#ffd700'}, # 'Goldenrod' + {'marker': 'd', 'markersize': 27, 'linestyle': '--', 'color': '#d02090'}, # square, 'VioletRed' + {'marker': '^', 'markersize': 26, 'linestyle': '--', 'color': '#6495ed'}, # 'CornflowerBlue' + {'marker': '<', 'markersize': 25, 'linestyle': '--', 'color': '#ffa500'}, # 'Orange' + {'marker': 'h', 'markersize': 24, 'linestyle': '--', 'color': '#ff00ff'}, # 'Magenta' + {'marker': 'p', 'markersize': 24, 'linestyle': '--', 'color': '#bebebe'}, # 'Gray' + {'marker': 'H', 'markersize': 24, 'linestyle': '--', 'color': '#87ceeb'}, # 'SkyBlue' + {'marker': '1', 'markersize': 24, 'linestyle': '--', 'color': '#ffc0cb'}, # 'Lavender' + {'marker': '2', 'markersize': 24, 'linestyle': '--', 'color': '#228b22'}, # 'ForestGreen' + {'marker': '4', 'markersize': 24, 'linestyle': '--', 'color': '#32cd32'}, # 'LimeGreen' + {'marker': '3', 'markersize': 24, 'linestyle': '--', 'color': '#9acd32'}, # 'YellowGreen' + {'marker': 'D', 'markersize': 24, 'linestyle': '--', 'color': '#adff2f'}, # 'GreenYellow' +] if 11 < 3: # in case using my own linestyles line_styles = [ # used by ppfigs and pprlmany, to be modified - 
{'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': 'b'}, - {'marker': 'o', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, - {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'b'}, - {'marker': '*', 'markersize': 20, 'linestyle': '-', 'color': 'r'}, - {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'b'}, - {'marker': '^', 'markersize': 26, 'linestyle': '-', 'color': 'r'}, - {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': 'g'}, - {'marker': 'p', 'markersize': 24, 'linestyle': '-', 'color': 'b'}, - {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': 'r'}, - {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': 'c'}, - {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': 'm'}, - {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': 'k'}, - {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': 'y'}, - {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': 'g'}, - {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': 'g'} - ] + {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': 'b'}, + {'marker': 'o', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, + {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'b'}, + {'marker': '*', 'markersize': 20, 'linestyle': '-', 'color': 'r'}, + {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'b'}, + {'marker': '^', 'markersize': 26, 'linestyle': '-', 'color': 'r'}, + {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': 'g'}, + {'marker': 'p', 'markersize': 24, 'linestyle': '-', 'color': 'b'}, + {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': 'r'}, + {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': 'c'}, + {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': 'm'}, + {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': 'k'}, + {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': 'y'}, + {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': 'g'}, + {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': 'g'} + ] minmax_algorithm_fontsize = [10, 15] # depending on the number of algorithms @@ -213,7 +212,7 @@ pprldmany_file_name = 'pprldmany' pprldmany_group_file_name = 'pprldmany_gr' -latex_commands_for_html = 'latex_commands_for_html' +latex_commands_for_html = 'latex_commands_for_html' extraction_folder_prefix = '_extracted_' @@ -222,187 +221,48 @@ isFig = True isTab = True isNoisy = False -isNoiseFree = False +isNoiseFree = False isConv = False verbose = False outputdir = 'ppdata' inputsettings = 'color' -isExpensive = False +isExpensive = False isRldOnSingleFcts = True isRLDistr = True ## -isLogLoss = True # only affects rungeneric1 -isPickled = False # only affects rungeneric1 +isLogLoss = True # only affects rungeneric1 +isPickled = False # only affects rungeneric1 ## -isScatter = True # only affects rungeneric2 -isScaleUp = True # only affects rungeneric2, only set here and not altered by any command line argument for now +isScatter = True # only affects rungeneric2 +isScaleUp = True # only affects rungeneric2, only set here and not altered by any command line argument for now # Used by getopt: -shortoptlist = "hvpo:" +shortoptlist = "hvpo:" longoptlist = ["help", "output-dir=", "noisy", "noise-free", "tab-only", "fig-only", "rld-only", "no-rld-single-fcts", - "verbose", "settings=", "conv", + "verbose", "settings=", "conv", "expensive", "runlength-based", "los-only", "crafting-effort=", "pickle", "sca-only", "no-svg"] + + # thereby, "los-only", 
"crafting-effort=", and "pickle" affect only rungeneric1 # and "sca-only" only affects rungeneric2 -def getBenchmarksShortInfos(isBiobjective): - return 'biobj-benchmarkshortinfos.txt' if isBiobjective else 'benchmarkshortinfos.txt' - + def getFigFormats(): if in_a_hurry: fig_formats = ('pdf', 'svg') if generate_svg_files else ('pdf',) else: fig_formats = ('eps', 'pdf', 'svg') if generate_svg_files else ('eps', 'pdf') # fig_formats = ('eps', 'pdf', 'pdf', 'png', 'svg') - + return fig_formats - + + def getFontSize(nameList): maxFuncLength = max(len(i) for i in nameList) fontSize = 24 - max(0, 2 * ((maxFuncLength - 35) / 5)) return fontSize -scenario_rlbased = 'rlbased' -scenario_fixed = 'fixed' -scenario_biobjfixed = 'biobjfixed' -all_scenarios = [scenario_rlbased, scenario_fixed, scenario_biobjfixed] - -testbed_name_single = 'bbob' -testbed_name_bi = 'bbob-biobj' - -class Testbed(object): - """this might become the future way to have settings related to testbeds - TODO: should go somewhere else than genericsettings.py - TODO: how do we pass information from the benchmark to the post-processing? - - """ - def info(self, fun_number=None): - """info on the testbed if ``fun_number is None`` or one-line info - for function with number ``fun_number``. - - """ - if fun_number is None: - return self.__doc__ - - for line in open(os.path.join(os.path.abspath(os.path.split(__file__)[0]), - self.info_filename)).readlines(): - if line.split(): # ie if not empty - try: # empty lines are ignored - fun = int(line.split()[0]) - if fun == fun_number: - return 'F'+str(fun) + ' ' + ' '.join(line.split()[1:]) - except ValueError: - continue # ignore annotations - - def isBiobjective(self): - return self.name == testbed_name_bi - -class GECCOBBOBTestbed(Testbed): - """Testbed used in the GECCO BBOB workshops 2009, 2010, 2012, 2013, 2015. - """ - def __init__(self, targetValues): - # TODO: should become a function, as low_budget is a display setting - # not a testbed setting - # only the short info, how to deal with both infos? 
- self.info_filename = 'GECCOBBOBbenchmarkinfos.txt' - self.name = testbed_name_single - self.short_names = {} - self.hardesttargetlatex = '10^{-8}' # used for ppfigs, pptable, pptable2, and pptables - self.ppfigs_ftarget = 1e-8 - self.ppfigdim_target_values = targetValues((10, 1, 1e-1, 1e-2, 1e-3, 1e-5, 1e-8)) # possibly changed in config - self.pprldistr_target_values = targetValues((10., 1e-1, 1e-4, 1e-8)) # possibly changed in config - self.pprldmany_target_values = targetValues(10**np.arange(2, -8.2, -0.2)) # possibly changed in config - self.pprldmany_target_range_latex = '$10^{[-8..2]}$' - self.ppscatter_target_values = targetValues(np.logspace(-8, 2, 46)) - self.rldValsOfInterest = (10, 1e-1, 1e-4, 1e-8) # possibly changed in config - self.ppfvdistr_min_target = 1e-8 - self.functions_with_legend = (1, 24, 101, 130) - self.number_of_functions = 24 - self.pptable_ftarget = 1e-8 # value for determining the success ratio in all tables - self.pptable_targetsOfInterest = targetValues((10, 1, 1e-1, 1e-2, 1e-3, 1e-5, 1e-7)) # for pptable and pptables - self.pptable2_targetsOfInterest = targetValues((1e+1, 1e-1, 1e-3, 1e-5, 1e-7)) # used for pptable2 - self.scenario = scenario_fixed - self.best_algorithm_filename = 'bestalgentries2009.pickle.gz' - - try: - info_list = open(os.path.join(os.path.dirname(__file__), - getBenchmarksShortInfos(False)), - 'r').read().split('\n') - info_dict = {} - for info in info_list: - key_val = info.split(' ', 1) - if len(key_val) > 1: - info_dict[int(key_val[0])] = key_val[1] - self.short_names = info_dict - except: - warnings.warn('benchmark infos not found') - - -class GECCOBiobjBBOBTestbed(Testbed): - """Testbed used in the GECCO biobjective BBOB workshop 2016. - """ - def __init__(self, targetValues): - # TODO: should become a function, as low_budget is a display setting - # not a testbed setting - # only the short info, how to deal with both infos? - self.info_filename = 'GECCOBBOBbenchmarkinfos.txt' - self.name = testbed_name_bi - self.short_names = {} - self.hardesttargetlatex = '10^{-5}' # used for ppfigs, pptable, pptable2, and pptables - self.ppfigs_ftarget = 1e-5 - self.ppfigdim_target_values = targetValues((1e-1, 1e-2, 1e-3, 1e-4, 1e-5)) # possibly changed in config - self.pprldistr_target_values = targetValues((1e-1, 1e-2, 1e-3, 1e-5)) # possibly changed in config - target_values = np.append(np.append(10**np.arange(0, -5.1, -0.1), [0]), -10**np.arange(-5, -3.9, 0.2)) - self.pprldmany_target_values = targetValues(target_values) # possibly changed in config - self.pprldmany_target_range_latex = '$\{-10^{-4}, -10^{-4.2}, $ $-10^{-4.4}, -10^{-4.6}, -10^{-4.8}, -10^{-5}, 0, 10^{-5}, 10^{-4.9}, 10^{-4.8}, \dots, 10^{-0.1}, 10^0\}$' - # ppscatter_target_values are copied from the single objective case. Define the correct values! - self.ppscatter_target_values = targetValues(np.logspace(-8, 2, 46)) # that does not look right here! 
- self.rldValsOfInterest = (1e-1, 1e-2, 1e-3, 1e-4, 1e-5) # possibly changed in config - self.ppfvdistr_min_target = 1e-5 - self.functions_with_legend = (1, 30, 31, 55) - self.number_of_functions = 55 - self.pptable_ftarget = 1e-5 # value for determining the success ratio in all tables - self.pptable_targetsOfInterest = targetValues((1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5)) # possibly changed in config for all tables - self.pptable2_targetsOfInterest = targetValues((1e-1, 1e-2, 1e-3, 1e-4, 1e-5)) # used for pptable2 - self.scenario = scenario_biobjfixed - self.best_algorithm_filename = '' - - try: - info_list = open(os.path.join(os.path.dirname(__file__), - getBenchmarksShortInfos(True)), - 'r').read().split('\n') - info_dict = {} - for info in info_list: - key_val = info.split(' ', 1) - if len(key_val) > 1: - info_dict[int(key_val[0])] = key_val[1] - self.short_names = info_dict - except: - warnings.warn('benchmark infos not found') - -class GECCOBBOBNoisefreeTestbed(GECCOBBOBTestbed): - __doc__ = GECCOBBOBTestbed.__doc__ - -class GECCOBiobjBBOBNoisefreeTestbed(GECCOBiobjBBOBTestbed): - __doc__ = GECCOBiobjBBOBTestbed.__doc__ - -# TODO: this needs to be set somewhere, e.g. in rungeneric* -# or even better by investigating in the data attributes - -current_testbed = None - -def loadCurrentTestbed(isBiobjective, targetValues): - - global current_testbed - - #if not current_testbed: - if isBiobjective: - current_testbed = GECCOBiobjBBOBNoisefreeTestbed(targetValues) - else: - current_testbed = GECCOBBOBNoisefreeTestbed(targetValues) - - return current_testbed diff --git a/code-postprocessing/bbob_pproc/latex_commands_for_html.html b/code-postprocessing/bbob_pproc/latex_commands_for_html.html index 4c1332e35..4a5b71d91 100644 --- a/code-postprocessing/bbob_pproc/latex_commands_for_html.html +++ b/code-postprocessing/bbob_pproc/latex_commands_for_html.html @@ -152,7 +152,7 @@ Entries, succeeded by a star, are statistically significantly better (according to the rank-sum test) when compared to all other algorithms of the table, with p = 0.05 or p = 10−k when the number k following the star is larger - than 1, with Bonferroni correction by the number of instances. A ↓ indicates the same tested against the best + than 1, with Bonferroni correction of 48. A ↓ indicates the same tested against the best algorithm of BBOB-2009.Best results are printed in bold.
@@ -170,7 +170,7 @@ Entries, succeeded by a star, are statistically significantly better (according to the rank-sum test) when compared to all other algorithms of the table, with p = 0.05 or p = 10−k when the number k following the star is larger - than 1, with Bonferroni correction by the number of instances. A ↓ indicates the same tested against the best + than 1, with Bonferroni correction of 48. A ↓ indicates the same tested against the best algorithm of BBOB-2009.Best results are printed in bold.
@@ -341,7 +341,7 @@ Entries, succeeded by a star, are statistically significantly better (according to the rank-sum test) when compared to all other algorithms of the table, with p = 0.05 or p = 10−k when the number k following the star is larger - than 1, with Bonferroni correction by the number of instances. A ↓ indicates the same tested against the best + than 1, with Bonferroni correction of 48. A ↓ indicates the same tested against the best algorithm of BBOB-2009.Best results are printed in bold.
@@ -360,7 +360,7 @@ Entries, succeeded by a star, are statistically significantly better (according to the rank-sum test) when compared to all other algorithms of the table, with p = 0.05 or p = 10−k when the number k following the star is larger - than 1, with Bonferroni correction by the number of instances. A ↓ indicates the same tested against the best + than 1, with Bonferroni correction of 48. A ↓ indicates the same tested against the best algorithm of BBOB-2009.Best results are printed in bold.
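The fixed factor quoted in these table captions is the number of table dimensions (5-D and 20-D) times the number of functions in the testbed: 2 * 24 = 48 for the single-objective tables here, and 2 * 55 = 110 for the bi-objective tables further below. A minimal sketch of that computation, mirroring the 2 * number_of_functions expression used by the caption generator later in this patch (it assumes a testbed has already been loaded into testbedsettings.current_testbed; the helper name is illustrative, not part of the code base):

    from bbob_pproc import testbedsettings

    def bonferroni_factor(n_table_dimensions=2):
        # 2 table dimensions (5-D and 20-D) times 24 ('bbob') or
        # 55 ('bbob-biobj') functions gives the 48 and 110 quoted in the captions.
        return n_table_dimensions * testbedsettings.current_testbed.number_of_functions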
@@ -425,9 +425,9 @@ Empirical cumulative distribution functions (ECDF), plotting the fraction of trials with an outcome not larger than the respective value on the x-axis. Left subplots: ECDF of the number of function evaluations (FEvals) divided by search space dimension D, - to fall below HVref+∆f with ∆f + to fall below Iref+∆f with ∆f =10k, where k is the first value in the legend. - The thick red line represents the most difficult target value HVref+10−5. Legends indicate for each target the number of functions that were solved in at + The thick red line represents the most difficult target value Iref+10−5. Legends indicate for each target the number of functions that were solved in at least one trial within the displayed budget. Right subplots: ECDF of the best achieved ∆f for running times of 0.5D, 1.2D, 3D, 10D, 100D, 1000D,... @@ -445,7 +445,7 @@ of run lengths and speed-up ratios in 5-D (left) and 20-D (right). Left sub-columns: ECDF of the number of function evaluations divided by dimension D - (FEvals/D) to reach a target value HVref+∆f with ∆f + (FEvals/D) to reach a target value Iref+∆f with ∆f =10k, where k is given by the first value in the legend, for algorithmA (°) and algorithmB () . Right sub-columns: @@ -459,7 +459,7 @@
##bbobppfigdimlegendbiobjfixed## - Scaling of runtime to reach HVref+10# with dimension; + Scaling of runtime to reach Iref+10# with dimension; runtime is measured in number of f-evaluations and # is given in the legend; Lines: average runtime (aRT); Cross (+): median runtime of successful runs to reach the most difficult @@ -481,7 +481,7 @@ 90%-tile of (bootstrapped) runtimes is shown for the different target ∆f-values as shown in the top row. #succ is the number of trials that reached the last target - HVref+ 10−5. + Iref+ 10−5. The median number of conducted function evaluations is additionally given in italics, if the target in the last column was never reached. @@ -495,7 +495,7 @@ 90%-tile of (bootstrapped) runtimes is shown for the different target ∆f-values as shown in the top row. #succ is the number of trials that reached the last target - HVref+ 10−5. + Iref+ 10−5. The median number of conducted function evaluations is additionally given in italics, if the last target was never reached. 1:algorithmAshort is algorithmA and 2:algorithmBshort is algorithmB. @@ -510,15 +510,15 @@ in number of function evaluations, in dimension 5. For each function, the aRT  and, in braces as dispersion measure, the half difference between 10 and 90%-tile of (bootstrapped) runtimes is shown for the different - target ∆f-values as shown in the top row. + target ∆I-values as shown in the top row. #succ is the number of trials that reached the last target - HVref+ 10−5. + Iref+ 10−5. The median number of conducted function evaluations is additionally given in italics, if the target in the last column was never reached. Entries, succeeded by a star, are statistically significantly better (according to the rank-sum test) when compared to all other algorithms of the table, with p = 0.05 or p = 10−k when the number k following the star is larger - than 1, with Bonferroni correction by the number of instances. Best results are printed in bold. + than 1, with Bonferroni correction of 110. Best results are printed in bold.
##bbobpptablesmanylegendbiobjfixed20## @@ -527,15 +527,15 @@ in number of function evaluations, in dimension 20. For each function, the aRT  and, in braces as dispersion measure, the half difference between 10 and 90%-tile of (bootstrapped) runtimes is shown for the different - target ∆f-values as shown in the top row. + target ∆I-values as shown in the top row. #succ is the number of trials that reached the last target - HVref+ 10−5. + Iref+ 10−5. The median number of conducted function evaluations is additionally given in italics, if the target in the last column was never reached. Entries, succeeded by a star, are statistically significantly better (according to the rank-sum test) when compared to all other algorithms of the table, with p = 0.05 or p = 10−k when the number k following the star is larger - than 1, with Bonferroni correction by the number of instances. Best results are printed in bold. + than 1, with Bonferroni correction of 110. Best results are printed in bold.
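On the generator side, the dimension-specific placeholder just above (##bbobpptablesmanylegendbiobjfixed20##) is written by the new two-parameter helper added to preparetexforhtml.py later in this patch, which passes the dimension label and the Bonferroni factor as separate arguments. A short, self-contained illustration of what it emits for this block; the helper body is copied from the patch and the expected output is shown in the trailing comments:

    # Copied from the prepare_item_two helper added in preparetexforhtml.py.
    # The \# sequences are LaTeX escapes; they become plain '#' anchors once
    # the generated .tex file is converted to HTML.
    def prepare_item_two(name, command_name='', paramOne='', paramTwo=''):
        if not command_name:
            command_name = name
        return '\#\#%s\#\#\n\\%s{%s}{%s}\n' % (name, command_name, paramOne, paramTwo)

    print(prepare_item_two('bbobpptablesmanylegendbiobjfixed20',
                           'bbobpptablesmanylegendbiobjfixed',
                           'dimension 20', '110'))
    # \#\#bbobpptablesmanylegendbiobjfixed20\#\#
    # \bbobpptablesmanylegendbiobjfixed{dimension 20}{110}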
##bbobppscatterlegendbiobjfixed## @@ -583,5 +583,5 @@ TEX by TTH, -version 4.08.
On 04 Apr 2016, 23:04. +version 4.08.
On 11 May 2016, 00:57. diff --git a/code-postprocessing/bbob_pproc/ppfig.py b/code-postprocessing/bbob_pproc/ppfig.py index 2a4cbbad3..f9795c884 100644 --- a/code-postprocessing/bbob_pproc/ppfig.py +++ b/code-postprocessing/bbob_pproc/ppfig.py @@ -12,7 +12,7 @@ from matplotlib import pyplot as plt import shutil # from pdb import set_trace -from . import genericsettings, toolsstats, htmldesc # absolute_import => . refers to where ppfig resides in the package +from . import genericsettings, testbedsettings, toolsstats, htmldesc # absolute_import => . refers to where ppfig resides in the package import pkg_resources bbox_inches_choices = { # do we also need pad_inches = 0? @@ -175,9 +175,10 @@ def getRldLink(htmlPage, currentDir, isBiobjective): links += addLink(currentDir, folder, fileName, 'Runtime distribution plots (per dimension)', ignoreFileExists=ignoreFileExists) - fileName = '%s_02D.html' % genericsettings.pprldmany_group_file_name - links += addLink(currentDir, folder, fileName, 'Runtime distribution plots by group (per dimension)', - ignoreFileExists=ignoreFileExists) + if htmlPage == HtmlPage.ONE: + fileName = '%s_02D.html' % genericsettings.pprldmany_group_file_name + links += addLink(currentDir, folder, fileName, 'Runtime distribution plots by group (per dimension)', + ignoreFileExists=ignoreFileExists) return links @@ -216,7 +217,7 @@ def save_single_functions_html(filename, if not htmlPage == HtmlPage.PPRLDMANY_BY_GROUP: functionGroups.update({'noiselessall':'All functions'}) - maxFunctionIndex = genericsettings.current_testbed.number_of_functions + maxFunctionIndex = testbedsettings.current_testbed.number_of_functions captionStringFormat = '

\n%s\n

' addLinkForNextDim = add_to_names.endswith('D') bestAlgExists = not isBiobjective @@ -264,7 +265,7 @@ def save_single_functions_html(filename, f.write(addImage('%s_%02dD_%s.%s' % (name, dimension, typeKey, extension), True)) f.write('') - key = 'bbobpprldistrlegendtwo' + genericsettings.current_testbed.scenario + key = 'bbobpprldistrlegendtwo' + testbedsettings.current_testbed.scenario f.write(captionStringFormat % htmldesc.getValue('##' + key + '##')) currentHeader = 'Table showing the aRT in number of function evaluations' @@ -273,7 +274,7 @@ def save_single_functions_html(filename, f.write("\n

%s

\n" % currentHeader) f.write("\n\n") - key = 'bbobpptablestwolegend' + genericsettings.current_testbed.scenario + key = 'bbobpptablestwolegend' + testbedsettings.current_testbed.scenario f.write(captionStringFormat % htmldesc.getValue('##' + key + '##')) elif htmlPage is HtmlPage.MANY: @@ -320,7 +321,7 @@ def save_single_functions_html(filename, currentHeader = 'aRT in number of function evaluations' f.write("

%s

\n" % currentHeader) f.write("\n\n") - key = 'bbobpptablecaption' + genericsettings.current_testbed.scenario + key = 'bbobpptablecaption' + testbedsettings.current_testbed.scenario f.write(captionStringFormat % htmldesc.getValue('##' + key + '##')) elif htmlPage is HtmlPage.PPRLDISTR: @@ -337,7 +338,7 @@ def save_single_functions_html(filename, f.write(addImage('%s_%02dD_%s.%s' % (name, dimension, typeKey, extension), True)) f.write('') - key = 'bbobpprldistrlegend' + genericsettings.current_testbed.scenario + key = 'bbobpprldistrlegend' + testbedsettings.current_testbed.scenario f.write(captionStringFormat % htmldesc.getValue('##' + key + '##')) elif htmlPage is HtmlPage.PPLOGLOSS: @@ -348,7 +349,7 @@ def save_single_functions_html(filename, for dimension in dimensions: f.write(addImage('pplogloss_%02dD_noiselessall.%s' % (dimension, extension), True)) f.write("\n\n") - scenario = genericsettings.current_testbed.scenario + scenario = testbedsettings.current_testbed.scenario f.write(captionStringFormat % htmldesc.getValue('##bbobloglosstablecaption' + scenario + '##')) for typeKey, typeValue in functionGroups.iteritems(): @@ -391,7 +392,7 @@ def write_pptables(f, dimension, captionStringFormat, maxFunctionIndex, bestAlgE f.write("\n\n" % (ifun, dimension)) if genericsettings.isTab: - key = 'bbobpptablesmanylegend' + genericsettings.current_testbed.scenario + key = 'bbobpptablesmanylegend' + testbedsettings.current_testbed.scenario f.write(captionStringFormat % htmldesc.getValue('##' + key + str(dimension) + '##')) def copy_js_files(outputdir): diff --git a/code-postprocessing/bbob_pproc/ppfigdim.py b/code-postprocessing/bbob_pproc/ppfigdim.py index 7b2a110fd..0764c4007 100644 --- a/code-postprocessing/bbob_pproc/ppfigdim.py +++ b/code-postprocessing/bbob_pproc/ppfigdim.py @@ -59,6 +59,7 @@ import numpy as np from . import genericsettings, toolsstats, bestalg, pproc, ppfig, ppfigparam, htmldesc, toolsdivers +from . import testbedsettings xlim_max = None ynormalize_by_dimension = True # not at all tested yet @@ -113,7 +114,7 @@ def scaling_figure_caption(): reaching the respective target. """ + ( # TODO: add here "(out of XYZ trials)" r"""The light thick line with diamonds indicates the respective best result from BBOB-2009 for - $\Df=10^{-8}$. """ if genericsettings.current_testbed.name != + $\Df=10^{-8}$. """ if testbedsettings.current_testbed.name != 'bbob-biobj' else "") + """Horizontal lines mean linear scaling, slanted grid lines depict quadratic scaling. """ @@ -133,10 +134,10 @@ def scaling_figure_caption(): # r"Shown is the \aRT\ for the smallest $\Df$-values $\ge10^{-8}$ for which the \aRT\ of the GECCO-BBOB-2009 best algorithm " + # r"was below $10^{\{values_of_interest\}}\times\DIM$ evaluations. 
" + - if genericsettings.current_testbed.name == genericsettings.testbed_name_bi: + if testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi: # NOTE: no runlength-based targets supported yet figure_caption = scaling_figure_caption_fixed.replace('\\fopt', '\\hvref') - elif genericsettings.current_testbed.name == genericsettings.testbed_name_single: + elif testbedsettings.current_testbed.name == testbedsettings.testbed_name_single: if genericsettings.runlength_based_targets: figure_caption = scaling_figure_caption_rlbased else: @@ -144,7 +145,7 @@ def scaling_figure_caption(): else: warnings.warn("Current settings do not support ppfigdim caption.") - values_of_interest = genericsettings.current_testbed.ppfigdim_target_values + values_of_interest = testbedsettings.current_testbed.ppfigdim_target_values figure_caption = figure_caption.replace('values_of_interest', ', '.join(values_of_interest.labels())) return figure_caption @@ -171,7 +172,7 @@ def beautify(axesLabel=True): # axisHandle.xaxis.grid(True, linewidth=0, which='major') ymin, ymax = plt.ylim() - values_of_interest = genericsettings.current_testbed.ppfigdim_target_values + values_of_interest = testbedsettings.current_testbed.ppfigdim_target_values # horizontal grid if isinstance(values_of_interest, pproc.RunlengthBasedTargetValues): @@ -357,7 +358,7 @@ def plot(dsList, valuesOfInterest=None, styles=styles): """ if not valuesOfInterest: - valuesOfInterest = genericsettings.current_testbed.ppfigdim_target_values + valuesOfInterest = testbedsettings.current_testbed.ppfigdim_target_values valuesOfInterest = pproc.TargetValues.cast(valuesOfInterest) styles = list(reversed(styles[:len(valuesOfInterest)])) @@ -500,7 +501,7 @@ def plot_previous_algorithms(func, target=None): # lambda x: [1e-8]): last, most difficult target in ``target``.""" if not target: - target = genericsettings.current_testbed.ppfigdim_target_values + target = testbedsettings.current_testbed.ppfigdim_target_values target = pproc.TargetValues.cast(target) @@ -551,9 +552,9 @@ def main(dsList, _valuesOfInterest, outputdir, verbose=True): dictFunc = dsList.dictByFunc() dictAlg = dsList.dictByAlg() - values_of_interest = genericsettings.current_testbed.ppfigdim_target_values + values_of_interest = testbedsettings.current_testbed.ppfigdim_target_values - key = 'bbobppfigdimlegend' + genericsettings.current_testbed.scenario + key = 'bbobppfigdimlegend' + testbedsettings.current_testbed.scenario joined_values_of_interest = ', '.join(values_of_interest.labels()) if genericsettings.runlength_based_targets else ', '.join(values_of_interest.loglabels()) caption = htmldesc.getValue('##' + key + '##').replace('valuesofinterest', joined_values_of_interest) @@ -600,7 +601,7 @@ def main(dsList, _valuesOfInterest, outputdir, verbose=True): instanceText = '%d instances' % len(((dictFunc[func][0]).instancenumbers)) plt.text(plt.xlim()[0], plt.ylim()[0]+0.5, instanceText, fontsize=14) - if func in genericsettings.current_testbed.functions_with_legend: + if func in testbedsettings.current_testbed.functions_with_legend: toolsdivers.legend(loc="best") if func in funInfos.keys(): # print(plt.rcParams['axes.titlesize']) diff --git a/code-postprocessing/bbob_pproc/ppfigparam.py b/code-postprocessing/bbob_pproc/ppfigparam.py index 77130372b..8e7bd4be6 100755 --- a/code-postprocessing/bbob_pproc/ppfigparam.py +++ b/code-postprocessing/bbob_pproc/ppfigparam.py @@ -17,7 +17,7 @@ import sys import matplotlib.pyplot as plt import numpy as np -from . 
import toolsstats, bestalg, genericsettings, toolsdivers +from . import toolsstats, testbedsettings, genericsettings, toolsdivers from .ppfig import saveFigure __all__ = ['beautify', 'plot', 'main'] @@ -45,7 +45,7 @@ def read_fun_infos(isBiobjective): try: funInfos = {} - filename = genericsettings.getBenchmarksShortInfos(isBiobjective) + filename = testbedsettings.get_benchmarks_short_infos(isBiobjective) infofile = os.path.join(os.path.split(__file__)[0], filename) f = open(infofile, 'r') for line in f: @@ -225,13 +225,13 @@ def main(dsList, _targets=(10., 1., 1e-1, 1e-2, 1e-3, 1e-5, 1e-8), handles = plot(dictfunc, param[0], targets) - # # display best 2009 - # if not bestalg.bestalgentries2009: - # bestalg.loadBBOB2009() + # # display best algorithm + # if not bestalg.bestAlgorithmEntries: + # bestalg.load_best_algorithm() # bestalgdata = [] # for d in dimsBBOB: - # entry = bestalg.bestalgentries2009[(d, func)] + # entry = bestalg.bestAlgorithmEntries[(d, func)] # tmp = entry.detERT([1e-8])[0] # if not np.isinf(tmp): # bestalgdata.append(tmp/d) @@ -257,7 +257,7 @@ def main(dsList, _targets=(10., 1., 1e-1, 1e-2, 1e-3, 1e-5, 1e-8), if is_normalized: plt.setp(plt.gca(), 'ylabel', plt.getp(a, 'ylabel') + ' / ' + param[1]) - if func in genericsettings.current_testbed.functions_with_legend: + if func in testbedsettings.current_testbed.functions_with_legend: toolsdivers.legend(loc="best") fontSize = genericsettings.getFontSize(funInfos.values()) diff --git a/code-postprocessing/bbob_pproc/pprldistr.py b/code-postprocessing/bbob_pproc/pprldistr.py index 833ad2f4f..040ae0228 100644 --- a/code-postprocessing/bbob_pproc/pprldistr.py +++ b/code-postprocessing/bbob_pproc/pprldistr.py @@ -49,6 +49,7 @@ import matplotlib.pyplot as plt from pdb import set_trace from . import toolsstats, genericsettings, pproc, toolsdivers +from . import testbedsettings from .ppfig import consecutiveNumbers, plotUnifLogXMarkers, saveFigure, logxticks from .pptex import color_to_latex, marker_to_latex @@ -142,7 +143,7 @@ def caption_single(): Left subplots: ECDF of the number of function evaluations (FEvals) divided by search space dimension $D$, to fall below $\fopt+\Df$ with $\Df=10^{k}$, where $k$ is the first value in the legend. The thick red line represents the most difficult target value $\fopt+""" + - genericsettings.current_testbed.hardesttargetlatex + """$. """) + testbedsettings.current_testbed.hardesttargetlatex + """$. """) caption_left_rlbased_targets = r"""% Left subplots: ECDF of number of function evaluations (FEvals) divided by search space dimension $D$, to fall below $\fopt+\Df$ where \Df\ is the @@ -160,7 +161,7 @@ def caption_single(): (from right to left cycling cyan-magenta-black\dots) and final $\Df$-value (red), where \Df\ and \textsf{Df} denote the difference to the optimal function value. 
""" + ( r"""Light brown lines in the background show ECDFs for the most difficult target of all - algorithms benchmarked during BBOB-2009.""" if genericsettings.current_testbed.name != genericsettings.testbed_name_bi + algorithms benchmarked during BBOB-2009.""" if testbedsettings.current_testbed.name != testbedsettings.testbed_name_bi else r"""Shown are aggregations over functions where the single objectives are in the same BBOB function class, as indicated on the left side and the aggregation over all 55 functions in the last @@ -169,10 +170,10 @@ def caption_single(): caption_single_fixed = caption_part_one + caption_left_fixed_targets + caption_wrap_up + caption_right caption_single_rlbased = caption_part_one + caption_left_rlbased_targets + caption_wrap_up + caption_right - if genericsettings.current_testbed.name == genericsettings.testbed_name_bi: + if testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi: # NOTE: no runlength-based targets supported yet figure_caption = caption_single_fixed.replace('\\fopt', '\\hvref') - elif genericsettings.current_testbed.name == genericsettings.testbed_name_single: + elif testbedsettings.current_testbed.name == testbedsettings.testbed_name_single: if genericsettings.runlength_based_targets: figure_caption = caption_single_rlbased else: @@ -202,7 +203,7 @@ def caption_two(): caption_two_fixed_targets_part3 = r""")% . """ + (r"""Light beige lines show the ECDF of FEvals for target value $\Df=10^{-8}$ of all algorithms benchmarked during - BBOB-2009. """ if genericsettings.current_testbed.name != genericsettings.testbed_name_bi + BBOB-2009. """ if testbedsettings.current_testbed.name != testbedsettings.testbed_name_bi else "") + r"""Right sub-columns: ECDF of FEval ratios of \algorithmA\ divided by \algorithmB for target function values $10^k$ with $k$ given in the legend; all @@ -240,10 +241,10 @@ def caption_two(): + symbAlgorithmB + caption_two_rlbased_targets_part3) - if genericsettings.current_testbed.name == genericsettings.testbed_name_bi: + if testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi: # NOTE: no runlength-based targets supported yet figure_caption = caption_two_fixed.replace('\\fopt', '\\hvref') - elif genericsettings.current_testbed.name == genericsettings.testbed_name_single: + elif testbedsettings.current_testbed.name == testbedsettings.testbed_name_single: if genericsettings.runlength_based_targets: figure_caption = caption_two_rlbased else: @@ -312,7 +313,7 @@ def beautifyRLD(xlimit_max = None): plt.xlim(xmin = runlen_xlimits_min) plt.text(plt.xlim()[0], plt.ylim()[0], - genericsettings.current_testbed.pprldistr_target_values.short_info, + testbedsettings.current_testbed.pprldistr_target_values.short_info, fontsize = 14) beautifyECDF() @@ -534,7 +535,7 @@ def plotFVDistr(dsList, budget, min_f = None, **plotArgs): """ if not min_f: - min_f = genericsettings.current_testbed.ppfvdistr_min_target + min_f = testbedsettings.current_testbed.ppfvdistr_min_target x = [] nn = 0 @@ -694,7 +695,7 @@ def plot(dsList, targets=None, **plotArgs): res = [] if not targets: - targets = genericsettings.current_testbed.ppfigdim_target_values + targets = testbedsettings.current_testbed.ppfigdim_target_values plt.subplot(121) maxEvalsFactor = max(i.mMaxEvals() / i.dim for i in dsList) @@ -821,7 +822,7 @@ def main(dsList, isStoringXMax = False, outputdir = '', # plt.rc("ytick", labelsize=20) # plt.rc("font", size=20) # plt.rc("legend", fontsize=20) - testbed = genericsettings.current_testbed + testbed = 
testbedsettings.current_testbed targets = testbed.pprldistr_target_values # convenience abbreviation for d, dictdim in dsList.dictByDim().iteritems(): diff --git a/code-postprocessing/bbob_pproc/pproc.py b/code-postprocessing/bbob_pproc/pproc.py index d00fd83cf..ae924edb9 100644 --- a/code-postprocessing/bbob_pproc/pproc.py +++ b/code-postprocessing/bbob_pproc/pproc.py @@ -30,6 +30,7 @@ import matplotlib.pyplot as plt from collections import OrderedDict from . import genericsettings, findfiles, toolsstats, toolsdivers +from . import testbedsettings from .readalign import split, alignData, HMultiReader, VMultiReader, openfile from .readalign import HArrayMultiReader, VArrayMultiReader, alignArrayData from .ppfig import consecutiveNumbers, Usage @@ -245,7 +246,7 @@ def __init__(self, run_lengths, reference_data='bestAlgorithm', defines "how much more difficult". TODO: check use case where ``reference_data`` is a dictionary similar - to ``bestalg.bestalgentries2009`` with each key dim_fun a reference + to ``bestalg.bestAlgorithmEntries`` with each key dim_fun a reference DataSet, computed by bestalg module or portfolio module. dsList, sortedAlgs, dictAlg = pproc.processInputArgs(args, verbose=verbose) @@ -594,6 +595,7 @@ class DataSet(): readmaxevals splitByTrials target + testbed_name >>> all(ds.evals[:, 0] == ds.target) # first column of ds.evals is the "target" f-value True >>> ds.evals[0::10, (0,5,6)] # show row 0,10,20,... and of the result columns 0,5,6, index 0 is ftarget @@ -651,10 +653,24 @@ class DataSet(): 'indicator': ('indicator', str), 'folder': ('folder', str), 'algId': ('algId', str), - 'algorithm': ('algId', str)} + 'algorithm': ('algId', str), + 'suite': ('suite', str)} def isBiobjective(self): return hasattr(self, 'indicator') + + def testbed_name(self): + testbed = None + if hasattr(self, 'suite'): + testbed = getattr(self, 'suite') + + if not testbed: + if self.isBiobjective(): + testbed = testbedsettings.default_testbed_bi + else: + testbed = testbedsettings.default_testbed_single + + return testbed def __init__(self, header, comment, data, indexfile, verbose=True): """Instantiate a DataSet. @@ -855,7 +871,11 @@ def _cut_data(self): does not exist. """ - if isinstance(genericsettings.loadCurrentTestbed(self.isBiobjective(), TargetValues), genericsettings.GECCOBBOBTestbed): + + if not testbedsettings.current_testbed: + testbedsettings.load_current_testbed(self.testbed_name(), TargetValues) + + if isinstance(testbedsettings.current_testbed, testbedsettings.GECCOBBOBTestbed): Ndata = np.size(self.evals, 0) i = Ndata while i > 1 and not self.isBiobjective() and self.evals[i-1][0] <= self.precision: diff --git a/code-postprocessing/bbob_pproc/pptable.py b/code-postprocessing/bbob_pproc/pptable.py index 9e55a8176..cfc65b336 100644 --- a/code-postprocessing/bbob_pproc/pptable.py +++ b/code-postprocessing/bbob_pproc/pptable.py @@ -27,6 +27,7 @@ import numpy as np import matplotlib.pyplot as plt from . import genericsettings, bestalg, toolsstats, pproc +from . import testbedsettings from .pptex import tableLaTeX, tableLaTeXStar, writeFEvals2, writeFEvalsMaxPrec from .toolsstats import significancetest @@ -49,7 +50,7 @@ def get_table_caption(): - """ Sets table caption, based on the genericsettings.current_testbed + """ Sets table caption, based on the testbedsettings.current_testbed and genericsettings.runlength_based_targets. """ @@ -63,7 +64,7 @@ def get_table_caption(): table_caption_two1 = (r"""% in the first. The different target \Df-values are shown in the top row. 
\#succ is the number of trials that reached the (final) target $\fopt + """ - + genericsettings.current_testbed.hardesttargetlatex + r"""$. + + testbedsettings.current_testbed.hardesttargetlatex + r"""$. """) table_caption_two2 = r"""% (preceded by the target \Df-value in \textit{italics}) in the first. @@ -79,7 +80,7 @@ def get_table_caption(): functions. """ - if genericsettings.current_testbed.name == genericsettings.testbed_name_bi: + if testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi: # NOTE: no runlength-based targets supported yet table_caption = r"""% Average runtime (\aRT) to reach given targets, measured @@ -88,11 +89,11 @@ def get_table_caption(): 90\%-tile of (bootstrapped) runtimes is shown for the different target \Df-values as shown in the top row. \#succ is the number of trials that reached the last target - $\hvref + """ + genericsettings.current_testbed.hardesttargetlatex + r"""$. + $\hvref + """ + testbedsettings.current_testbed.hardesttargetlatex + r"""$. The median number of conducted function evaluations is additionally given in \textit{italics}, if the target in the last column was never reached. """ - elif genericsettings.current_testbed.name == genericsettings.testbed_name_single: + elif testbedsettings.current_testbed.name == testbedsettings.testbed_name_single: if genericsettings.runlength_based_targets: table_caption = table_caption_one + table_caption_two2 + table_caption_rest else: @@ -119,8 +120,8 @@ def main(dsList, dimsOfInterest, outputdir, info='', verbose=True): #bestalg.bestalgentries which is the virtual best of BBOB dictDim = dsList.dictByDim() - targetf = genericsettings.current_testbed.pptable_ftarget - targetsOfInterest = genericsettings.current_testbed.pptable_targetsOfInterest + targetf = testbedsettings.current_testbed.pptable_ftarget + targetsOfInterest = testbedsettings.current_testbed.pptable_targetsOfInterest if info: info = '_' + info diff --git a/code-postprocessing/bbob_pproc/preparetexforhtml.py b/code-postprocessing/bbob_pproc/preparetexforhtml.py index 81a0636ef..7ea66f1ad 100644 --- a/code-postprocessing/bbob_pproc/preparetexforhtml.py +++ b/code-postprocessing/bbob_pproc/preparetexforhtml.py @@ -12,6 +12,7 @@ import warnings from . import genericsettings, pplogloss, ppfigdim, pptable, pprldistr, config +from . 
import testbedsettings from .compall import pptables, ppfigs from .comp2 import ppscatter, pptable2 @@ -41,9 +42,10 @@ \\newcommand{\\nruns}{\ensuremath{\mathrm{Nruns}}} \\newcommand{\\Dfb}{\ensuremath{\Delta f_{\mathrm{best}}}} \\newcommand{\\Df}{\ensuremath{\Delta f}} +\\newcommand{\\DI}{\ensuremath{\Delta I}} \\newcommand{\\nbFEs}{\ensuremath{\mathrm{\#FEs}}} \\newcommand{\\fopt}{\ensuremath{f_\mathrm{opt}}} -\\newcommand{\\hvref}{\ensuremath{HV_\mathrm{ref}}} +\\newcommand{\\hvref}{I^{\mathrm{ref}}} \\newcommand{\\ftarget}{\ensuremath{f_\mathrm{t}}} \\newcommand{\\CrE}{\ensuremath{\mathrm{CrE}}} \\newcommand{\\change}[1]{{\color{red} #1}} @@ -62,17 +64,17 @@ def main(latex_commands_for_html): f.write(header) - for scenario in genericsettings.all_scenarios: + for scenario in testbedsettings.all_scenarios: # set up scenario, especially wrt genericsettings - if scenario == genericsettings.scenario_rlbased: + if scenario == testbedsettings.scenario_rlbased: genericsettings.runlength_based_targets = True - config.config(isBiobjective=False) - elif scenario == genericsettings.scenario_fixed: + config.config(testbedsettings.default_testbed_single) + elif scenario == testbedsettings.scenario_fixed: genericsettings.runlength_based_targets = False - config.config(isBiobjective=False) - elif scenario == genericsettings.scenario_biobjfixed: + config.config(testbedsettings.default_testbed_single) + elif scenario == testbedsettings.scenario_biobjfixed: genericsettings.runlength_based_targets = False - config.config(isBiobjective=True) + config.config(testbedsettings.default_testbed_bi) else: warnings.warn("Scenario not supported yet in HTML") @@ -108,7 +110,7 @@ def main(latex_commands_for_html): f.writelines(prepare_providecommand('bbobpptablestwolegend', scenario, pptable2Legend)) # 6. pptables - f.writelines(prepare_providecommand('bbobpptablesmanylegend', scenario, pptables.get_table_caption())) + f.writelines(prepare_providecommand_two('bbobpptablesmanylegend', scenario, pptables.get_table_caption())) # 7. ppscatter ppscatterLegend = ppscatter.prepare_figure_caption().replace('REFERENCE_ALGORITHM', 'REFERENCEALGORITHM') @@ -128,7 +130,7 @@ def main(latex_commands_for_html): # 1. ppfigs for dim in ['5', '20']: f.write(prepare_item('bbobECDFslegend' + scenario + dim, 'bbobECDFslegend' + scenario, str(dim))) - param = '$f_1$ and $f_{%d}$' % genericsettings.current_testbed.number_of_functions + param = '$f_1$ and $f_{%d}$' % testbedsettings.current_testbed.number_of_functions f.write(prepare_item('bbobppfigslegend' + scenario, param=param)) # 2. pprldistr @@ -144,10 +146,11 @@ def main(latex_commands_for_html): # 6. pptables command_name = 'bbobpptablesmanylegend' + scenario for dim in ['5', '20']: - f.write(prepare_item(command_name + dim, command_name, 'dimension ' + dim)) + bonferroni = str(2 * testbedsettings.current_testbed.number_of_functions) + f.write(prepare_item_two(command_name + dim, command_name, 'dimension ' + dim, bonferroni)) # 7. ppscatter - param = '$f_1$ - $f_{%d}$' % genericsettings.current_testbed.number_of_functions + param = '$f_1$ - $f_{%d}$' % testbedsettings.current_testbed.number_of_functions f.write(prepare_item('bbobppscatterlegend' + scenario, param=param)) # 8. 
pplogloss @@ -161,9 +164,17 @@ def main(latex_commands_for_html): def prepare_providecommand(command, scenario, captiontext): return ['\\providecommand{\\', command, scenario, '}[1]{\n', captiontext, '\n}\n'] +def prepare_providecommand_two(command, scenario, captiontext): + return ['\\providecommand{\\', command, scenario, '}[2]{\n', captiontext, '\n}\n'] def prepare_item(name, command_name='', param=''): if not command_name: command_name = name return '\#\#%s\#\#\n\\%s{%s}\n' % (name, command_name, param) + +def prepare_item_two(name, command_name='', paramOne='', paramTwo=''): + if not command_name: + command_name = name + + return '\#\#%s\#\#\n\\%s{%s}{%s}\n' % (name, command_name, paramOne, paramTwo) diff --git a/code-postprocessing/bbob_pproc/rungeneric1.py b/code-postprocessing/bbob_pproc/rungeneric1.py index f769984dd..dcc635f07 100644 --- a/code-postprocessing/bbob_pproc/rungeneric1.py +++ b/code-postprocessing/bbob_pproc/rungeneric1.py @@ -35,7 +35,7 @@ import warnings, getopt, numpy as np -from . import genericsettings, ppfig, pptable, pprldistr, ppfigdim, pplogloss, findfiles +from . import genericsettings, testbedsettings, ppfig, pptable, pprldistr, ppfigdim, pplogloss, findfiles from .pproc import DataSetList from .ppfig import Usage from .toolsdivers import print_done, prepend_to_file, replace_in_file, strip_pathname1, str_to_latex @@ -250,7 +250,7 @@ def main(argv=None): # import testbedsettings as testbedsettings # input settings try: fp, pathname, description = imp.find_module("testbedsettings") - testbedsettings = imp.load_module("testbedsettings", fp, pathname, description) + testbedsettings1 = imp.load_module("testbedsettings", fp, pathname, description) finally: fp.close() @@ -294,7 +294,7 @@ def main(argv=None): from . import config config.target_values(genericsettings.isExpensive) - config.config(dsList.isBiobjective()) + config.config(dsList[0].testbed_name()) if (genericsettings.verbose): for i in dsList: @@ -330,7 +330,7 @@ def main(argv=None): genericsettings.verbose, genericsettings.single_algorithm_file_name) - values_of_interest = genericsettings.current_testbed.ppfigdim_target_values + values_of_interest = testbedsettings.current_testbed.ppfigdim_target_values if genericsettings.isFig: print "Scaling figures...", sys.stdout.flush() diff --git a/code-postprocessing/bbob_pproc/rungeneric2.py b/code-postprocessing/bbob_pproc/rungeneric2.py index 526a7a010..ae7e98e97 100644 --- a/code-postprocessing/bbob_pproc/rungeneric2.py +++ b/code-postprocessing/bbob_pproc/rungeneric2.py @@ -56,6 +56,7 @@ from . import pproc from . import genericsettings, config +from . import testbedsettings from . import pprldistr from . 
import htmldesc from .pproc import DataSetList, processInputArgs, TargetValues, RunlengthBasedTargetValues @@ -301,7 +302,7 @@ def main(argv=None): i.algId = alg1name config.target_values(genericsettings.isExpensive) - config.config(dsList[0].isBiobjective()) + config.config(dsList[0].testbed_name()) ######################### Post-processing ############################# if genericsettings.isFig or genericsettings.isRLDistr or genericsettings.isTab or genericsettings.isScatter: @@ -393,7 +394,7 @@ def main(argv=None): # ECDF for all functions altogether try: pprldistr2.main(dictDim0[dim], dictDim1[dim], dim, - genericsettings.current_testbed.rldValsOfInterest, + testbedsettings.current_testbed.rldValsOfInterest, outputdir, '%02dD_all' % dim, genericsettings.verbose) @@ -408,7 +409,7 @@ def main(argv=None): for fGroup in set(dictFG0.keys()) & set(dictFG1.keys()): pprldistr2.main(dictFG1[fGroup], dictFG0[fGroup], dim, - genericsettings.current_testbed.rldValsOfInterest, + testbedsettings.current_testbed.rldValsOfInterest, outputdir, '%02dD_%s' % (dim, fGroup), genericsettings.verbose) @@ -419,7 +420,7 @@ def main(argv=None): for fGroup in set(dictFN0.keys()) & set(dictFN1.keys()): pprldistr2.main(dictFN1[fGroup], dictFN0[fGroup], dim, - genericsettings.current_testbed.rldValsOfInterest, + testbedsettings.current_testbed.rldValsOfInterest, outputdir, '%02dD_%s' % (dim, fGroup), genericsettings.verbose) @@ -431,6 +432,20 @@ def main(argv=None): '}' ]) + # ECDFs per function groups, code copied from rungenericmany.py + # (needed for bbob-biobj multiple algo template) + dictFG = pproc.dictAlgByFuncGroup(dictAlg) + for fg, tmpdictAlg in dictFG.iteritems(): + dictDim = pproc.dictAlgByDim(tmpdictAlg) + for d, entries in dictDim.iteritems(): + pprldmany.main(entries, + dsList0.isBiobjective(), + order=sortedAlgs, + outputdir=outputdir, + info=('%02dD_%s' % (d, fg)), + verbose=genericsettings.verbose) + + print "ECDF runlength ratio graphs done." @@ -441,7 +456,7 @@ def main(argv=None): if dim in inset.rldDimsOfInterest: try: pprldistr.comp(dictDim1[dim], dictDim0[dim], - genericsettings.current_testbed.rldValsOfInterest, # TODO: let rldVals... possibly be RL-based targets + testbedsettings.current_testbed.rldValsOfInterest, # TODO: let rldVals... 
possibly be RL-based targets True, outputdir, 'all', genericsettings.verbose) except KeyError: @@ -455,7 +470,7 @@ def main(argv=None): for fGroup in set(dictFG0.keys()) & set(dictFG1.keys()): pprldistr.comp(dictFG1[fGroup], dictFG0[fGroup], - genericsettings.current_testbed.rldValsOfInterest, True, + testbedsettings.current_testbed.rldValsOfInterest, True, outputdir, '%s' % fGroup, genericsettings.verbose) @@ -464,7 +479,7 @@ def main(argv=None): dictFN1 = dictDim1[dim].dictByNoise() for fGroup in set(dictFN0.keys()) & set(dictFN1.keys()): pprldistr.comp(dictFN1[fGroup], dictFN0[fGroup], - genericsettings.current_testbed.rldValsOfInterest, True, + testbedsettings.current_testbed.rldValsOfInterest, True, outputdir, '%s' % fGroup, genericsettings.verbose) @@ -560,7 +575,7 @@ def split_seq(seq, nbgroups): '}' ]) - key = '##bbobpptablestwolegend%s##' % (genericsettings.current_testbed.scenario) + key = '##bbobpptablestwolegend%s##' % (testbedsettings.current_testbed.scenario) replace_in_file(htmlFileName, '##bbobpptablestwolegend##', htmldesc.getValue(key)) replace_in_file(htmlFileName, 'algorithmAshort', algName0[0:3]) diff --git a/code-postprocessing/bbob_pproc/rungenericmany.py b/code-postprocessing/bbob_pproc/rungenericmany.py index c7af0f08c..3fa8293aa 100644 --- a/code-postprocessing/bbob_pproc/rungenericmany.py +++ b/code-postprocessing/bbob_pproc/rungenericmany.py @@ -36,8 +36,8 @@ res = cocopp.rungenericmany.main(sys.argv[1:]) sys.exit(res) -from . import genericsettings, ppfig -from . import dataoutput, pproc, pptex +from . import genericsettings, ppfig, testbedsettings +from . import pproc, pptex from .pproc import DataSetList, processInputArgs from .ppfig import Usage from .toolsdivers import prepend_to_file, strip_pathname1, str_to_latex @@ -276,7 +276,7 @@ def main(argv=None): # set target values from . import config config.target_values(genericsettings.isExpensive) - config.config(dsList[0].isBiobjective()) + config.config(dsList[0].testbed_name()) for i in dsList: @@ -315,7 +315,7 @@ def main(argv=None): genericsettings.many_algorithm_file_name) # empirical cumulative distribution functions (ECDFs) aka Data profiles if genericsettings.isRLDistr: - config.config(dsList[0].isBiobjective()) + config.config(dsList[0].testbed_name()) # ECDFs per noise groups dictNoi = pproc.dictAlgByNoi(dictAlg) for ng, tmpdictAlg in dictNoi.iteritems(): @@ -372,7 +372,7 @@ def main(argv=None): if genericsettings.isTab: prepend_to_file(os.path.join(outputdir, 'bbob_pproc_commands.tex'), - ['\providecommand{\\bbobpptablesmanylegend}[1]{' + + ['\providecommand{\\bbobpptablesmanylegend}[2]{' + pptables.get_table_caption() + '}']) dictNoi = pproc.dictAlgByNoi(dictAlg) for ng, tmpdictng in dictNoi.iteritems(): @@ -383,7 +383,8 @@ def main(argv=None): sortedAlgs, dsList[0].isBiobjective(), outputdir, - genericsettings.verbose) + genericsettings.verbose, + ([1,20,38] if (testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi) else True)) print "Comparison tables done." 
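The new module that follows replaces the old isBiobjective switch with name-based dispatch: a DataSet now reports a testbed name through testbed_name() (its 'suite' attribute when the data carries one, otherwise one of the defaults), config.config() receives that name, and load_current_testbed() instantiates the class of that name from testbedsettings. A rough sketch of the resulting selection flow, using only calls visible in this patch; the select_testbed wrapper itself is illustrative:

    from bbob_pproc import testbedsettings
    from bbob_pproc.pproc import TargetValues

    def select_testbed(data_set):
        """data_set is a pproc.DataSet instance."""
        # testbed_name() returns the data set's 'suite' attribute when present,
        # otherwise 'GECCOBBOBTestbed' or 'GECCOBiObjBBOBTestbed'.
        name = data_set.testbed_name()
        # load_current_testbed looks that class name up in testbedsettings'
        # globals(), instantiates it with the target-values factory, and
        # stores the result in testbedsettings.current_testbed.
        return testbedsettings.load_current_testbed(name, TargetValues)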
diff --git a/code-postprocessing/bbob_pproc/testbedsettings.py b/code-postprocessing/bbob_pproc/testbedsettings.py new file mode 100644 index 000000000..8f6e56d15 --- /dev/null +++ b/code-postprocessing/bbob_pproc/testbedsettings.py @@ -0,0 +1,145 @@ +import os +import numpy as np +import warnings + +scenario_rlbased = 'rlbased' +scenario_fixed = 'fixed' +scenario_biobjfixed = 'biobjfixed' +all_scenarios = [scenario_rlbased, scenario_fixed, scenario_biobjfixed] + +testbed_name_single = 'bbob' +testbed_name_bi = 'bbob-biobj' + +default_testbed_single = 'GECCOBBOBTestbed' +default_testbed_bi = 'GECCOBiObjBBOBTestbed' + +current_testbed = None + + +def load_current_testbed(testbed_name, target_values): + global current_testbed + + if testbed_name in globals(): + constructor = globals()[testbed_name] + current_testbed = constructor(target_values) + else: + raise ValueError('Testbed class %s does not exist. Add it to testbedsettings.py to process this data.' % testbed_name) + + return current_testbed + + +def get_benchmarks_short_infos(is_biobjective): + return 'biobj-benchmarkshortinfos.txt' if is_biobjective else 'benchmarkshortinfos.txt' + + +def get_short_names(file_name): + try: + info_list = open(os.path.join(os.path.dirname(__file__), file_name), 'r').read().split('\n') + info_dict = {} + for info in info_list: + key_val = info.split(' ', 1) + if len(key_val) > 1: + info_dict[int(key_val[0])] = key_val[1] + + return info_dict + except: + warnings.warn('benchmark infos not found') + + +class Testbed(object): + """this might become the future way to have settings related to testbeds + TODO: should go somewhere else than genericsettings.py + TODO: how do we pass information from the benchmark to the post-processing? + + """ + + def info(self, fun_number=None): + """info on the testbed if ``fun_number is None`` or one-line info + for function with number ``fun_number``. + + """ + if fun_number is None: + return self.__doc__ + + for line in open(os.path.join(os.path.abspath(os.path.split(__file__)[0]), + self.info_filename)).readlines(): + if line.split(): # ie if not empty + try: # empty lines are ignored + fun = int(line.split()[0]) + if fun == fun_number: + return 'F' + str(fun) + ' ' + ' '.join(line.split()[1:]) + except ValueError: + continue # ignore annotations + + +class GECCOBBOBTestbed(Testbed): + """Testbed used in the GECCO BBOB workshops 2009, 2010, 2012, 2013, 2015. + """ + + def __init__(self, targetValues): + # TODO: should become a function, as low_budget is a display setting + # not a testbed setting + # only the short info, how to deal with both infos? 
+ self.info_filename = 'GECCOBBOBbenchmarkinfos.txt' + self.name = testbed_name_single + self.short_names = {} + self.hardesttargetlatex = '10^{-8}' # used for ppfigs, pptable, pptable2, and pptables + self.ppfigs_ftarget = 1e-8 + self.ppfigdim_target_values = targetValues((10, 1, 1e-1, 1e-2, 1e-3, 1e-5, 1e-8)) # possibly changed in config + self.pprldistr_target_values = targetValues((10., 1e-1, 1e-4, 1e-8)) # possibly changed in config + self.pprldmany_target_values = targetValues(10 ** np.arange(2, -8.2, -0.2)) # possibly changed in config + self.pprldmany_target_range_latex = '$10^{[-8..2]}$' + self.ppscatter_target_values = targetValues(np.logspace(-8, 2, 46)) + self.rldValsOfInterest = (10, 1e-1, 1e-4, 1e-8) # possibly changed in config + self.ppfvdistr_min_target = 1e-8 + self.functions_with_legend = (1, 24, 101, 130) + self.number_of_functions = 24 + self.pptable_ftarget = 1e-8 # value for determining the success ratio in all tables + self.pptable_targetsOfInterest = targetValues((10, 1, 1e-1, 1e-2, 1e-3, 1e-5, 1e-7)) # for pptable and pptables + self.pptable2_targetsOfInterest = targetValues((1e+1, 1e-1, 1e-3, 1e-5, 1e-7)) # used for pptable2 + self.pptablemany_targetsOfInterest = self.pptable_targetsOfInterest + self.scenario = scenario_fixed + self.best_algorithm_filename = 'bestalgentries2009.pickle.gz' + self.short_names = get_short_names(get_benchmarks_short_infos(False)) + # expensive optimization settings: + self.pptable_target_runlengths = [0.5, 1.2, 3, 10, 50] # [0.5, 2, 10, 50] # used in config for expensive setting + self.pptable2_target_runlengths = self.pptable_target_runlengths # [0.5, 2, 10, 50] # used in config for expensive setting + self.pptables_target_runlengths = self.pptable_target_runlengths # used in config for expensive setting + + +class GECCOBiObjBBOBTestbed(Testbed): + """Testbed used in the GECCO biobjective BBOB workshop 2016. + """ + + def __init__(self, targetValues): + # TODO: should become a function, as low_budget is a display setting + # not a testbed setting + # only the short info, how to deal with both infos? + self.info_filename = 'GECCOBBOBbenchmarkinfos.txt' + self.name = testbed_name_bi + self.short_names = {} + self.hardesttargetlatex = '10^{-5}' # used for ppfigs, pptable, pptable2, and pptables + self.ppfigs_ftarget = 1e-5 + self.ppfigdim_target_values = targetValues((1e-1, 1e-2, 1e-3, 1e-4, 1e-5)) # possibly changed in config + self.pprldistr_target_values = targetValues((1e-1, 1e-2, 1e-3, 1e-5)) # possibly changed in config + target_values = np.append(np.append(10 ** np.arange(0, -5.1, -0.1), [0]), -10 ** np.arange(-5, -3.9, 0.2)) + self.pprldmany_target_values = targetValues(target_values) # possibly changed in config + self.pprldmany_target_range_latex = '$\{-10^{-4}, -10^{-4.2}, $ $-10^{-4.4}, -10^{-4.6}, -10^{-4.8}, -10^{-5}, 0, 10^{-5}, 10^{-4.9}, 10^{-4.8}, \dots, 10^{-0.1}, 10^0\}$' + # ppscatter_target_values are copied from the single objective case. Define the correct values! + self.ppscatter_target_values = targetValues(np.logspace(-8, 2, 46)) # that does not look right here! 
+ self.rldValsOfInterest = (1e-1, 1e-2, 1e-3, 1e-4, 1e-5) # possibly changed in config + self.ppfvdistr_min_target = 1e-5 + self.functions_with_legend = (1, 30, 31, 55) + self.number_of_functions = 55 + self.pptable_ftarget = 1e-5 # value for determining the success ratio in all tables + self.pptable_targetsOfInterest = targetValues( + (1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5)) # possibly changed in config for all tables + self.pptable2_targetsOfInterest = targetValues((1e-1, 1e-2, 1e-3, 1e-4, 1e-5)) # used for pptable2 + self.pptablemany_targetsOfInterest = targetValues((1e-0, 1e-2, 1e-5)) # used for pptables + self.scenario = scenario_biobjfixed + self.best_algorithm_filename = '' + self.short_names = get_short_names(get_benchmarks_short_infos(True)) + # expensive optimization settings: + self.pptable_target_runlengths = [0.5, 1.2, 3, 10, 50] # [0.5, 2, 10, 50] # used in config for expensive setting + self.pptable2_target_runlengths = [0.5, 1.2, 3, 10, 50] # [0.5, 2, 10, 50] # used in config for expensive setting + self.pptables_target_runlengths = [2, 10, 50] # used in config for expensive setting \ No newline at end of file diff --git a/code-postprocessing/latex-templates/templateBBOBarticle.tex b/code-postprocessing/latex-templates/templateBBOBarticle.tex index accb098d4..87047b9b6 100644 --- a/code-postprocessing/latex-templates/templateBBOBarticle.tex +++ b/code-postprocessing/latex-templates/templateBBOBarticle.tex @@ -247,14 +247,14 @@ \section{Results} Results of \algname\ from experiments according to \cite{hansen2016exp} and \cite{hansen2016perfass} on the benchmark functions given in \cite{wp200901_2010,hansen2012fun} are presented in -Figures~\ref{fig:ERTgraphs}, \ref{fig:RLDs}, \ref{tab:ERTloss}, and \ref{fig:ERTlogloss} and in -Tables~\ref{tab:ERTs}. The experiments were performed with COCO \cite{hansen2016cocoplat}, version \change{1.0.1}, the plots were produced with version \change{1.0.4}. +Figures~\ref{fig:aRTgraphs}, \ref{fig:RLDs}, \ref{tab:aRTloss}, and \ref{fig:aRTlogloss} and in +Tables~\ref{tab:aRTs}. The experiments were performed with COCO \cite{hansen2016cocoplat}, version \change{1.0.1}, the plots were produced with version \change{1.0.4}. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Scaling of ERT with dimension +% Scaling of aRT with dimension %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{figure*} @@ -285,7 +285,7 @@ \section{Results} \includegraphics[width=0.268\textwidth]{ppfigdim_f024} \end{tabular} \vspace{-3ex} - \caption{\label{fig:ERTgraphs} + \caption{\label{fig:aRTgraphs} \bbobppfigdimlegend{$f_1$ and $f_{24}$} } \end{figure*} @@ -293,8 +293,8 @@ \section{Results} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Table showing the expected running time (ERT in number of function -% evaluations) divided by the best ERT measured during BBOB-2009 (given in the +% Table showing the average runtime (aRT in number of function +% evaluations) divided by the best aRT measured during BBOB-2009 (given in the % first row of each cell) for functions $f_1$--$f_{24}$. 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -307,7 +307,7 @@ \section{Results} \parbox{0.499\textwidth}{\centering {\small 20-D}\\ \input{\bbobdatapath\algfolder pptable_20D_noiselessall}}}% -\caption[Table of ERTs]{\label{tab:ERTs}\bbobpptablecaption{} +\caption[Table of aRTs]{\label{tab:aRTs}\bbobpptablecaption{} } \end{table*} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -369,7 +369,7 @@ \section{Results} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% ERT loss ratios (figure and table) +% aRT loss ratios (figure and table) %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{figure} @@ -382,7 +382,7 @@ \section{Results} % \input{\bbobdatapath\algfolder pploglosstable_05D_noiselessall}\\ \input{\bbobdatapath\algfolder pploglosstable_20D_noiselessall} -\caption{\label{tab:ERTloss}% +\caption{\label{tab:aRTloss}% \bbobloglosstablecaption{} } \end{figure} @@ -391,7 +391,7 @@ \section{Results} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% ERT loss ratios per function group +% aRT loss ratios per function group %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{figure} @@ -421,7 +421,7 @@ \section{Results} \includegraphics[width=0.24\textwidth,trim=7mm 0 9mm 12mm, clip]{pplogloss_20D_mult2} \vspace*{-0.5ex} \end{tabular} - \caption{\label{fig:ERTlogloss}% + \caption{\label{fig:aRTlogloss}% \bbobloglossfigurecaption{} } \end{figure} diff --git a/code-postprocessing/latex-templates/templateBBOBcmp.tex b/code-postprocessing/latex-templates/templateBBOBcmp.tex index 702f824a5..f1049fda6 100644 --- a/code-postprocessing/latex-templates/templateBBOBcmp.tex +++ b/code-postprocessing/latex-templates/templateBBOBcmp.tex @@ -255,10 +255,10 @@ \section{Results} Results from experiments according to \cite{hansen2016exp} and \cite{hansen2016perfass} on the benchmark functions given in \cite{wp200901_2010,hansen2012fun} are presented in Figures~\ref{fig:scaling}, \ref{fig:scatterplots} and \ref{fig:RLDs} and -in Table~\ref{tab:ERTs}. The experiments were performed with COCO \cite{hansen2016cocoplat}, +in Table~\ref{tab:aRTs}. The experiments were performed with COCO \cite{hansen2016cocoplat}, version \change{1.0.1}, the plots were produced with version \change{1.0.4}. 
-The \textbf{expected running time (ERT)}, used in the figures and table, depends on a +The \textbf{average runtime (aRT)}, used in the figures and table, depends on a given target function value, $\ftarget=\fopt+\Df$, and is computed over all relevant trials as the number of function evaluations executed during each trial while the best function value did not reach \ftarget, summed over all trials @@ -275,7 +275,7 @@ \section{Results} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Scaling of ERT with dimension +% Scaling of aRT with dimension %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{figure*} @@ -439,8 +439,8 @@ \section{Results} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Table showing the expected running time (ERT in number of function -% evaluations) divided by the best ERT measured during BBOB-2009 (given in the +% Table showing the average runtime (aRT in number of function +% evaluations) divided by the best aRT measured during BBOB-2009 (given in the % first row of each cell) for functions $f_1$--$f_{24}$. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -451,7 +451,7 @@ \section{Results} \mbox{ \input{\bbobdatapath pptable2_05D_noiselessall}\hfill% \input{\bbobdatapath pptable2_20D_noiselessall}} -\caption{\label{tab:ERTs} +\caption{\label{tab:aRTs} \bbobpptablestwolegend{48} } \end{table*} diff --git a/code-postprocessing/latex-templates/templateBBOBmany.tex b/code-postprocessing/latex-templates/templateBBOBmany.tex index 3d1925ba3..96759d426 100644 --- a/code-postprocessing/latex-templates/templateBBOBmany.tex +++ b/code-postprocessing/latex-templates/templateBBOBmany.tex @@ -265,11 +265,11 @@ \section{Results} Results from experiments according to \cite{hansen2016exp} and \cite{hansen2016perfass} on the benchmark functions given in \cite{wp200901_2010,hansen2012fun} are presented in Figures~\ref{fig:scaling}, \ref{fig:ECDFs05D} and -\ref{fig:ECDFs20D} and in Tables~\ref{tab:ERTs5} and~\ref{tab:ERTs20}. +\ref{fig:ECDFs20D} and in Tables~\ref{tab:aRTs5} and~\ref{tab:aRTs20}. The experiments were performed with COCO \cite{hansen2016cocoplat}, version \change{1.0.1}, the plots were produced with version \change{1.0.4}. 
-The \textbf{expected running time (ERT)}, used in the figures and tables, +The \textbf{average runtime (aRT)}, used in the figures and tables, depends on a given target function value, $\ftarget=\fopt+\Df$, and is computed over all relevant trials as the number of function evaluations executed during each trial while the best function value @@ -289,7 +289,7 @@ \section{Results} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Scaling of ERT with dimension +% Scaling of aRT with dimension %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{figure*} @@ -399,8 +399,8 @@ \section{Results} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Expected running time (ERT in number of function evaluations) -% divided by the best ERT measured during BBOB-2009 (given in the respective +% Average runtime (aRT in number of function evaluations) +% divided by the best aRT measured during BBOB-2009 (given in the respective % first row) for functions $f_1$--$f_{24}$ for dimension 5. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -461,8 +461,8 @@ \section{Results} \input{\bbobdatapath pptables_f024_05D} \end{minipage}} - \caption{\label{tab:ERTs5} - \bbobpptablesmanylegend{dimension $5$} + \caption{\label{tab:aRTs5} + \bbobpptablesmanylegend{dimension $5$}{110} % Bonferroni correction: #dimensions * #functions } \end{table*} @@ -470,8 +470,8 @@ \section{Results} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Expected running time (ERT in number of function evaluations) -% divided by the best ERT measured during BBOB-2009 (given in the respective +% Average runtime (aRT in number of function evaluations) +% divided by the best aRT measured during BBOB-2009 (given in the respective % first row) for functions $f_1$--$f_{24}$ for dimension 20. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -530,8 +530,8 @@ \section{Results} \input{\bbobdatapath pptables_f024_20D} \end{minipage}} - \caption{\label{tab:ERTs20} - \bbobpptablesmanylegend{dimension $20$} + \caption{\label{tab:aRTs20} + \bbobpptablesmanylegend{dimension $20$}{110} % Bonferroni correction: #dimensions * #functions } \end{table*} diff --git a/code-postprocessing/latex-templates/templateBIOBJmultiple.tex b/code-postprocessing/latex-templates/templateBIOBJmultiple.tex new file mode 100644 index 000000000..b92090c17 --- /dev/null +++ b/code-postprocessing/latex-templates/templateBIOBJmultiple.tex @@ -0,0 +1,750 @@ +% This is "sig-alternate.tex" V2.1 April 2013 +% This file should be compiled with V2.5 of "sig-alternate.cls" May 2012 +% +% This example file demonstrates the use of the 'sig-alternate.cls' +% V2.5 LaTeX2e document class file. It is for those submitting +% articles to ACM Conference Proceedings WHO DO NOT WISH TO +% STRICTLY ADHERE TO THE SIGS (PUBS-BOARD-ENDORSED) STYLE. +% The 'sig-alternate.cls' file will produce a similar-looking, +% albeit, 'tighter' paper resulting in, invariably, fewer pages. 
+% +% ---------------------------------------------------------------------------------------------------------------- +% This .tex file (and associated .cls V2.5) produces: +% 1) The Permission Statement +% 2) The Conference (location) Info information +% 3) The Copyright Line with ACM data +% 4) NO page numbers +% +% as against the acm_proc_article-sp.cls file which +% DOES NOT produce 1) thru' 3) above. +% +% Using 'sig-alternate.cls' you have control, however, from within +% the source .tex file, over both the CopyrightYear +% (defaulted to 200X) and the ACM Copyright Data +% (defaulted to X-XXXXX-XX-X/XX/XX). +% e.g. +% \CopyrightYear{2007} will cause 2007 to appear in the copyright line. +% \crdata{0-12345-67-8/90/12} will cause 0-12345-67-8/90/12 to appear in the copyright line. +% +% --------------------------------------------------------------------------------------------------------------- +% This .tex source is an example which *does* use +% the .bib file (from which the .bbl file % is produced). +% REMEMBER HOWEVER: After having produced the .bbl file, +% and prior to final submission, you *NEED* to 'insert' +% your .bbl file into your source .tex file so as to provide +% ONE 'self-contained' source file. +% +% ================= IF YOU HAVE QUESTIONS ======================= +% Questions regarding the SIGS styles, SIGS policies and +% procedures, Conferences etc. should be sent to +% Adrienne Griscti (griscti@acm.org) +% +% Technical questions _only_ to +% Gerald Murray (murray@hq.acm.org) +% +% Technical questions related to COCO/BBOB to bbob@lri.fr +% =============================================================== +% +% For tracking purposes - this is V2.0 - May 2012 + +\documentclass{sig-alternate} +\pdfpagewidth=8.5in +\pdfpageheight=11in +\special{papersize=8.5in,11in} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% Packages +\usepackage{graphicx} +\usepackage{tabularx} +\usepackage[dvipsnames]{xcolor} +\usepackage{xspace} +\usepackage{float} +\usepackage{rotating} +\usepackage{xstring} % for string operations +\usepackage{wasysym} % Table legend with symbols input from post-processing +\usepackage{MnSymbol} % Table legend with symbols input from post-processing +%\usepackage[hidelinks]{hyperref} % make COCO papers clickable + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% Definitions + +% Algorithm names as they appear in the tables, uncomment if necessary +%\newcommand{\algAtables}{\algaperfprof} % first argument in the post-processing +% \newcommand{\algBtables}{\algbperfprof} % first argument in the post-processing +% \newcommand{\algCtables}{\algcperfprof} % first argument in the post-processing +% \newcommand{\algDtables}{\algdperfprof} % second argument in the post-processing +% ... 
+% location of pictures files +\newcommand{\bbobdatapath}{ppdata/} +\input{\bbobdatapath bbob_pproc_commands.tex} +\graphicspath{{\bbobdatapath}} + + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% pre-defined commands +\newcommand{\DIM}{\ensuremath{\mathrm{DIM}}} +\newcommand{\aRT}{\ensuremath{\mathrm{aRT}}} +\newcommand{\FEvals}{\ensuremath{\mathrm{FEvals}}} +\newcommand{\nruns}{\ensuremath{\mathrm{Nruns}}} +\newcommand{\Df}{\ensuremath{\Delta f}} +\newcommand{\DI}{\ensuremath{\Delta I}} +\newcommand{\nbFEs}{\ensuremath{\mathrm{\#FEs}}} +\newcommand{\fopt}{\ensuremath{f_\mathrm{opt}}} +\newcommand{\ftarget}{\ensuremath{f_\mathrm{t}}} +\newcommand{\Itarget}{\ensuremath{I_\mathrm{target}}} +\newcommand{\CrE}{\ensuremath{\mathrm{CrE}}} +\newcommand{\change}[1]{{\color{red} #1}} +\newcommand{\bbobbiobj}{{\ttfamily bbob-biobj}\xspace} +\newcommand{\hvref}{I^{\mathrm{ref}}} +\newcommand{\TODO}[1]{{\color{red} TODO: #1}} + + \renewcommand{\topfraction}{1} % max fraction of floats at top + \renewcommand{\bottomfraction}{1} % max fraction of floats at bottom + % Parameters for TEXT pages (not float pages): + \setcounter{topnumber}{3} + \setcounter{bottomnumber}{3} + \setcounter{totalnumber}{3} % 2 may work better + \setcounter{dbltopnumber}{4} % for 2-column pages + \renewcommand{\dbltopfraction}{1} % fit big float above 2-col. text + \renewcommand{\textfraction}{0.0} % allow minimal text w. figs + % Parameters for FLOAT pages (not text pages): + \renewcommand{\floatpagefraction}{0.80} % require fuller float pages + % N.B.: floatpagefraction MUST be less than topfraction !! + \renewcommand{\dblfloatpagefraction}{0.7} % require fuller float pages + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\begin{document} + +\IfFileExists{\bbobdatapath ppfig2_f001.pdf}{2 algorithms currently not supported}{} + +% +% --- Author Metadata here --- +\conferenceinfo{GECCO'17,} {July 1-2, 2017, Berlin, Germany.} +\CopyrightYear{2017} +\crdata{TBA} +\clubpenalty=10000 +\widowpenalty = 10000 +% --- End of Author Metadata --- + +\title{Black-Box Optimization Benchmarking Template for the Comparison of Multiple Algorithms on the Biobjective {\Large \bbobbiobj} Testbed} +\subtitle{Draft version +\titlenote{Submission deadline: March 28th.}} +%Camera-ready paper due April 16th.}} + +% +% You need the command \numberofauthors to handle the 'placement +% and alignment' of the authors beneath the title. +% +% For aesthetic reasons, we recommend 'three authors at a time' +% i.e. three 'name/affiliation blocks' be placed beneath the title. +% +% NOTE: You are NOT restricted in how many 'rows' of +% "name/affiliations" may appear. We just ask that you restrict +% the number of 'columns' to three. +% +% Because of the available 'opening page real-estate' +% we ask you to refrain from putting more than six authors +% (two rows with three columns) beneath the article title. +% More than six makes the first-page appear very cluttered indeed. +% +% Use the \alignauthor commands to handle the names +% and affiliations for an 'aesthetic maximum' of six authors. +% Add names, affiliations, addresses for +% the seventh etc. author(s) as the argument for the +% \additionalauthors command. +% These 'additional authors' will be output/set for you +% without further effort on your part as the last section in +% the body of your article BEFORE References or any Appendices. + +\numberofauthors{1} % in this sample file, there are a *total* +% of EIGHT authors. 
SIX appear on the 'first-page' (for formatting +% reasons) and the remaining two appear in the \additionalauthors section. +% +\author{ +% You can go ahead and credit any number of authors here, +% e.g. one 'row of three' or two rows (consisting of one row of three +% and a second row of one, two or three). +% +% The command \alignauthor (no curly braces needed) should +% precede each author name, affiliation/snail-mail address and +% e-mail address. Additionally, tag each line of +% affiliation/address with \affaddr, and tag the +% e-mail address with \email. +% +% 1st. author +\alignauthor +Forename Name\\ %\titlenote{Dr.~Trovato insisted his name be first.}\\ +% \affaddr{Institute for Clarity in Documentation}\\ +% \affaddr{1932 Wallamaloo Lane}\\ +% \affaddr{Wallamaloo, New Zealand}\\ +% \email{trovato@corporation.com} +%% 2nd. author +%\alignauthor +%G.K.M. Tobin\titlenote{The secretary disavows +%any knowledge of this author's actions.}\\ +% \affaddr{Institute for Clarity in Documentation}\\ +% \affaddr{P.O. Box 1212}\\ +% \affaddr{Dublin, Ohio 43017-6221}\\ +% \email{webmaster@marysville-ohio.com} +%% 3rd. author +%\alignauthor Lars Th{\o}rv{\"a}ld\titlenote{This author is the +%one who did all the really hard work.}\\ +% \affaddr{The Th{\o}rv{\"a}ld Group}\\ +% \affaddr{1 Th{\o}rv{\"a}ld Circle}\\ +% \affaddr{Hekla, Iceland}\\ +% \email{larst@affiliation.org} +%\and % use '\and' if you need 'another row' of author names +%% 4th. author +%\alignauthor Lawrence P. Leipuner\\ +% \affaddr{Brookhaven Laboratories}\\ +% \affaddr{Brookhaven National Lab}\\ +% \affaddr{P.O. Box 5000}\\ +% \email{lleipuner@researchlabs.org} +%% 5th. author +%\alignauthor Sean Fogarty\\ +% \affaddr{NASA Ames Research Center}\\ +% \affaddr{Moffett Field}\\ +% \affaddr{California 94035}\\ +% \email{fogartys@amesres.org} +%% 6th. author +%\alignauthor Charles Palmer\\ +% \affaddr{Palmer Research Laboratories}\\ +% \affaddr{8600 Datapoint Drive}\\ +% \affaddr{San Antonio, Texas 78229}\\ +% \email{cpalmer@prl.com} +} % author +%% There's nothing stopping you putting the seventh, eighth, etc. +%% author on the opening page (as the 'third row') but we ask, +%% for aesthetic reasons that you place these 'additional authors' +%% in the \additional authors block, viz. +%\additionalauthors{Additional authors: John Smith (The Th{\o}rv{\"a}ld Group, +%email: {\texttt{jsmith@affiliation.org}}) and Julius P.~Kumquat +%(The Kumquat Consortium, email: {\texttt{jpkumquat@consortium.net}}).} +%\date{30 July 1999} +%% Just remember to make sure that the TOTAL number of authors +%% is the number that will appear on the first page PLUS the +%% number that will appear in the \additionalauthors section. 
+
+\maketitle
+\begin{abstract}
+to be written
+\end{abstract}
+
+% Add any ACM category that you feel is needed, not mandatory anymore
+%\category{G.1.6}{Numerical Analysis}{Optimization}[global optimization,
+%unconstrained optimization]
+%\category{F.2.1}{Analysis of Algorithms and Problem Complexity}{Numerical Algorithms and Problems}
+
+% Complete with anything that is needed
+\terms{Algorithms}
+
+% Complete with anything that is needed
+\keywords{Benchmarking, Black-box optimization}
+
+% \section{Introduction}
+%
+% \section{Algorithm Presentation}
+%
+% \section{Experimental Procedure}
+%
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{CPU Timing}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% note that the following text is just a proposal and can/should be changed to your needs:
+In order to evaluate the CPU timing of the algorithm, we have run the \change{\algorithmA} with restarts on the entire bbob-biobj test suite \cite{biobj2016func} for $2 D$ function evaluations. The \change{C/Java/Matlab/Octave/Python} code was run on a \change{Mac Intel(R) Core(TM) i5-2400S CPU @ 2.50GHz} with \change{1} processor and \change{4} cores. The time per function evaluation for dimensions 2, 3, 5, 10, 20\change{, 40} equals \change{$x.x$}, \change{$x.x$}, \change{$x.x$}, \change{$xx$}, \change{$xxx$}\change{, and $xxx$} seconds respectively.
+
+\change{repeat the above for any algorithm tested}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Results}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+Results from experiments according to \cite{hansen2016exp},
+\cite{hansen2016perfass} and \cite{biobj2016perfass} on the benchmark
+functions given in \cite{biobj2016func} are presented in
+Figures~\ref{fig:ECDFsingleOne}, \ref{fig:ECDFsingleTwo}, \ref{fig:ECDFs05D} and
+\ref{fig:ECDFs20D} and in Tables~\ref{tab:aRTs5} and~\ref{tab:aRTs20}.
+The experiments were performed with COCO \cite{hansen2016cocoplat}, version
+\change{1.0.1}, the plots were produced with version \change{1.1.1}.
+
+The \textbf{average runtime (aRT)}, used in the figures and tables,
+depends on a given quality indicator value, $\Itarget=\hvref+\DI$, and is
+computed over all relevant trials as the number of function
+evaluations executed during each trial while the best quality indicator value
+did not reach \Itarget, summed over all trials and divided by the
+number of trials that actually reached \Itarget\
+\cite{hansen2016exp,price1997dev}. \textbf{Statistical significance}
+is tested with the rank-sum test for a given target $\Itarget$
+using, for each trial,
+either the number of needed function evaluations to reach
+$\Itarget$ (inverted and multiplied by $-1$), or, if the target
+was not reached, the best $\DI$-value achieved, measured only up to
+the smallest number of overall function evaluations for any
+unsuccessful trial under consideration.
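+
+% Optional: the aRT defined in the previous paragraph can be written compactly
+% as the formula sketched below. It is only a restatement of the text above,
+% kept commented out, and uses nothing beyond the preamble macros \aRT, \nbFEs
+% and \Itarget; uncomment it if an explicit formula is desired in the paper.
+% \[
+%   \aRT(\Itarget) \,=\,
+%   \frac{\sum_{\mathrm{trials}} \nbFEs\bigl(\mbox{until } \Itarget \mbox{ is reached or the trial ends}\bigr)}
+%        {\#\bigl\{\mbox{trials that reached } \Itarget\bigr\}}
+% \]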
+ + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% ECDFs per function in dimension 10 + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\begin{figure*} +\centering +\begin{tabular}{@{\hspace*{-0.005\textwidth}}l@{\hspace*{-0.005\textwidth}}l@{\hspace*{-0.005\textwidth}}l@{\hspace*{-0.005\textwidth}}l@{\hspace*{-0.005\textwidth}}l@{\hspace*{-0.005\textwidth}}} +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f001_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f002_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f003_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f004_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f005_10D}\\[-1.8ex] +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f006_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f007_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f008_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f009_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f010_10D}\\[-1.8ex] +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f011_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f012_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f013_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f014_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f015_10D}\\[-1.8ex] +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f016_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f017_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f018_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f019_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f020_10D}\\[-1.8ex] +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f021_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f022_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f023_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f024_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f025_10D}\\[-1.8ex] +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f026_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f027_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f028_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f029_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f030_10D}\\[-1.8ex] +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f031_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f032_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f033_10D}& +\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f034_10D}& 
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f035_10D}\\[-1.8ex]
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f036_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f037_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f038_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f039_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f040_10D}\\[-1.8ex]
+
+\end{tabular}
+ \caption{\label{fig:ECDFsingleOne}
+ Bootstrapped empirical cumulative distribution of the number of objective function evaluations divided by dimension (FEvals/DIM) for $58$ targets with target precision in $\{-10^{-4}, -10^{-4.2}, $ $-10^{-4.4}, -10^{-4.6}, -10^{-4.8}, -10^{-5}, 0, 10^{-5}, 10^{-4.9}, 10^{-4.8}, \dots, 10^{-0.1}, 10^0\}$ for each single function $f_{1}$ to $f_{40}$ in 10-D.
+}
+\end{figure*}
+
+\begin{figure*}
+\centering
+\begin{tabular}{@{\hspace*{-0.005\textwidth}}l@{\hspace*{-0.005\textwidth}}l@{\hspace*{-0.005\textwidth}}l@{\hspace*{-0.005\textwidth}}l@{\hspace*{-0.005\textwidth}}l@{\hspace*{-0.005\textwidth}}}
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f041_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f042_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f043_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f044_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f045_10D}\\[-1.8ex]
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f046_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f047_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f048_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f049_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f050_10D}\\[-1.8ex]
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f051_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f052_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f053_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f054_10D}&
+\includegraphics[width=0.2\textwidth]{pprldmany-single-functions/pprldmany_f055_10D}\\[-1.8ex]
+\end{tabular}
+ \caption{\label{fig:ECDFsingleTwo}
+ Bootstrapped empirical cumulative distribution of the number of objective function evaluations divided by dimension (FEvals/DIM) as in Fig.~\ref{fig:ECDFsingleOne} but for functions $f_{41}$ to $f_{55}$ in 10-D.
+}
+\end{figure*}
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% Empirical cumulative distribution functions (ECDFs) per function group (5-D)
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\begin{figure*}
+\begin{tabular}{c@{\hspace*{-0.02\textwidth}}c@{\hspace*{-0.02\textwidth}}c@{\hspace*{-0.02\textwidth}}c}
+separable-separable & separable-moderate & separable-ill-cond. & separable-multimodal\\
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_1-separable_1-separable} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_1-separable_2-moderate} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_1-separable_3-ill-conditioned} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_1-separable_4-multi-modal}\\
+separable-weakstructure & moderate-moderate & moderate-ill-cond. & moderate-multimodal\\
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_1-separable_5-weakly-structured} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_2-moderate_2-moderate} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_2-moderate_3-ill-conditioned} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_2-moderate_4-multi-modal}\\
+moderate-weakstructure & ill-cond.-ill-cond. & ill-cond.-multimodal & ill-cond.-weakstructure\\
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_2-moderate_5-weakly-structured} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_3-ill-conditioned_3-ill-conditioned} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_3-ill-conditioned_4-multi-modal} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_3-ill-conditioned_5-weakly-structured} \\
+multimodal-multimodal & multimodal-weakstructure & weakstructure-weakstructure & all 55 functions\\
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_4-multi-modal_4-multi-modal} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_4-multi-modal_5-weakly-structured} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_5-weakly-structured_5-weakly-structured} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_05D_noiselessall}
+\vspace*{-0.5ex}
+\end{tabular}
+ \caption{\label{fig:ECDFs05D}
+ \bbobECDFslegend{5}
+ }
+\end{figure*}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% Empirical cumulative distribution functions (ECDFs) per function group (20-D)
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\begin{figure*}
+\begin{tabular}{c@{\hspace*{-0.02\textwidth}}c@{\hspace*{-0.02\textwidth}}c@{\hspace*{-0.02\textwidth}}c}
+separable-separable & separable-moderate & separable-ill-cond. & separable-multimodal\\
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_1-separable_1-separable} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_1-separable_2-moderate} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_1-separable_3-ill-conditioned} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_1-separable_4-multi-modal}\\
+separable-weakstructure & moderate-moderate & moderate-ill-cond. & moderate-multimodal\\
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_1-separable_5-weakly-structured} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_2-moderate_2-moderate} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_2-moderate_3-ill-conditioned} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_2-moderate_4-multi-modal}\\
+moderate-weakstructure & ill-cond.-ill-cond. & ill-cond.-multimodal & ill-cond.-weakstructure\\
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_2-moderate_5-weakly-structured} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_3-ill-conditioned_3-ill-conditioned} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_3-ill-conditioned_4-multi-modal} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_3-ill-conditioned_5-weakly-structured} \\
+multimodal-multimodal & multimodal-weakstructure & weakstructure-weakstructure & all 55 functions\\
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_4-multi-modal_4-multi-modal} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_4-multi-modal_5-weakly-structured} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_5-weakly-structured_5-weakly-structured} &
+\includegraphics[width=0.268\textwidth,trim=0 0 0 13mm, clip]{pprldmany_20D_noiselessall}
+\vspace*{-0.5ex}
+\end{tabular}
+ \caption{\label{fig:ECDFs20D}
+ \bbobECDFslegend{20}
+ }
+\end{figure*}
+
+
+\clearpage
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% Average runtime (aRT in number of function evaluations)
+% for functions $f_1$--$f_{55}$ of the bbob-biobj suite for dimension 5.
+ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\begin{table*}\tiny +\centering +\mbox{\begin{minipage}[t]{0.32\textwidth}\tiny +\centering +\input{\bbobdatapath pptables_f001_05D} + +\input{\bbobdatapath pptables_f002_05D} + +\input{\bbobdatapath pptables_f003_05D} + +\input{\bbobdatapath pptables_f004_05D} + +\input{\bbobdatapath pptables_f005_05D} + +\input{\bbobdatapath pptables_f006_05D} + +\input{\bbobdatapath pptables_f007_05D} + +\input{\bbobdatapath pptables_f008_05D} + +\input{\bbobdatapath pptables_f009_05D} + +\input{\bbobdatapath pptables_f010_05D} + +\input{\bbobdatapath pptables_f011_05D} + +\input{\bbobdatapath pptables_f012_05D} + +\input{\bbobdatapath pptables_f013_05D} + +\input{\bbobdatapath pptables_f014_05D} + +\input{\bbobdatapath pptables_f015_05D} + +\input{\bbobdatapath pptables_f016_05D} + +\input{\bbobdatapath pptables_f017_05D} + +\input{\bbobdatapath pptables_f018_05D} + +\input{\bbobdatapath pptables_f019_05D} + +\end{minipage} +\hspace{0.002\textwidth} +\begin{minipage}[t]{0.32\textwidth}\tiny +\centering + +\input{\bbobdatapath pptables_f020_05D} + +\input{\bbobdatapath pptables_f021_05D} + +\input{\bbobdatapath pptables_f022_05D} + +\input{\bbobdatapath pptables_f023_05D} + +\input{\bbobdatapath pptables_f024_05D} + +\input{\bbobdatapath pptables_f025_05D} + +\input{\bbobdatapath pptables_f026_05D} + +\input{\bbobdatapath pptables_f027_05D} + +\input{\bbobdatapath pptables_f028_05D} + +\input{\bbobdatapath pptables_f029_05D} + +\input{\bbobdatapath pptables_f030_05D} + +\input{\bbobdatapath pptables_f031_05D} + +\input{\bbobdatapath pptables_f032_05D} + +\input{\bbobdatapath pptables_f033_05D} + +\input{\bbobdatapath pptables_f034_05D} + +\input{\bbobdatapath pptables_f035_05D} + +\input{\bbobdatapath pptables_f036_05D} + +\input{\bbobdatapath pptables_f037_05D} + +\end{minipage} + +\hspace{0.002\textwidth} +\begin{minipage}[t]{0.32\textwidth}\tiny +\centering + +\input{\bbobdatapath pptables_f038_05D} + +\input{\bbobdatapath pptables_f039_05D} + +\input{\bbobdatapath pptables_f040_05D} + +\input{\bbobdatapath pptables_f041_05D} + +\input{\bbobdatapath pptables_f042_05D} + +\input{\bbobdatapath pptables_f043_05D} + +\input{\bbobdatapath pptables_f044_05D} + +\input{\bbobdatapath pptables_f045_05D} + +\input{\bbobdatapath pptables_f046_05D} + +\input{\bbobdatapath pptables_f047_05D} + +\input{\bbobdatapath pptables_f048_05D} + +\input{\bbobdatapath pptables_f049_05D} + +\input{\bbobdatapath pptables_f050_05D} + +\input{\bbobdatapath pptables_f051_05D} + +\input{\bbobdatapath pptables_f052_05D} + +\input{\bbobdatapath pptables_f053_05D} + +\input{\bbobdatapath pptables_f054_05D} + +\input{\bbobdatapath pptables_f055_05D} + +\end{minipage}} + + \caption{\label{tab:aRTs5} + \bbobpptablesmanylegend{dimension $5$}{110} % Bonferroni correction: #dimensions * #functions + } +\end{table*} +%sideways + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% Average runtime (aRT in number of function evaluations) +% for functions $f_1$--$f_{55}$ of the bbob-biobj suite for dimension 20. 
+ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\begin{table*}\tiny +\centering +\mbox{\begin{minipage}[t]{0.32\textwidth}\tiny +\centering +\input{\bbobdatapath pptables_f001_20D} + +\input{\bbobdatapath pptables_f002_20D} + +\input{\bbobdatapath pptables_f003_20D} + +\input{\bbobdatapath pptables_f004_20D} + +\input{\bbobdatapath pptables_f005_20D} + +\input{\bbobdatapath pptables_f006_20D} + +\input{\bbobdatapath pptables_f007_20D} + +\input{\bbobdatapath pptables_f008_20D} + +\input{\bbobdatapath pptables_f009_20D} + +\input{\bbobdatapath pptables_f010_20D} + +\input{\bbobdatapath pptables_f011_20D} + +\input{\bbobdatapath pptables_f012_20D} + +\input{\bbobdatapath pptables_f013_20D} + +\input{\bbobdatapath pptables_f014_20D} + +\input{\bbobdatapath pptables_f015_20D} + +\input{\bbobdatapath pptables_f016_20D} + +\input{\bbobdatapath pptables_f017_20D} + +\input{\bbobdatapath pptables_f018_20D} + +\input{\bbobdatapath pptables_f019_20D} + +\end{minipage} +\hspace{0.002\textwidth} +\begin{minipage}[t]{0.32\textwidth}\tiny +\centering + +\input{\bbobdatapath pptables_f020_20D} + +\input{\bbobdatapath pptables_f021_20D} + +\input{\bbobdatapath pptables_f022_20D} + +\input{\bbobdatapath pptables_f023_20D} + +\input{\bbobdatapath pptables_f024_20D} + +\input{\bbobdatapath pptables_f025_20D} + +\input{\bbobdatapath pptables_f026_20D} + +\input{\bbobdatapath pptables_f027_20D} + +\input{\bbobdatapath pptables_f028_20D} + +\input{\bbobdatapath pptables_f029_20D} + +\input{\bbobdatapath pptables_f030_20D} + +\input{\bbobdatapath pptables_f031_20D} + +\input{\bbobdatapath pptables_f032_20D} + +\input{\bbobdatapath pptables_f033_20D} + +\input{\bbobdatapath pptables_f034_20D} + +\input{\bbobdatapath pptables_f035_20D} + +\input{\bbobdatapath pptables_f036_20D} + +\input{\bbobdatapath pptables_f037_20D} + +\end{minipage} + +\hspace{0.002\textwidth} +\begin{minipage}[t]{0.32\textwidth}\tiny +\centering + +\input{\bbobdatapath pptables_f038_20D} + +\input{\bbobdatapath pptables_f039_20D} + +\input{\bbobdatapath pptables_f040_20D} + +\input{\bbobdatapath pptables_f041_20D} + +\input{\bbobdatapath pptables_f042_20D} + +\input{\bbobdatapath pptables_f043_20D} + +\input{\bbobdatapath pptables_f044_20D} + +\input{\bbobdatapath pptables_f045_20D} + +\input{\bbobdatapath pptables_f046_20D} + +\input{\bbobdatapath pptables_f047_20D} + +\input{\bbobdatapath pptables_f048_20D} + +\input{\bbobdatapath pptables_f049_20D} + +\input{\bbobdatapath pptables_f050_20D} + +\input{\bbobdatapath pptables_f051_20D} + +\input{\bbobdatapath pptables_f052_20D} + +\input{\bbobdatapath pptables_f053_20D} + +\input{\bbobdatapath pptables_f054_20D} + +\input{\bbobdatapath pptables_f055_20D} + +\end{minipage}} + + \caption{\label{tab:aRTs20} + \bbobpptablesmanylegend{dimension $20$}{110} % Bonferroni correction: #dimensions * #functions + } +\end{table*} + + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% +% The following two commands are all you need in the +% initial runs of your .tex file to +% produce the bibliography for the citations in your paper. +\bibliographystyle{abbrv} +\bibliography{bbob} % bbob.bib is the name of the Bibliography in this case +% You must have a proper ".bib" file +% and remember to run: +% latex bibtex latex latex +% to resolve all references +% to create the ~.bbl file. 
Insert that ~.bbl file into +% the .tex source file and comment out +% the command \texttt{{\char'134}thebibliography}. +% +% ACM needs 'a single self-contained file'! +% + +% \clearpage % otherwise the last figure might be missing +\end{document}