diff --git a/code-postprocessing/bbob_pproc/bestalg.py b/code-postprocessing/bbob_pproc/bestalg.py
index 2b6ce786c..973570f24 100644
--- a/code-postprocessing/bbob_pproc/bestalg.py
+++ b/code-postprocessing/bbob_pproc/bestalg.py
@@ -9,8 +9,8 @@
to access best algorithm data set.
The best algorithm data set can be accessed by the
- :py:data:`bestalgentries2009` variable. This variable needs to be
- initialized by executing functions :py:func:`loadBBOB2009()`
+ :py:data:`bestAlgorithmEntries` variable. This variable needs to be
+    initialized by executing the function :py:func:`load_best_algorithm()`.
This module can also be used generate the best algorithm data set
with its generate method.
@@ -29,14 +29,9 @@
from . import readalign, pproc
from .toolsdivers import print_done
from .ppfig import Usage
-from . import toolsstats, genericsettings
+from . import toolsstats, testbedsettings
bestAlgorithmEntries = {}
-bestalgentries2009 = {}
-bestalgentries2010 = {}
-bestalgentries2012 = {}
-bestalgentriesever = {}
-bestbiobjalgentries2016 = {}
algs2009 = ("ALPS", "AMALGAM", "BAYEDA", "BFGS", "Cauchy-EDA",
"BIPOP-CMA-ES", "CMA-ESPLUSSEL", "DASA", "DE-PSO", "DIRECT", "EDA-PSO",
@@ -343,7 +338,7 @@ def load_best_algorithm(force=False):
if not force and bestAlgorithmEntries:
return bestAlgorithmEntries
- bestAlgorithmFilename = genericsettings.current_testbed.best_algorithm_filename
+ bestAlgorithmFilename = testbedsettings.current_testbed.best_algorithm_filename
# If the file name is not specified then we skip the load.
if not bestAlgorithmFilename:
@@ -372,155 +367,6 @@ def load_best_algorithm(force=False):
return bestAlgorithmEntries
-def loadBBOB2009(force=False):
- """Assigns :py:data:`bestalgentries2009`.
-
- This function is needed to set the global variable
- :py:data:`bestalgentries2009`. It unpickles file
- :file:`bestalgentries2009.pickle.gz`
-
- :py:data:`bestalgentries2009` is a dictionary accessed by providing
- a tuple :py:data:`(dimension, function)`. This returns an instance
- of :py:class:`BestAlgSet`.
- The data is that of algorithms submitted to BBOB 2009, the list of
- which can be found in variable :py:data:`algs2009`.
-
- """
- global bestalgentries2009
- # global statement necessary to change the variable bestalg.bestalgentries2009
-
- if not force and bestalgentries2009:
- return
-
- print "Loading best algorithm data from BBOB-2009...",
- sys.stdout.flush()
-
- bestalgfilepath = os.path.split(__file__)[0]
- # picklefilename = os.path.join(bestalgfilepath, 'bestalgentries2009.pickle')
- # cocofy(picklefilename)
- # fid = open(picklefilename, 'r')
-
- picklefilename = os.path.join(bestalgfilepath, 'bestalgentries2009.pickle.gz')
- fid = gzip.open(picklefilename, 'r')
- try:
- bestalgentries2009 = pickle.load(fid)
- except:
- warnings.warn("no best algorithm loaded")
- # raise # outcomment to diagnose
- bestalgentries2009 = None
- fid.close()
- print_done()
-
-def loadBBOB2010():
- """Assigns :py:data:`bestalgentries2010`.
-
- This function is needed to set the global variable
- :py:data:`bestalgentries2010`. It unpickles file
- :file:`bestalgentries2010.pickle.gz`
-
- :py:data:`bestalgentries2010` is a dictionary accessed by providing
- a tuple :py:data:`(dimension, function)`. This returns an instance
- of :py:class:`BestAlgSet`.
- The data is that of algorithms submitted to BBOB 20&0, the list of
- which can be found in variable :py:data:`algs2010`.
-
- """
- global bestalgentries2010
- # global statement necessary to change the variable bestalg.bestalgentries2010
-
- print "Loading best algorithm data from BBOB-2010...",
- bestalgfilepath = os.path.split(__file__)[0]
- picklefilename = os.path.join(bestalgfilepath, 'bestalgentries2010.pickle.gz')
- # cocofy(picklefilename)
- fid = gzip.open(picklefilename, 'r')
- bestalgentries2010 = pickle.load(fid)
- fid.close()
- print " done."
-
-def loadBBOB2012():
- """Assigns :py:data:`bestalgentries2012`.
-
- This function is needed to set the global variable
- :py:data:`bestalgentries2012`. It unpickles file
- :file:`bestalgentries2012.pickle.gz`
-
- :py:data:`bestalgentries2012` is a dictionary accessed by providing
- a tuple :py:data:`(dimension, function)`. This returns an instance
- of :py:class:`BestAlgSet`.
- The data is that of algorithms submitted to BBOB 20&0, the list of
- which can be found in variable :py:data:`algs2012`.
-
- """
- global bestalgentries2012
- # global statement necessary to change the variable bestalg.bestalgentries2012
-
- print "Loading best algorithm data from BBOB-2012...",
- bestalgfilepath = os.path.split(__file__)[0]
- picklefilename = os.path.join(bestalgfilepath, 'bestalgentries2012.pickle.gz')
- # cocofy(picklefilename)
- fid = gzip.open(picklefilename, 'r')
- bestalgentries2012 = pickle.load(fid)
- fid.close()
- print " done."
-
-def loadBBOBever():
- """Assigns :py:data:`bestalgentriesever`.
-
- This function is needed to set the global variable
- :py:data:`bestalgentriesever`. It unpickles file
- :file:`bestalgentriesever.pickle.gz`
-
- :py:data:`bestalgentriesever` is a dictionary accessed by providing
- a tuple :py:data:`(dimension, function)`. This returns an instance
- of :py:class:`BestAlgSet`.
- The data is that of algorithms submitted to BBOB 2009 and 2010, the
- list of which is the union in variables :py:data:`algs2009`
- and :py:data:`algs2010`.
-
- """
- global bestalgentriesever
- # global statement necessary to change the variable bestalg.bestalgentriesever
-
- print "Loading best algorithm data from BBOB...",
- bestalgfilepath = os.path.split(__file__)[0]
- picklefilename = os.path.join(bestalgfilepath, 'bestalgentriesever.pickle.gz')
- # cocofy(picklefilename)
- fid = gzip.open(picklefilename, 'r')
- bestalgentriesever = pickle.load(fid)
- fid.close()
- print " done."
-
-def loadBestBiobj2016():
- """Assigns :py:data:`bestbiobjalgentries2016`.
-
- This function is needed to set the global variable
- :py:data:`bestbiobjalgentries2016`. It unpickles file
- :file:`bestbiobjalgentries2016.pickle.gz`
-
- :py:data:`bestbiobjalgentries2016` is a dictionary accessed by providing
- a tuple :py:data:`(dimension, function)`. This returns an instance
- of :py:class:`BestAlgSet`.
-
- """
- global bestbiobjalgentries2016
- # global statement necessary to change the variable bestalg.bestbiobjalgentries2016
-
- if bestbiobjalgentries2016:
- return
-
- print "Loading best bi-objective algorithm data from BBOB-2016...",
- sys.stdout.flush()
-
- bestalgfilepath = os.path.split(__file__)[0]
- #picklefilename = os.path.join(bestalgfilepath, 'bestbiobjalgentries2016.pickle.gz')
- picklefilename = os.path.join(bestalgfilepath, 'bestbiobjalgentries2016.pickle')
- #fid = gzip.open(picklefilename, 'r')
- fid = open(picklefilename, 'r')
- bestbiobjalgentries2016 = pickle.load(fid)
- fid.close()
- print_done()
-
-
def usage():
print __doc__ # same as: sys.modules[__name__].__doc__, was: main.__doc__
diff --git a/code-postprocessing/bbob_pproc/comp2/ppfig2.py b/code-postprocessing/bbob_pproc/comp2/ppfig2.py
index 2934f2395..cd1de6078 100644
--- a/code-postprocessing/bbob_pproc/comp2/ppfig2.py
+++ b/code-postprocessing/bbob_pproc/comp2/ppfig2.py
@@ -22,7 +22,7 @@
import numpy as np
-from .. import toolsstats, readalign, ppfigparam, genericsettings, toolsdivers
+from .. import toolsstats, readalign, ppfigparam, testbedsettings, toolsdivers
from ..toolsstats import ranksumtest
from ..ppfig import saveFigure, plotUnifLogXMarkers
#try:
@@ -421,7 +421,7 @@ def main(dsList0, dsList1, minfvalue=1e-8, outputdir='', verbose=True):
if func in funInfos.keys():
plt.title(funInfos[func])
- if func in genericsettings.current_testbed.functions_with_legend:
+ if func in testbedsettings.current_testbed.functions_with_legend:
toolsdivers.legend(loc='best')
# save
diff --git a/code-postprocessing/bbob_pproc/comp2/ppscatter.py b/code-postprocessing/bbob_pproc/comp2/ppscatter.py
index 75598727a..af29e993c 100644
--- a/code-postprocessing/bbob_pproc/comp2/ppscatter.py
+++ b/code-postprocessing/bbob_pproc/comp2/ppscatter.py
@@ -40,7 +40,7 @@
except ImportError:
# compatibility matplotlib 0.8
from matplotlib.transforms import blend_xy_sep_transform as blend
-from .. import genericsettings, htmldesc, ppfigparam
+from .. import genericsettings, htmldesc, ppfigparam, testbedsettings
from ..ppfig import saveFigure
from .. import toolsdivers
from .. import pproc
@@ -79,10 +79,10 @@ def prepare_figure_caption():
40:{\color{magenta}$\Diamond$}. """
- if genericsettings.current_testbed.name == genericsettings.testbed_name_bi:
+ if testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi:
# NOTE: no runlength-based targets supported yet
caption = caption_start_fixed + caption_finish
- elif genericsettings.current_testbed.name == genericsettings.testbed_name_single:
+ elif testbedsettings.current_testbed.name == testbedsettings.testbed_name_single:
if genericsettings.runlength_based_targets:
caption = caption_start_rlbased + caption_finish
else:
@@ -95,10 +95,10 @@ def prepare_figure_caption():
def figure_caption(for_html = False):
- targets = genericsettings.current_testbed.ppscatter_target_values
+ targets = testbedsettings.current_testbed.ppscatter_target_values
if for_html:
caption = htmldesc.getValue('##bbobppscatterlegend' +
- genericsettings.current_testbed.scenario + '##')
+ testbedsettings.current_testbed.scenario + '##')
else:
caption = prepare_figure_caption()
@@ -167,7 +167,7 @@ def main(dsList0, dsList1, outputdir, verbose=True):
dictFunc1 = dsList1.dictByFunc()
funcs = set(dictFunc0.keys()) & set(dictFunc1.keys())
- targets = genericsettings.current_testbed.ppscatter_target_values
+ targets = testbedsettings.current_testbed.ppscatter_target_values
if isinstance(targets, pproc.RunlengthBasedTargetValues):
linewidth = linewidth_rld_based
else:
diff --git a/code-postprocessing/bbob_pproc/comp2/pptable2.py b/code-postprocessing/bbob_pproc/comp2/pptable2.py
index c4d36257a..f68f7e75a 100644
--- a/code-postprocessing/bbob_pproc/comp2/pptable2.py
+++ b/code-postprocessing/bbob_pproc/comp2/pptable2.py
@@ -15,7 +15,7 @@
import os, warnings
import numpy
import matplotlib.pyplot as plt
-from .. import genericsettings, bestalg, toolsstats, pproc
+from .. import genericsettings, testbedsettings, bestalg, toolsstats, pproc
from ..pptex import tableLaTeX, tableLaTeXStar, writeFEvals2, writeFEvalsMaxPrec, writeLabels
from ..toolsstats import significancetest
@@ -25,7 +25,7 @@
samplesize = genericsettings.simulated_runlength_bootstrap_sample_size
def get_table_caption():
- """ Sets table caption, based on the genericsettings.current_testbed
+ """ Sets table caption, based on the testbedsettings.current_testbed
and genericsettings.runlength_based_targets.
"""
@@ -40,7 +40,7 @@ def get_table_caption():
target, the corresponding best \aRT\
in the first row. The different target \Df-values are shown in the top row.
\#succ is the number of trials that reached the (final) target
- $\fopt + """ + genericsettings.current_testbed.hardesttargetlatex + r"""$.
+ $\fopt + """ + testbedsettings.current_testbed.hardesttargetlatex + r"""$.
"""
table_caption_two2 = r"""%
run-length based target, the corresponding best \aRT\
@@ -55,7 +55,7 @@ def get_table_caption():
90\%-tile of (bootstrapped) runtimes is shown for the different
target \Df-values as shown in the top row.
\#succ is the number of trials that reached the last target
- $\hvref + """ + genericsettings.current_testbed.hardesttargetlatex + r"""$.
+ $\hvref + """ + testbedsettings.current_testbed.hardesttargetlatex + r"""$.
"""
table_caption_rest = (r"""%
The median number of conducted function evaluations is additionally given in
@@ -66,14 +66,14 @@ def get_table_caption():
following the $\star$ symbol, with Bonferroni correction of #1.""" +
(r"""A $\downarrow$ indicates the same tested against the best
algorithm of BBOB-2009."""
- if not (genericsettings.current_testbed.name == genericsettings.testbed_name_bi)
+ if not (testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi)
else "")
)
- if genericsettings.current_testbed.name == genericsettings.testbed_name_bi:
+ if testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi:
# NOTE: no runlength-based targets supported yet
table_caption = table_caption_bi + table_caption_rest
- elif genericsettings.current_testbed.name == genericsettings.testbed_name_single:
+ elif testbedsettings.current_testbed.name == testbedsettings.testbed_name_single:
if genericsettings.runlength_based_targets:
table_caption = table_caption_one + table_caption_two2 + table_caption_rest
else:
@@ -89,7 +89,7 @@ def main(dsList0, dsList1, dimsOfInterest, outputdir, info='', verbose=True):
#TODO: method is long, split if possible
- testbed = genericsettings.current_testbed
+ testbed = testbedsettings.current_testbed
targetsOfInterest = testbed.pptable2_targetsOfInterest
diff --git a/code-postprocessing/bbob_pproc/compall/ppfigs.py b/code-postprocessing/bbob_pproc/compall/ppfigs.py
index 50a3063d5..77f876c8a 100644
--- a/code-postprocessing/bbob_pproc/compall/ppfigs.py
+++ b/code-postprocessing/bbob_pproc/compall/ppfigs.py
@@ -9,6 +9,7 @@
import warnings
from pdb import set_trace
from .. import toolsdivers, toolsstats, bestalg, pproc, genericsettings, htmldesc, ppfigparam
+from .. import testbedsettings
from ..ppfig import saveFigure
from ..pptex import color_to_latex, marker_to_latex, marker_to_html, writeLabels
@@ -65,10 +66,10 @@ def prepare_scaling_figure_caption():
scaling_figure_caption_fixed = scaling_figure_caption_start_fixed + scaling_figure_caption_end
scaling_figure_caption_rlbased = scaling_figure_caption_start_rlbased + scaling_figure_caption_end
- if genericsettings.current_testbed.name == genericsettings.testbed_name_bi:
+ if testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi:
# NOTE: no runlength-based targets supported yet
figure_caption = scaling_figure_caption_fixed
- elif genericsettings.current_testbed.name == genericsettings.testbed_name_single:
+ elif testbedsettings.current_testbed.name == testbedsettings.testbed_name_single:
if genericsettings.runlength_based_targets:
figure_caption = scaling_figure_caption_rlbased
else:
@@ -83,11 +84,11 @@ def scaling_figure_caption(for_html = False):
if for_html:
figure_caption = htmldesc.getValue('##bbobppfigslegend' +
- genericsettings.current_testbed.scenario + '##')
+ testbedsettings.current_testbed.scenario + '##')
else:
figure_caption = prepare_scaling_figure_caption()
- target = genericsettings.current_testbed.ppfigs_ftarget
+ target = testbedsettings.current_testbed.ppfigs_ftarget
target = pproc.TargetValues.cast([target] if numpy.isscalar(target) else target)
assert len(target) == 1
@@ -127,10 +128,10 @@ def prepare_ecdfs_figure_caption():
r"with $k\in \{0.5, 1.2, 3, 10, 50\}$. "
)
- if genericsettings.current_testbed.name == genericsettings.testbed_name_bi:
+ if testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi:
# NOTE: no runlength-based targets supported yet
figure_caption = ecdfs_figure_caption_standard
- elif genericsettings.current_testbed.name == genericsettings.testbed_name_single:
+ elif testbedsettings.current_testbed.name == testbedsettings.testbed_name_single:
if genericsettings.runlength_based_targets:
figure_caption = ecdfs_figure_caption_rlbased + best2009text
else:
@@ -144,24 +145,24 @@ def prepare_ecdfs_figure_caption():
def ecdfs_figure_caption(for_html = False, dimension = 0):
if for_html:
- key = '##bbobECDFslegend%s%d##' % (genericsettings.current_testbed.scenario, dimension)
+ key = '##bbobECDFslegend%s%d##' % (testbedsettings.current_testbed.scenario, dimension)
caption = htmldesc.getValue(key)
else:
caption = prepare_ecdfs_figure_caption()
- target = genericsettings.current_testbed.ppfigs_ftarget
+ target = testbedsettings.current_testbed.ppfigs_ftarget
target = pproc.TargetValues.cast([target] if numpy.isscalar(target) else target)
assert len(target) == 1
caption = caption.replace('BBOBPPFIGSTARGETRANGE',
- str(genericsettings.current_testbed.pprldmany_target_range_latex))
+ str(testbedsettings.current_testbed.pprldmany_target_range_latex))
if genericsettings.runlength_based_targets:
caption = caption.replace('REFERENCE_ALGORITHM', target.reference_algorithm)
caption = caption.replace('REFERENCEALGORITHM', target.reference_algorithm)
else:
caption = caption.replace('BBOBPPFIGSFTARGET',
- str(len(genericsettings.current_testbed.pprldmany_target_values)))
+ str(len(testbedsettings.current_testbed.pprldmany_target_values)))
return caption
@@ -170,9 +171,9 @@ def get_ecdfs_single_fcts_caption():
''' For the moment, only the bi-objective case is covered! '''
s = (r"""Empirical cumulative distribution of simulated (bootstrapped) runtimes in number
of objective function evaluations divided by dimension (FEvals/DIM) for the $""" +
- str(len(genericsettings.current_testbed.pprldmany_target_values)) +
+ str(len(testbedsettings.current_testbed.pprldmany_target_values)) +
r"$ targets " +
- str(genericsettings.current_testbed.pprldmany_target_range_latex) +
+ str(testbedsettings.current_testbed.pprldmany_target_range_latex) +
r" for functions $f_1$ to $f_{16}$ and all dimensions. "
)
return s
@@ -184,9 +185,9 @@ def get_ecdfs_all_groups_caption():
# r"(FEvals/DIM) for " +
s = (r"""Empirical cumulative distribution of simulated (bootstrapped) runtimes, measured in number
of objective function evaluations, divided by dimension (FEvals/DIM) for the $""" +
- str(len(genericsettings.current_testbed.pprldmany_target_values)) +
+ str(len(testbedsettings.current_testbed.pprldmany_target_values)) +
r"$ targets " +
- str(genericsettings.current_testbed.pprldmany_target_range_latex) +
+ str(testbedsettings.current_testbed.pprldmany_target_range_latex) +
r" for all function groups and all dimensions. The aggregation" +
r" over all 55 functions is shown in the last plot."
)
@@ -391,7 +392,7 @@ def main(dictAlg, htmlFilePrefix, isBiobjective, sortedAlgs=None, outputdir='ppd
"""
# target becomes a TargetValues "list" with one element
- target = genericsettings.current_testbed.ppfigs_ftarget
+ target = testbedsettings.current_testbed.ppfigs_ftarget
target = pproc.TargetValues.cast([target] if numpy.isscalar(target) else target)
latex_commands_filename = os.path.join(outputdir, 'bbob_pproc_commands.tex')
assert isinstance(target, pproc.TargetValues)
@@ -517,7 +518,7 @@ def main(dictAlg, htmlFilePrefix, isBiobjective, sortedAlgs=None, outputdir='ppd
if f in funInfos.keys():
plt.gca().set_title(funInfos[f], fontsize=fontSize)
- functions_with_legend = genericsettings.current_testbed.functions_with_legend
+ functions_with_legend = testbedsettings.current_testbed.functions_with_legend
isLegend = False
if legend:
plotLegend(handles)
diff --git a/code-postprocessing/bbob_pproc/compall/ppperfprof.py b/code-postprocessing/bbob_pproc/compall/ppperfprof.py
index 0bf80380b..87e69db5f 100755
--- a/code-postprocessing/bbob_pproc/compall/ppperfprof.py
+++ b/code-postprocessing/bbob_pproc/compall/ppperfprof.py
@@ -32,7 +32,7 @@
# plot the profiles
figure()
- # bb.compall.ppperfprof.plotmultiple(dsets, dsref=bb.bestalg.bestalgentries2009)
+ # bb.compall.ppperfprof.plotmultiple(dsets, dsref=bb.bestalg.bestAlgorithmEntries)
"""
from __future__ import absolute_import
diff --git a/code-postprocessing/bbob_pproc/compall/pprldmany.py b/code-postprocessing/bbob_pproc/compall/pprldmany.py
index f69e09f2a..b2258e4b8 100644
--- a/code-postprocessing/bbob_pproc/compall/pprldmany.py
+++ b/code-postprocessing/bbob_pproc/compall/pprldmany.py
@@ -46,7 +46,7 @@
from pdb import set_trace
import numpy as np
import matplotlib.pyplot as plt
-from .. import toolsstats, bestalg, genericsettings
+from .. import toolsstats, bestalg, genericsettings, testbedsettings
from .. import pproc as pp # import dictAlgByDim, dictAlgByFun
from .. import toolsdivers # strip_pathname, str_to_latex
from .. import pprldistr # plotECDF, beautifyECDF
@@ -405,7 +405,7 @@ def plot(dsList, targets=None, craftingeffort=0., **kwargs):
"""
if targets is None:
- targets = genericsettings.current_testbed.pprldmany_target_values
+ targets = testbedsettings.current_testbed.pprldmany_target_values
try:
if np.min(targets) >= 1:
ValueError('smallest target f-value is not smaller than one, use ``pproc.TargetValues(targets)`` to prevent this error')
@@ -612,7 +612,7 @@ def main(dictAlg, isBiobjective, order=None, outputdir='.', info='default',
# funcsolved = [set()] * len(targets) # number of functions solved per target
xbest2009 = []
maxevalsbest2009 = []
- target_values = genericsettings.current_testbed.pprldmany_target_values
+ target_values = testbedsettings.current_testbed.pprldmany_target_values
dictDimList = pp.dictAlgByDim(dictAlg)
dims = sorted(dictDimList)
@@ -627,7 +627,7 @@ def main(dictAlg, isBiobjective, order=None, outputdir='.', info='default',
# print target_values((f, dim))
for j, t in enumerate(target_values((f, dim))):
- # for j, t in enumerate(genericsettings.current_testbed.ecdf_target_values(1e2, f)):
+ # for j, t in enumerate(testbedsettings.current_testbed.ecdf_target_values(1e2, f)):
# funcsolved[j].add(f)
for alg in algorithms_with_data:
@@ -778,11 +778,11 @@ def algname_to_label(algname, dirname=None):
dictFG = pp.dictAlgByFuncGroup(dictAlg)
dictKey = dictFG.keys()[0]
functionGroups = dictAlg[dictAlg.keys()[0]].getFuncGroups()
- text = '%s\n%s, %d-D' % (genericsettings.current_testbed.name,
+ text = '%s\n%s, %d-D' % (testbedsettings.current_testbed.name,
functionGroups[dictKey],
dimList[0])
else:
- text = '%s - %s' % (genericsettings.current_testbed.name,
+ text = '%s - %s' % (testbedsettings.current_testbed.name,
ppfig.consecutiveNumbers(sorted(dictFunc.keys()), 'f'))
if not (plotType == PlotType.DIM):
text += ', %d-D' % dimList[0]
@@ -799,7 +799,7 @@ def algname_to_label(algname, dirname=None):
verticalalignment="top", transform=plt.gca().transAxes, size='small')
if len(dictFunc) == 1:
plt.title(' '.join((str(dictFunc.keys()[0]),
- genericsettings.current_testbed.short_names[dictFunc.keys()[0]])))
+ testbedsettings.current_testbed.short_names[dictFunc.keys()[0]])))
a = plt.gca()
plt.xlim(xmin=1e-0, xmax=x_limit**annotation_space_end_relative)
diff --git a/code-postprocessing/bbob_pproc/compall/pptables.py b/code-postprocessing/bbob_pproc/compall/pptables.py
index 160399640..38986fdab 100644
--- a/code-postprocessing/bbob_pproc/compall/pptables.py
+++ b/code-postprocessing/bbob_pproc/compall/pptables.py
@@ -8,7 +8,7 @@
from pdb import set_trace
import warnings
import numpy
-from .. import genericsettings, bestalg, toolsstats, pproc, ppfigparam
+from .. import genericsettings, bestalg, toolsstats, pproc, ppfigparam, testbedsettings
from ..pptex import writeFEvals, writeFEvals2, writeFEvalsMaxPrec, tableXLaTeX, numtotext
from ..toolsstats import significancetest, significance_all_best_vs_other
from ..pproc import DataSetList
@@ -23,7 +23,7 @@
"""
def get_table_caption():
- """ Sets table caption, based on the genericsettings.current_testbed
+ """ Sets table caption, based on the testbedsettings.current_testbed
and genericsettings.runlength_based_targets.
TODO: \hvref and \fopt should be defined via the current_testbed,
@@ -41,7 +41,7 @@ def get_table_caption():
target, the corresponding best \aRT\
in the first row. The different target \Df-values are shown in the top row.
\#succ is the number of trials that reached the (final) target
- $\fopt + """ + genericsettings.current_testbed.hardesttargetlatex + r"""$.
+ $\fopt + """ + testbedsettings.current_testbed.hardesttargetlatex + r"""$.
"""
table_caption_two2 = r"""%
run-length based target, the corresponding best \aRT\
@@ -53,9 +53,9 @@ def get_table_caption():
in number of function evaluations, in #1. For each function, the \aRT\
and, in braces as dispersion measure, the half difference between 10 and
90\%-tile of (bootstrapped) runtimes is shown for the different
- target \Df-values as shown in the top row.
+ target \DI-values as shown in the top row.
\#succ is the number of trials that reached the last target
- $\hvref + """ + genericsettings.current_testbed.hardesttargetlatex + r"""$.
+ $\hvref + """ + testbedsettings.current_testbed.hardesttargetlatex + r"""$.
"""
table_caption_rest = (r"""%
The median number of conducted function evaluations is additionally given in
@@ -63,17 +63,17 @@ def get_table_caption():
Entries, succeeded by a star, are statistically significantly better (according to
the rank-sum test) when compared to all other algorithms of the table, with
$p = 0.05$ or $p = 10^{-k}$ when the number $k$ following the star is larger
- than 1, with Bonferroni correction by the number of instances. """ +
+ than 1, with Bonferroni correction of #2. """ +
(r"""A $\downarrow$ indicates the same tested against the best
algorithm of BBOB-2009."""
- if not (genericsettings.current_testbed.name == genericsettings.testbed_name_bi)
+ if not (testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi)
else "") + r"""Best results are printed in bold.
""")
- if genericsettings.current_testbed.name == genericsettings.testbed_name_bi:
+ if testbedsettings.current_testbed.name == testbedsettings.testbed_name_bi:
# NOTE: no runlength-based targets supported yet
table_caption = table_caption_one_bi + table_caption_rest
- elif genericsettings.current_testbed.name == genericsettings.testbed_name_single:
+ elif testbedsettings.current_testbed.name == testbedsettings.testbed_name_single:
if genericsettings.runlength_based_targets:
table_caption = table_caption_one + table_caption_two2 + table_caption_rest
else:
@@ -254,7 +254,7 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi
* significance test against best algorithm
* table width...
- Takes ``pptable_targetsOfInterest`` from genericsetting's Testbed instance
+    Takes ``pptable_targetsOfInterest`` from the testbedsettings' Testbed instance
as "input argument" to compute the desired target values.
``pptable_targetsOfInterest`` might be configured via config.
@@ -264,7 +264,7 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi
bestalgentries = bestalg.load_best_algorithm()
- testbed = genericsettings.current_testbed
+ testbed = testbedsettings.current_testbed
# Sort data per dimension and function
dictData = {}
@@ -283,7 +283,7 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi
for df in dictData:
# Generate one table per df
# first update targets for each dimension-function pair if needed:
- targetsOfInterest = testbed.pptable_targetsOfInterest((df[1], df[0]))
+ targetsOfInterest = testbed.pptablemany_targetsOfInterest((df[1], df[0]))
if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues):
targetf = targetsOfInterest[-1]
else:
@@ -394,6 +394,7 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi
% (2 * len(targetsOfInterest) + 2, header)])
extraeol.append('')
+ curlineHtml = []
if function_targets_line is True or (function_targets_line and df[1] in function_targets_line):
if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues):
curline = [r'\#FEs/D']
@@ -404,8 +405,12 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi
curlineHtml.append('
%s REPLACE%d | \n' % (i, counter))
counter += 1
else:
- curline = [r'$\Delta f_\mathrm{opt}$']
- curlineHtml = ['\n\nΔ fopt REPLACEH | \n']
+ if (testbed.name == testbedsettings.testbed_name_bi):
+ curline = [r'$\Df$']
+ curlineHtml = ['\n\nΔ HVref REPLACEH | \n']
+ else:
+ curline = [r'$\Delta f_\mathrm{opt}$']
+ curlineHtml = ['\n\nΔ fopt REPLACEH | \n']
counter = 1
for t in targetsOfInterest:
curline.append(r'\multicolumn{2}{@{\,}X@{\,}}{%s}'
@@ -414,16 +419,21 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi
counter += 1
# curline.append(r'\multicolumn{2}{@{\,}X@{}|}{%s}'
# % writeFEvals2(targetsOfInterest[-1], precision=1, isscientific=True))
- curline.append(r'\multicolumn{2}{@{}l@{}}{\#succ}')
+ if (testbed.name == testbedsettings.testbed_name_bi):
+ curline.append(r'\multicolumn{2}{|@{}l@{}}{\begin{rotate}{30}\#succ\end{rotate}}')
+ else:
+ curline.append(r'\multicolumn{2}{|@{}l@{}}{\#succ}')
curlineHtml.append('#succ REPLACEF | \n
\n\n')
table.append(curline)
-
+
extraeol.append(r'\hline')
# extraeol.append(r'\hline\arrayrulecolor{tableShade}')
curline = [r'\aRT{}$_{\text{best}}$'] if with_table_heading else [r'\textbf{f%d}' % df[1]]
replaceValue = '\aRT{}best' if with_table_heading else ('f%d' % df[1])
curlineHtml = [item.replace('REPLACEH', replaceValue) for item in curlineHtml]
+
+
if bestalgentries:
if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues):
# write ftarget:fevals
@@ -468,7 +478,7 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi
curlineHtml = [item.replace('REPLACEF', replaceValue) for item in curlineHtml]
else: # if not bestalgentries
- curline.append(r'\multicolumn{%d}{@{}c@{}|}{}' % (2 * (len(targetsOfInterest) + 1)))
+ curline.append(r'\multicolumn{%d}{@{}c@{}|}{} & ' % (2 * (len(targetsOfInterest))))
for counter in range(1, len(targetsOfInterest) + 1):
curlineHtml = [item.replace('REPLACE%d' % counter, ' ') for item in curlineHtml]
curlineHtml = [item.replace('REPLACEF', ' ') for item in curlineHtml]
@@ -486,7 +496,7 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi
#if df == (5, 17):
#set_trace()
- header = r'\providecommand{\ntables}{7}'
+ header = r'\providecommand{\ntables}{%d}' % len(testbed.pptablemany_targetsOfInterest)
for i, alg in enumerate(algnames):
tableHtml.append('
\n')
#algname, entries, irs, line, line2, succ, runs, testres1alg in zip(algnames,
diff --git a/code-postprocessing/bbob_pproc/config.py b/code-postprocessing/bbob_pproc/config.py
index fdf3ddec1..8f8977199 100644
--- a/code-postprocessing/bbob_pproc/config.py
+++ b/code-postprocessing/bbob_pproc/config.py
@@ -19,6 +19,7 @@
import numpy as np
import ppfigdim, pptable
from . import genericsettings, pproc, pprldistr
+from . import testbedsettings as tbs
from .comp2 import ppfig2, ppscatter, pptable2
from .compall import ppfigs, pprldmany, pptables
@@ -34,13 +35,14 @@ def target_values(is_expensive, dict_max_fun_evals={}, runlength_limit=1e3):
genericsettings.runlength_based_targets = False
genericsettings.maxevals_fix_display = None
-def config(isBiobjective=None):
+
+def config(testbed_name=None):
"""called from a high level, e.g. rungeneric, to configure the lower level
modules via modifying parameter settings.
"""
-
- if isBiobjective is not None:
- genericsettings.loadCurrentTestbed(isBiobjective, pproc.TargetValues)
+
+ if testbed_name:
+ tbs.load_current_testbed(testbed_name, pproc.TargetValues)
genericsettings.simulated_runlength_bootstrap_sample_size = (10 + 990 / (1 + 10 * max(0, genericsettings.in_a_hurry)))
@@ -48,7 +50,7 @@ def config(isBiobjective=None):
# bestAlg for the biobjective case
# TODO: once this is solved, make sure that expensive setting is not
# available if no bestAlg or other reference algorithm is available
- if genericsettings.current_testbed and genericsettings.current_testbed.name == genericsettings.testbed_name_bi:
+ if tbs.current_testbed and tbs.current_testbed.name == tbs.testbed_name_bi:
if (genericsettings.isExpensive in (True, 1) or
genericsettings.runlength_based_targets in (True, 1)):
warnings.warn('Expensive setting not yet supported with bbob-biobj testbed; using non-expensive setting instead.')
@@ -67,11 +69,11 @@ def config(isBiobjective=None):
pprldmany.x_limit = genericsettings.maxevals_fix_display # always fixed
- if genericsettings.current_testbed:
+ if tbs.current_testbed:
- testbed = genericsettings.current_testbed
+ testbed = tbs.current_testbed
- testbed.scenario = genericsettings.scenario_rlbased
+ testbed.scenario = tbs.scenario_rlbased
# genericsettings (to be used in rungeneric2 while calling pprldistr.comp(...)):
testbed.rldValsOfInterest = pproc.RunlengthBasedTargetValues(
genericsettings.target_runlengths_in_single_rldistr,
@@ -100,7 +102,17 @@ def config(isBiobjective=None):
testbed.ppscatter_target_values = pproc.RunlengthBasedTargetValues(np.logspace(np.log10(0.5), np.log10(50), 8))
# pptable:
- testbed.pptable_targetsOfInterest = pproc.RunlengthBasedTargetValues(genericsettings.target_runlengths_in_table,
+ testbed.pptable_targetsOfInterest = pproc.RunlengthBasedTargetValues(testbed.pptable_target_runlengths,
+ reference_data = reference_data,
+ force_different_targets_factor=10**-0.2)
+
+ # pptable2:
+ testbed.pptable2_targetsOfInterest = pproc.RunlengthBasedTargetValues(testbed.pptable2_target_runlengths,
+ reference_data = reference_data,
+ force_different_targets_factor=10**-0.2)
+
+ # pptables:
+ testbed.pptables_targetsOfInterest = pproc.RunlengthBasedTargetValues(testbed.pptables_target_runlengths,
reference_data = reference_data,
force_different_targets_factor=10**-0.2)
# ppfigs
@@ -121,18 +133,7 @@ def config(isBiobjective=None):
{'color': 'c', 'marker': 'v', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4},
{'color': 'b', 'marker': '.', 'linewidth': 4},
{'color': 'k', 'marker': 'o', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4},
- ]
-
-
- # pptable2:
- pptable2.targetsOfInterest = pproc.RunlengthBasedTargetValues(genericsettings.target_runlengths_in_table,
- reference_data = reference_data,
- force_different_targets_factor=10**-0.2)
-
- # pptables (for rungenericmany):
- pptables.targetsOfInterest = pproc.RunlengthBasedTargetValues(genericsettings.target_runlengths_in_table,
- reference_data = reference_data,
- force_different_targets_factor=10**-0.2)
+ ]
ppscatter.markersize = 16
@@ -140,9 +141,9 @@ def config(isBiobjective=None):
pass # here the default values of the modules apply
# pprlmany.x_limit = ...should depend on noisy/noiseless
if 11 < 3: # for testing purpose
- if genericsettings.current_testbed:
+ if tbs.current_testbed:
# TODO: this case needs to be tested yet: the current problem is that no noisy data are in this folder
- genericsettings.current_testbed.pprldmany_target_values = pproc.RunlengthBasedTargetValues(10**np.arange(1, 4, 0.2), 'RANDOMSEARCH')
+ tbs.current_testbed.pprldmany_target_values = pproc.RunlengthBasedTargetValues(10**np.arange(1, 4, 0.2), 'RANDOMSEARCH')
pprldmany.fontsize = 20.0 # should depend on the number of data lines down to 10.0 ?
diff --git a/code-postprocessing/bbob_pproc/genericsettings.py b/code-postprocessing/bbob_pproc/genericsettings.py
index 75b510f20..0e74de6c2 100644
--- a/code-postprocessing/bbob_pproc/genericsettings.py
+++ b/code-postprocessing/bbob_pproc/genericsettings.py
@@ -13,33 +13,34 @@
import os
import warnings
import numpy as np
+
test = False # debug/test flag, set to False for committing the final version
if 1 < 3 and test:
np.seterr(all='raise')
np.seterr(under='ignore') # ignore underflow
force_assertions = False # another debug flag for time-consuming assertions
-in_a_hurry = 1000 # [0, 1000] lower resolution, no eps, saves 30% time
+in_a_hurry = 1000 # [0, 1000] lower resolution, no eps, saves 30% time
maxevals_fix_display = None # 3e2 is the expensive setting only used in config, yet to be improved!?
runlength_based_targets = 'auto' # 'auto' means automatic choice, otherwise True or False
dimensions_to_display = (2, 3, 5, 10, 20, 40) # this could be used to set the dimensions in respective modules
-generate_svg_files = True # generate the svg figures
-scaling_figures_with_boxes = True
+generate_svg_files = True # generate the svg figures
+scaling_figures_with_boxes = True
# should replace ppfigdim.dimsBBOB, ppfig2.dimensions, ppfigparam.dimsBBOB?
# Variables used in the routines defining desired output for BBOB.
tabDimsOfInterest = (5, 20) # dimension which are displayed in the tables
target_runlengths_in_scaling_figs = [0.5, 1.2, 3, 10, 50] # used in config
-target_runlengths_in_table = [0.5, 1.2, 3, 10, 50] # [0.5, 2, 10, 50] # used in config
target_runlengths_in_single_rldistr = [0.5, 2, 10, 50] # used in config
-target_runlength = 10 # used in ppfigs.main
+target_runlength = 10 # used in ppfigs.main
xlimit_expensive = 1e3 # used in
-tableconstant_target_function_values = (1e1, 1e0, 1e-1, 1e-3, 1e-5, 1e-7) # used as input for pptables.main in rungenericmany
+#tableconstant_target_function_values = (
+#1e1, 1e0, 1e-1, 1e-3, 1e-5, 1e-7) # used as input for pptables.main in rungenericmany
# tableconstant_target_function_values = (1e3, 1e2, 1e1, 1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-7) # for post-workshop landscape tables
-#tabValsOfInterest = (1.0, 1.0e-2, 1.0e-4, 1.0e-6, 1.0e-8)
-#tabValsOfInterest = (10, 1.0, 1e-1, 1e-3, 1e-5, 1.0e-8)
+# tabValsOfInterest = (1.0, 1.0e-2, 1.0e-4, 1.0e-6, 1.0e-8)
+# tabValsOfInterest = (10, 1.0, 1e-1, 1e-3, 1e-5, 1.0e-8)
dim_related_markers = ('+', 'v', '*', 'o', 's', 'D', 'x')
dim_related_colors = ('c', 'g', 'b', 'k', 'r', 'm', 'k', 'y', 'k', 'c', 'r', 'm')
@@ -57,141 +58,139 @@
# summarized_target_function_values = [-1, 3] # easy easy
# summarized_target_function_values = (10, 1e0, 1e-1) # all in one figure (means what?)
-instancesOfInterest2009 = {1:3, 2:3, 3:3, 4:3, 5:3} # 2009 instances
-instancesOfInterest2010 = {1:1, 2:1, 3:1, 4:1, 5:1, 6:1, 7:1, 8:1, 9:1,
- 10:1, 11:1, 12:1, 13:1, 14:1, 15:1} # 2010 instances
-instancesOfInterest2012 = {1:1, 2:1, 3:1, 4:1, 5:1, 21:1, 22:1, 23:1, 24:1,
- 25:1, 26:1, 27:1, 28:1, 29:1, 30:1} # 2012 instances
-instancesOfInterest2013 = {1:1, 2:1, 3:1, 4:1, 5:1, 31:1, 32:1, 33:1, 34:1,
- 35:1, 36:1, 37:1, 38:1, 39:1, 40:1} # 2013 instances
-instancesOfInterest2015 = {1:1, 2:1, 3:1, 4:1, 5:1, 41:1, 42:1, 43:1, 44:1,
- 45:1, 46:1, 47:1, 48:1, 49:1, 50:1} # 2015 instances
-instancesOfInterest2016 = {1:1, 2:1, 3:1, 4:1, 5:1, 51:1, 52:1, 53:1, 54:1,
- 55:1, 56:1, 57:1, 58:1, 59:1, 60:1} # 2016 instances
-instancesOfInterestBiobj2016 = {1:1, 2:1, 3:1, 4:1, 5:1} # bi-objective 2016 instances
-instancesOfInterest = {1:1, 2:1, 3:1, 4:1, 5:1, 41:1, 42:1, 43:1, 44:1,
- 45:1, 46:1, 47:1, 48:1, 49:1, 50:1} # 2015 instances; only for consistency checking
+instancesOfInterest2009 = {1: 3, 2: 3, 3: 3, 4: 3, 5: 3} # 2009 instances
+instancesOfInterest2010 = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1,
+ 10: 1, 11: 1, 12: 1, 13: 1, 14: 1, 15: 1} # 2010 instances
+instancesOfInterest2012 = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 21: 1, 22: 1, 23: 1, 24: 1,
+ 25: 1, 26: 1, 27: 1, 28: 1, 29: 1, 30: 1} # 2012 instances
+instancesOfInterest2013 = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 31: 1, 32: 1, 33: 1, 34: 1,
+ 35: 1, 36: 1, 37: 1, 38: 1, 39: 1, 40: 1} # 2013 instances
+instancesOfInterest2015 = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 41: 1, 42: 1, 43: 1, 44: 1,
+ 45: 1, 46: 1, 47: 1, 48: 1, 49: 1, 50: 1} # 2015 instances
+instancesOfInterest2016 = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 51: 1, 52: 1, 53: 1, 54: 1,
+ 55: 1, 56: 1, 57: 1, 58: 1, 59: 1, 60: 1} # 2016 instances
+instancesOfInterestBiobj2016 = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1} # bi-objective 2016 instances
+instancesOfInterest = {1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 41: 1, 42: 1, 43: 1, 44: 1,
+ 45: 1, 46: 1, 47: 1, 48: 1, 49: 1, 50: 1} # 2015 instances; only for consistency checking
line_styles = [ # used by ppfigs and pprlmany
- {'marker': 'o', 'markersize': 31, 'linestyle': '-', 'color': '#000080'}, # 'NavyBlue'
- {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': '#ff00ff'}, # 'Magenta'
- {'marker': '*', 'markersize': 33, 'linestyle': '-', 'color': '#ffa500'}, # 'Orange'
- {'marker': 'v', 'markersize': 28, 'linestyle': '-', 'color': '#6495ed'}, # 'CornflowerBlue'
- {'marker': 'h', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, # 'Red'
- {'marker': '^', 'markersize': 25, 'linestyle': '-', 'color': '#9acd32'}, # 'YellowGreen'
-# {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'g'}, # 'green' avoid green because of
-# {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': '#ffd700'}, # 'Goldenrod' seems too light
-# {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'k'}, # 'Black' is too close to NavyBlue
-# {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': '#d02090'}, # square, 'VioletRed' seems too close to red
- {'marker': 'p', 'markersize': 24, 'linestyle': '-', 'color': 'c'},
- {'marker': 'H', 'markersize': 23, 'linestyle': '-', 'color': '#bebebe'}, # 'Gray'
- # {'marker': 'o', 'markersize': 23, 'linestyle': '-', 'color': '#ffff00'}, # 'Yellow'
- {'marker': '3', 'markersize': 23, 'linestyle': '-', 'color': '#adff2f'}, # 'GreenYellow'
- {'marker': '1', 'markersize': 23, 'linestyle': '-', 'color': '#228b22'}, # 'ForestGreen'
- {'marker': 'D', 'markersize': 23, 'linestyle': '-', 'color': '#ffc0cb'}, # 'Lavender'
- {'marker': '<', 'markersize': 23, 'linestyle': '-', 'color': '#87ceeb'}, # 'SkyBlue' close to CornflowerBlue
- {'marker': 'v', 'markersize': 23, 'linestyle': '--', 'color': '#000080'}, # 'NavyBlue'
- {'marker': '*', 'markersize': 23, 'linestyle': '--', 'color': 'r'}, # 'Red'
- {'marker': 's', 'markersize': 23, 'linestyle': '--', 'color': '#ffd700'}, # 'Goldenrod'
- {'marker': 'd', 'markersize': 23, 'linestyle': '--', 'color': '#d02090'}, # square, 'VioletRed'
- {'marker': '^', 'markersize': 23, 'linestyle': '--', 'color': '#6495ed'}, # 'CornflowerBlue'
- {'marker': '<', 'markersize': 23, 'linestyle': '--', 'color': '#ffa500'}, # 'Orange'
- {'marker': 'h', 'markersize': 23, 'linestyle': '--', 'color': '#ff00ff'}, # 'Magenta'
- # {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': 'm'}, # square, magenta
- {'marker': 'p', 'markersize': 23, 'linestyle': '--', 'color': '#bebebe'}, # 'Gray'
- {'marker': 'H', 'markersize': 23, 'linestyle': '--', 'color': '#87ceeb'}, # 'SkyBlue'
- {'marker': '1', 'markersize': 23, 'linestyle': '--', 'color': '#ffc0cb'}, # 'Lavender'
- {'marker': '2', 'markersize': 23, 'linestyle': '--', 'color': '#228b22'}, # 'ForestGreen'
- {'marker': '4', 'markersize': 23, 'linestyle': '--', 'color': '#32cd32'}, # 'LimeGreen'
- {'marker': '3', 'markersize': 23, 'linestyle': '--', 'color': '#9acd32'}, # 'YellowGreen'
- {'marker': 'D', 'markersize': 23, 'linestyle': '--', 'color': '#adff2f'}, # 'GreenYellow'
- ]
+ {'marker': 'o', 'markersize': 31, 'linestyle': '-', 'color': '#000080'}, # 'NavyBlue'
+ {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': '#ff00ff'}, # 'Magenta'
+ {'marker': '*', 'markersize': 33, 'linestyle': '-', 'color': '#ffa500'}, # 'Orange'
+ {'marker': 'v', 'markersize': 28, 'linestyle': '-', 'color': '#6495ed'}, # 'CornflowerBlue'
+ {'marker': 'h', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, # 'Red'
+ {'marker': '^', 'markersize': 25, 'linestyle': '-', 'color': '#9acd32'}, # 'YellowGreen'
+ # {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'g'}, # 'green' avoid green because of
+ # {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': '#ffd700'}, # 'Goldenrod' seems too light
+ # {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'k'}, # 'Black' is too close to NavyBlue
+ # {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': '#d02090'}, # square, 'VioletRed' seems too close to red
+ {'marker': 'p', 'markersize': 24, 'linestyle': '-', 'color': 'c'},
+ {'marker': 'H', 'markersize': 23, 'linestyle': '-', 'color': '#bebebe'}, # 'Gray'
+ # {'marker': 'o', 'markersize': 23, 'linestyle': '-', 'color': '#ffff00'}, # 'Yellow'
+ {'marker': '3', 'markersize': 23, 'linestyle': '-', 'color': '#adff2f'}, # 'GreenYellow'
+ {'marker': '1', 'markersize': 23, 'linestyle': '-', 'color': '#228b22'}, # 'ForestGreen'
+ {'marker': 'D', 'markersize': 23, 'linestyle': '-', 'color': '#ffc0cb'}, # 'Lavender'
+ {'marker': '<', 'markersize': 23, 'linestyle': '-', 'color': '#87ceeb'}, # 'SkyBlue' close to CornflowerBlue
+ {'marker': 'v', 'markersize': 23, 'linestyle': '--', 'color': '#000080'}, # 'NavyBlue'
+ {'marker': '*', 'markersize': 23, 'linestyle': '--', 'color': 'r'}, # 'Red'
+ {'marker': 's', 'markersize': 23, 'linestyle': '--', 'color': '#ffd700'}, # 'Goldenrod'
+ {'marker': 'd', 'markersize': 23, 'linestyle': '--', 'color': '#d02090'}, # square, 'VioletRed'
+ {'marker': '^', 'markersize': 23, 'linestyle': '--', 'color': '#6495ed'}, # 'CornflowerBlue'
+ {'marker': '<', 'markersize': 23, 'linestyle': '--', 'color': '#ffa500'}, # 'Orange'
+ {'marker': 'h', 'markersize': 23, 'linestyle': '--', 'color': '#ff00ff'}, # 'Magenta'
+ # {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': 'm'}, # square, magenta
+ {'marker': 'p', 'markersize': 23, 'linestyle': '--', 'color': '#bebebe'}, # 'Gray'
+ {'marker': 'H', 'markersize': 23, 'linestyle': '--', 'color': '#87ceeb'}, # 'SkyBlue'
+ {'marker': '1', 'markersize': 23, 'linestyle': '--', 'color': '#ffc0cb'}, # 'Lavender'
+ {'marker': '2', 'markersize': 23, 'linestyle': '--', 'color': '#228b22'}, # 'ForestGreen'
+ {'marker': '4', 'markersize': 23, 'linestyle': '--', 'color': '#32cd32'}, # 'LimeGreen'
+ {'marker': '3', 'markersize': 23, 'linestyle': '--', 'color': '#9acd32'}, # 'YellowGreen'
+ {'marker': 'D', 'markersize': 23, 'linestyle': '--', 'color': '#adff2f'}, # 'GreenYellow'
+]
line_styles_old = [ # used by ppfigs and pprlmany
- {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': 'b'},
- {'marker': 'v', 'markersize': 30, 'linestyle': '-', 'color': 'r'},
- {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'c'},
- {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': 'm'}, # square
- {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'k'},
- {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': 'y'},
- {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': 'g'},
- {'marker': 's', 'markersize': 24, 'linestyle': '-', 'color': 'b'},
- {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': 'r'},
- {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': 'c'},
- {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': 'm'},
- {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': 'k'},
- {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': 'y'},
- {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': 'g'},
- {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': 'g'},
- {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': 'r'},
- {'marker': 'v', 'markersize': 30, 'linestyle': '-', 'color': 'b'},
- {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'm'},
- {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': 'c'}, # square
- {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'y'},
- {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': 'k'},
- {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': 'b'},
- {'marker': 's', 'markersize': 24, 'linestyle': '-', 'color': 'g'},
- {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': 'c'},
- {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': 'r'},
- {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': 'k'},
- {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': 'm'},
- {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': 'g'},
- {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': 'y'},
- {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': 'r'}
- ]
+ {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': 'b'},
+ {'marker': 'v', 'markersize': 30, 'linestyle': '-', 'color': 'r'},
+ {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'c'},
+ {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': 'm'}, # square
+ {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'k'},
+ {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': 'y'},
+ {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': 'g'},
+ {'marker': 's', 'markersize': 24, 'linestyle': '-', 'color': 'b'},
+ {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': 'r'},
+ {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': 'c'},
+ {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': 'm'},
+ {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': 'k'},
+ {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': 'y'},
+ {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': 'g'},
+ {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': 'g'},
+ {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': 'r'},
+ {'marker': 'v', 'markersize': 30, 'linestyle': '-', 'color': 'b'},
+ {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'm'},
+ {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': 'c'}, # square
+ {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'y'},
+ {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': 'k'},
+ {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': 'b'},
+ {'marker': 's', 'markersize': 24, 'linestyle': '-', 'color': 'g'},
+ {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': 'c'},
+ {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': 'r'},
+ {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': 'k'},
+ {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': 'm'},
+ {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': 'g'},
+ {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': 'y'},
+ {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': 'r'}
+]
more_old_line_styles = [ # used by ppfigs and pprlmany
- {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': '#000080'}, # 'NavyBlue'
- {'marker': 'v', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, # 'Red'
- {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': '#ffd700'}, # 'Goldenrod' seems too light
- {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': '#d02090'}, # square, 'VioletRed'
- {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'k'}, # 'Black' is too close to NavyBlue
- {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': '#6495ed'}, # 'CornflowerBlue'
- {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': '#ffa500'}, # 'Orange'
- {'marker': 'p', 'markersize': 24, 'linestyle': '-', 'color': '#ff00ff'}, # 'Magenta'
- {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': '#bebebe'}, # 'Gray'
- {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': '#87ceeb'}, # 'SkyBlue'
- {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': '#ffc0cb'}, # 'Lavender'
- {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': '#228b22'}, # 'ForestGreen'
- {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': '#32cd32'}, # 'LimeGreen'
- {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': '#9acd32'}, # 'YellowGreen'
- {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': '#adff2f'}, # 'GreenYellow'
- #{'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': '#ffff00'}, # 'Yellow'
- {'marker': 'v', 'markersize': 30, 'linestyle': '--', 'color': '#000080'}, # 'NavyBlue'
- {'marker': '*', 'markersize': 31, 'linestyle': '--', 'color': 'r'}, # 'Red'
- {'marker': 's', 'markersize': 20, 'linestyle': '--', 'color': '#ffd700'}, # 'Goldenrod'
- {'marker': 'd', 'markersize': 27, 'linestyle': '--', 'color': '#d02090'}, # square, 'VioletRed'
- {'marker': '^', 'markersize': 26, 'linestyle': '--', 'color': '#6495ed'}, # 'CornflowerBlue'
- {'marker': '<', 'markersize': 25, 'linestyle': '--', 'color': '#ffa500'}, # 'Orange'
- {'marker': 'h', 'markersize': 24, 'linestyle': '--', 'color': '#ff00ff'}, # 'Magenta'
- {'marker': 'p', 'markersize': 24, 'linestyle': '--', 'color': '#bebebe'}, # 'Gray'
- {'marker': 'H', 'markersize': 24, 'linestyle': '--', 'color': '#87ceeb'}, # 'SkyBlue'
- {'marker': '1', 'markersize': 24, 'linestyle': '--', 'color': '#ffc0cb'}, # 'Lavender'
- {'marker': '2', 'markersize': 24, 'linestyle': '--', 'color': '#228b22'}, # 'ForestGreen'
- {'marker': '4', 'markersize': 24, 'linestyle': '--', 'color': '#32cd32'}, # 'LimeGreen'
- {'marker': '3', 'markersize': 24, 'linestyle': '--', 'color': '#9acd32'}, # 'YellowGreen'
- {'marker': 'D', 'markersize': 24, 'linestyle': '--', 'color': '#adff2f'}, # 'GreenYellow'
- ]
-
-
+ {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': '#000080'}, # 'NavyBlue'
+ {'marker': 'v', 'markersize': 30, 'linestyle': '-', 'color': 'r'}, # 'Red'
+ {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': '#ffd700'}, # 'Goldenrod' seems too light
+ {'marker': 's', 'markersize': 20, 'linestyle': '-', 'color': '#d02090'}, # square, 'VioletRed'
+ {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'k'}, # 'Black' is too close to NavyBlue
+ {'marker': 'd', 'markersize': 26, 'linestyle': '-', 'color': '#6495ed'}, # 'CornflowerBlue'
+ {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': '#ffa500'}, # 'Orange'
+ {'marker': 'p', 'markersize': 24, 'linestyle': '-', 'color': '#ff00ff'}, # 'Magenta'
+ {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': '#bebebe'}, # 'Gray'
+ {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': '#87ceeb'}, # 'SkyBlue'
+ {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': '#ffc0cb'}, # 'Lavender'
+ {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': '#228b22'}, # 'ForestGreen'
+ {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': '#32cd32'}, # 'LimeGreen'
+ {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': '#9acd32'}, # 'YellowGreen'
+ {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': '#adff2f'}, # 'GreenYellow'
+ # {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': '#ffff00'}, # 'Yellow'
+ {'marker': 'v', 'markersize': 30, 'linestyle': '--', 'color': '#000080'}, # 'NavyBlue'
+ {'marker': '*', 'markersize': 31, 'linestyle': '--', 'color': 'r'}, # 'Red'
+ {'marker': 's', 'markersize': 20, 'linestyle': '--', 'color': '#ffd700'}, # 'Goldenrod'
+ {'marker': 'd', 'markersize': 27, 'linestyle': '--', 'color': '#d02090'}, # square, 'VioletRed'
+ {'marker': '^', 'markersize': 26, 'linestyle': '--', 'color': '#6495ed'}, # 'CornflowerBlue'
+ {'marker': '<', 'markersize': 25, 'linestyle': '--', 'color': '#ffa500'}, # 'Orange'
+ {'marker': 'h', 'markersize': 24, 'linestyle': '--', 'color': '#ff00ff'}, # 'Magenta'
+ {'marker': 'p', 'markersize': 24, 'linestyle': '--', 'color': '#bebebe'}, # 'Gray'
+ {'marker': 'H', 'markersize': 24, 'linestyle': '--', 'color': '#87ceeb'}, # 'SkyBlue'
+ {'marker': '1', 'markersize': 24, 'linestyle': '--', 'color': '#ffc0cb'}, # 'Lavender'
+ {'marker': '2', 'markersize': 24, 'linestyle': '--', 'color': '#228b22'}, # 'ForestGreen'
+ {'marker': '4', 'markersize': 24, 'linestyle': '--', 'color': '#32cd32'}, # 'LimeGreen'
+ {'marker': '3', 'markersize': 24, 'linestyle': '--', 'color': '#9acd32'}, # 'YellowGreen'
+ {'marker': 'D', 'markersize': 24, 'linestyle': '--', 'color': '#adff2f'}, # 'GreenYellow'
+]
if 11 < 3: # in case using my own linestyles
line_styles = [ # used by ppfigs and pprlmany, to be modified
- {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': 'b'},
- {'marker': 'o', 'markersize': 30, 'linestyle': '-', 'color': 'r'},
- {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'b'},
- {'marker': '*', 'markersize': 20, 'linestyle': '-', 'color': 'r'},
- {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'b'},
- {'marker': '^', 'markersize': 26, 'linestyle': '-', 'color': 'r'},
- {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': 'g'},
- {'marker': 'p', 'markersize': 24, 'linestyle': '-', 'color': 'b'},
- {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': 'r'},
- {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': 'c'},
- {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': 'm'},
- {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': 'k'},
- {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': 'y'},
- {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': 'g'},
- {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': 'g'}
- ]
+ {'marker': 'o', 'markersize': 25, 'linestyle': '-', 'color': 'b'},
+ {'marker': 'o', 'markersize': 30, 'linestyle': '-', 'color': 'r'},
+ {'marker': '*', 'markersize': 31, 'linestyle': '-', 'color': 'b'},
+ {'marker': '*', 'markersize': 20, 'linestyle': '-', 'color': 'r'},
+ {'marker': '^', 'markersize': 27, 'linestyle': '-', 'color': 'b'},
+ {'marker': '^', 'markersize': 26, 'linestyle': '-', 'color': 'r'},
+ {'marker': 'h', 'markersize': 25, 'linestyle': '-', 'color': 'g'},
+ {'marker': 'p', 'markersize': 24, 'linestyle': '-', 'color': 'b'},
+ {'marker': 'H', 'markersize': 24, 'linestyle': '-', 'color': 'r'},
+ {'marker': '<', 'markersize': 24, 'linestyle': '-', 'color': 'c'},
+ {'marker': 'D', 'markersize': 24, 'linestyle': '-', 'color': 'm'},
+ {'marker': '1', 'markersize': 24, 'linestyle': '-', 'color': 'k'},
+ {'marker': '2', 'markersize': 24, 'linestyle': '-', 'color': 'y'},
+ {'marker': '4', 'markersize': 24, 'linestyle': '-', 'color': 'g'},
+ {'marker': '3', 'markersize': 24, 'linestyle': '-', 'color': 'g'}
+ ]
minmax_algorithm_fontsize = [10, 15] # depending on the number of algorithms
@@ -213,7 +212,7 @@
pprldmany_file_name = 'pprldmany'
pprldmany_group_file_name = 'pprldmany_gr'
-latex_commands_for_html = 'latex_commands_for_html'
+latex_commands_for_html = 'latex_commands_for_html'
extraction_folder_prefix = '_extracted_'
@@ -222,187 +221,48 @@
isFig = True
isTab = True
isNoisy = False
-isNoiseFree = False
+isNoiseFree = False
isConv = False
verbose = False
outputdir = 'ppdata'
inputsettings = 'color'
-isExpensive = False
+isExpensive = False
isRldOnSingleFcts = True
isRLDistr = True
##
-isLogLoss = True # only affects rungeneric1
-isPickled = False # only affects rungeneric1
+isLogLoss = True # only affects rungeneric1
+isPickled = False # only affects rungeneric1
##
-isScatter = True # only affects rungeneric2
-isScaleUp = True # only affects rungeneric2, only set here and not altered by any command line argument for now
+isScatter = True # only affects rungeneric2
+isScaleUp = True # only affects rungeneric2, only set here and not altered by any command line argument for now
# Used by getopt:
-shortoptlist = "hvpo:"
+shortoptlist = "hvpo:"
longoptlist = ["help", "output-dir=", "noisy", "noise-free",
"tab-only", "fig-only", "rld-only", "no-rld-single-fcts",
- "verbose", "settings=", "conv",
+ "verbose", "settings=", "conv",
"expensive", "runlength-based",
"los-only", "crafting-effort=", "pickle",
"sca-only", "no-svg"]
+
+
# thereby, "los-only", "crafting-effort=", and "pickle" affect only rungeneric1
# and "sca-only" only affects rungeneric2
-def getBenchmarksShortInfos(isBiobjective):
- return 'biobj-benchmarkshortinfos.txt' if isBiobjective else 'benchmarkshortinfos.txt'
-
+
def getFigFormats():
if in_a_hurry:
fig_formats = ('pdf', 'svg') if generate_svg_files else ('pdf',)
else:
fig_formats = ('eps', 'pdf', 'svg') if generate_svg_files else ('eps', 'pdf')
# fig_formats = ('eps', 'pdf', 'pdf', 'png', 'svg')
-
+
return fig_formats
-
+
+
def getFontSize(nameList):
maxFuncLength = max(len(i) for i in nameList)
fontSize = 24 - max(0, 2 * ((maxFuncLength - 35) / 5))
return fontSize
-scenario_rlbased = 'rlbased'
-scenario_fixed = 'fixed'
-scenario_biobjfixed = 'biobjfixed'
-all_scenarios = [scenario_rlbased, scenario_fixed, scenario_biobjfixed]
-
-testbed_name_single = 'bbob'
-testbed_name_bi = 'bbob-biobj'
-
-class Testbed(object):
- """this might become the future way to have settings related to testbeds
- TODO: should go somewhere else than genericsettings.py
- TODO: how do we pass information from the benchmark to the post-processing?
-
- """
- def info(self, fun_number=None):
- """info on the testbed if ``fun_number is None`` or one-line info
- for function with number ``fun_number``.
-
- """
- if fun_number is None:
- return self.__doc__
-
- for line in open(os.path.join(os.path.abspath(os.path.split(__file__)[0]),
- self.info_filename)).readlines():
- if line.split(): # ie if not empty
- try: # empty lines are ignored
- fun = int(line.split()[0])
- if fun == fun_number:
- return 'F'+str(fun) + ' ' + ' '.join(line.split()[1:])
- except ValueError:
- continue # ignore annotations
-
- def isBiobjective(self):
- return self.name == testbed_name_bi
-
-class GECCOBBOBTestbed(Testbed):
- """Testbed used in the GECCO BBOB workshops 2009, 2010, 2012, 2013, 2015.
- """
- def __init__(self, targetValues):
- # TODO: should become a function, as low_budget is a display setting
- # not a testbed setting
- # only the short info, how to deal with both infos?
- self.info_filename = 'GECCOBBOBbenchmarkinfos.txt'
- self.name = testbed_name_single
- self.short_names = {}
- self.hardesttargetlatex = '10^{-8}' # used for ppfigs, pptable, pptable2, and pptables
- self.ppfigs_ftarget = 1e-8
- self.ppfigdim_target_values = targetValues((10, 1, 1e-1, 1e-2, 1e-3, 1e-5, 1e-8)) # possibly changed in config
- self.pprldistr_target_values = targetValues((10., 1e-1, 1e-4, 1e-8)) # possibly changed in config
- self.pprldmany_target_values = targetValues(10**np.arange(2, -8.2, -0.2)) # possibly changed in config
- self.pprldmany_target_range_latex = '$10^{[-8..2]}$'
- self.ppscatter_target_values = targetValues(np.logspace(-8, 2, 46))
- self.rldValsOfInterest = (10, 1e-1, 1e-4, 1e-8) # possibly changed in config
- self.ppfvdistr_min_target = 1e-8
- self.functions_with_legend = (1, 24, 101, 130)
- self.number_of_functions = 24
- self.pptable_ftarget = 1e-8 # value for determining the success ratio in all tables
- self.pptable_targetsOfInterest = targetValues((10, 1, 1e-1, 1e-2, 1e-3, 1e-5, 1e-7)) # for pptable and pptables
- self.pptable2_targetsOfInterest = targetValues((1e+1, 1e-1, 1e-3, 1e-5, 1e-7)) # used for pptable2
- self.scenario = scenario_fixed
- self.best_algorithm_filename = 'bestalgentries2009.pickle.gz'
-
- try:
- info_list = open(os.path.join(os.path.dirname(__file__),
- getBenchmarksShortInfos(False)),
- 'r').read().split('\n')
- info_dict = {}
- for info in info_list:
- key_val = info.split(' ', 1)
- if len(key_val) > 1:
- info_dict[int(key_val[0])] = key_val[1]
- self.short_names = info_dict
- except:
- warnings.warn('benchmark infos not found')
-
-
-class GECCOBiobjBBOBTestbed(Testbed):
- """Testbed used in the GECCO biobjective BBOB workshop 2016.
- """
- def __init__(self, targetValues):
- # TODO: should become a function, as low_budget is a display setting
- # not a testbed setting
- # only the short info, how to deal with both infos?
- self.info_filename = 'GECCOBBOBbenchmarkinfos.txt'
- self.name = testbed_name_bi
- self.short_names = {}
- self.hardesttargetlatex = '10^{-5}' # used for ppfigs, pptable, pptable2, and pptables
- self.ppfigs_ftarget = 1e-5
- self.ppfigdim_target_values = targetValues((1e-1, 1e-2, 1e-3, 1e-4, 1e-5)) # possibly changed in config
- self.pprldistr_target_values = targetValues((1e-1, 1e-2, 1e-3, 1e-5)) # possibly changed in config
- target_values = np.append(np.append(10**np.arange(0, -5.1, -0.1), [0]), -10**np.arange(-5, -3.9, 0.2))
- self.pprldmany_target_values = targetValues(target_values) # possibly changed in config
- self.pprldmany_target_range_latex = '$\{-10^{-4}, -10^{-4.2}, $ $-10^{-4.4}, -10^{-4.6}, -10^{-4.8}, -10^{-5}, 0, 10^{-5}, 10^{-4.9}, 10^{-4.8}, \dots, 10^{-0.1}, 10^0\}$'
- # ppscatter_target_values are copied from the single objective case. Define the correct values!
- self.ppscatter_target_values = targetValues(np.logspace(-8, 2, 46)) # that does not look right here!
- self.rldValsOfInterest = (1e-1, 1e-2, 1e-3, 1e-4, 1e-5) # possibly changed in config
- self.ppfvdistr_min_target = 1e-5
- self.functions_with_legend = (1, 30, 31, 55)
- self.number_of_functions = 55
- self.pptable_ftarget = 1e-5 # value for determining the success ratio in all tables
- self.pptable_targetsOfInterest = targetValues((1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5)) # possibly changed in config for all tables
- self.pptable2_targetsOfInterest = targetValues((1e-1, 1e-2, 1e-3, 1e-4, 1e-5)) # used for pptable2
- self.scenario = scenario_biobjfixed
- self.best_algorithm_filename = ''
-
- try:
- info_list = open(os.path.join(os.path.dirname(__file__),
- getBenchmarksShortInfos(True)),
- 'r').read().split('\n')
- info_dict = {}
- for info in info_list:
- key_val = info.split(' ', 1)
- if len(key_val) > 1:
- info_dict[int(key_val[0])] = key_val[1]
- self.short_names = info_dict
- except:
- warnings.warn('benchmark infos not found')
-
-class GECCOBBOBNoisefreeTestbed(GECCOBBOBTestbed):
- __doc__ = GECCOBBOBTestbed.__doc__
-
-class GECCOBiobjBBOBNoisefreeTestbed(GECCOBiobjBBOBTestbed):
- __doc__ = GECCOBiobjBBOBTestbed.__doc__
-
-# TODO: this needs to be set somewhere, e.g. in rungeneric*
-# or even better by investigating in the data attributes
-
-current_testbed = None
-
-def loadCurrentTestbed(isBiobjective, targetValues):
-
- global current_testbed
-
- #if not current_testbed:
- if isBiobjective:
- current_testbed = GECCOBiobjBBOBNoisefreeTestbed(targetValues)
- else:
- current_testbed = GECCOBBOBNoisefreeTestbed(targetValues)
-
- return current_testbed
diff --git a/code-postprocessing/bbob_pproc/latex_commands_for_html.html b/code-postprocessing/bbob_pproc/latex_commands_for_html.html
index 4c1332e35..4a5b71d91 100644
--- a/code-postprocessing/bbob_pproc/latex_commands_for_html.html
+++ b/code-postprocessing/bbob_pproc/latex_commands_for_html.html
@@ -152,7 +152,7 @@
Entries, succeeded by a star, are statistically significantly better (according to
the rank-sum test) when compared to all other algorithms of the table, with
p = 0.05 or p = 10−k when the number k following the star is larger
- than 1, with Bonferroni correction by the number of instances. A ↓ indicates the same tested against the best
+ than 1, with Bonferroni correction of 48. A ↓ indicates the same tested against the best
algorithm of BBOB-2009.Best results are printed in bold.
@@ -170,7 +170,7 @@
Entries, succeeded by a star, are statistically significantly better (according to
the rank-sum test) when compared to all other algorithms of the table, with
p = 0.05 or p = 10−k when the number k following the star is larger
- than 1, with Bonferroni correction by the number of instances. A ↓ indicates the same tested against the best
+ than 1, with Bonferroni correction of 48. A ↓ indicates the same tested against the best
algorithm of BBOB-2009.Best results are printed in bold.
@@ -341,7 +341,7 @@
Entries, succeeded by a star, are statistically significantly better (according to
the rank-sum test) when compared to all other algorithms of the table, with
p = 0.05 or p = 10−k when the number k following the star is larger
- than 1, with Bonferroni correction by the number of instances. A ↓ indicates the same tested against the best
+ than 1, with Bonferroni correction of 48. A ↓ indicates the same tested against the best
algorithm of BBOB-2009.Best results are printed in bold.
@@ -360,7 +360,7 @@
Entries, succeeded by a star, are statistically significantly better (according to
the rank-sum test) when compared to all other algorithms of the table, with
p = 0.05 or p = 10−k when the number k following the star is larger
- than 1, with Bonferroni correction by the number of instances. A ↓ indicates the same tested against the best
+ than 1, with Bonferroni correction of 48. A ↓ indicates the same tested against the best
algorithm of BBOB-2009.Best results are printed in bold.
@@ -425,9 +425,9 @@
Empirical cumulative distribution functions (ECDF), plotting the fraction of
trials with an outcome not larger than the respective value on the x-axis.
Left subplots: ECDF of the number of function evaluations (FEvals) divided by search space dimension D,
- to fall below HVref+∆f with ∆f
+ to fall below Iref+∆f with ∆f
=10k, where k is the first value in the legend.
- The thick red line represents the most difficult target value HVref+10−5. Legends indicate for each target the number of functions that were solved in at
+ The thick red line represents the most difficult target value Iref+10−5. Legends indicate for each target the number of functions that were solved in at
least one trial within the displayed budget. Right subplots: ECDF of the
best achieved ∆f
for running times of 0.5D, 1.2D, 3D, 10D, 100D, 1000D,...
@@ -445,7 +445,7 @@
of run lengths and speed-up ratios in 5-D (left) and 20-D (right).
Left sub-columns: ECDF of
the number of function evaluations divided by dimension D
- (FEvals/D) to reach a target value HVref+∆f with ∆f
+ (FEvals/D) to reach a target value Iref+∆f with ∆f
=10k, where
k is given by the first value in the legend, for
algorithmA (°) and algorithmB (♦) . Right sub-columns:
@@ -459,7 +459,7 @@
##bbobppfigdimlegendbiobjfixed##
- Scaling of runtime to reach HVref+10# with dimension;
+ Scaling of runtime to reach Iref+10# with dimension;
runtime is measured in number of f-evaluations and # is given in the legend;
Lines: average runtime (aRT);
Cross (+): median runtime of successful runs to reach the most difficult
@@ -481,7 +481,7 @@
90%-tile of (bootstrapped) runtimes is shown for the different
target ∆f-values as shown in the top row.
#succ is the number of trials that reached the last target
- HVref+ 10−5.
+ Iref+ 10−5.
The median number of conducted function evaluations is additionally given in
italics, if the target in the last column was never reached.
@@ -495,7 +495,7 @@
90%-tile of (bootstrapped) runtimes is shown for the different
target ∆f-values as shown in the top row.
#succ is the number of trials that reached the last target
- HVref+ 10−5.
+ Iref+ 10−5.
The median number of conducted function evaluations is additionally given in
italics, if the last target was never reached.
1:algorithmAshort is algorithmA and 2:algorithmBshort is algorithmB.
@@ -510,15 +510,15 @@
in number of function evaluations, in dimension 5. For each function, the aRT
and, in braces as dispersion measure, the half difference between 10 and
90%-tile of (bootstrapped) runtimes is shown for the different
- target ∆f-values as shown in the top row.
+ target ∆I-values as shown in the top row.
#succ is the number of trials that reached the last target
- HVref+ 10−5.
+ Iref+ 10−5.
The median number of conducted function evaluations is additionally given in
italics, if the target in the last column was never reached.
Entries, succeeded by a star, are statistically significantly better (according to
the rank-sum test) when compared to all other algorithms of the table, with
p = 0.05 or p = 10−k when the number k following the star is larger
- than 1, with Bonferroni correction by the number of instances. Best results are printed in bold.
+ than 1, with Bonferroni correction of 110. Best results are printed in bold.
##bbobpptablesmanylegendbiobjfixed20##
@@ -527,15 +527,15 @@
in number of function evaluations, in dimension 20. For each function, the aRT
and, in braces as dispersion measure, the half difference between 10 and
90%-tile of (bootstrapped) runtimes is shown for the different
- target ∆f-values as shown in the top row.
+ target ∆I-values as shown in the top row.
#succ is the number of trials that reached the last target
- HVref+ 10−5.
+ Iref+ 10−5.
The median number of conducted function evaluations is additionally given in
italics, if the target in the last column was never reached.
Entries, succeeded by a star, are statistically significantly better (according to
the rank-sum test) when compared to all other algorithms of the table, with
p = 0.05 or p = 10−k when the number k following the star is larger
- than 1, with Bonferroni correction by the number of instances. Best results are printed in bold.
+ than 1, with Bonferroni correction of 110. Best results are printed in bold.
##bbobppscatterlegendbiobjfixed##
@@ -583,5 +583,5 @@
TEX
by
TTH,
-version 4.08.
On 04 Apr 2016, 23:04.
+version 4.08.
On 11 May 2016, 00:57.