Merge pull request #940 from numbbo/devel-test3
Devel test3
dtusar committed Mar 29, 2016
2 parents f0dad6b + 7c3186b commit f6b8b13
Showing 80 changed files with 176,399 additions and 1,676 deletions.
29 changes: 29 additions & 0 deletions code-experiments/build/matlab/exampleexperiment.m
@@ -40,6 +40,12 @@
% for fewer output than 'info'.
cocoSetLogLevel('info');

+% keep track of problem dimension and #funevals to print timing information:
+printeddim = 1;
+doneEvalsAfter = 0; % summed function evaluations for a single problem
+doneEvalsTotal = 0; % summed function evaluations per dimension
+printstring = '\n'; % store strings to be printed until experiment is finished
+
%%%%%%%%%%%%%%%%%%%%%%%%%
% Run Experiment %
%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -51,6 +57,20 @@
end
dimension = cocoProblemGetDimension(problem);

+  % printing
+  if printeddim < dimension
+    if printeddim > 1
+      elapsedtime = toc;
+      printstring = strcat(printstring, ...
+          sprintf(" COCO TIMING: dimension %d finished in %e seconds/evaluation\n", ...
+          printeddim, elapsedtime/double(doneEvalsTotal)));
+      tic;
+    end
+    doneEvalsTotal = 0;
+    printeddim = dimension;
+    tic;
+  end
+
% restart functionality: do at most NUM_OF_INDEPENDENT_RESTARTS+1
% independent runs until budget is used:
i = -1; % count number of independent restarts
@@ -85,6 +105,15 @@
break;
end
end
+
+  doneEvalsTotal = doneEvalsTotal + doneEvalsAfter;
end
+
+elapsedtime = toc;
+printstring = strcat(printstring, ...
+    sprintf(" COCO TIMING: dimension %d finished in %e seconds/evaluation\n", ...
+    printeddim, elapsedtime/double(doneEvalsTotal)));
+fprintf(printstring);
+
cocoObserverFree(observer);
cocoSuiteFree(suite);
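
The additions above all serve one bookkeeping pattern: sum the function evaluations spent in the current dimension and, whenever the suite moves on to a larger dimension (and once more after the loop ends), report the elapsed wall-clock time per evaluation. A minimal sketch of the same logic in plain Python, with a hypothetical problem stream standing in for the COCO suite (all names and numbers below are illustrative, not part of the commit):

import time

# Hypothetical stand-in for iterating a COCO suite: (dimension, evaluations
# consumed) per problem, ordered by dimension as suites are.
problem_stream = [(2, 100), (2, 120), (3, 150), (3, 160), (5, 300)]

printed_dim = 1        # dimension currently being timed
done_evals_total = 0   # summed evaluations for that dimension
start = time.perf_counter()

for dimension, done_evals_after in problem_stream:
    if printed_dim < dimension:
        if printed_dim > 1:
            elapsed = time.perf_counter() - start
            print('COCO TIMING: dimension %d finished in %e seconds/evaluation'
                  % (printed_dim, elapsed / done_evals_total))
        done_evals_total = 0
        printed_dim = dimension
        start = time.perf_counter()  # restart the timer for the new dimension
    done_evals_total += done_evals_after

# the last dimension is reported after the loop, as in the MATLAB code above
elapsed = time.perf_counter() - start
print('COCO TIMING: dimension %d finished in %e seconds/evaluation'
      % (printed_dim, elapsed / done_evals_total))

The MATLAB version buffers its reports in printstring and prints them only at the end ("store strings to be printed until experiment is finished"), presumably so that the timing lines do not interleave with the logger output.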
@@ -10,10 +10,10 @@
% Experiment Parameters %
%%%%%%%%%%%%%%%%%%%%%%%%%
BUDGET_MULTIPLIER = 2; % algorithm runs for BUDGET_MULTIPLIER*dimension funevals
-NUM_OF_INDEPENDENT_RESTARTS = 0; % number of independent algorithm restarts;
-                                 % if >0, make sure that the algorithm is not
-                                 % always doing the same thing in each run
-                                 % (typically trivial for randomized algorithms)
+NUM_OF_INDEPENDENT_RESTARTS = 1e9; % number of independent algorithm restarts;
+                                   % if >0, make sure that the algorithm is not
+                                   % always doing the same thing in each run
+                                   % (typically trivial for randomized algorithms)

%%%%%%%%%%%%%%%%%%%%%%%%%
% Prepare Experiment %
@@ -33,6 +33,12 @@
% for fewer output than 'info'.
cocoCall('cocoSetLogLevel', 'info');

+% keep track of problem dimension and #funevals to print timing information:
+printeddim = 1;
+doneEvalsAfter = 0; % summed function evaluations for a single problem
+doneEvalsTotal = 0; % summed function evaluations per dimension
+
+
%%%%%%%%%%%%%%%%%%%%%%%%%
% Run Experiment %
%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -44,6 +50,20 @@
end
dimension = cocoCall('cocoProblemGetDimension', problem);

+  % printing timing information
+  if printeddim < dimension
+    if printeddim > 1
+      elapsedtime = toc;
+      fprintf("\n COCO TIMING: dimension %d finished in %e seconds/evaluation\n", ...
+          printeddim, elapsedtime/double(doneEvalsTotal));
+      tic;
+    end
+    doneEvalsTotal = 0;
+    printeddim = dimension;
+    tic;
+  end
+
+
% restart functionality: do at most NUM_OF_INDEPENDENT_RESTARTS+1
% independent runs until budget is used:
i = -1; % count number of independent restarts
@@ -79,5 +99,9 @@
end
end

+elapsedtime = toc;
+fprintf("\n COCO TIMING: dimension %d finished in %e seconds/evaluation\n", ...
+    printeddim, elapsedtime/double(doneEvalsTotal));
+
cocoCall('cocoObserverFree', observer);
cocoCall('cocoSuiteFree', suite);
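
The one functional change in this file raises NUM_OF_INDEPENDENT_RESTARTS from 0 to 1e9. As the surrounding comments state, at most NUM_OF_INDEPENDENT_RESTARTS+1 independent runs are performed until the budget is used, so the evaluation budget, not the restart cap, remains the binding stop criterion, and a very large cap is harmless. A minimal Python sketch of that control flow (the random per-run cost is a hypothetical stand-in for an optimizer run):

import random

BUDGET_MULTIPLIER = 2                   # as in the experiment parameters above
NUM_OF_INDEPENDENT_RESTARTS = int(1e9)

def run_with_restarts(dimension):
    # at most NUM_OF_INDEPENDENT_RESTARTS + 1 independent runs, stopping
    # as soon as the evaluation budget is exhausted
    budget = BUDGET_MULTIPLIER * dimension
    done_evals, restarts = 0, -1
    while done_evals < budget and restarts < NUM_OF_INDEPENDENT_RESTARTS:
        restarts += 1
        # hypothetical cost of one (re)started run within the remaining budget
        done_evals += random.randint(1, budget - done_evals)
    return done_evals, restarts

print(run_with_restarts(10))  # budget of 20 evaluations; restart count varies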
12 changes: 6 additions & 6 deletions code-postprocessing/bbob_pproc/comp2/pptable2.py
@@ -31,20 +31,20 @@ def get_table_caption():
"""

table_caption_one = r"""%
-Average running time (aRT in number of function
-evaluations) divided by the respective best aRT measured during BBOB-2009 in
+Average running time (\aRT\ in number of function
+evaluations) divided by the respective best \aRT\ measured during BBOB-2009 in
dimensions 5 (left) and 20 (right).
-The aRT and in braces, as dispersion measure, the half difference between 90 and
-10\%-tile of bootstrapped run lengths appear for each algorithm and
+The \aRT\ and in braces, as dispersion measure, the half difference between 10
+and 90\%-tile of bootstrapped run lengths appear for each algorithm and
"""
table_caption_two1 = r"""%
-target, the corresponding best aRT
+target, the corresponding best \aRT\
in the first row. The different target \Df-values are shown in the top row.
\#succ is the number of trials that reached the (final) target
$\fopt + """ + genericsettings.current_testbed.hardesttargetlatex + r"""$.
"""
table_caption_two2 = r"""%
-run-length based target, the corresponding best aRT
+run-length based target, the corresponding best \aRT\
(preceded by the target \Df-value in \textit{italics}) in the first row.
\#succ is the number of trials that reached the target value of the last column.
"""
29 changes: 27 additions & 2 deletions code-postprocessing/bbob_pproc/compall/ppfigs.py
@@ -80,8 +80,8 @@ def ecdfs_figure_caption(target):
r"Bootstrapped empirical cumulative distribution of the number " +
r"of objective function evaluations divided by dimension " +
r"(FEvals/DIM) for " +
-str(len(genericsettings.current_testbed.pprldmany_target_range_latex)) +
-r" targets in " +
+str(len(genericsettings.current_testbed.pprldmany_target_values)) +
+r" targets with target precision in " +
str(genericsettings.current_testbed.pprldmany_target_range_latex) +
r" for all functions and subgroups in #1-D. " + ( best2009text
if genericsettings.current_testbed.name != 'bbob-biobj' else "")
@@ -134,7 +134,32 @@ def ecdfs_figure_caption_html(target, dimension):
s = htmldesc.getValue('##bbobECDFslegendfixed%d##' % dimension)
return s

+def get_ecdfs_single_fcts_caption():
+    ''' For the moment, only the bi-objective case is covered! '''
+    s = (r"""Empirical cumulative distribution of simulated (bootstrapped) runtimes in number
+of objective function evaluations divided by dimension (FEvals/DIM) for the $""" +
+         str(len(genericsettings.current_testbed.pprldmany_target_values)) +
+         r"$ targets " +
+         str(genericsettings.current_testbed.pprldmany_target_range_latex) +
+         r" for functions $f_1$ to $f_{16}$ and all dimensions. "
+         )
+    return s
+
+def get_ecdfs_all_groups_caption():
+    ''' For the moment, only the bi-objective case is covered! '''
+    # s = (r"Bootstrapped empirical cumulative distribution of the number " +
+    #      r"of objective function evaluations divided by dimension " +
+    #      r"(FEvals/DIM) for " +
+    s = (r"""Empirical cumulative distribution of simulated (bootstrapped) runtimes, measured in number
+of objective function evaluations, divided by dimension (FEvals/DIM) for the $""" +
+         str(len(genericsettings.current_testbed.pprldmany_target_values)) +
+         r"$ targets " +
+         str(genericsettings.current_testbed.pprldmany_target_range_latex) +
+         r" for all function groups and all dimensions. The aggregation" +
+         r" over all 55 functions is shown in the last plot."
+         )
+    return s

def plotLegend(handles, maxval=None):
"""Display right-side legend.
15 changes: 10 additions & 5 deletions code-postprocessing/bbob_pproc/compall/pprldmany.py
@@ -778,17 +778,22 @@ def algname_to_label(algname, dirname=None):
dictFG = pp.dictAlgByFuncGroup(dictAlg)
dictKey = dictFG.keys()[0]
functionGroups = dictAlg[dictAlg.keys()[0]].getFuncGroups()
-text = '%s, %d-D\n%s' % (functionGroups[dictKey],
-                         dimList[0],
-                         genericsettings.current_testbed.name)
+text = '%s\n%s, %d-D' % (genericsettings.current_testbed.name,
+                         functionGroups[dictKey],
+                         dimList[0])
else:
text = '%s - %s' % (genericsettings.current_testbed.name,
ppfig.consecutiveNumbers(sorted(dictFunc.keys()), 'f'))
if not (plotType == PlotType.DIM):
text += ', %d-D' % dimList[0]

+# add number of instances
+text += '\n'
+for alg in algorithms_with_data:
+    text += '%d, ' % len(dictAlgperFunc[alg][0].instancenumbers)
+text = text.rstrip(', ')
+text += ' instances'
plt.text(0.01, 0.98, text, horizontalalignment="left",
-         verticalalignment="top", transform=plt.gca().transAxes)
+         verticalalignment="top", transform=plt.gca().transAxes, size='small')
if len(dictFunc) == 1:
plt.title(' '.join((str(dictFunc.keys()[0]),
genericsettings.current_testbed.short_names[dictFunc.keys()[0]])))
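
The lines added above append a per-algorithm instance count to the plot annotation. A small self-contained illustration of the string being built (header text and counts here are hypothetical, not taken from the commit):

# hypothetical annotation header as built earlier in the function
text = 'bbob-biobj\nseparable fcts, 5-D'
instance_counts = [15, 15, 10]  # hypothetical len(...instancenumbers) values

# same construction as the added lines: counts joined by ', ', then ' instances'
text += '\n'
for n in instance_counts:
    text += '%d, ' % n
text = text.rstrip(', ')
text += ' instances'
print(text)  # last line reads: 15, 15, 10 instances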
27 changes: 15 additions & 12 deletions code-postprocessing/bbob_pproc/compall/pptables.py
@@ -25,35 +25,38 @@
def get_table_caption():
""" Sets table caption, based on the genericsettings.current_testbed
and genericsettings.runlength_based_targets.
+    TODO: \hvref and \fopt should be defined via the current_testbed,
+    preferably with a single latex command.
"""

table_caption_one = r"""%
-Average running time (aRT in number of function
-evaluations) divided by the respective best aRT measured during BBOB-2009 in
+Average running time (\aRT\ in number of function
+evaluations) divided by the respective best \aRT\ measured during BBOB-2009 in
#1.
-The aRT and in braces, as dispersion measure, the half difference between 90 and
-10\%-tile of bootstrapped run lengths appear for each algorithm and
+The \aRT\ and in braces, as dispersion measure, the half difference between
+10 and 90\%-tile of bootstrapped run lengths appear for each algorithm and
"""
table_caption_two1 = r"""%
-target, the corresponding best aRT
+target, the corresponding best \aRT\
in the first row. The different target \Df-values are shown in the top row.
\#succ is the number of trials that reached the (final) target
$\fopt + """ + genericsettings.current_testbed.hardesttargetlatex + r"""$.
"""
table_caption_two2 = r"""%
-run-length based target, the corresponding best aRT
-(preceded by the target \Df-value in \textit{italics}) in the first row.
+run-length based target, the corresponding best \aRT\
+(preceded by the target \Df-value in \textit{italics}) in the first row.
\#succ is the number of trials that reached the target value of the last column.
"""
table_caption_two_bi = r"""%
-target, the corresponding best aRT
+target, the corresponding best \aRT\
in the first row. The different target \Df-values are shown in the top row.
\#succ is the number of trials that reached the (final) target
$\hvref + """ + genericsettings.current_testbed.hardesttargetlatex + r"""$.
"""
table_caption_rest = r"""%
-The median number of conducted function evaluations is additionally given in
-\textit{italics}, if the target in the last column was never reached.
+The median number of conducted function evaluations is additionally given in
+\textit{italics}, if the target in the last column was never reached.
Entries, succeeded by a star, are statistically significantly better (according to
the rank-sum test) when compared to all other algorithms of the table, with
$p = 0.05$ or $p = 10^{-k}$ when the number $k$ following the star is larger
@@ -411,8 +414,8 @@ def main(dictAlg, sortedAlgs, isBiobjective, outputdir='.', verbose=True, functi
extraeol.append(r'\hline')
# extraeol.append(r'\hline\arrayrulecolor{tableShade}')

-curline = [r'aRT$_{\text{best}}$'] if with_table_heading else [r'\textbf{f%d}' % df[1]]
-replaceValue = 'aRT<sub>best</sub>' if with_table_heading else ('<b>f%d</b>' % df[1])
+curline = [r'\aRT{}$_{\text{best}}$'] if with_table_heading else [r'\textbf{f%d}' % df[1]]
+replaceValue = '\aRT{}<sub>best</sub>' if with_table_heading else ('<b>f%d</b>' % df[1])
curlineHtml = [item.replace('REPLACEH', replaceValue) for item in curlineHtml]
if bestalgentries:
if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues):
6 changes: 3 additions & 3 deletions code-postprocessing/bbob_pproc/genericsettings.py
@@ -311,7 +311,7 @@ def __init__(self, targetValues):
self.info_filename = 'GECCOBBOBbenchmarkinfos.txt'
self.name = testbed_name_single
self.short_names = {}
-self.hardesttargetlatex = '10^{-8}'
+self.hardesttargetlatex = '10^{-8}'  # used for ppfigs and pptable
self.ppfigs_ftarget = 1e-8
self.ppfigdim_target_values = targetValues((10, 1, 1e-1, 1e-2, 1e-3, 1e-5, 1e-8)) # possibly changed in config
self.pprldistr_target_values = targetValues((10., 1e-1, 1e-4, 1e-8)) # possibly changed in config
@@ -348,13 +348,13 @@ def __init__(self, targetValues):
self.info_filename = 'GECCOBBOBbenchmarkinfos.txt'
self.name = testbed_name_bi
self.short_names = {}
-self.hardesttargetlatex = '10^{-5}'
+self.hardesttargetlatex = '10^{-5}'  # used for ppfigs and pptable
self.ppfigs_ftarget = 1e-5
self.ppfigdim_target_values = targetValues((1e-1, 1e-2, 1e-3, 1e-4, 1e-5)) # possibly changed in config
self.pprldistr_target_values = targetValues((1e-1, 1e-2, 1e-3, 1e-5)) # possibly changed in config
target_values = np.append(np.append(10**np.arange(0, -5.1, -0.1), [0]), -10**np.arange(-5, -3.9, 0.2))
self.pprldmany_target_values = targetValues(target_values) # possibly changed in config
-self.pprldmany_target_range_latex = '$10^{[-5..0]}$'
+self.pprldmany_target_range_latex = '$\{-10^{-4}, -10^{-4.2}, $ $-10^{-4.4}, -10^{-4.6}, -10^{-4.8}, -10^{-5}, 0, 10^{-5}, 10^{-4.9}, 10^{-4.8}, \dots, 10^{-0.1}, 10^0\}$'
self.rldValsOfInterest = (1e-1, 1e-2, 1e-3, 1e-4, 1e-5) # possibly changed in config
self.ppfvdistr_min_target = 1e-5
self.functions_with_legend = (1, 30, 31, 55)
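
The rewritten pprldmany_target_range_latex spells out the target set that the target_values expression a few lines above actually constructs. The construction is easy to check in isolation (a standalone snippet, assuming only numpy; the printed values match the braces in the new string):

import numpy as np

# same expression as assigned to target_values above: 51 positive targets
# from 10^0 down to 10^-5 in multiplicative steps of 10^-0.1, the value 0,
# and six negative targets -10^-5, -10^-4.8, ..., -10^-4
target_values = np.append(np.append(10**np.arange(0, -5.1, -0.1), [0]),
                          -10**np.arange(-5, -3.9, 0.2))
print(len(target_values))         # 58 targets in total
print(sorted(target_values)[:7])  # -10^-4, ..., -10^-5, 0: as in the string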
