diff --git a/pypesto/optimize/ess/ess.py b/pypesto/optimize/ess/ess.py
index 2a86e18d9..734fb668b 100644
--- a/pypesto/optimize/ess/ess.py
+++ b/pypesto/optimize/ess/ess.py
@@ -401,7 +401,6 @@ def _create_result(self) -> pypesto.Result:
         for i, optimizer_result in enumerate(self.local_solutions):
             i_result += 1
             optimizer_result.id = f"Local solution {i}"
-            optimizer_result.optimizer = str(self.local_optimizer)
             result.optimize_result.append(optimizer_result)
 
         if self._result_includes_refset:
diff --git a/pypesto/optimize/optimizer.py b/pypesto/optimize/optimizer.py
index b05570e73..4e965f3d8 100644
--- a/pypesto/optimize/optimizer.py
+++ b/pypesto/optimize/optimizer.py
@@ -39,7 +39,7 @@ def __init__(self, optimizer: str):
 def hierarchical_decorator(minimize):
     """Add inner parameters to the optimizer result.
 
-    Default decorator for the minimize() method.
+    Default decorator for the :meth:`Optimizer.minimize` method.
     """
 
     @wraps(minimize)
@@ -81,7 +81,7 @@ def wrapped_minimize(
 def history_decorator(minimize):
     """Initialize and extract information stored in the history.
 
-    Default decorator for the minimize() method.
+    Default decorator for the :meth:`Optimizer.minimize` method.
     """
 
     @wraps(minimize)
@@ -140,7 +140,11 @@ def wrapped_minimize(
                 logger.error(f"start {id} failed:\n{trace}")
 
                 result = OptimizerResult(
-                    x0=x0, exitflag=-1, message=str(err), id=id
+                    x0=x0,
+                    exitflag=-1,
+                    message=str(err),
+                    id=id,
+                    optimizer=str(self),
                 )
             else:
                 raise
@@ -163,7 +167,7 @@ def wrapped_minimize(
 def time_decorator(minimize):
     """Measure time of optimization.
 
-    Default decorator for the minimize() method to take time.
+    Default decorator for the :meth:`Optimizer.minimize` method to take time.
     Currently, the method time.time() is used, which measures
     the wall-clock time.
     """
@@ -196,8 +200,8 @@ def wrapped_minimize(
 def fix_decorator(minimize):
     """Include also fixed parameters in the result arrays of minimize().
 
-    Default decorator for the minimize() method (nans will be inserted in the
-    derivatives).
+    Default decorator for the :meth:`Optimizer.minimize` method (nans will be
+    inserted in the derivatives).
     """
 
     @wraps(minimize)
@@ -523,6 +527,7 @@ def fun(x):
             hess=getattr(res, "hess", None),
             exitflag=res.status,
             message=res.message,
+            optimizer=str(self),
         )
 
         return optimizer_result
@@ -612,7 +617,10 @@ def minimize(
 
         # the ipopt return object is a scipy.optimize.OptimizeResult
         return OptimizerResult(
-            x=ret.x, exitflag=ret.status, message=ret.message
+            x=ret.x,
+            exitflag=ret.status,
+            message=ret.message,
+            optimizer=str(self),
         )
 
     def is_least_squares(self):
@@ -630,7 +638,7 @@ def __init__(self, options: dict = None):
         if self.options is None:
             self.options = DlibOptimizer.get_default_options(self)
         elif "maxiter" not in self.options:
-            raise KeyError("Dlib options are missing the key word " "maxiter.")
+            raise KeyError("Dlib options are missing the keyword maxiter.")
 
     def __repr__(self) -> str:
         rep = f"<{self.__class__.__name__}"
@@ -677,7 +685,7 @@ def get_fval_vararg(*x):
             0.002,
         )
 
-        optimizer_result = OptimizerResult()
+        optimizer_result = OptimizerResult(optimizer=str(self))
 
         return optimizer_result
 
@@ -737,7 +745,9 @@ def minimize(
             problem.objective.get_fval, lb, ub, **self.options
         )
 
-        optimizer_result = OptimizerResult(x=np.array(xopt), fval=fopt)
+        optimizer_result = OptimizerResult(
+            x=np.array(xopt), fval=fopt, optimizer=str(self)
+        )
 
         return optimizer_result
 
@@ -821,7 +831,7 @@ def minimize(
         )
 
         optimizer_result = OptimizerResult(
-            x=np.array(result[0]), fval=result[1]
+            x=np.array(result[0]), fval=result[1], optimizer=str(self)
         )
 
         return optimizer_result
@@ -901,7 +911,7 @@ def minimize(
         )
 
         optimizer_result = OptimizerResult(
-            x=np.array(result.x), fval=result.fun
+            x=np.array(result.x), fval=result.fun, optimizer=str(self)
         )
 
         return optimizer_result
@@ -1019,6 +1029,7 @@ def successively_working_fval(swarm: np.ndarray) -> np.ndarray:
         optimizer_result = OptimizerResult(
             x=pos,
             fval=float(cost),
+            optimizer=str(self),
         )
 
         return optimizer_result
@@ -1249,6 +1260,7 @@ def nlopt_objective(x, grad):
             fval=opt.last_optimum_value(),
             message=msg,
             exitflag=opt.last_optimize_result(),
+            optimizer=str(self),
         )
 
         return optimizer_result
@@ -1433,6 +1445,7 @@ def minimize(
             hess=opt.hess,
             message=msg,
             exitflag=opt.exitflag,
+            optimizer=str(self),
         )
 
         return optimizer_result
diff --git a/pypesto/optimize/task.py b/pypesto/optimize/task.py
index 10ae83dd8..7482097e1 100644
--- a/pypesto/optimize/task.py
+++ b/pypesto/optimize/task.py
@@ -63,7 +63,6 @@ def execute(self) -> OptimizerResult:
             history_options=self.history_options,
             optimize_options=self.optimize_options,
         )
-        optimizer_result.optimizer = str(self.optimizer)
 
         if not self.optimize_options.report_hess:
             optimizer_result.hess = None
diff --git a/test/optimize/test_optimize.py b/test/optimize/test_optimize.py
index 48ebdea55..0ddc3735b 100644
--- a/test/optimize/test_optimize.py
+++ b/test/optimize/test_optimize.py
@@ -308,6 +308,7 @@ def check_minimize(problem, library, solver, allow_failed_starts=False):
     ]:
         assert np.isfinite(result.optimize_result.list[0]["fval"])
         assert result.optimize_result.list[0]["x"] is not None
+        assert result.optimize_result.list[0]["optimizer"] is not None
 
 
 def test_trim_results(problem):