Remove the everest restart functionality
The restart functionality was added to support a research project.
However, it turned out to be unnecessary for that project. Since
functionality should not be added solely for such a purpose, and the
general usefulness of the feature is unclear, it is removed.
verveerpj committed Sep 27, 2024
1 parent e5190f6 commit 02091d4
Showing 9 changed files with 48 additions and 280 deletions.
41 changes: 0 additions & 41 deletions docs/everest/config_generated.rst
@@ -577,47 +577,6 @@ Optimizer options
The default is to use parallel evaluation if supported.


**restart (optional)**
Type: *Optional[RestartConfig]*

Optional restarting configuration.

Restarting the optimization from scratch at a new initial point can be
beneficial for some optimization algorithms. This option can be used to
direct Everest to restart the optimization once or multiple times.

**max_restarts (optional)**
Type: *int*

The maximum number of restarts.

Sets the maximum number of times that the optimization process will be
restarted.

The default is a single restart.


**restart_from (required)**
Type: *Literal['initial', 'last', 'optimal', 'last_optimal']*

Restart from the initial, last, optimal, or last optimal controls.

When restarting, the initial values for the new run are set according to this field:
- initial: Use the initial controls from the configuration
- last: Use the last controls used by the previous run
- optimal: Use the controls from the optimal solution found so far
- last_optimal: Use the controls from the optimal solution found in the previous run

When restarting from optimal values, the best result obtained so far (either
overall, or in the last restart run) is used, which is defined as the result
with the maximal weighted total objective value. If the `constraint_tolerance`
option is set in the `optimization` section, this tolerance will be used to
exclude results that violate a constraint.





objective_functions (required)
------------------------------
2 changes: 0 additions & 2 deletions src/everest/config/__init__.py
@@ -15,7 +15,6 @@
from .objective_function_config import ObjectiveFunctionConfig
from .optimization_config import OptimizationConfig
from .output_constraint_config import OutputConstraintConfig
from .restart_config import RestartConfig
from .sampler_config import SamplerConfig
from .server_config import ServerConfig
from .simulator_config import SimulatorConfig
@@ -38,7 +37,6 @@
"ObjectiveFunctionConfig",
"OptimizationConfig",
"OutputConstraintConfig",
"RestartConfig",
"SamplerConfig",
"ServerConfig",
"SimulatorConfig",
11 changes: 0 additions & 11 deletions src/everest/config/optimization_config.py
@@ -3,7 +3,6 @@
from pydantic import BaseModel, Field, model_validator

from everest.config.cvar_config import CVaRConfig
from everest.config.restart_config import RestartConfig
from everest.optimizer.utils import get_ropt_plugin_manager


@@ -194,16 +193,6 @@ class OptimizationConfig(BaseModel, extra="forbid"):  # type: ignore
        parallel, if supported by the optimization algorithm.
        The default is to use parallel evaluation if supported.
        """,
    )
    restart: Optional[RestartConfig] = Field(
        default=None,
        description="""Optional restarting configuration.
        Restarting the optimization from scratch at a new initial point can be
        beneficial for some optimization algorithms. This option can be used to
        direct Everest to restart the optimization once or multiple times.
        """,
    )

39 changes: 0 additions & 39 deletions src/everest/config/restart_config.py

This file was deleted.
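
The deleted module is not shown in this view; the following is a hedged
reconstruction of what RestartConfig plausibly looked like, inferred only from
the generated documentation removed above. The default of 1 for max_restarts
is an assumption based on "The default is a single restart".

# Hypothetical reconstruction of the deleted RestartConfig model, inferred
# from the docs/everest/config_generated.rst entries removed in this commit.
from typing import Literal

from pydantic import BaseModel, Field


class RestartConfig(BaseModel, extra="forbid"):  # type: ignore
    max_restarts: int = Field(
        default=1,  # assumption: the docs say the default is a single restart
        description="The maximum number of restarts.",
    )
    restart_from: Literal["initial", "last", "optimal", "last_optimal"] = Field(
        description="Restart from the initial, last, optimal, or last optimal controls.",
    )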

8 changes: 0 additions & 8 deletions src/everest/suite.py
@@ -443,14 +443,6 @@ def _configure_optimizer(self, simulator: Simulator) -> OptimizationPlanRunner:
            seed=self._config.environment.random_seed,
        )

        # Configure restarting:
        if self.config.optimization.restart is not None:
            optimizer.repeat(
                iterations=self.config.optimization.restart.max_restarts + 1,
                restart_from=self.config.optimization.restart.restart_from,
                metadata_var="restart",
            )

        # Initialize output tables. `min_header_len` is set to ensure that all
        # tables have the same number of header lines, simplifying code that
        # reads them as fixed width tables. `maximize` is set because ropt
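
For reference, a hedged sketch of what the removed wiring did at runtime,
composed only from calls visible elsewhere in this diff; the literal values
stand in for the removed config fields.

# Hypothetical sketch of the removed restart wiring; values are placeholders.
optimizer = OptimizationPlanRunner(
    enopt_config=ropt_config,
    evaluator=simulator,
    seed=config.environment.random_seed,
)
optimizer.repeat(
    iterations=3,              # max_restarts + 1, per the removed block above
    restart_from="optimal",    # or "initial", "last", "last_optimal"
    metadata_var="restart",
)
optimizer.run()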
95 changes: 0 additions & 95 deletions test-data/everest/math_func/config_restart.yml

This file was deleted.

56 changes: 0 additions & 56 deletions tests/everest/test_cache.py

This file was deleted.

28 changes: 0 additions & 28 deletions tests/everest/test_restart.py

This file was deleted.

48 changes: 48 additions & 0 deletions tests/everest/test_simulator_cache.py
@@ -0,0 +1,48 @@
from ropt.plan import OptimizationPlanRunner

from everest.config import EverestConfig, SimulatorConfig
from everest.optimizer.everest2ropt import everest2ropt
from everest.simulator import Simulator
from tests.everest.utils import relpath, tmp

CONFIG_PATH = relpath("..", "..", "test-data", "everest", "math_func")
CONFIG_FILE = "config_advanced_scipy.yml"


def test_simulator_cache(monkeypatch):
    n_evals = 0
    original_call = Simulator.__call__

    def new_call(*args):
        nonlocal n_evals
        result = original_call(*args)
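        # Count only fresh evaluations; negative evaluation IDs appear to mark
        # results served from the cache.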
        n_evals += (result.evaluation_ids >= 0).sum()
        return result

    monkeypatch.setattr(Simulator, "__call__", new_call)

    with tmp(CONFIG_PATH):
        config = EverestConfig.load_file(CONFIG_FILE)
        config.optimization.max_function_evaluations = 2
        config.optimization.perturbation_num = 2
        config.simulator = SimulatorConfig(enable_cache=True)

        ropt_config = everest2ropt(config)
        simulator = Simulator(config)

        # Run once, populating the cache of the simulator:
        OptimizationPlanRunner(
            enopt_config=ropt_config,
            evaluator=simulator,
            seed=config.environment.random_seed,
        ).run()
        assert n_evals == 12

        # Run again with the same simulator:
        n_evals = 0
        OptimizationPlanRunner(
            enopt_config=ropt_config,
            evaluator=simulator,
            seed=config.environment.random_seed,
        ).run()
        assert n_evals == 0
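
Because the second run uses the same seed, it requests the same control
vectors as the first, so every evaluation is served from the populated cache
and no fresh evaluations are counted.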
