diff --git a/test-data/everest/math_func/config_advanced_scipy.yml b/test-data/everest/math_func/config_advanced_scipy.yml
deleted file mode 100644
index 786052a2d7e..00000000000
--- a/test-data/everest/math_func/config_advanced_scipy.yml
+++ /dev/null
@@ -1,92 +0,0 @@
-# This config file is the same as config_advanced.yml, except that it uses the
-# SciPy backend.
-
-wells: []
-
-controls:
-- initial_guess: 0.25
-  max: 1.0
-  min: -1.0
-  name: point
-  perturbation_magnitude: 0.005
-  type: generic_control
-  variables:
-    - name: x
-      index: 0
-    - name: x
-      index: 1
-    - name: x
-      index: 2
-
-objective_functions:
-  -
-    name: distance
-
-input_constraints:
-  -
-    weights:
-      point.x-0: 0
-      point.x-1: 0
-      point.x-2: 1
-    upper_bound: 0.4
-
-output_constraints:
-  - name: x-0_coord
-    lower_bound: 0.1
-    scale: 0.1
-
-
-# Optimal value expected at x=0.1, y=0, z=0.4, with distance 3.72
-
-
-install_jobs:
-  -
-    name: adv_distance3
-    source: jobs/ADV_DISTANCE3
-  -
-    name: adv_dump_controls
-    source: jobs/ADV_DUMP_CONTROLS
-
-
-forward_model:
-  # Compute distance (squared and negated) between 2 points
-  - adv_distance3 --point-file point.json
-    --target-file data/r{{ realization}}/target.json
-    --out distance
-  # Write the value of each control to a separate file
-  - adv_dump_controls --controls-file point.json
-    --out-suffix _coord
-
-
-
-model:
-  realizations: [0, 2]
-  realizations_weights: [ 0.25, 0.75]
-
-
-install_data:
-  -
-    link: false
-    source: r{{ configpath }}/adv_target_r{{ realization }}.json
-    target: data/r{{ realization}}/target.json
-
-
-install_templates: []
-
-optimization:
-  backend: scipy
-  algorithm: SLSQP
-  convergence_tolerance: 0.001
-  constraint_tolerance: 0.001
-  perturbation_num: 7
-  speculative: True
-  max_batch_num: 4
-  backend_options:
-    maxiter: 100
-
-
-environment:
-  log_level: debug
-  random_seed: 123
-  simulation_folder: scratch/advanced/
-  output_folder: everest_output/
diff --git a/test-data/everest/math_func/config_auto_scaled_controls.yml b/test-data/everest/math_func/config_auto_scaled_controls.yml
deleted file mode 100644
index d66b6fc85a0..00000000000
--- a/test-data/everest/math_func/config_auto_scaled_controls.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-controls:
-  -
-    name: point
-    type: generic_control
-    min: -1
-    max: 1
-    initial_guess: 0.2
-    perturbation_magnitude : 0.001
-    auto_scale: True
-    scaled_range: [0.3, 0.7]
-    variables:
-      - name: x
-      - name: y
-      - name: z
-
-
-objective_functions:
-  -
-    name: distance
-
-
-optimization:
-  algorithm: optpp_q_newton
-  convergence_tolerance: 0.001
-  max_batch_num: 10
-
-install_jobs:
-  -
-    name: distance3
-    source: jobs/DISTANCE3
-
-model:
-  realizations: [0]
-
-forward_model:
-  - distance3 --point-file point.json
-    --target 0.5 0.5 0.5
-    --out distance
-    --scaling -1 1 0.3 0.7
-
-environment:
-  simulation_folder: sim_output
-  log_level: debug
-  random_seed: 999
-
-input_constraints:
-  -
-    weights: {point.x: 1.0, point.y: 1.0}
-    upper_bound: 0.5
diff --git a/test-data/everest/math_func/config_cvar.yml b/test-data/everest/math_func/config_cvar.yml
deleted file mode 100644
index bd9353f707a..00000000000
--- a/test-data/everest/math_func/config_cvar.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-controls:
-  -
-    name: point
-    type: generic_control
-    perturbation_magnitude : 0.01
-    min: -2.0
-    max: 2.0
-    variables:
-      - name: x
-        initial_guess: 0.0
-      - name: y
-        initial_guess: 0.0
-      - name: z
-        initial_guess: 0.0
-
-objective_functions:
-  -
-    name: distance
-
-optimization:
-  backend: scipy
-  algorithm: slsqp
-  max_batch_num: 5
-  cvar:
-    percentile: 0.5
-    # number_of_realizations: 1
-
-
-install_jobs:
-  -
-    name: distance3
-    source: jobs/DISTANCE3
-
-model:
-  realizations: [0, 1]
-
-forward_model:
-  - distance3 --point-file point.json --realization r{{realization}}
-    --target 0.5 0.5 0.5
-    --out distance
-
-environment:
-  output_folder: distance_output
-  simulation_folder: sim_output
-  random_seed: 999
diff --git a/test-data/everest/math_func/config_discrete.yml b/test-data/everest/math_func/config_discrete.yml
deleted file mode 100644
index fd1ad8adaa9..00000000000
--- a/test-data/everest/math_func/config_discrete.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-controls:
-  - name: point
-    type: generic_control
-    min: 0
-    max: 10
-    initial_guess: 0
-    control_type: integer
-    variables:
-      - name: x
-      - name: y
-
-objective_functions:
-  - name: func
-
-input_constraints:
-  - weights: { point.x: 1.0, point.y: 1.0 }
-    upper_bound: 10
-
-optimization:
-  backend: scipy
-  algorithm: differential_evolution
-  max_function_evaluations: 4
-  backend_options:
-    seed: 9
-  parallel: False
-
-install_jobs:
-  - name: discrete
-    source: jobs/DISCRETE
-
-model:
-  realizations: [0]
-
-forward_model:
-  - discrete --point-file point.json --out func
-
-environment:
-  simulation_folder: sim_output
-  log_level: debug
-  random_seed: 999
diff --git a/test-data/everest/math_func/config_fm_failure.yml b/test-data/everest/math_func/config_fm_failure.yml
deleted file mode 100644
index 9445a4cffbb..00000000000
--- a/test-data/everest/math_func/config_fm_failure.yml
+++ /dev/null
@@ -1,54 +0,0 @@
-wells: []
-
-controls:
-  -
-    name: point
-    type: generic_control
-    min: -1.0
-    max: 1.0
-    initial_guess: 0
-    perturbation_magnitude : 0.001
-    variables:
-      - name: x
-      - name: y
-      - name: z
-
-objective_functions:
-  -
-    name: distance
-
-install_jobs:
-  -
-    name: distance3
-    source: jobs/DISTANCE3
-  -
-    name: toggle_failure
-    source: jobs/FAIL_SIMULATION
-
-forward_model:
-  - distance3 --point-file point.json
-    --target 0.5 0.5 0.5
-    --out distance
-
-  - toggle_failure --fail simulation_2
-
-
-model:
-  realizations: [0]
-
-optimization:
-  algorithm: optpp_q_newton
-  convergence_tolerance: 0.005
-  min_realizations_success: 1
-  min_pert_success: 1
-  max_iterations: 1
-  perturbation_num: 2
-
-environment:
-  random_seed: 123
-  simulation_folder: scratch/advanced/
-  output_folder: everest_output/
-
-
-simulator:
-  delete_run_path: True
diff --git a/test-data/everest/math_func/config_minimal_slow.yml b/test-data/everest/math_func/config_minimal_slow.yml
deleted file mode 100644
index 4e590d46710..00000000000
--- a/test-data/everest/math_func/config_minimal_slow.yml
+++ /dev/null
@@ -1,46 +0,0 @@
-controls:
-  -
-    name: point
-    type: generic_control
-    min: -1.0
-    max: 1.0
-    initial_guess: 0.1
-    perturbation_magnitude : 0.001
-    variables:
-      - name: x
-      - name: y
-      - name: z
-
-
-objective_functions:
-  -
-    name: distance
-
-
-optimization:
-  algorithm: optpp_q_newton
-  convergence_tolerance: 0.001
-  max_batch_num: 4
-
-
-install_jobs:
-  -
-    name: distance3
-    source: jobs/DISTANCE3
-  -
-    name: sleep
-    source: jobs/SLEEP
-
-model:
-  realizations: [0]
-
-forward_model:
-  - distance3 --point-file point.json
-    --target 0.5 0.5 0.5
-    --out distance
-  - sleep --sleep 10
-
-environment:
-  simulation_folder: sim_output
-  log_level: debug
-  random_seed: 123
diff --git a/test-data/everest/math_func/config_one_batch.yml b/test-data/everest/math_func/config_one_batch.yml
deleted file mode 100644
index 466cf72f779..00000000000
--- a/test-data/everest/math_func/config_one_batch.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-controls:
-  -
-    name: point
-    type: generic_control
-    min: -1.0
-    max: 1.0
-    initial_guess: 0
-    perturbation_magnitude : 0.001
-    variables:
-      - name: x
-      - name: y
-      - name: z
-
-
-objective_functions:
-  -
-    name: distance
-
-
-optimization:
-  algorithm: optpp_q_newton
-  max_batch_num: 1
-
-
-install_jobs:
-  -
-    name: distance3
-    source: jobs/DISTANCE3
-
-model:
-  realizations: [0]
-
-forward_model:
-  - distance3 --point-file point.json
-    --target 0.5 0.5 0.5
-    --out distance
-
-environment:
-  simulation_folder: sim_output
-  log_level: debug
-  random_seed: 999
diff --git a/test-data/everest/math_func/config_remove_run_path.yml b/test-data/everest/math_func/config_remove_run_path.yml
deleted file mode 100644
index 3b6ecda5067..00000000000
--- a/test-data/everest/math_func/config_remove_run_path.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-wells: []
-
-controls:
-  -
-    name: point
-    type: generic_control
-    min: -1.0
-    max: 1.0
-    initial_guess: 0
-    perturbation_magnitude : 0.001
-    variables:
-      - name: x
-      - name: y
-      - name: z
-
-objective_functions:
-  -
-    name: distance
-
-install_jobs:
-  -
-    name: distance3
-    source: jobs/DISTANCE3
-  -
-    name: toggle_failure
-    source: jobs/FAIL_SIMULATION
-
-forward_model:
-  - distance3 --point-file point.json
-    --target 0.5 0.5 0.5
-    --out distance
-
-  - toggle_failure
-
-
-model:
-  realizations: [0]
-
-optimization:
-  algorithm: optpp_q_newton
-  convergence_tolerance: 0.005
-  min_realizations_success: 1
-  min_pert_success: 1
-  max_iterations: 1
-  perturbation_num: 2
-
-environment:
-  log_level: debug
-  random_seed: 123
-  simulation_folder: scratch/advanced/
-  output_folder: everest_output/
-
-
-simulator:
-  delete_run_path: True
diff --git a/test-data/everest/math_func/config_stddev.yml b/test-data/everest/math_func/config_stddev.yml
deleted file mode 100644
index 8616039c7b5..00000000000
--- a/test-data/everest/math_func/config_stddev.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-controls:
-  -
-    name: point
-    type: generic_control
-    perturbation_magnitude : 0.01
-    min: -1.0
-    max: 1.0
-    variables:
-      - name: x
-        initial_guess: 0.0
-      - name: y
-        initial_guess: 0.0
-      - name: z
-        initial_guess: 0.0
-
-objective_functions:
-  -
-    name: distance
-    weight: 1.0
-  -
-    name: stddev
-    weight: 1.0
-    type: stddev
-    alias: distance
-
-optimization:
-  backend: scipy
-  algorithm: slsqp
-  max_batch_num: 5
-  convergence_tolerance: 0.0001
-  perturbation_num: 3
-
-install_jobs:
-  -
-    name: distance3
-    source: jobs/DISTANCE3
-
-model:
-  realizations: [0, 1]
-
-forward_model:
-  - distance3 --point-file point.json --realization r{{realization}}
-    --target 0.5 0.5 0.5
-    --out distance
-
-environment:
-  output_folder: distance_output
-  simulation_folder: sim_output
-  random_seed: 999
diff --git a/test-data/everest/math_func/jobs/SLEEP b/test-data/everest/math_func/jobs/SLEEP
deleted file mode 100755
index 58ed6cdd5c4..00000000000
--- a/test-data/everest/math_func/jobs/SLEEP
+++ /dev/null
@@ -1,4 +0,0 @@
-EXECUTABLE sleep.py
-
-MIN_ARG 0
-MAX_ARG 6
diff --git a/test-data/everest/math_func/jobs/sleep.py b/test-data/everest/math_func/jobs/sleep.py
deleted file mode 100755
index d41e15e522c..00000000000
--- a/test-data/everest/math_func/jobs/sleep.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-import sys
-import time
-
-
-def main(argv):
-    arg_parser = argparse.ArgumentParser()
-    arg_parser.add_argument("--sleep", type=int)
-    options, _ = arg_parser.parse_known_args(args=argv)
-    time.sleep(options.sleep)
-
-
-if __name__ == "__main__":
-    main(sys.argv[1:])
diff --git a/tests/everest/test_api_snapshots.py b/tests/everest/test_api_snapshots.py
index 2a51b6534b3..04640232f92 100644
--- a/tests/everest/test_api_snapshots.py
+++ b/tests/everest/test_api_snapshots.py
@@ -53,15 +53,7 @@ def make_api_snapshot(api) -> dict[str, Any]:
 
 @pytest.mark.parametrize(
     "config_file",
-    [
-        "config_advanced.yml",
-        "config_minimal.yml",
-        "config_multiobj.yml",
-        "config_auto_scaled_controls.yml",
-        "config_cvar.yml",
-        "config_discrete.yml",
-        "config_stddev.yml",
-    ],
+    ["config_advanced.yml", "config_minimal.yml", "config_multiobj.yml"],
 )
 def test_api_snapshots(config_file, snapshot, cached_example):
     config_path, config_file, optimal_result_json = cached_example(
diff --git a/tests/everest/test_cvar.py b/tests/everest/test_cvar.py
index e030532421c..414de3102ff 100644
--- a/tests/everest/test_cvar.py
+++ b/tests/everest/test_cvar.py
@@ -1,21 +1,37 @@
 import pytest
 
 from ert.run_models.everest_run_model import EverestRunModel
-from everest.config import EverestConfig
-
-CONFIG_FILE_CVAR = "config_cvar.yml"
+from everest.config import (
+    CVaRConfig,
+    EverestConfig,
+    ModelConfig,
+    OptimizationConfig,
+)
 
 
+@pytest.mark.integration_test
 def test_mathfunc_cvar(
     copy_math_func_test_data_to_tmp, evaluator_server_config_generator
 ):
-    config = EverestConfig.load_file(CONFIG_FILE_CVAR)
-
+    # Arrange
+    config = EverestConfig.load_file("config_minimal.yml")
+    config.optimization = OptimizationConfig(
+        backend="scipy",
+        algorithm="slsqp",
+        cvar=CVaRConfig(percentile=0.5),
+        max_batch_num=5,
+    )
+    config.model = ModelConfig(realizations=[0, 1])
+    config.forward_model = [
+        "distance3 --point-file point.json --realization --target 0.5 0.5 0.5 --out distance"
+    ]
+
+    # Act
     run_model = EverestRunModel.create(config)
     evaluator_server_config = evaluator_server_config_generator(run_model)
     run_model.run_experiment(evaluator_server_config)
 
-    # Check resulting points
+    # Assert
     x0, x1, x2 = (run_model.result.controls["point_" + p] for p in ["x", "y", "z"])
 
     assert x0 == pytest.approx(0.5, 0.05)
diff --git a/tests/everest/test_detached.py b/tests/everest/test_detached.py
index 6d317d940c5..a3b9f660a1b 100644
--- a/tests/everest/test_detached.py
+++ b/tests/everest/test_detached.py
@@ -16,7 +16,7 @@
     activate_script,
 )
 from ert.scheduler.event import FinishedEvent
-from everest.config import EverestConfig
+from everest.config import EverestConfig, InstallJobConfig
 from everest.config.server_config import ServerConfig
 from everest.config.simulator_config import SimulatorConfig
 from everest.config_keys import ConfigKeys as CK
@@ -49,7 +49,15 @@
 @pytest.mark.fails_on_macos_github_workflow
 @pytest.mark.xdist_group(name="starts_everest")
 async def test_https_requests(copy_math_func_test_data_to_tmp):
-    everest_config = EverestConfig.load_file("config_minimal_slow.yml")
+    everest_config = EverestConfig.load_file("config_minimal.yml")
+    # Overwrite forward_model with a job that does nothing; this test only checks HTTPS requests and server status
+    everest_config.forward_model = ["toggle_failure"]
+    everest_config.install_jobs = [
+        InstallJobConfig(name="toggle_failure", source="jobs/FAIL_SIMULATION")
+    ]
+    # start_server() loads the config from config_path, so overwrite the file on disk
+    everest_config.dump("config_minimal.yml")
+
     status_path = ServerConfig.get_everserver_status_path(everest_config.output_dir)
     expected_server_status = ServerStatus.never_run
     assert expected_server_status == everserver_status(status_path)["status"]
diff --git a/tests/everest/test_discrete.py b/tests/everest/test_discrete.py
index 102e8ad5eab..12d512e8fc8 100644
--- a/tests/everest/test_discrete.py
+++ b/tests/everest/test_discrete.py
@@ -1,17 +1,50 @@
-from ert.run_models.everest_run_model import EverestRunModel
-from everest.config import EverestConfig
+import pytest
 
-CONFIG_DISCRETE = "config_discrete.yml"
+from ert.run_models.everest_run_model import EverestRunModel
+from everest.config import (
+    ControlConfig,
+    EverestConfig,
+    InputConstraintConfig,
+    InstallJobConfig,
+    OptimizationConfig,
+)
 
 
+@pytest.mark.integration_test
 def test_discrete_optimizer(
     copy_math_func_test_data_to_tmp, evaluator_server_config_generator
 ):
-    config = EverestConfig.load_file(CONFIG_DISCRETE)
+    # Arrange
+    config = EverestConfig.load_file("config_minimal.yml")
+    config.controls = [
+        ControlConfig(
+            name="point",
+            type="generic_control",
+            min=0,
+            max=10,
+            control_type="integer",
+            initial_guess=0,
+            variables=[{"name": "x"}, {"name": "y"}],
+        )
+    ]
+    config.input_constraints = [
+        InputConstraintConfig(weights={"point.x": 1.0, "point.y": 1.0}, upper_bound=10)
+    ]
+    config.optimization = OptimizationConfig(
+        backend="scipy",
+        algorithm="differential_evolution",
+        max_function_evaluations=4,
+        parallel=False,
+        backend_options={"seed": 9},
+    )
+    config.install_jobs = [InstallJobConfig(name="discrete", source="jobs/DISCRETE")]
+    config.forward_model = ["discrete --point-file point.json --out distance"]
 
+    # Act
     run_model = EverestRunModel.create(config)
     evaluator_server_config = evaluator_server_config_generator(run_model)
     run_model.run_experiment(evaluator_server_config)
 
+    # Assert
     assert run_model.result.controls["point_x"] == 3
     assert run_model.result.controls["point_y"] == 7
diff --git a/tests/everest/test_everserver.py b/tests/everest/test_everserver.py
index ea496fc4703..12f31f3ba66 100644
--- a/tests/everest/test_everserver.py
+++ b/tests/everest/test_everserver.py
@@ -8,7 +8,7 @@
 from ropt.enums import OptimizerExitCode
 from seba_sqlite.snapshot import SebaSnapshot
 
-from everest.config import EverestConfig, ServerConfig
+from everest.config import EverestConfig, OptimizationConfig, ServerConfig
 from everest.detached import ServerStatus, everserver_status
 from everest.detached.jobs import everserver
 from everest.simulator import JOB_FAILURE, JOB_SUCCESS
@@ -194,7 +194,7 @@ def test_everserver_status_exception(
     assert "Exception: Failed optimization" in status["message"]
 
 
-@patch("sys.argv", ["name", "--config-file", "config_one_batch.yml"])
+@patch("sys.argv", ["name", "--config-file", "config_minimal.yml"])
 @patch(
     "everest.detached.jobs.everserver._sim_monitor",
     side_effect=partial(set_shared_status, progress=[]),
@@ -202,8 +202,12 @@ def test_everserver_status_exception(
 def test_everserver_status_max_batch_num(
     _1, mock_server, copy_math_func_test_data_to_tmp
 ):
-    config_file = "config_one_batch.yml"
-    config = EverestConfig.load_file(config_file)
+    config = EverestConfig.load_file("config_minimal.yml")
+    config.optimization = OptimizationConfig(
+        algorithm="optpp_q_newton", max_batch_num=1
+    )
+    config.dump("config_minimal.yml")
+
     everserver.main()
     status = everserver_status(
         ServerConfig.get_everserver_status_path(config.output_dir)
diff --git a/tests/everest/test_fix_control.py b/tests/everest/test_fix_control.py
index 9327ff938e7..bcf75e5a333 100644
--- a/tests/everest/test_fix_control.py
+++ b/tests/everest/test_fix_control.py
@@ -1,7 +1,7 @@
 from ert.run_models.everest_run_model import EverestRunModel
 from everest.config import EverestConfig
 
-CONFIG_FILE_ADVANCED = "config_advanced_scipy.yml"
"config_advanced_scipy.yml" +CONFIG_FILE_ADVANCED = "config_advanced.yml" def test_fix_control( diff --git a/tests/everest/test_logging.py b/tests/everest/test_logging.py index e86337a5d0a..3c0dce087e9 100644 --- a/tests/everest/test_logging.py +++ b/tests/everest/test_logging.py @@ -4,12 +4,14 @@ import pytest from ert.scheduler.event import FinishedEvent -from everest.config import EverestConfig, ServerConfig +from everest.config import ( + EverestConfig, + ServerConfig, +) +from everest.config.install_job_config import InstallJobConfig from everest.detached import start_server, wait_for_server from everest.util import makedirs_if_needed -CONFIG_FILE = "config_fm_failure.yml" - def _string_exists_in_file(file_path, string): return string in Path(file_path).read_text(encoding="utf-8") @@ -26,7 +28,13 @@ async def server_running(): if isinstance(event, FinishedEvent) and event.iens == 0: return - everest_config = EverestConfig.load_file(CONFIG_FILE) + everest_config = EverestConfig.load_file("config_minimal.yml") + everest_config.forward_model.append("toggle_failure --fail simulation_2") + everest_config.install_jobs.append( + InstallJobConfig(name="toggle_failure", source="jobs/FAIL_SIMULATION") + ) + # start_server() loads config based on config_path, so we need to actually overwrite it + everest_config.dump("config_minimal.yml") makedirs_if_needed(everest_config.output_dir, roll_if_exists=True) driver = await start_server(everest_config, debug=True) diff --git a/tests/everest/test_math_func.py b/tests/everest/test_math_func.py index 54446757c2d..301789eceb2 100644 --- a/tests/everest/test_math_func.py +++ b/tests/everest/test_math_func.py @@ -4,18 +4,17 @@ import numpy as np import pandas as pd import pytest +import yaml from ert.run_models.everest_run_model import EverestRunModel from everest import ConfigKeys as CK -from everest.config import EverestConfig +from everest.config import EverestConfig, InputConstraintConfig from everest.config.export_config import ExportConfig from everest.export import export_data from everest.util import makedirs_if_needed CONFIG_FILE_MULTIOBJ = "config_multiobj.yml" CONFIG_FILE_ADVANCED = "config_advanced.yml" -CONFIG_AUTO_SCALED_CONTROLS = "config_auto_scaled_controls.yml" -CONFIG_FILE_REMOVE_RUN_PATH = "config_remove_run_path.yml" @pytest.mark.integration_test @@ -186,13 +185,16 @@ def test_math_func_advanced( def test_remove_run_path( copy_math_func_test_data_to_tmp, evaluator_server_config_generator ): - config = EverestConfig.load_file(CONFIG_FILE_REMOVE_RUN_PATH) - - simulation_should_fail = "simulation_2" - # Add to the config dictionary what simulation needs to fail - config.forward_model[config.forward_model.index("toggle_failure")] = ( - f"toggle_failure --fail {simulation_should_fail}" - ) + with open("config_minimal.yml", encoding="utf-8") as file: + config_yaml = yaml.safe_load(file) + config_yaml["simulator"] = {"delete_run_path": True} + config_yaml["install_jobs"].append( + {"name": "toggle_failure", "source": "jobs/FAIL_SIMULATION"} + ) + config_yaml["forward_model"].append("toggle_failure --fail simulation_2") + with open("config.yml", "w", encoding="utf-8") as fout: + yaml.dump(config_yaml, fout) + config = EverestConfig.load_file("config.yml") simulation_dir = config.simulation_dir @@ -236,16 +238,28 @@ def test_remove_run_path( ), "Simulation folder should be there, something went wrong and was removed" +@pytest.mark.integration_test def test_math_func_auto_scaled_controls( copy_math_func_test_data_to_tmp, 
     copy_math_func_test_data_to_tmp, evaluator_server_config_generator
 ):
-    config = EverestConfig.load_file(CONFIG_AUTO_SCALED_CONTROLS)
-
+    # Arrange
+    config = EverestConfig.load_file("config_minimal.yml")
+    config.controls[0].auto_scale = True
+    config.controls[0].scaled_range = [0.3, 0.7]
+    config.input_constraints = [
+        InputConstraintConfig(weights={"point.x": 1.0, "point.y": 1.0}, upper_bound=0.5)
+    ]
+    config.forward_model[0] += " --scaling -1 1 0.3 0.7"
+    # Convergence is slower here, so start closer to the final solution and allow more batches
+    config.controls[0].initial_guess = 0.2
+    config.optimization.max_batch_num = 10
+
+    # Act
     run_model = EverestRunModel.create(config)
     evaluator_server_config = evaluator_server_config_generator(run_model)
     run_model.run_experiment(evaluator_server_config)
 
-    # Check resulting points
+    # Assert
     x, y, z = (run_model.result.controls["point_" + p] for p in ("x", "y", "z"))
 
     assert x == pytest.approx(0.25, abs=0.05)
diff --git a/tests/everest/test_objective_type.py b/tests/everest/test_objective_type.py
index 2b424e58856..28a96fc59b8 100644
--- a/tests/everest/test_objective_type.py
+++ b/tests/everest/test_objective_type.py
@@ -1,21 +1,36 @@
 import pytest
 
 from ert.run_models.everest_run_model import EverestRunModel
-from everest.config import EverestConfig
+from everest.config import (
+    EverestConfig,
+    ModelConfig,
+    ObjectiveFunctionConfig,
+)
 
-CONFIG_FILE_STDDEV = "config_stddev.yml"
-
 
-def test_mathfunc_stddev(
+@pytest.mark.integration_test
+def test_objective_type(
     copy_math_func_test_data_to_tmp, evaluator_server_config_generator
 ):
-    config = EverestConfig.load_file(CONFIG_FILE_STDDEV)
+    # Arrange
+    config = EverestConfig.load_file("config_minimal.yml")
+    config.objective_functions = [
+        ObjectiveFunctionConfig(name="distance", weight=1.0),
+        ObjectiveFunctionConfig(
+            name="stddev", weight=1.0, type="stddev", alias="distance"
+        ),
+    ]
+    config.model = ModelConfig(realizations=[0, 1])
+    config.forward_model = [
+        "distance3 --point-file point.json --realization --target 0.5 0.5 0.5 --out distance"
+    ]
 
+    # Act
     run_model = EverestRunModel.create(config)
     evaluator_server_config = evaluator_server_config_generator(run_model)
     run_model.run_experiment(evaluator_server_config)
 
-    # Check resulting points
+    # Assert
     x0, x1, x2 = (run_model.result.controls["point_" + p] for p in ["x", "y", "z"])
     assert x0 == pytest.approx(0.5, abs=0.025)
     assert x1 == pytest.approx(0.5, abs=0.025)
diff --git a/tests/everest/test_samplers.py b/tests/everest/test_samplers.py
index 4ea0fdc053a..663db860c70 100644
--- a/tests/everest/test_samplers.py
+++ b/tests/everest/test_samplers.py
@@ -4,7 +4,7 @@
 from everest.config import EverestConfig
 from everest.config.sampler_config import SamplerConfig
 
-CONFIG_FILE_ADVANCED = "config_advanced_scipy.yml"
+CONFIG_FILE_ADVANCED = "config_advanced.yml"
 
 
 def test_sampler_uniform(
diff --git a/tests/everest/test_simulator_cache.py b/tests/everest/test_simulator_cache.py
index e347fc76476..c00e4821c22 100644
--- a/tests/everest/test_simulator_cache.py
+++ b/tests/everest/test_simulator_cache.py
@@ -7,8 +7,6 @@
 from ert.run_models.everest_run_model import EverestRunModel
 from everest.config import EverestConfig, SimulatorConfig
 
-CONFIG_FILE = "config_advanced_scipy.yml"
-
 
 def test_simulator_cache(copy_math_func_test_data_to_tmp):
     n_evals = 0
@@ -19,7 +17,7 @@ def new_call(*args):
         n_evals += (result.evaluation_ids >= 0).sum()
         return result
 
-    config = EverestConfig.load_file(CONFIG_FILE)
+    config = EverestConfig.load_file("config_minimal.yml")
     config.simulator = SimulatorConfig(enable_cache=True)
 
     run_model = EverestRunModel.create(config)
@@ -38,7 +36,7 @@ def new_call(*args):
         run_model.run_experiment(evaluator_server_config)
         assert n_evals > 0
         variables1 = list(run_model.result.controls.values())
-        assert np.allclose(variables1, [0.1, 0, 0.4], atol=0.02)
+        assert np.allclose(variables1, [0.5, 0.5, 0.5], atol=0.02)
 
         # Now do another run, where the functions should come from the cache:
         n_evals = 0