Skip to content

Commit

Permalink
Consolidate benchmark methods and give them a clearer naming system
Browse files Browse the repository at this point in the history
Summary:
* No longer have "default" like "SOBOL+BOTORCH_MODULAR::default"; instead, construct method from components (e.g. "SOBOL+BOTORCH_MODULAR::SingleTaskGP_qLogNoisyExpectedImprovement") and *test* that the benchmarks match defaults.
* Remove non-Log EI benchmarks, since these have all been migrated.
* Define MBM benchmarks more concisely and programmatically.

Defining benchmark methods explicitly in terms of their components, without reference to defaults, has a few benefits:

1) One can't accidentally test the same method multiple times;

2) There is a 1:1 mapping between method name and functionality rather than many-to-many, making it easier to track the performance of methods; and

3) Tests will prevent benchmarks from drifting apart from default behavior (in MBM or CGS) over time.

Differential Revision: D49566866
  • Loading branch information
esantorella authored and facebook-github-bot committed Sep 29, 2023
1 parent 7a8069b commit fe664ee
Show file tree
Hide file tree
Showing 6 changed files with 97 additions and 337 deletions.
21 changes: 14 additions & 7 deletions ax/benchmark/benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,19 @@ def compute_score_trace(
return score_trace.clip(min=0, max=100)


def _create_benchmark_experiment(
    problem: BenchmarkProblemBase, method_name: str
) -> Experiment:
    """Create an empty experiment for the given problem and method.

    The experiment name embeds the problem name, the method name, and the
    current UNIX timestamp, so repeated replications get distinct names.
    The search space, optimization config, tracking metrics, and runner
    are all taken directly from ``problem``.
    """
    timestamp = int(time())
    return Experiment(
        name=f"{problem.name}|{method_name}_{timestamp}",
        search_space=problem.search_space,
        optimization_config=problem.optimization_config,
        tracking_metrics=problem.tracking_metrics,
        runner=problem.runner,
    )


def benchmark_replication(
problem: BenchmarkProblemBase,
method: BenchmarkMethod,
Expand All @@ -79,13 +92,7 @@ def benchmark_replication(
from `botorch.utils.sampling`.
"""

experiment = Experiment(
name=f"{problem.name}|{method.name}_{int(time())}",
search_space=problem.search_space,
optimization_config=problem.optimization_config,
tracking_metrics=problem.tracking_metrics,
runner=problem.runner,
)
experiment = _create_benchmark_experiment(problem=problem, method_name=method.name)

scheduler = Scheduler(
experiment=experiment,
Expand Down
30 changes: 1 addition & 29 deletions ax/benchmark/methods/choose_generation_strategy.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,32 +3,4 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Optional

from ax.benchmark.benchmark_method import (
BenchmarkMethod,
get_sequential_optimization_scheduler_options,
)
from ax.benchmark.benchmark_problem import BenchmarkProblemBase
from ax.modelbridge.dispatch_utils import choose_generation_strategy
from ax.service.scheduler import SchedulerOptions


def get_choose_generation_strategy_method(
    problem: BenchmarkProblemBase,
    scheduler_options: Optional[SchedulerOptions] = None,
    distribute_replications: bool = False,
) -> BenchmarkMethod:
    """Build a benchmark method whose generation strategy is selected
    automatically by ``choose_generation_strategy`` for the given problem.

    Args:
        problem: The benchmark problem supplying the search space,
            optimization config, and trial budget.
        scheduler_options: Scheduler options to use; defaults to the
            sequential-optimization options when not provided.
        distribute_replications: Passed through to ``BenchmarkMethod``.
    """
    # Fall back to sequential-optimization scheduler options by default.
    options = scheduler_options or get_sequential_optimization_scheduler_options()
    strategy = choose_generation_strategy(
        search_space=problem.search_space,
        optimization_config=problem.optimization_config,
        num_trials=problem.num_trials,
    )
    return BenchmarkMethod(
        name=f"ChooseGenerationStrategy::{problem.name}",
        generation_strategy=strategy,
        scheduler_options=options,
        distribute_replications=distribute_replications,
    )
# File removed in next commit
56 changes: 1 addition & 55 deletions ax/benchmark/methods/gpei_and_moo.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,58 +3,4 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Optional

from ax.benchmark.benchmark_method import (
BenchmarkMethod,
get_sequential_optimization_scheduler_options,
)
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.registry import Models
from ax.service.scheduler import SchedulerOptions


def get_gpei_default(
    scheduler_options: Optional[SchedulerOptions] = None,
) -> BenchmarkMethod:
    """Benchmark method: 5 Sobol trials, then sequential (max_parallelism=1)
    GPEI trials for the remainder of the budget.

    Args:
        scheduler_options: Scheduler options to use; defaults to the
            sequential-optimization options when not provided.
    """
    sobol_step = GenerationStep(
        model=Models.SOBOL, num_trials=5, min_trials_observed=5
    )
    gpei_step = GenerationStep(
        model=Models.GPEI,
        num_trials=-1,  # run GPEI for all remaining trials
        max_parallelism=1,
    )
    strategy = GenerationStrategy(
        name="SOBOL+GPEI::default",
        steps=[sobol_step, gpei_step],
    )
    options = scheduler_options or get_sequential_optimization_scheduler_options()
    return BenchmarkMethod(
        name=strategy.name,
        generation_strategy=strategy,
        scheduler_options=options,
    )


def get_moo_default(
    scheduler_options: Optional[SchedulerOptions] = None,
) -> BenchmarkMethod:
    """Benchmark method: 5 Sobol trials, then sequential (max_parallelism=1)
    multi-objective (MOO) trials for the remainder of the budget.

    Args:
        scheduler_options: Scheduler options to use; defaults to the
            sequential-optimization options when not provided.
    """
    sobol_step = GenerationStep(
        model=Models.SOBOL, num_trials=5, min_trials_observed=5
    )
    moo_step = GenerationStep(
        model=Models.MOO,
        num_trials=-1,  # run MOO for all remaining trials
        max_parallelism=1,
    )
    strategy = GenerationStrategy(
        name="SOBOL+MOO::default",
        steps=[sobol_step, moo_step],
    )
    options = scheduler_options or get_sequential_optimization_scheduler_options()
    return BenchmarkMethod(
        name=strategy.name,
        generation_strategy=strategy,
        scheduler_options=options,
    )
# File removed in next commit
232 changes: 30 additions & 202 deletions ax/benchmark/methods/modular_botorch.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,238 +3,66 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Any, Dict, Optional, Type
from typing import Dict, Optional, Type, Union

from ax.benchmark.benchmark_method import (
BenchmarkMethod,
get_sequential_optimization_scheduler_options,
)
from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
from ax.modelbridge.registry import Models
from ax.models.torch.botorch_modular.surrogate import Surrogate
from ax.models.torch.botorch_modular.model import SurrogateSpec

from ax.service.scheduler import SchedulerOptions
from ax.utils.common.constants import Keys
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.monte_carlo import qNoisyExpectedImprovement
from botorch.acquisition.analytic import LogExpectedImprovement
from botorch.acquisition.logei import qLogNoisyExpectedImprovement
from botorch.acquisition.multi_objective.monte_carlo import (
qNoisyExpectedHypervolumeImprovement,
)
from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP
from botorch.models.gp_regression import FixedNoiseGP
from botorch.models.model import Model


def get_sobol_botorch_modular_fixed_noise_gp_qnei(
scheduler_options: Optional[SchedulerOptions] = None,
) -> BenchmarkMethod:
model_gen_kwargs = {
"model_gen_options": {
Keys.OPTIMIZER_KWARGS: {
"num_restarts": 50,
"raw_samples": 1024,
},
Keys.ACQF_KWARGS: {
"prune_baseline": True,
},
}
}
model_names_abbrevations: Dict[str, str] = {
SaasFullyBayesianSingleTaskGP.__name__: "SAAS",
}
acqf_name_abbreviations: Dict[str, str] = {
qLogNoisyExpectedImprovement.__name__: "qLogNEI",
qNoisyExpectedHypervolumeImprovement.__name__: "qNEHVI",
LogExpectedImprovement.__name__: "LogEI",
}

generation_strategy = GenerationStrategy(
name="SOBOL+BOTORCH_MODULAR::FixedNoiseGP_qNoisyExpectedImprovement",
steps=[
GenerationStep(model=Models.SOBOL, num_trials=5, min_trials_observed=5),
GenerationStep(
model=Models.BOTORCH_MODULAR,
num_trials=-1,
max_parallelism=1,
model_kwargs={
"surrogate": Surrogate(FixedNoiseGP),
"botorch_acqf_class": qNoisyExpectedImprovement,
},
model_gen_kwargs=model_gen_kwargs,
),
],
)

return BenchmarkMethod(
name=generation_strategy.name,
generation_strategy=generation_strategy,
scheduler_options=scheduler_options
or get_sequential_optimization_scheduler_options(),
)


def get_sobol_botorch_modular_fixed_noise_gp_qnehvi(
def get_sobol_botorch_modular_acquisition(
model_cls: Type[Model],
acquisition_cls: Type[AcquisitionFunction],
scheduler_options: Optional[SchedulerOptions] = None,
distribute_replications: bool = False,
name: Optional[str] = None,
) -> BenchmarkMethod:
model_gen_kwargs = {
"model_gen_options": {
Keys.OPTIMIZER_KWARGS: {
"num_restarts": 50,
"raw_samples": 1024,
},
Keys.ACQF_KWARGS: {
"prune_baseline": True,
"qmc": True,
"mc_samples": 512,
},
}
model_kwargs: Dict[
str, Union[Type[AcquisitionFunction], Dict[str, SurrogateSpec]]
] = {
"botorch_acqf_class": acquisition_cls,
"surrogate_specs": {"BoTorch": SurrogateSpec(botorch_model_class=model_cls)},
}

generation_strategy = GenerationStrategy(
name="SOBOL+BOTORCH_MODULAR::FixedNoiseGP_qNoisyExpectedHypervolumeImprovement",
steps=[
GenerationStep(
model=Models.SOBOL,
num_trials=5,
min_trials_observed=5,
),
GenerationStep(
model=Models.BOTORCH_MODULAR,
num_trials=-1,
max_parallelism=1,
model_kwargs={
"surrogate": Surrogate(FixedNoiseGP),
"botorch_acqf_class": qNoisyExpectedHypervolumeImprovement,
},
model_gen_kwargs=model_gen_kwargs,
),
],
)

return BenchmarkMethod(
name=generation_strategy.name,
generation_strategy=generation_strategy,
scheduler_options=scheduler_options
or get_sequential_optimization_scheduler_options(),
)


def get_sobol_botorch_modular_saas_fully_bayesian_single_task_gp_qnei(
scheduler_options: Optional[SchedulerOptions] = None,
distribute_replications: bool = True,
) -> BenchmarkMethod: # noqa
return get_sobol_botorch_modular_saas_fully_bayesian_single_task_gp(
qNoisyExpectedImprovement,
scheduler_options=scheduler_options
or get_sequential_optimization_scheduler_options(),
distribute_replications=distribute_replications,
model_name = model_names_abbrevations.get(model_cls.__name__, model_cls.__name__)
acqf_name = acqf_name_abbreviations.get(
acquisition_cls.__name__, acquisition_cls.__name__
)
name = f"MBM::{model_name}_{acqf_name}"


def get_sobol_botorch_modular_saas_fully_bayesian_single_task_gp(
botorch_acqf_class: Type[AcquisitionFunction],
scheduler_options: Optional[SchedulerOptions] = None,
distribute_replications: bool = True,
) -> BenchmarkMethod: # noqa
generation_strategy = GenerationStrategy(
name="SOBOL+BOTORCH_MODULAR::SaasFullyBayesianSingleTaskGP_"
+ botorch_acqf_class.__name__, # noqa
name=name,
steps=[
GenerationStep(model=Models.SOBOL, num_trials=5, min_trials_observed=5),
GenerationStep(
model=Models.BOTORCH_MODULAR,
num_trials=-1,
max_parallelism=1,
model_kwargs={
"surrogate": Surrogate(
botorch_model_class=SaasFullyBayesianSingleTaskGP
),
"botorch_acqf_class": botorch_acqf_class,
},
),
],
)

return BenchmarkMethod(
name=generation_strategy.name,
generation_strategy=generation_strategy,
scheduler_options=scheduler_options
or get_sequential_optimization_scheduler_options(),
distribute_replications=distribute_replications,
)


def get_sobol_botorch_modular_saas_fully_bayesian_single_task_gp_qnehvi(
scheduler_options: Optional[SchedulerOptions] = None,
distribute_replications: bool = True,
) -> BenchmarkMethod: # noqa
generation_strategy = GenerationStrategy(
name="SOBOL+BOTORCH_MODULAR::SaasFullyBayesianSingleTaskGP_qNoisyExpectedHypervolumeImprovement", # noqa
steps=[
GenerationStep(
model=Models.SOBOL,
num_trials=5,
min_trials_observed=5,
),
GenerationStep(
model=Models.BOTORCH_MODULAR,
num_trials=-1,
max_parallelism=1,
model_kwargs={
"surrogate": Surrogate(
botorch_model_class=SaasFullyBayesianSingleTaskGP
),
"botorch_acqf_class": qNoisyExpectedHypervolumeImprovement,
},
),
],
)
return BenchmarkMethod(
name=generation_strategy.name,
generation_strategy=generation_strategy,
scheduler_options=scheduler_options
or get_sequential_optimization_scheduler_options(),
distribute_replications=distribute_replications,
)


def get_sobol_botorch_modular_default(
scheduler_options: Optional[SchedulerOptions] = None,
distribute_replications: bool = False,
) -> BenchmarkMethod:
generation_strategy = GenerationStrategy(
name="SOBOL+BOTORCH_MODULAR::default",
steps=[
GenerationStep(model=Models.SOBOL, num_trials=5, min_trials_observed=5),
GenerationStep(
model=Models.BOTORCH_MODULAR,
num_trials=-1,
max_parallelism=1,
),
],
)

return BenchmarkMethod(
name=generation_strategy.name,
generation_strategy=generation_strategy,
scheduler_options=scheduler_options
or get_sequential_optimization_scheduler_options(),
distribute_replications=distribute_replications,
)


def get_sobol_botorch_modular_acquisition(
acquisition_cls: Type[AcquisitionFunction],
acquisition_options: Optional[Dict[str, Any]] = None,
scheduler_options: Optional[SchedulerOptions] = None,
distribute_replications: bool = False,
) -> BenchmarkMethod:
generation_strategy = GenerationStrategy(
name=f"SOBOL+BOTORCH_MODULAR::{acquisition_cls.__name__}",
steps=[
GenerationStep(
model=Models.SOBOL,
num_trials=5,
min_trials_observed=5,
),
GenerationStep(
model=Models.BOTORCH_MODULAR,
num_trials=-1,
max_parallelism=1,
model_kwargs={
"botorch_acqf_class": acquisition_cls,
"acquisition_options": acquisition_options,
},
model_kwargs=model_kwargs,
),
],
)
Expand Down
Loading

0 comments on commit fe664ee

Please sign in to comment.