Skip to content

Commit

Permalink
Update use cases of deprecated fixed noise models (#1920)
Browse files Browse the repository at this point in the history
Summary:
Pull Request resolved: #1920

Updates use cases of fixed noise models that were deprecated in pytorch/botorch#2052 and elsewhere.

The only remaining usage is `botorch_modular_registry`. I updated these to point to the parent models, so that they'll be less likely to cause backwards compatibility issues when we delete the models for good.

Reviewed By: esantorella

Differential Revision: D50431137

fbshipit-source-id: 604090019cfff8bc27ae01341a43d80142ef89c8
  • Loading branch information
saitcakmak authored and facebook-github-bot committed Oct 18, 2023
1 parent 23fff83 commit 3f30af6
Show file tree
Hide file tree
Showing 14 changed files with 109 additions and 170 deletions.
6 changes: 3 additions & 3 deletions ax/modelbridge/tests/test_registry.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@
from botorch.acquisition.monte_carlo import qExpectedImprovement
from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP
from botorch.models.fully_bayesian_multitask import SaasFullyBayesianMultiTaskGP
from botorch.models.gp_regression import FixedNoiseGP
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.multitask import MultiTaskGP
from botorch.utils.types import DEFAULT
Expand Down Expand Up @@ -75,9 +75,9 @@ def test_botorch_modular(self) -> None:
self.assertEqual(gpei.model.acquisition_class, Acquisition)
self.assertEqual(gpei.model.acquisition_options, {"best_f": 0.0})
self.assertIsInstance(gpei.model.surrogates[Keys.AUTOSET_SURROGATE], Surrogate)
# FixedNoiseGP should be picked since experiment data has fixed noise.
# SingleTaskGP should be picked.
self.assertIsInstance(
gpei.model.surrogates[Keys.AUTOSET_SURROGATE].model, FixedNoiseGP
gpei.model.surrogates[Keys.AUTOSET_SURROGATE].model, SingleTaskGP
)

gr = gpei.gen(n=1)
Expand Down
4 changes: 2 additions & 2 deletions ax/modelbridge/tests/test_robust.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
from botorch.acquisition.multi_objective.monte_carlo import (
qNoisyExpectedHypervolumeImprovement,
)
from botorch.models.gp_regression import FixedNoiseGP
from botorch.models.gp_regression import SingleTaskGP


class TestRobust(TestCase):
Expand All @@ -46,7 +46,7 @@ def test_robust(
modelbridge = Models.BOTORCH_MODULAR(
experiment=exp,
data=exp.fetch_data(),
surrogate=Surrogate(botorch_model_class=FixedNoiseGP),
surrogate=Surrogate(botorch_model_class=SingleTaskGP),
botorch_acqf_class=acqf_class or qNoisyExpectedImprovement,
)
trial = (
Expand Down
34 changes: 24 additions & 10 deletions ax/models/tests/test_botorch_defaults.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,12 +32,16 @@
)
from botorch.acquisition.objective import ConstrainedMCObjective
from botorch.acquisition.penalized import PenalizedMCObjective
from botorch.models import FixedNoiseGP, SingleTaskGP
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP
from botorch.models.multitask import FixedNoiseMultiTaskGP, MultiTaskGP
from botorch.models.multitask import MultiTaskGP
from botorch.models.transforms.input import Warp
from botorch.utils.constraints import get_outcome_constraint_transforms
from gpytorch.kernels import MaternKernel, ScaleKernel
from gpytorch.likelihoods.gaussian_likelihood import (
FixedNoiseGaussianLikelihood,
GaussianLikelihood,
)
from gpytorch.module import Module
from gpytorch.priors import GammaPrior
from gpytorch.priors.lkj_prior import LKJCovariancePrior
Expand All @@ -53,19 +57,23 @@ def test_get_model(self) -> None:
unknown_var = torch.tensor([float("nan"), float("nan")]).unsqueeze(-1)
model = _get_model(x, y, unknown_var, None)
self.assertIsInstance(model, SingleTaskGP)
self.assertIsInstance(model.likelihood, GaussianLikelihood)

model = _get_model(X=x, Y=y, Yvar=var)
self.assertIsInstance(model, FixedNoiseGP)
self.assertIsInstance(model, SingleTaskGP)
self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
self.assertEqual(
model.covar_module.base_kernel.lengthscale_prior.concentration, 3.0
)
self.assertEqual(model.covar_module.base_kernel.lengthscale_prior.rate, 6.0)
model = _get_model(X=x, Y=y, Yvar=unknown_var, task_feature=1)
self.assertIs(type(model), MultiTaskGP) # Don't accept subclasses.
self.assertIsInstance(model.likelihood, GaussianLikelihood)
model = _get_model(X=x, Y=y, Yvar=var, task_feature=1)
self.assertIsInstance(model, FixedNoiseMultiTaskGP)
self.assertIsInstance(model, MultiTaskGP)
self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
model = _get_model(X=x, Y=y, Yvar=partial_var.clone(), task_feature=1)
self.assertIsInstance(model, FixedNoiseMultiTaskGP)
self.assertIsInstance(model, MultiTaskGP)
model = _get_model(X=x, Y=y, Yvar=partial_var.clone(), task_feature=1, rank=1)
self.assertEqual(model._rank, 1)
with self.assertRaises(ValueError):
Expand Down Expand Up @@ -155,7 +163,7 @@ def test_get_model(self) -> None:
}
}
model = _get_model(X=x, Y=y, Yvar=var, **deepcopy(kwargs6)) # pyre-ignore
self.assertIsInstance(model, FixedNoiseGP)
self.assertIsInstance(model, SingleTaskGP)
self.assertEqual(
model.covar_module.base_kernel.lengthscale_prior.concentration, 12.0
)
Expand All @@ -168,6 +176,7 @@ def test_get_model(self) -> None:
**deepcopy(kwargs6), # pyre-ignore
)
self.assertIs(type(model), MultiTaskGP)
self.assertIsInstance(model.likelihood, GaussianLikelihood)
self.assertEqual(
model.covar_module.base_kernel.lengthscale_prior.concentration, 12.0
)
Expand All @@ -179,7 +188,8 @@ def test_get_model(self) -> None:
model = _get_model(
X=x, Y=y, Yvar=var, task_feature=1, **deepcopy(kwargs6) # pyre-ignore
)
self.assertIsInstance(model, FixedNoiseMultiTaskGP)
self.assertIsInstance(model, MultiTaskGP)
self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
self.assertEqual(
model.covar_module.base_kernel.lengthscale_prior.concentration, 12.0
)
Expand All @@ -201,7 +211,8 @@ def test_get_model(self) -> None:
model = _get_model(
X=x, Y=y, Yvar=var, covar_module=covar_module, **kwargs7 # pyre-ignore
)
self.assertIsInstance(model, FixedNoiseGP)
self.assertIsInstance(model, SingleTaskGP)
self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
self.assertEqual(covar_module, model.covar_module)

@mock.patch("ax.models.torch.botorch_defaults._get_model", wraps=_get_model)
Expand Down Expand Up @@ -289,7 +300,9 @@ def test_pass_customized_prior(self, get_model_mock: Mock) -> None:
refit_model=False,
**kwarg, # pyre-ignore
)
self.assertIs(type(model), FixedNoiseGP)
self.assertIsInstance(model, SingleTaskGP)
self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)

self.assertEqual(
model.covar_module.base_kernel.lengthscale_prior.concentration,
12.0,
Expand All @@ -310,7 +323,8 @@ def test_pass_customized_prior(self, get_model_mock: Mock) -> None:
**kwarg, # pyre-ignore
)
for m in model.models:
self.assertIs(type(m), FixedNoiseMultiTaskGP)
self.assertIs(type(m), MultiTaskGP)
self.assertIsInstance(m.likelihood, FixedNoiseGaussianLikelihood)
self.assertEqual(
m.covar_module.base_kernel.lengthscale_prior.concentration,
12.0,
Expand Down
8 changes: 5 additions & 3 deletions ax/models/tests/test_botorch_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,11 +30,12 @@
from ax.utils.testing.mock import fast_botorch_optimize
from ax.utils.testing.torch_stubs import get_torch_test_data
from botorch.acquisition.utils import get_infeasible_cost
from botorch.models import FixedNoiseGP, ModelListGP, SingleTaskGP
from botorch.models import ModelListGP, SingleTaskGP
from botorch.models.transforms.input import Warp
from botorch.utils.datasets import SupervisedDataset
from botorch.utils.objective import get_objective_weights_transform
from gpytorch.likelihoods import _GaussianLikelihoodBase
from gpytorch.likelihoods.gaussian_likelihood import FixedNoiseGaussianLikelihood
from gpytorch.mlls import ExactMarginalLogLikelihood, LeaveOneOutPseudoLikelihood
from gpytorch.priors import GammaPrior
from gpytorch.priors.lkj_prior import LKJCovariancePrior
Expand Down Expand Up @@ -283,7 +284,7 @@ def test_BotorchModel(
else:
self.assertFalse(hasattr(m, "input_transform"))

# Test batched multi-output FixedNoiseGP
# Test batched multi-output SingleTaskGP
datasets_block = [
SupervisedDataset(
X=Xs1[0],
Expand Down Expand Up @@ -324,7 +325,8 @@ def test_BotorchModel(
models = [model.model]
Ys = [Ys1[0], Ys2[0]]
for i, m in enumerate(models):
self.assertIsInstance(m, FixedNoiseGP)
self.assertIsInstance(m, SingleTaskGP)
self.assertIsInstance(m.likelihood, FixedNoiseGaussianLikelihood)
expected_train_inputs = Xs1[0]

if not use_input_warping:
Expand Down
4 changes: 2 additions & 2 deletions ax/models/torch/alebo.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.analytic import ExpectedImprovement
from botorch.acquisition.objective import PosteriorTransform
from botorch.models.gp_regression import FixedNoiseGP
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.gpytorch import GPyTorchModel
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.optim.fit import fit_gpytorch_mll_scipy
Expand Down Expand Up @@ -310,7 +310,7 @@ def forward(
return postprocess_rbf(diff)


class ALEBOGP(FixedNoiseGP):
class ALEBOGP(SingleTaskGP):
"""The GP for ALEBO.
Uses the Mahalanobis kernel defined in ALEBOKernel, along with a
Expand Down
47 changes: 14 additions & 33 deletions ax/models/torch/botorch_defaults.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,12 +22,12 @@
from botorch.acquisition.utils import get_infeasible_cost
from botorch.exceptions.errors import UnsupportedError
from botorch.fit import fit_gpytorch_mll
from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP
from botorch.models.gpytorch import GPyTorchModel
from botorch.models.model import Model
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.multitask import FixedNoiseMultiTaskGP, MultiTaskGP
from botorch.models.multitask import MultiTaskGP
from botorch.models.transforms.input import Warp
from botorch.optim.optimize import optimize_acqf
from botorch.utils import (
Expand Down Expand Up @@ -761,19 +761,11 @@ def _get_model(
input_transform=warp_tf,
**kwargs,
)
elif task_feature is None and all_nan_Yvar:
gp = SingleTaskGP(
train_X=X,
train_Y=Y,
covar_module=covar_module,
input_transform=warp_tf,
**kwargs,
)
elif task_feature is None:
gp = FixedNoiseGP(
gp = SingleTaskGP(
train_X=X,
train_Y=Y,
train_Yvar=Yvar,
train_Yvar=None if all_nan_Yvar else Yvar,
covar_module=covar_module,
input_transform=warp_tf,
**kwargs,
Expand All @@ -799,27 +791,16 @@ def _get_model(
f"your prior type was {prior_type}."
)

if all_nan_Yvar:
gp = MultiTaskGP(
train_X=X,
train_Y=Y,
task_feature=task_feature,
covar_module=covar_module,
rank=kwargs.get("rank"),
task_covar_prior=task_covar_prior,
input_transform=warp_tf,
)
else:
gp = FixedNoiseMultiTaskGP(
train_X=X,
train_Y=Y,
train_Yvar=Yvar,
task_feature=task_feature,
covar_module=covar_module,
rank=kwargs.get("rank"),
task_covar_prior=task_covar_prior,
input_transform=warp_tf,
)
gp = MultiTaskGP(
train_X=X,
train_Y=Y,
train_Yvar=None if all_nan_Yvar else Yvar,
task_feature=task_feature,
covar_module=covar_module,
rank=kwargs.get("rank"),
task_covar_prior=task_covar_prior,
input_transform=warp_tf,
)
return gp


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,8 @@
from ax.models.torch.botorch_modular.kernels import ScaleMaternKernel
from ax.utils.common.typeutils import _argparse_type_encoder
from botorch.models import MultiTaskGP
from botorch.models.gp_regression import FixedNoiseGP

from botorch.models.gp_regression import SingleTaskGP
from botorch.models.model import Model
from botorch.models.multitask import FixedNoiseMultiTaskGP
from botorch.utils.datasets import SupervisedDataset
from botorch.utils.dispatcher import Dispatcher
from botorch.utils.types import _DefaultType, DEFAULT
Expand Down Expand Up @@ -101,16 +99,14 @@ def _covar_module_argparse_scale_matern(
A dictionary with covar module kwargs.
"""

if issubclass(botorch_model_class, FixedNoiseMultiTaskGP) or issubclass(
botorch_model_class, MultiTaskGP
):
if issubclass(botorch_model_class, MultiTaskGP):
if ard_num_dims is DEFAULT:
ard_num_dims = dataset.X.shape[-1] - 1

if batch_shape is DEFAULT:
batch_shape = torch.Size([])

if issubclass(botorch_model_class, FixedNoiseGP):
if issubclass(botorch_model_class, SingleTaskGP):
if ard_num_dims is DEFAULT:
ard_num_dims = dataset.X.shape[-1]

Expand Down
21 changes: 1 addition & 20 deletions ax/models/torch/botorch_modular/surrogate.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,15 +7,14 @@
from __future__ import annotations

import inspect
import warnings
from copy import deepcopy
from logging import Logger
from typing import Any, Dict, List, Optional, Tuple, Type

import torch
from ax.core.search_space import SearchSpaceDigest
from ax.core.types import TCandidateMetadata
from ax.exceptions.core import AxWarning, UnsupportedError, UserInputError
from ax.exceptions.core import UnsupportedError, UserInputError
from ax.models.model_utils import best_in_sample_point
from ax.models.torch.botorch_modular.input_constructors.covar_modules import (
covar_module_argparse,
Expand Down Expand Up @@ -321,24 +320,6 @@ def _construct_model(
"categorical_features": categorical_features,
}
botorch_model_class_args = inspect.getfullargspec(botorch_model_class).args

# Temporary workaround to allow models to consume data from
# `FixedNoiseDataset`s even if they don't accept variance observations.
if "train_Yvar" not in botorch_model_class_args and dataset.Yvar is not None:
warnings.warn(
f"Provided model class {botorch_model_class} does not accept "
"`train_Yvar` argument, but received dataset with `Yvar`. Ignoring "
"variance observations.",
AxWarning,
)
dataset = SupervisedDataset(
X=dataset.X,
Y=dataset.Y,
Yvar=None,
feature_names=dataset.feature_names,
outcome_names=dataset.outcome_names,
)

formatted_model_inputs = botorch_model_class.construct_inputs(
training_data=dataset, **input_constructor_kwargs
)
Expand Down
20 changes: 4 additions & 16 deletions ax/models/torch/botorch_modular/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,8 @@
)
from botorch.fit import fit_fully_bayesian_model_nuts, fit_gpytorch_mll
from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP
from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
from botorch.models.gp_regression_fidelity import (
FixedNoiseMultiFidelityGP,
SingleTaskMultiFidelityGP,
)
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP
from botorch.models.gp_regression_mixed import MixedSingleTaskGP
from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel, GPyTorchModel
from botorch.models.model import Model, ModelList
Expand Down Expand Up @@ -113,28 +110,19 @@ def choose_model_class(
model_class = MultiTaskGP

# Single-task multi-fidelity cases.
elif search_space_digest.fidelity_features and all_inferred:
model_class = SingleTaskMultiFidelityGP # Unknown observation noise.
elif search_space_digest.fidelity_features:
model_class = FixedNoiseMultiFidelityGP # Known observation noise.
model_class = SingleTaskMultiFidelityGP

# Mixed optimization case. Note that presence of categorical
# features in search space digest indicates that downstream in the
# stack we chose not to perform continuous relaxation on those
# features.
elif search_space_digest.categorical_features:
if not all_inferred:
logger.warning(
"Using `MixedSingleTaskGP` despire the known `Yvar` values. This "
"is a temporary measure while fixed-noise mixed BO is in the works."
)
model_class = MixedSingleTaskGP

# Single-task single-fidelity cases.
elif all_inferred: # Unknown observation noise.
model_class = SingleTaskGP
else:
model_class = FixedNoiseGP # Known observation noise.
model_class = SingleTaskGP

logger.debug(f"Chose BoTorch model class: {model_class}.")
return model_class
Expand Down
Loading

0 comments on commit 3f30af6

Please sign in to comment.