From 3f30af65b2344077f1ff65eb82339075b13b05f2 Mon Sep 17 00:00:00 2001
From: Sait Cakmak
Date: Wed, 18 Oct 2023 16:53:16 -0700
Subject: [PATCH] Update use cases of deprecated fixed noise models (#1920)

Summary:
Pull Request resolved: https://github.com/facebook/Ax/pull/1920

Updates use cases of fixed noise models that were deprecated in
https://github.com/pytorch/botorch/pull/2052 & elsewhere. The only remaining
usage is `botorch_modular_registry`. I updated these to point to the parent
models, so that they'll be less likely to cause backwards compatibility
issues when we delete the models for good.

Reviewed By: esantorella

Differential Revision: D50431137

fbshipit-source-id: 604090019cfff8bc27ae01341a43d80142ef89c8
---
 ax/modelbridge/tests/test_registry.py        |  6 +-
 ax/modelbridge/tests/test_robust.py          |  4 +-
 ax/models/tests/test_botorch_defaults.py     | 34 ++++++---
 ax/models/tests/test_botorch_model.py        |  8 +-
 ax/models/torch/alebo.py                     |  4 +-
 ax/models/torch/botorch_defaults.py          | 47 ++++--------
 .../input_constructors/covar_modules.py      | 10 +--
 ax/models/torch/botorch_modular/surrogate.py | 21 +-----
 ax/models/torch/botorch_modular/utils.py     | 20 +----
 .../tests/test_covar_modules_argparse.py     | 12 +--
 ax/models/torch/tests/test_model.py          | 17 +++--
 ax/models/torch/tests/test_surrogate.py      | 13 +---
 ax/models/torch/tests/test_utils.py          | 74 +++++++------------
 ax/storage/botorch_modular_registry.py       |  9 ++-
 14 files changed, 109 insertions(+), 170 deletions(-)

diff --git a/ax/modelbridge/tests/test_registry.py b/ax/modelbridge/tests/test_registry.py
index 5ad29d9fa4f..cff0962f1d4 100644
--- a/ax/modelbridge/tests/test_registry.py
+++ b/ax/modelbridge/tests/test_registry.py
@@ -45,7 +45,7 @@
 from botorch.acquisition.monte_carlo import qExpectedImprovement
 from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP
 from botorch.models.fully_bayesian_multitask import SaasFullyBayesianMultiTaskGP
-from botorch.models.gp_regression import FixedNoiseGP
+from botorch.models.gp_regression import SingleTaskGP
 from botorch.models.model_list_gp_regression import ModelListGP
 from botorch.models.multitask import MultiTaskGP
 from botorch.utils.types import DEFAULT
@@ -75,9 +75,9 @@ def test_botorch_modular(self) -> None:
         self.assertEqual(gpei.model.acquisition_class, Acquisition)
         self.assertEqual(gpei.model.acquisition_options, {"best_f": 0.0})
         self.assertIsInstance(gpei.model.surrogates[Keys.AUTOSET_SURROGATE], Surrogate)
-        # FixedNoiseGP should be picked since experiment data has fixed noise.
+        # SingleTaskGP should be picked.
         self.assertIsInstance(
-            gpei.model.surrogates[Keys.AUTOSET_SURROGATE].model, FixedNoiseGP
+            gpei.model.surrogates[Keys.AUTOSET_SURROGATE].model, SingleTaskGP
         )
 
         gr = gpei.gen(n=1)
diff --git a/ax/modelbridge/tests/test_robust.py b/ax/modelbridge/tests/test_robust.py
index af3aa117005..02ab10a5010 100644
--- a/ax/modelbridge/tests/test_robust.py
+++ b/ax/modelbridge/tests/test_robust.py
@@ -26,7 +26,7 @@
 from botorch.acquisition.multi_objective.monte_carlo import (
     qNoisyExpectedHypervolumeImprovement,
 )
-from botorch.models.gp_regression import FixedNoiseGP
+from botorch.models.gp_regression import SingleTaskGP
 
 
 class TestRobust(TestCase):
@@ -46,7 +46,7 @@ def test_robust(
         modelbridge = Models.BOTORCH_MODULAR(
             experiment=exp,
             data=exp.fetch_data(),
-            surrogate=Surrogate(botorch_model_class=FixedNoiseGP),
+            surrogate=Surrogate(botorch_model_class=SingleTaskGP),
             botorch_acqf_class=acqf_class or qNoisyExpectedImprovement,
         )
         trial = (
diff --git a/ax/models/tests/test_botorch_defaults.py b/ax/models/tests/test_botorch_defaults.py
index 1989a6ab935..2476c619fde 100644
--- a/ax/models/tests/test_botorch_defaults.py
+++ b/ax/models/tests/test_botorch_defaults.py
@@ -32,12 +32,16 @@
 )
 from botorch.acquisition.objective import ConstrainedMCObjective
 from botorch.acquisition.penalized import PenalizedMCObjective
-from botorch.models import FixedNoiseGP, SingleTaskGP
+from botorch.models.gp_regression import SingleTaskGP
 from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP
-from botorch.models.multitask import FixedNoiseMultiTaskGP, MultiTaskGP
+from botorch.models.multitask import MultiTaskGP
 from botorch.models.transforms.input import Warp
 from botorch.utils.constraints import get_outcome_constraint_transforms
 from gpytorch.kernels import MaternKernel, ScaleKernel
+from gpytorch.likelihoods.gaussian_likelihood import (
+    FixedNoiseGaussianLikelihood,
+    GaussianLikelihood,
+)
 from gpytorch.module import Module
 from gpytorch.priors import GammaPrior
 from gpytorch.priors.lkj_prior import LKJCovariancePrior
@@ -53,19 +57,23 @@ def test_get_model(self) -> None:
         unknown_var = torch.tensor([float("nan"), float("nan")]).unsqueeze(-1)
         model = _get_model(x, y, unknown_var, None)
         self.assertIsInstance(model, SingleTaskGP)
+        self.assertIsInstance(model.likelihood, GaussianLikelihood)
         model = _get_model(X=x, Y=y, Yvar=var)
-        self.assertIsInstance(model, FixedNoiseGP)
+        self.assertIsInstance(model, SingleTaskGP)
+        self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
         self.assertEqual(
             model.covar_module.base_kernel.lengthscale_prior.concentration, 3.0
         )
         self.assertEqual(model.covar_module.base_kernel.lengthscale_prior.rate, 6.0)
         model = _get_model(X=x, Y=y, Yvar=unknown_var, task_feature=1)
         self.assertIs(type(model), MultiTaskGP)  # Don't accept subclasses.
+        self.assertIsInstance(model.likelihood, GaussianLikelihood)
         model = _get_model(X=x, Y=y, Yvar=var, task_feature=1)
-        self.assertIsInstance(model, FixedNoiseMultiTaskGP)
+        self.assertIsInstance(model, MultiTaskGP)
+        self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
         model = _get_model(X=x, Y=y, Yvar=partial_var.clone(), task_feature=1)
-        self.assertIsInstance(model, FixedNoiseMultiTaskGP)
+        self.assertIsInstance(model, MultiTaskGP)
         model = _get_model(X=x, Y=y, Yvar=partial_var.clone(), task_feature=1, rank=1)
         self.assertEqual(model._rank, 1)
         with self.assertRaises(ValueError):
@@ -155,7 +163,7 @@ def test_get_model(self) -> None:
             }
         }
         model = _get_model(X=x, Y=y, Yvar=var, **deepcopy(kwargs6))  # pyre-ignore
-        self.assertIsInstance(model, FixedNoiseGP)
+        self.assertIsInstance(model, SingleTaskGP)
         self.assertEqual(
             model.covar_module.base_kernel.lengthscale_prior.concentration, 12.0
         )
@@ -168,6 +176,7 @@ def test_get_model(self) -> None:
             **deepcopy(kwargs6),  # pyre-ignore
         )
         self.assertIs(type(model), MultiTaskGP)
+        self.assertIsInstance(model.likelihood, GaussianLikelihood)
         self.assertEqual(
             model.covar_module.base_kernel.lengthscale_prior.concentration, 12.0
         )
@@ -179,7 +188,8 @@ def test_get_model(self) -> None:
         model = _get_model(
             X=x, Y=y, Yvar=var, task_feature=1, **deepcopy(kwargs6)  # pyre-ignore
         )
-        self.assertIsInstance(model, FixedNoiseMultiTaskGP)
+        self.assertIsInstance(model, MultiTaskGP)
+        self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
         self.assertEqual(
             model.covar_module.base_kernel.lengthscale_prior.concentration, 12.0
         )
@@ -201,7 +211,8 @@ def test_get_model(self) -> None:
         model = _get_model(
             X=x, Y=y, Yvar=var, covar_module=covar_module, **kwargs7  # pyre-ignore
         )
-        self.assertIsInstance(model, FixedNoiseGP)
+        self.assertIsInstance(model, SingleTaskGP)
+        self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
         self.assertEqual(covar_module, model.covar_module)
 
     @mock.patch("ax.models.torch.botorch_defaults._get_model", wraps=_get_model)
@@ -289,7 +300,9 @@ def test_pass_customized_prior(self, get_model_mock: Mock) -> None:
             refit_model=False,
             **kwarg,  # pyre-ignore
         )
-        self.assertIs(type(model), FixedNoiseGP)
+        self.assertIsInstance(model, SingleTaskGP)
+        self.assertIsInstance(model.likelihood, FixedNoiseGaussianLikelihood)
+
         self.assertEqual(
             model.covar_module.base_kernel.lengthscale_prior.concentration,
             12.0,
         )
@@ -310,7 +323,8 @@ def test_pass_customized_prior(self, get_model_mock: Mock) -> None:
             **kwarg,  # pyre-ignore
         )
         for m in model.models:
-            self.assertIs(type(m), FixedNoiseMultiTaskGP)
+            self.assertIs(type(m), MultiTaskGP)
+            self.assertIsInstance(m.likelihood, FixedNoiseGaussianLikelihood)
             self.assertEqual(
                 m.covar_module.base_kernel.lengthscale_prior.concentration,
                 12.0,
             )
diff --git a/ax/models/tests/test_botorch_model.py b/ax/models/tests/test_botorch_model.py
index 88a6509dedb..a48e9f14fc7 100644
--- a/ax/models/tests/test_botorch_model.py
+++ b/ax/models/tests/test_botorch_model.py
@@ -30,11 +30,12 @@
 from ax.utils.testing.mock import fast_botorch_optimize
 from ax.utils.testing.torch_stubs import get_torch_test_data
 from botorch.acquisition.utils import get_infeasible_cost
-from botorch.models import FixedNoiseGP, ModelListGP, SingleTaskGP
+from botorch.models import ModelListGP, SingleTaskGP
 from botorch.models.transforms.input import Warp
 from botorch.utils.datasets import SupervisedDataset
 from botorch.utils.objective import get_objective_weights_transform
 from gpytorch.likelihoods import _GaussianLikelihoodBase
+from gpytorch.likelihoods.gaussian_likelihood import FixedNoiseGaussianLikelihood
 from gpytorch.mlls import ExactMarginalLogLikelihood, LeaveOneOutPseudoLikelihood
 from gpytorch.priors import GammaPrior
 from gpytorch.priors.lkj_prior import LKJCovariancePrior
@@ -283,7 +284,7 @@ def test_BotorchModel(
             else:
                 self.assertFalse(hasattr(m, "input_transform"))
 
-        # Test batched multi-output FixedNoiseGP
+        # Test batched multi-output SingleTaskGP
         datasets_block = [
             SupervisedDataset(
                 X=Xs1[0],
@@ -324,7 +325,8 @@ def test_BotorchModel(
             models = [model.model]
         Ys = [Ys1[0], Ys2[0]]
         for i, m in enumerate(models):
-            self.assertIsInstance(m, FixedNoiseGP)
+            self.assertIsInstance(m, SingleTaskGP)
+            self.assertIsInstance(m.likelihood, FixedNoiseGaussianLikelihood)
             expected_train_inputs = Xs1[0]
 
             if not use_input_warping:
diff --git a/ax/models/torch/alebo.py b/ax/models/torch/alebo.py
index 84ead0d65ec..0d2c6b6e91d 100644
--- a/ax/models/torch/alebo.py
+++ b/ax/models/torch/alebo.py
@@ -41,7 +41,7 @@
 from botorch.acquisition.acquisition import AcquisitionFunction
 from botorch.acquisition.analytic import ExpectedImprovement
 from botorch.acquisition.objective import PosteriorTransform
-from botorch.models.gp_regression import FixedNoiseGP
+from botorch.models.gp_regression import SingleTaskGP
 from botorch.models.gpytorch import GPyTorchModel
 from botorch.models.model_list_gp_regression import ModelListGP
 from botorch.optim.fit import fit_gpytorch_mll_scipy
@@ -310,7 +310,7 @@ def forward(
         return postprocess_rbf(diff)
 
 
-class ALEBOGP(FixedNoiseGP):
+class ALEBOGP(SingleTaskGP):
     """The GP for ALEBO.
 
     Uses the Mahalanobis kernel defined in ALEBOKernel, along with a
diff --git a/ax/models/torch/botorch_defaults.py b/ax/models/torch/botorch_defaults.py
index 4bb56942c04..f39e2833d07 100644
--- a/ax/models/torch/botorch_defaults.py
+++ b/ax/models/torch/botorch_defaults.py
@@ -22,12 +22,12 @@
 from botorch.acquisition.utils import get_infeasible_cost
 from botorch.exceptions.errors import UnsupportedError
 from botorch.fit import fit_gpytorch_mll
-from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
+from botorch.models.gp_regression import SingleTaskGP
 from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP
 from botorch.models.gpytorch import GPyTorchModel
 from botorch.models.model import Model
 from botorch.models.model_list_gp_regression import ModelListGP
-from botorch.models.multitask import FixedNoiseMultiTaskGP, MultiTaskGP
+from botorch.models.multitask import MultiTaskGP
 from botorch.models.transforms.input import Warp
 from botorch.optim.optimize import optimize_acqf
 from botorch.utils import (
@@ -761,19 +761,11 @@ def _get_model(
             input_transform=warp_tf,
             **kwargs,
         )
-    elif task_feature is None and all_nan_Yvar:
-        gp = SingleTaskGP(
-            train_X=X,
-            train_Y=Y,
-            covar_module=covar_module,
-            input_transform=warp_tf,
-            **kwargs,
-        )
     elif task_feature is None:
-        gp = FixedNoiseGP(
+        gp = SingleTaskGP(
             train_X=X,
             train_Y=Y,
-            train_Yvar=Yvar,
+            train_Yvar=None if all_nan_Yvar else Yvar,
             covar_module=covar_module,
             input_transform=warp_tf,
             **kwargs,
         )
@@ -799,27 +791,16 @@ def _get_model(
                 f"your prior type was {prior_type}."
             )
-        if all_nan_Yvar:
-            gp = MultiTaskGP(
-                train_X=X,
-                train_Y=Y,
-                task_feature=task_feature,
-                covar_module=covar_module,
-                rank=kwargs.get("rank"),
-                task_covar_prior=task_covar_prior,
-                input_transform=warp_tf,
-            )
-        else:
-            gp = FixedNoiseMultiTaskGP(
-                train_X=X,
-                train_Y=Y,
-                train_Yvar=Yvar,
-                task_feature=task_feature,
-                covar_module=covar_module,
-                rank=kwargs.get("rank"),
-                task_covar_prior=task_covar_prior,
-                input_transform=warp_tf,
-            )
+        gp = MultiTaskGP(
+            train_X=X,
+            train_Y=Y,
+            train_Yvar=None if all_nan_Yvar else Yvar,
+            task_feature=task_feature,
+            covar_module=covar_module,
+            rank=kwargs.get("rank"),
+            task_covar_prior=task_covar_prior,
+            input_transform=warp_tf,
+        )
     return gp
diff --git a/ax/models/torch/botorch_modular/input_constructors/covar_modules.py b/ax/models/torch/botorch_modular/input_constructors/covar_modules.py
index 4907f97545d..319a6fc09b3 100644
--- a/ax/models/torch/botorch_modular/input_constructors/covar_modules.py
+++ b/ax/models/torch/botorch_modular/input_constructors/covar_modules.py
@@ -12,10 +12,8 @@
 from ax.models.torch.botorch_modular.kernels import ScaleMaternKernel
 from ax.utils.common.typeutils import _argparse_type_encoder
 from botorch.models import MultiTaskGP
-from botorch.models.gp_regression import FixedNoiseGP
-
+from botorch.models.gp_regression import SingleTaskGP
 from botorch.models.model import Model
-from botorch.models.multitask import FixedNoiseMultiTaskGP
 from botorch.utils.datasets import SupervisedDataset
 from botorch.utils.dispatcher import Dispatcher
 from botorch.utils.types import _DefaultType, DEFAULT
@@ -101,16 +99,14 @@ def _covar_module_argparse_scale_matern(
 
         A dictionary with covar module kwargs.
     """
-    if issubclass(botorch_model_class, FixedNoiseMultiTaskGP) or issubclass(
-        botorch_model_class, MultiTaskGP
-    ):
+    if issubclass(botorch_model_class, MultiTaskGP):
 
         if ard_num_dims is DEFAULT:
             ard_num_dims = dataset.X.shape[-1] - 1
 
         if batch_shape is DEFAULT:
             batch_shape = torch.Size([])
 
-    if issubclass(botorch_model_class, FixedNoiseGP):
+    if issubclass(botorch_model_class, SingleTaskGP):
         if ard_num_dims is DEFAULT:
             ard_num_dims = dataset.X.shape[-1]
diff --git a/ax/models/torch/botorch_modular/surrogate.py b/ax/models/torch/botorch_modular/surrogate.py
index 5e85e412d31..2ce0af804d8 100644
--- a/ax/models/torch/botorch_modular/surrogate.py
+++ b/ax/models/torch/botorch_modular/surrogate.py
@@ -7,7 +7,6 @@
 from __future__ import annotations
 
 import inspect
-import warnings
 from copy import deepcopy
 from logging import Logger
 from typing import Any, Dict, List, Optional, Tuple, Type
@@ -15,7 +14,7 @@
 import torch
 from ax.core.search_space import SearchSpaceDigest
 from ax.core.types import TCandidateMetadata
-from ax.exceptions.core import AxWarning, UnsupportedError, UserInputError
+from ax.exceptions.core import UnsupportedError, UserInputError
 from ax.models.model_utils import best_in_sample_point
 from ax.models.torch.botorch_modular.input_constructors.covar_modules import (
     covar_module_argparse,
@@ -321,24 +320,6 @@ def _construct_model(
             "categorical_features": categorical_features,
         }
         botorch_model_class_args = inspect.getfullargspec(botorch_model_class).args
-
-        # Temporary workaround to allow models to consume data from
-        # `FixedNoiseDataset`s even if they don't accept variance observations.
-        if "train_Yvar" not in botorch_model_class_args and dataset.Yvar is not None:
-            warnings.warn(
-                f"Provided model class {botorch_model_class} does not accept "
-                "`train_Yvar` argument, but received dataset with `Yvar`. Ignoring "
-                "variance observations.",
-                AxWarning,
-            )
-            dataset = SupervisedDataset(
-                X=dataset.X,
-                Y=dataset.Y,
-                Yvar=None,
-                feature_names=dataset.feature_names,
-                outcome_names=dataset.outcome_names,
-            )
-
         formatted_model_inputs = botorch_model_class.construct_inputs(
             training_data=dataset, **input_constructor_kwargs
         )
diff --git a/ax/models/torch/botorch_modular/utils.py b/ax/models/torch/botorch_modular/utils.py
index 2d6f73f3915..df90a286fa2 100644
--- a/ax/models/torch/botorch_modular/utils.py
+++ b/ax/models/torch/botorch_modular/utils.py
@@ -23,11 +23,8 @@
 )
 from botorch.fit import fit_fully_bayesian_model_nuts, fit_gpytorch_mll
 from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP
-from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
-from botorch.models.gp_regression_fidelity import (
-    FixedNoiseMultiFidelityGP,
-    SingleTaskMultiFidelityGP,
-)
+from botorch.models.gp_regression import SingleTaskGP
+from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP
 from botorch.models.gp_regression_mixed import MixedSingleTaskGP
 from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel, GPyTorchModel
 from botorch.models.model import Model, ModelList
@@ -113,28 +110,19 @@ def choose_model_class(
         model_class = MultiTaskGP
 
     # Single-task multi-fidelity cases.
-    elif search_space_digest.fidelity_features and all_inferred:
-        model_class = SingleTaskMultiFidelityGP  # Unknown observation noise.
     elif search_space_digest.fidelity_features:
-        model_class = FixedNoiseMultiFidelityGP  # Known observation noise.
+        model_class = SingleTaskMultiFidelityGP
 
     # Mixed optimization case. Note that presence of categorical
    # features in search space digest indicates that downstream in the
     # stack we chose not to perform continuous relaxation on those
     # features.
     elif search_space_digest.categorical_features:
-        if not all_inferred:
-            logger.warning(
-                "Using `MixedSingleTaskGP` despire the known `Yvar` values. This "
-                "is a temporary measure while fixed-noise mixed BO is in the works."
-            )
         model_class = MixedSingleTaskGP
 
     # Single-task single-fidelity cases.
-    elif all_inferred:  # Unknown observation noise.
-        model_class = SingleTaskGP
     else:
-        model_class = FixedNoiseGP  # Known observation noise.
+        model_class = SingleTaskGP
 
     logger.debug(f"Chose BoTorch model class: {model_class}.")
     return model_class
diff --git a/ax/models/torch/tests/test_covar_modules_argparse.py b/ax/models/torch/tests/test_covar_modules_argparse.py
index fbe1a4c04e7..d78c0fdb46d 100644
--- a/ax/models/torch/tests/test_covar_modules_argparse.py
+++ b/ax/models/torch/tests/test_covar_modules_argparse.py
@@ -14,8 +14,8 @@
 )
 from ax.models.torch.botorch_modular.kernels import ScaleMaternKernel
 from ax.utils.common.testutils import TestCase
-from botorch.models.gp_regression import FixedNoiseGP
-from botorch.models.multitask import FixedNoiseMultiTaskGP
+from botorch.models.gp_regression import SingleTaskGP
+from botorch.models.multitask import MultiTaskGP
 from botorch.utils.datasets import SupervisedDataset
 from gpytorch.kernels.kernel import Kernel
 from gpytorch.priors import GammaPrior
@@ -64,7 +64,7 @@ def _argparse(covar_module_class: Kernel) -> None:
 
     def test_argparse_kernel(self) -> None:
         covar_module_kwargs = covar_module_argparse(
             Kernel,
-            botorch_model_class=FixedNoiseGP,
+            botorch_model_class=SingleTaskGP,
             dataset=self.dataset,
         )
@@ -72,7 +72,7 @@ def test_argparse_kernel(self) -> None:
 
         covar_module_kwargs = covar_module_argparse(
             Kernel,
-            botorch_model_class=FixedNoiseGP,
+            botorch_model_class=SingleTaskGP,
             dataset=self.dataset,
             ard_num_dims=19,
             batch_shape=torch.Size([10]),
@@ -102,7 +102,7 @@ def test_argparse_scalematern_kernel(self) -> None:
             },
         ]
 
-        for i, botorch_model_class in enumerate([FixedNoiseGP, FixedNoiseMultiTaskGP]):
+        for i, botorch_model_class in enumerate([SingleTaskGP, MultiTaskGP]):
 
             covar_module_kwargs = covar_module_argparse(
                 ScaleMaternKernel,
@@ -143,7 +143,7 @@ def test_argparse_scalematern_kernel(self) -> None:
         )
         covar_module_kwargs = covar_module_argparse(
             ScaleMaternKernel,
-            botorch_model_class=FixedNoiseGP,
+            botorch_model_class=SingleTaskGP,
             dataset=dataset,
             lengthscale_prior=GammaPrior(6.0, 3.0),
             outputscale_prior=GammaPrior(2, 0.15),
diff --git a/ax/models/torch/tests/test_model.py b/ax/models/torch/tests/test_model.py
index f97a7c902b1..c6a1bb3ef6b 100644
--- a/ax/models/torch/tests/test_model.py
+++ b/ax/models/torch/tests/test_model.py
@@ -42,12 +42,13 @@
 from botorch.acquisition.multi_objective.objective import WeightedMCMultiOutputObjective
 from botorch.acquisition.objective import GenericMCObjective
 from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP
-from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
-from botorch.models.gp_regression_fidelity import FixedNoiseMultiFidelityGP
+from botorch.models.gp_regression import SingleTaskGP
+from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP
 from botorch.models.model import ModelList
 from botorch.sampling.normal import SobolQMCNormalSampler
 from botorch.utils.constraints import get_outcome_constraint_transforms
 from botorch.utils.datasets import SupervisedDataset
+from gpytorch.likelihoods.gaussian_likelihood import FixedNoiseGaussianLikelihood
 from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
 
 
@@ -790,8 +791,8 @@ def test_model_list_choice(self, _) -> None:  # , mock_extract_training_data):
         )
         for submodel in model_list.models:
             # There are fidelity features and nonempty Yvars, so
-            # fixed noise MFGP should be chosen.
-            self.assertIsInstance(submodel, FixedNoiseMultiFidelityGP)
+            # MFGP should be chosen.
+            self.assertIsInstance(submodel, SingleTaskMultiFidelityGP)
 
     @mock.patch(
         f"{ACQUISITION_PATH}.Acquisition.optimize",
@@ -819,7 +820,7 @@ def test_MOO(self, _) -> None:
             candidate_metadata=self.candidate_metadata,
         )
         self.assertIsInstance(
-            model.surrogates[Keys.AUTOSET_SURROGATE].model, FixedNoiseGP
+            model.surrogates[Keys.AUTOSET_SURROGATE].model, SingleTaskGP
         )
         subset_outcome_constraints = (
             # model is subset since last output is not used
@@ -848,7 +849,8 @@ def test_MOO(self, _) -> None:
         self.assertIs(model.botorch_acqf_class, qNoisyExpectedHypervolumeImprovement)
         mock_input_constructor.assert_called_once()
         m = ckwargs["model"]
-        self.assertIsInstance(m, FixedNoiseGP)
+        self.assertIsInstance(m, SingleTaskGP)
+        self.assertIsInstance(m.likelihood, FixedNoiseGaussianLikelihood)
         self.assertEqual(m.num_outputs, 2)
         training_data = ckwargs["training_data"]
         self.assertIsNotNone(training_data.Yvar)
@@ -954,7 +956,8 @@ def test_MOO(self, _) -> None:
         self.assertTrue(torch.equal(oc[0], outcome_constraints[0]))
         self.assertTrue(torch.equal(oc[1], outcome_constraints[1]))
         m = ckwargs["model"]
-        self.assertIsInstance(m, FixedNoiseGP)
+        self.assertIsInstance(m, SingleTaskGP)
+        self.assertIsInstance(m.likelihood, FixedNoiseGaussianLikelihood)
         self.assertEqual(m.num_outputs, 2)
         self.assertIn("objective_thresholds", gen_results.gen_metadata)
         obj_t = gen_results.gen_metadata["objective_thresholds"]
diff --git a/ax/models/torch/tests/test_surrogate.py b/ax/models/torch/tests/test_surrogate.py
index 69c61606ef9..e2b6725f9f7 100644
--- a/ax/models/torch/tests/test_surrogate.py
+++ b/ax/models/torch/tests/test_surrogate.py
@@ -24,12 +24,7 @@
 from ax.utils.testing.torch_stubs import get_torch_test_data
 from ax.utils.testing.utils import generic_equals
 from botorch.acquisition.monte_carlo import qSimpleRegret
-from botorch.models import (
-    FixedNoiseGP,
-    ModelListGP,
-    SaasFullyBayesianSingleTaskGP,
-    SingleTaskGP,
-)
+from botorch.models import ModelListGP, SaasFullyBayesianSingleTaskGP, SingleTaskGP
 from botorch.models.deterministic import GenericDeterministicModel
 from botorch.models.fully_bayesian_multitask import SaasFullyBayesianMultiTaskGP
 from botorch.models.gp_regression_mixed import MixedSingleTaskGP
@@ -942,10 +937,10 @@ def test_fit(
             Surrogate(botorch_model_class=SaasFullyBayesianSingleTaskGP),
             Surrogate(botorch_model_class=SaasFullyBayesianMultiTaskGP),
             Surrogate(  # Batch model
-                botorch_model_class=FixedNoiseGP, mll_class=ExactMarginalLogLikelihood
+                botorch_model_class=SingleTaskGP, mll_class=ExactMarginalLogLikelihood
             ),
             Surrogate(  # ModelListGP
-                botorch_model_class=FixedNoiseGP,
+                botorch_model_class=SingleTaskGP,
                 mll_class=ExactMarginalLogLikelihood,
                 allow_batched_models=False,
             ),
@@ -987,7 +982,7 @@ def test_fit(
             elif i == 3:
                 self.assertEqual(mock_MLL.call_count, 1)
                 self.assertEqual(mock_fit_gpytorch.call_count, 1)
-                self.assertTrue(isinstance(surrogate.model, FixedNoiseGP))
+                self.assertTrue(isinstance(surrogate.model, SingleTaskGP))
                 mock_state_dict.reset_mock()
                 mock_MLL.reset_mock()
                 mock_fit_gpytorch.reset_mock()
diff --git a/ax/models/torch/tests/test_utils.py b/ax/models/torch/tests/test_utils.py
index fbfe39028b7..a4c2d10a1d1 100644
--- a/ax/models/torch/tests/test_utils.py
+++ b/ax/models/torch/tests/test_utils.py
@@ -31,11 +31,8 @@
     qNoisyExpectedHypervolumeImprovement,
 )
 from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP
-from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
-from botorch.models.gp_regression_fidelity import (
-    FixedNoiseMultiFidelityGP,
-    SingleTaskMultiFidelityGP,
-)
+from botorch.models.gp_regression import SingleTaskGP
+from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP
 from botorch.models.gp_regression_mixed import MixedSingleTaskGP
 from botorch.models.model import ModelList
 from botorch.models.multitask import MultiTaskGP
@@ -106,30 +103,19 @@ def test_choose_model_class_fidelity_features(self) -> None:
                 fidelity_features=[1],
             ),
         )
-        # With fidelity features and unknown variances, use SingleTaskMultiFidelityGP.
-        self.assertEqual(
-            SingleTaskMultiFidelityGP,
-            choose_model_class(
-                datasets=self.supervised_datasets,
-                search_space_digest=SearchSpaceDigest(
-                    feature_names=[],
-                    bounds=[],
-                    fidelity_features=[2],
-                ),
-            ),
-        )
-        # With fidelity features and known variances, use FixedNoiseMultiFidelityGP.
-        self.assertEqual(
-            FixedNoiseMultiFidelityGP,
-            choose_model_class(
-                datasets=self.fixed_noise_datasets,
-                search_space_digest=SearchSpaceDigest(
-                    feature_names=[],
-                    bounds=[],
-                    fidelity_features=[2],
+        # With fidelity features, use SingleTaskMultiFidelityGP.
+        for ds in [self.supervised_datasets, self.fixed_noise_datasets]:
+            self.assertEqual(
+                SingleTaskMultiFidelityGP,
+                choose_model_class(
+                    datasets=ds,
+                    search_space_digest=SearchSpaceDigest(
+                        feature_names=[],
+                        bounds=[],
+                        fidelity_features=[2],
+                    ),
                 ),
-            ),
-        )
+            )
 
     def test_choose_model_class_task_features(self) -> None:
         # Only a single task feature can be used.
@@ -179,28 +165,18 @@ def test_choose_model_class(self) -> None:
                 bounds=[],
             ),
         )
-        # Without fidelity/task features but with Yvar specifications, use FixedNoiseGP.
-        self.assertEqual(
-            FixedNoiseGP,
-            choose_model_class(
-                datasets=self.fixed_noise_datasets,
-                search_space_digest=SearchSpaceDigest(
-                    feature_names=[],
-                    bounds=[],
-                ),
-            ),
-        )
-        # W/out fidelity/task features and w/out Yvar specifications, use SingleTaskGP.
-        self.assertEqual(
-            SingleTaskGP,
-            choose_model_class(
-                datasets=self.supervised_datasets,
-                search_space_digest=SearchSpaceDigest(
-                    feature_names=[],
-                    bounds=[],
+        # Without fidelity/task features, use SingleTaskGP.
+        for ds in [self.fixed_noise_datasets, self.supervised_datasets]:
+            self.assertEqual(
+                SingleTaskGP,
+                choose_model_class(
+                    datasets=ds,
+                    search_space_digest=SearchSpaceDigest(
+                        feature_names=[],
+                        bounds=[],
+                    ),
                 ),
-            ),
-        )
+            )
 
     def test_choose_botorch_acqf_class(self) -> None:
         self.assertEqual(qLogNoisyExpectedImprovement, choose_botorch_acqf_class())
diff --git a/ax/storage/botorch_modular_registry.py b/ax/storage/botorch_modular_registry.py
index 6ed7325de20..1725acd8bb3 100644
--- a/ax/storage/botorch_modular_registry.py
+++ b/ax/storage/botorch_modular_registry.py
@@ -95,9 +95,12 @@
 Mapping of BoTorch `Model` classes to class name strings.
 """
 MODEL_REGISTRY: Dict[Type[Model], str] = {
-    FixedNoiseGP: "FixedNoiseGP",
-    FixedNoiseMultiFidelityGP: "FixedNoiseMultiFidelityGP",
-    FixedNoiseMultiTaskGP: "FixedNoiseMultiTaskGP",
+    # NOTE: Fixed noise models are deprecated. They point to their
+    # supported parent classes, so that we can reap them with minimal
+    # concern for backwards compatibility when the time comes.
+    FixedNoiseGP: "SingleTaskGP",
+    FixedNoiseMultiFidelityGP: "SingleTaskMultiFidelityGP",
+    FixedNoiseMultiTaskGP: "MultiTaskGP",
     MixedSingleTaskGP: "MixedSingleTaskGP",
     ModelListGP: "ModelListGP",
     MultiTaskGP: "MultiTaskGP",
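
Illustrative sketch (not part of the commit): the migration pattern this diff applies throughout, assuming a BoTorch release that includes pytorch/botorch#2052, where `SingleTaskGP` (and `MultiTaskGP`) accept `train_Yvar` directly and the fixed-noise subclasses are deprecated. The tensors below are made-up toy data for illustration only.

    import torch
    from botorch.models.gp_regression import SingleTaskGP
    from gpytorch.likelihoods.gaussian_likelihood import (
        FixedNoiseGaussianLikelihood,
        GaussianLikelihood,
    )

    # Toy training data (illustrative only).
    train_X = torch.rand(10, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    train_Yvar = 0.01 * torch.ones_like(train_Y)

    # Before: FixedNoiseGP(train_X, train_Y, train_Yvar)  -- deprecated.
    # After: pass the observed noise to the parent class directly.
    model = SingleTaskGP(train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar)
    assert isinstance(model.likelihood, FixedNoiseGaussianLikelihood)

    # With unknown observation noise, omit train_Yvar and the noise is inferred.
    model_inferred = SingleTaskGP(train_X=train_X, train_Y=train_Y)
    assert isinstance(model_inferred.likelihood, GaussianLikelihood)

The same pattern is why the `MODEL_REGISTRY` entries above can map the deprecated class names onto their parent classes without changing stored configurations.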