Skip to content

Commit

Permalink
Add target task values to SearchSpaceDigest (#1894)
Browse files Browse the repository at this point in the history
Summary:
Pull Request resolved: #1894

Earlier in this stack we added "target_value" as a required attribute for task parameters, as it has long been for fidelity parameters. The first place in the stack where "target_value" is treated as exclusive to fidelity parameters is in SearchSpaceDigest, so this adjusts the logic there to also track target values for task parameters. It also renames the attribute to make clear that it is not restricted to fidelity parameters.

Reviewed By: saitcakmak

Differential Revision: D49701286

fbshipit-source-id: c3c1dce59e41a27991a0fe858aad3033093eda37
  • Loading branch information
bletham authored and facebook-github-bot committed Oct 6, 2023
1 parent 232a5a8 commit dd10bee
Show file tree
Hide file tree
Showing 20 changed files with 80 additions and 44 deletions.
7 changes: 3 additions & 4 deletions ax/core/search_space.py
Original file line number Diff line number Diff line change
Expand Up @@ -988,9 +988,8 @@ class SearchSpaceDigest:
task parameters.
fidelity_features: A list of parameter indices to be considered as
fidelity parameters.
target_fidelities: A dictionary mapping parameter indices (of fidelity
parameters) to their respective target fidelity value. Only used
when generating candidates.
target_values: A dictionary mapping parameter indices of fidelity or
task parameters to their respective target value.
robust_digest: An optional `RobustSearchSpaceDigest` that carries the
additional attributes if using a `RobustSearchSpace`.
"""
Expand All @@ -1002,7 +1001,7 @@ class SearchSpaceDigest:
discrete_choices: Dict[int, List[Union[int, float]]] = field(default_factory=dict)
task_features: List[int] = field(default_factory=list)
fidelity_features: List[int] = field(default_factory=list)
target_fidelities: Dict[int, Union[int, float]] = field(default_factory=dict)
target_values: Dict[int, Union[int, float]] = field(default_factory=dict)
robust_digest: Optional[RobustSearchSpaceDigest] = None


Expand Down
2 changes: 1 addition & 1 deletion ax/core/tests/test_search_space.py
Original file line number Diff line number Diff line change
Expand Up @@ -399,7 +399,7 @@ def setUp(self) -> None:
"discrete_choices": {1: [0, 1, 2], 2: [0, 0.25, 4.0]},
"task_features": [3],
"fidelity_features": [0],
"target_fidelities": {0: 1.0},
"target_values": {0: 1.0},
"robust_digest": None,
}

Expand Down
12 changes: 6 additions & 6 deletions ax/modelbridge/modelbridge_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,6 +195,7 @@ def extract_search_space_digest(
* Otherwise, its index is added to categorical_features.
* In all cases, the choices are added to discrete_choices.
* The minimum and maximum value are added to the bounds.
* The target_value is added to target_values.
For RangeParameters:
* They're assumed not to be in the log_scale. The Log transform handles this.
Expand All @@ -204,7 +205,7 @@ def extract_search_space_digest(
If a parameter is_fidelity:
* Its target_value is assumed to be numerical.
* The target_value is added to target_fidelities.
* The target_value is added to target_values.
* Its index is added to fidelity_features.
"""
bounds: List[Tuple[Union[int, float], Union[int, float]]] = []
Expand All @@ -213,13 +214,14 @@ def extract_search_space_digest(
discrete_choices: Dict[int, List[Union[int, float]]] = {}
task_features: List[int] = []
fidelity_features: List[int] = []
target_fidelities: Dict[int, Union[int, float]] = {}
target_values: Dict[int, Union[int, float]] = {}

for i, p_name in enumerate(param_names):
p = search_space.parameters[p_name]
if isinstance(p, ChoiceParameter):
if p.is_task:
task_features.append(i)
target_values[i] = checked_cast_to_tuple((int, float), p.target_value)
elif p.is_ordered:
ordinal_features.append(i)
else:
Expand All @@ -239,10 +241,8 @@ def extract_search_space_digest(
else:
raise ValueError(f"Unknown parameter type {type(p)}")
if p.is_fidelity:
if not isinstance(not_none(p.target_value), (int, float)):
raise NotImplementedError("Only numerical target values are supported.")
target_fidelities[i] = checked_cast_to_tuple((int, float), p.target_value)
fidelity_features.append(i)
target_values[i] = checked_cast_to_tuple((int, float), p.target_value)

return SearchSpaceDigest(
feature_names=param_names,
Expand All @@ -252,7 +252,7 @@ def extract_search_space_digest(
discrete_choices=discrete_choices,
task_features=task_features,
fidelity_features=fidelity_features,
target_fidelities=target_fidelities,
target_values=target_values,
robust_digest=extract_robust_digest(
search_space=search_space, param_names=param_names
),
Expand Down
2 changes: 1 addition & 1 deletion ax/modelbridge/tests/test_torch_modelbridge.py
Original file line number Diff line number Diff line change
Expand Up @@ -254,7 +254,7 @@ def test_TorchModelBridge(self, mock_init, dtype=None, device=None) -> None:
self.assertEqual(gen_opt_config.model_gen_options, {"option": "yes"})
self.assertIs(gen_opt_config.rounding_func, torch.round)
self.assertFalse(gen_opt_config.is_moo)
self.assertEqual(gen_args["search_space_digest"].target_fidelities, {})
self.assertEqual(gen_args["search_space_digest"].target_values, {})
self.assertEqual(len(gen_run.arms), 1)
self.assertEqual(gen_run.arms[0].parameters, {"x1": 1.0, "x2": 2.0, "x3": 3.0})
self.assertEqual(gen_run.weights, [1.0])
Expand Down
4 changes: 2 additions & 2 deletions ax/models/tests/test_botorch_kg.py
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,7 @@ def test_KnowledgeGradient_multifidelity(self) -> None:
feature_names=self.feature_names,
bounds=self.bounds,
fidelity_features=[2],
target_fidelities={2: 5.0},
target_values={2: 5.0},
)
model = KnowledgeGradient()
model.fit(
Expand Down Expand Up @@ -234,7 +234,7 @@ def test_KnowledgeGradient_multifidelity(self) -> None:
model.best_point(
search_space_digest=dataclasses.replace(
search_space_digest,
target_fidelities={},
target_values={},
),
torch_opt_config=torch_opt_config,
)
Expand Down
11 changes: 7 additions & 4 deletions ax/models/tests/test_botorch_mes.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,9 @@ def test_MaxValueEntropySearch(self) -> None:
with self.assertRaises(RuntimeError):
model.best_point(
search_space_digest=dataclasses.replace(
self.search_space_digest, target_fidelities={2: 1.0}
self.search_space_digest,
fidelity_features=[2],
target_values={2: 1.0},
),
torch_opt_config=torch_opt_config,
)
Expand Down Expand Up @@ -175,7 +177,8 @@ def test_MaxValueEntropySearch_MultiFidelity(self) -> None:
xbest = model.best_point(
search_space_digest=dataclasses.replace(
search_space_digest,
target_fidelities={2: 5.0},
fidelity_features=[2],
target_values={2: 5.0},
),
torch_opt_config=torch_opt_config,
)
Expand All @@ -196,7 +199,7 @@ def test_MaxValueEntropySearch_MultiFidelity(self) -> None:
model.best_point(
search_space_digest=dataclasses.replace(
search_space_digest,
target_fidelities={2: 1.0},
target_values={2: 1.0},
),
torch_opt_config=dataclasses.replace(
torch_opt_config,
Expand All @@ -210,7 +213,7 @@ def test_MaxValueEntropySearch_MultiFidelity(self) -> None:
n=n,
search_space_digest=dataclasses.replace(
search_space_digest,
target_fidelities={2: 1.0},
target_values={2: 1.0},
),
torch_opt_config=dataclasses.replace(
torch_opt_config,
Expand Down
6 changes: 4 additions & 2 deletions ax/models/tests/test_botorch_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -468,7 +468,8 @@ def test_BotorchModel(
n=n,
search_space_digest=dataclasses.replace(
search_space_digest,
target_fidelities={0: 3.0},
fidelity_features=[0],
target_values={0: 3.0},
),
torch_opt_config=torch_opt_config,
)
Expand Down Expand Up @@ -500,7 +501,8 @@ def test_BotorchModel(
xbest = model.best_point(
search_space_digest=dataclasses.replace(
search_space_digest,
target_fidelities={0: 3.0},
fidelity_features=[0],
target_values={0: 3.0},
),
torch_opt_config=torch_opt_config,
)
Expand Down
11 changes: 8 additions & 3 deletions ax/models/torch/botorch.py
Original file line number Diff line number Diff line change
Expand Up @@ -330,9 +330,9 @@ def gen(
acf_options = options.get(Keys.ACQF_KWARGS, {})
optimizer_options = options.get(Keys.OPTIMIZER_KWARGS, {})

if search_space_digest.target_fidelities:
if search_space_digest.fidelity_features:
raise NotImplementedError(
"target_fidelities not implemented for base BotorchModel"
"Base BotorchModel does not support fidelity_features."
)
X_pending, X_observed = _get_X_pending_and_observed(
Xs=self.Xs,
Expand Down Expand Up @@ -437,6 +437,11 @@ def best_point(
raise NotImplementedError(
"Best observed point is incompatible with MOO problems."
)
target_fidelities = {
k: v
for k, v in search_space_digest.target_values.items()
if k in search_space_digest.fidelity_features
}
return self.best_point_recommender( # pyre-ignore [28]
model=self,
bounds=search_space_digest.bounds,
Expand All @@ -445,7 +450,7 @@ def best_point(
linear_constraints=torch_opt_config.linear_constraints,
fixed_features=torch_opt_config.fixed_features,
model_gen_options=torch_opt_config.model_gen_options,
target_fidelities=search_space_digest.target_fidelities,
target_fidelities=target_fidelities,
)

@copy_doc(TorchModel.cross_validate)
Expand Down
14 changes: 12 additions & 2 deletions ax/models/torch/botorch_kg.py
Original file line number Diff line number Diff line change
Expand Up @@ -164,6 +164,11 @@ def gen(
)
bounds_ = bounds_.transpose(0, 1)

target_fidelities = {
k: v
for k, v in search_space_digest.target_values.items()
if k in search_space_digest.fidelity_features
}
# get acquisition function
acq_function = _instantiate_KG(
model=model,
Expand All @@ -176,7 +181,7 @@ def gen(
seed_inner=seed_inner,
seed_outer=acf_options.get("seed_outer", None),
X_pending=X_pending,
target_fidelities=search_space_digest.target_fidelities,
target_fidelities=target_fidelities,
fidelity_weights=options.get("fidelity_weights"),
current_value=current_value,
cost_intercept=self.cost_intercept,
Expand Down Expand Up @@ -238,6 +243,11 @@ def _get_current_value(
acquisition function' (typically `PosteriorMean` or `qSimpleRegret`), not of
the Knowledge Gradient acquisition function.
"""
target_fidelities = {
k: v
for k, v in search_space_digest.target_values.items()
if k in search_space_digest.fidelity_features
}
best_point_acqf, non_fixed_idcs = get_out_of_sample_best_point_acqf(
model=model,
Xs=self.Xs,
Expand All @@ -247,7 +257,7 @@ def _get_current_value(
seed_inner=seed_inner,
fixed_features=torch_opt_config.fixed_features,
fidelity_features=self.fidelity_features,
target_fidelities=search_space_digest.target_fidelities,
target_fidelities=target_fidelities,
qmc=qmc,
)

Expand Down
7 changes: 6 additions & 1 deletion ax/models/torch/botorch_mes.py
Original file line number Diff line number Diff line change
Expand Up @@ -130,6 +130,11 @@ def gen(
candidate_set = torch.rand(candidate_size, bounds_.size(1))
candidate_set = bounds_[0] + (bounds_[1] - bounds_[0]) * candidate_set

target_fidelities = {
k: v
for k, v in search_space_digest.target_values.items()
if k in search_space_digest.fidelity_features
}
acq_function = _instantiate_MES(
model=model,
candidate_set=candidate_set,
Expand All @@ -139,7 +144,7 @@ def gen(
num_y_samples=num_y_samples,
X_pending=X_pending,
maximize=True if objective_weights[0] == 1 else False,
target_fidelities=search_space_digest.target_fidelities,
target_fidelities=target_fidelities,
fidelity_weights=options.get("fidelity_weights"),
cost_intercept=self.cost_intercept,
)
Expand Down
8 changes: 6 additions & 2 deletions ax/models/torch/botorch_modular/acquisition.py
Original file line number Diff line number Diff line change
Expand Up @@ -273,15 +273,19 @@ def __init__(
if len(self.surrogates) > 1
else {"model": model}
)

target_fidelities = {
k: v
for k, v in search_space_digest.target_values.items()
if k in search_space_digest.fidelity_features
}
input_constructor_kwargs = {
"X_baseline": unique_Xs_observed,
"X_pending": unique_Xs_pending,
"objective_thresholds": objective_thresholds,
"constraints": get_outcome_constraint_transforms(
outcome_constraints=outcome_constraints
),
"target_fidelities": search_space_digest.target_fidelities,
"target_fidelities": target_fidelities,
"bounds": search_space_digest.bounds,
**acqf_model_kwarg,
**model_deps,
Expand Down
4 changes: 2 additions & 2 deletions ax/models/torch/botorch_modular/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -443,11 +443,11 @@ def gen(
acqf_options=self.acquisition_options,
model_gen_options=torch_opt_config.model_gen_options,
)
# update bounds / target fidelities
# update bounds / target values
search_space_digest = dataclasses.replace(
self.search_space_digest,
bounds=search_space_digest.bounds,
target_fidelities=search_space_digest.target_fidelities or {},
target_values=search_space_digest.target_values or {},
)

acqf = self._instantiate_acquisition(
Expand Down
6 changes: 5 additions & 1 deletion ax/models/torch/botorch_modular/multi_fidelity.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,11 @@ def compute_model_dependencies(
raise UnsupportedError(
f"{self.__class__.__name__} does not support risk measures."
)
target_fidelities = search_space_digest.target_fidelities
target_fidelities = {
k: v
for k, v in search_space_digest.target_values.items()
if k in search_space_digest.fidelity_features
}
if not target_fidelities:
raise ValueError(
"Target fidelities are required for {self.__class__.__name__}."
Expand Down
4 changes: 2 additions & 2 deletions ax/models/torch/botorch_moo.py
Original file line number Diff line number Diff line change
Expand Up @@ -246,9 +246,9 @@ def gen(
acf_options = options.get("acquisition_function_kwargs", {})
optimizer_options = options.get("optimizer_kwargs", {})

if search_space_digest.target_fidelities: # untested
if search_space_digest.fidelity_features: # untested
raise NotImplementedError(
"target_fidelities not implemented for base BotorchModel"
"fidelity_features not implemented for base BotorchModel"
)
if (
torch_opt_config.objective_thresholds is not None
Expand Down
4 changes: 2 additions & 2 deletions ax/models/torch/tests/test_acquisition.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,7 @@ def setUp(self) -> None:
self.search_space_digest = SearchSpaceDigest(
feature_names=self.feature_names,
bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
target_fidelities={2: 1.0},
target_values={2: 1.0},
)
self.surrogate.construct(
datasets=self.training_data,
Expand All @@ -126,7 +126,7 @@ def setUp(self) -> None:
search_space_digest=SearchSpaceDigest(
feature_names=self.search_space_digest.feature_names[:1],
bounds=self.search_space_digest.bounds,
target_fidelities=self.search_space_digest.target_fidelities,
target_values=self.search_space_digest.target_values,
),
)

Expand Down
2 changes: 1 addition & 1 deletion ax/models/torch/tests/test_input_transform_argparse.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ def setUp(self) -> None:
discrete_choices={1: [0, 1, 2], 2: [0, 0.25, 4.0]},
task_features=[3],
fidelity_features=[0],
target_fidelities={0: 1.0},
target_values={0: 1.0},
robust_digest=None,
)

Expand Down
2 changes: 1 addition & 1 deletion ax/models/torch/tests/test_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ def setUp(self) -> None:
bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
task_features=[],
fidelity_features=[2],
target_fidelities={1: 1.0},
target_values={1: 1.0},
)
self.metric_names = ["y"]
self.metric_names_for_list_surrogate = ["y1", "y2"]
Expand Down
Loading

0 comments on commit dd10bee

Please sign in to comment.