From 2516fa06d837ed59e468c6628108f6306cefb14c Mon Sep 17 00:00:00 2001
From: Elizabeth Santorella
Date: Tue, 17 Dec 2024 15:50:15 -0800
Subject: [PATCH] Introduce BenchmarkTimeVaryingMetric for non-map metrics that vary over time (#3184)

Summary:
**Context**: There are situations where a metric is not a MapMetric, but its
value changes over time.

**This diff**: Introduces `BenchmarkTimeVaryingMetric`, which is not a
MapMetric, can consume data with multiple time steps, is available while
running, and requires a backend simulator.

Differential Revision: D67224949
---
 ax/benchmark/benchmark_metric.py            | 35 +++++++++----
 ax/benchmark/tests/test_benchmark_metric.py | 58 +++++++++++++++------
 2 files changed, 68 insertions(+), 25 deletions(-)

diff --git a/ax/benchmark/benchmark_metric.py b/ax/benchmark/benchmark_metric.py
index 440ad43ea3a..e8f0e71aef6 100644
--- a/ax/benchmark/benchmark_metric.py
+++ b/ax/benchmark/benchmark_metric.py
@@ -8,11 +8,11 @@
 """
 Metric classes for Ax benchmarking.
 
-There are two Metric classes:
-- `BenchmarkMetric`: For when outputs should be `Data` (not `MapData`) and data
+There are three Metric classes:
+- `BenchmarkMapMetric`: A `MapMetric` (outputting `MapData`), available while running.
+- `BenchmarkMetric`: A non-Map metric whose data
   is not available while running.
-- `BenchmarkMapMetric`: For when outputs should be `MapData` (not `Data`) and
-  data is available while running.
+- `BenchmarkTimeVaryingMetric`: A non-Map metric that is available while running.
 """
 
 from abc import abstractmethod
@@ -133,8 +133,10 @@ def _df_to_result(self, df: DataFrame) -> MetricFetchResult:
 
 class BenchmarkMetric(BenchmarkMetricBase):
     """
-    Metric for benchmarking that produces `Data` and is not available while
-    running.
+    Non-map Metric for benchmarking that is not available while running.
+
+    It cannot process data with multiple time steps, as it would return only
+    one value (the value at completion time) regardless of the number of steps.
     """
 
     def _df_to_result(self, df: DataFrame) -> MetricFetchResult:
@@ -147,12 +149,27 @@ def _df_to_result(self, df: DataFrame) -> MetricFetchResult:
         return Ok(value=Data(df=df.drop(columns=["step"])))
 
 
-class BenchmarkMapMetric(MapMetric, BenchmarkMetricBase):
+class BenchmarkTimeVaryingMetric(BenchmarkMetricBase):
     """
-    Metric for benchmarking that produces `Data` and is available while
-    running.
+    Non-Map Metric for benchmarking that is available while running.
+
+    It can produce different values at different times depending on when it is
+    called, using the `time` on a `BackendSimulator`.
     """
 
+    @classmethod
+    def is_available_while_running(cls) -> bool:
+        return True
+
+    def _df_to_result(self, df: DataFrame) -> MetricFetchResult:
+        return Ok(
+            value=Data(df=df[df["step"] == df["step"].max()].drop(columns=["step"]))
+        )
+
+
+class BenchmarkMapMetric(MapMetric, BenchmarkMetricBase):
+    """MapMetric for benchmarking. It is available while running."""
+
     # pyre-fixme: Inconsistent override [15]: `map_key_info` overrides attribute
     # defined in `MapMetric` inconsistently. Type `MapKeyInfo[int]` is not a
     # subtype of the overridden attribute `MapKeyInfo[float]`
diff --git a/ax/benchmark/tests/test_benchmark_metric.py b/ax/benchmark/tests/test_benchmark_metric.py
index d26c455f7a8..a2cda6b4d96 100644
--- a/ax/benchmark/tests/test_benchmark_metric.py
+++ b/ax/benchmark/tests/test_benchmark_metric.py
@@ -13,6 +13,8 @@
     _get_no_metadata_msg,
     BenchmarkMapMetric,
     BenchmarkMetric,
+    BenchmarkMetricBase,
+    BenchmarkTimeVaryingMetric,
 )
 from ax.benchmark.benchmark_trial_metadata import BenchmarkTrialMetadata
 from ax.core.arm import Arm
@@ -113,6 +115,10 @@ def setUp(self) -> None:
             name: BenchmarkMetric(name=name, lower_is_better=True)
             for name in self.outcome_names
         }
+        self.tv_metrics = {
+            name: BenchmarkTimeVaryingMetric(name=name, lower_is_better=True)
+            for name in self.outcome_names
+        }
         self.map_metrics = {
             name: BenchmarkMapMetric(name=name, lower_is_better=True)
             for name in self.outcome_names
@@ -125,9 +131,13 @@ def test_available_while_running(self) -> None:
         self.assertTrue(self.map_metrics["test_metric1"].is_available_while_running())
         self.assertTrue(BenchmarkMapMetric.is_available_while_running())
 
+        self.assertTrue(self.tv_metrics["test_metric1"].is_available_while_running())
+        self.assertTrue(BenchmarkTimeVaryingMetric.is_available_while_running())
+
     def test_exceptions(self) -> None:
         for metric in [
             self.metrics["test_metric1"],
+            self.tv_metrics["test_metric1"],
             self.map_metrics["test_metric1"],
         ]:
             with self.subTest(
@@ -153,7 +163,7 @@ def test_exceptions(self) -> None:
                 metric.fetch_trial_data(trial, foo="bar")
 
     def _test_fetch_trial_data_one_time_step(
-        self, batch: bool, metric: BenchmarkMetric | BenchmarkMapMetric
+        self, batch: bool, metric: BenchmarkMetricBase
     ) -> None:
         trial = get_test_trial(batch=batch, has_simulator=True)
         df1 = assert_is_instance(metric.fetch_trial_data(trial=trial).value, Data).df
@@ -182,23 +192,30 @@ def _test_fetch_trial_multiple_time_steps_with_simulator(self, batch: bool) -> N
 
         Cases for fetching data with multiple time steps:
         - Metric is 'BenchmarkMetric' -> exception, tested below
         - Has simulator, metric is 'BenchmarkMapMetric' -> df grows with each step
-        - No simulator, metric is 'BenchmarkMapMetric' -> all data present while
-          running (this behavior may be undesirable)
+        - Has simulator, metric is 'BenchmarkTimeVaryingMetric' -> one step at
+          a time, evolving as we take steps
+        - No simulator, metric is 'BenchmarkTimeVaryingMetric' -> completes
+          immediately and returns last step
+        - No simulator, metric is 'BenchmarkMapMetric' -> completes immediately
+          and returns all steps
         """
         metric_name = "test_metric1"
-        # Iterating over list of length 1 here because there will be more
-        # metrics in the next diff
-        for metric in [self.map_metrics[metric_name]]:
-            trial_with_simulator = get_test_trial(
+        for metric, has_simulator in product(
+            [self.map_metrics[metric_name], self.tv_metrics[metric_name]], [False, True]
+        ):
+            trial = get_test_trial(
                 has_metadata=True,
                 batch=batch,
                 multiple_time_steps=True,
-                has_simulator=True,
+                has_simulator=has_simulator,
            )
-            data = metric.fetch_trial_data(trial=trial_with_simulator).value
+            data = metric.fetch_trial_data(trial=trial).value
             df_or_map_df = data.map_df if isinstance(metric, MapMetric) else data.df
-            self.assertEqual(len(df_or_map_df), len(trial_with_simulator.arms))
+            returns_full_data = (not has_simulator) and isinstance(metric, MapMetric)
+            self.assertEqual(
+                len(df_or_map_df), len(trial.arms) * (3 if returns_full_data else 1)
+            )
             drop_cols = ["virtual runtime"]
             if not isinstance(metric, MapMetric):
                 drop_cols += ["step"]
@@ -206,14 +223,23 @@ def _test_fetch_trial_multiple_time_steps_with_simulator(self, batch: bool) -> N
             expected_df = _get_one_step_df(
                 batch=batch, metric_name=metric_name, step=0
             ).drop(columns=drop_cols)
-            self.assertEqual(df_or_map_df.to_dict(), expected_df.to_dict())
+            if returns_full_data:
+                self.assertEqual(
+                    df_or_map_df[df_or_map_df["step"] == 0].to_dict(),
+                    expected_df.to_dict(),
+                )
+            else:
+                self.assertEqual(df_or_map_df.to_dict(), expected_df.to_dict())
 
-            backend_simulator = trial_with_simulator.run_metadata[
+            backend_simulator = trial.run_metadata[
                 "benchmark_metadata"
             ].backend_simulator
+            self.assertEqual(backend_simulator is None, not has_simulator)
+            if backend_simulator is None:
+                continue
             self.assertEqual(backend_simulator.time, 0)
             sim_trial = none_throws(
-                backend_simulator.get_sim_trial_by_index(trial_with_simulator.index)
+                backend_simulator.get_sim_trial_by_index(trial.index)
             )
             self.assertIn(sim_trial, backend_simulator._running)
             backend_simulator.update()
@@ -222,13 +248,13 @@ def _test_fetch_trial_multiple_time_steps_with_simulator(self, batch: bool) -> N
             backend_simulator.update()
             self.assertIn(sim_trial, backend_simulator._completed)
             self.assertEqual(backend_simulator.time, 2)
-            data = metric.fetch_trial_data(trial=trial_with_simulator).value
+            data = metric.fetch_trial_data(trial=trial).value
             if isinstance(metric, MapMetric):
                 map_df = data.map_df
-                self.assertEqual(len(map_df), 2 * len(trial_with_simulator.arms))
+                self.assertEqual(len(map_df), 2 * len(trial.arms))
                 self.assertEqual(set(map_df["step"].tolist()), {0, 1})
             df = data.df
-            self.assertEqual(len(df), len(trial_with_simulator.arms))
+            self.assertEqual(len(df), len(trial.arms))
             expected_df = _get_one_step_df(
                 batch=batch, metric_name=metric_name, step=1
             ).drop(columns=drop_cols)
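
Example usage: a minimal sketch, not part of the patch. It assumes Ax with this change applied and a trial whose `run_metadata` carries `BenchmarkTrialMetadata` with a `BackendSimulator`, as the tests construct via `get_test_trial(..., has_simulator=True, multiple_time_steps=True)`.

```python
# Sketch only: the constructor and classmethod below are taken from the tests
# and the diff; `trial` is a hypothetical trial backed by a BackendSimulator.
from ax.benchmark.benchmark_metric import BenchmarkTimeVaryingMetric

metric = BenchmarkTimeVaryingMetric(name="test_metric1", lower_is_better=True)

# Unlike BenchmarkMetric, this metric reports data while the trial is running.
assert BenchmarkTimeVaryingMetric.is_available_while_running()

# Fetching returns plain `Data` (not `MapData`) containing only the latest
# completed time step for the simulator's current `time`; advancing the
# simulator (backend_simulator.update()) and fetching again yields the value
# for the next step.
# result = metric.fetch_trial_data(trial=trial)
# df = result.value.df  # one row per arm; the "step" column is dropped
```

The key behavioral difference from `BenchmarkMapMetric` is the return type: both follow the simulator's clock, but `BenchmarkTimeVaryingMetric` collapses the available steps to the most recent one and returns `Data`, while `BenchmarkMapMetric` returns the full per-step history as `MapData`.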