Skip to content

Commit

Permalink
chronos repo name in deps, make style
Browse files Browse the repository at this point in the history
Signed-off-by: Giridhar Ganapavarapu <[email protected]>
  • Loading branch information
gganapavarapu committed Dec 11, 2024
1 parent 88067b3 commit ebe0f8e
Show file tree
Hide file tree
Showing 2 changed files with 12 additions and 36 deletions.
2 changes: 1 addition & 1 deletion services/inference/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ urllib3 = { version = ">=1.26.19,<2" } # see https://github.com/urllib3/urllib3/
aiohttp = { version = ">=3.10.11" }

# ***********Chronos*********
chronos = { git = "https://github.com/amazon-science/chronos-forecasting.git" }
chronos-forecasting = { git = "https://github.com/amazon-science/chronos-forecasting.git" }

[[tool.poetry.source]]
name = "pytorch"
Expand Down
46 changes: 11 additions & 35 deletions services/inference/tests/test_inference_lib.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,19 +33,14 @@
# Model IDs discovered by walking the local model checkout: every directory
# under ./mytest-tsfm except git metadata, fine-tuned checkpoints, figure
# output, and the checkout root itself.
# Fix: the diff-interleaved text carried the same filter twice (old multi-line
# form plus the reformatted single-line form), applying the predicate
# redundantly as two comprehension `if` clauses — keep it once.
MODEL_IDS = [
    os.path.basename(dirpath)
    for dirpath, _, _ in os.walk("./mytest-tsfm")
    if ".git" not in dirpath and "./mytest-tsfm" != dirpath and "finetuned" not in dirpath and "figures" not in dirpath
]


def min_context_length(model_id):
    """Return the minimum context length demanded by the model's handler config.

    Parameters
    ----------
    model_id : str
        Model identifier; resolved to an on-disk path under TSFM_MODEL_DIR.

    Raises
    ------
    AssertionError
        If the resolved model path does not exist.
    """
    model_path: Path = resolve_model_path(TSFM_MODEL_DIR, model_id)
    assert model_path.exists(), f"{model_path} does not exist!"
    # Fix: the diff-interleaved text invoked ForecastingServiceHandler.load
    # twice (old wrapped call plus the reformatted one-line call) — load once.
    # NOTE(review): `e` appears to be an error object returned alongside the
    # handler (presumably None on success) — unused here; confirm against the
    # load() contract.
    handler, e = ForecastingServiceHandler.load(model_id=model_id, model_path=model_path)
    return handler.handler_config.minimum_context_length


Expand Down Expand Up @@ -80,9 +75,7 @@ def forecasting_input_base(request: type[FixtureRequest]) -> ForecastingInferenc
schema: ForecastingMetadataInput = ForecastingMetadataInput(
timestamp_column="date", id_columns=["ID"], target_columns=["VAL"]
)
parameters: ForecastingParameters = ForecastingParameters(
prediction_length=FORECAST_LENGTH
)
parameters: ForecastingParameters = ForecastingParameters(prediction_length=FORECAST_LENGTH)
input: ForecastingInferenceInput = ForecastingInferenceInput(
model_id=request.param,
schema=schema,
Expand All @@ -108,16 +101,12 @@ def _basic_result_checks(results: PredictOutput, df: pd.DataFrame):
# expected start time
assert results["date"].iloc[0] - df["date"].iloc[-1] == timedelta(hours=1)
# expected end time
assert results["date"].iloc[-1] - df["date"].iloc[-1] == timedelta(
hours=FORECAST_LENGTH
)
assert results["date"].iloc[-1] - df["date"].iloc[-1] == timedelta(hours=FORECAST_LENGTH)


@pytest.mark.parametrize("forecasting_input_base", MODEL_IDS, indirect=True)
@pytest.mark.parametrize("ts_data_base", MODEL_IDS, indirect=True)
def test_forecast_with_good_data(
ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput
):
def test_forecast_with_good_data(ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput):
input = forecasting_input_base
data, model_id = ts_data_base
# since we're sometimes generating non-sensible combinations
Expand Down Expand Up @@ -183,18 +172,13 @@ def test_forecast_with_integer_timestamps(
po: PredictOutput = runtime.forecast(input=input)
results = pd.DataFrame.from_dict(po.results[0])
assert results[timestamp_column].iloc[0] == series_length + 1
assert (
results[timestamp_column].iloc[-1] - df[timestamp_column].iloc[-1]
== FORECAST_LENGTH
)
assert results[timestamp_column].iloc[-1] - df[timestamp_column].iloc[-1] == FORECAST_LENGTH
assert results.dtypes[timestamp_column] == df.dtypes[timestamp_column]


@pytest.mark.parametrize("forecasting_input_base", MODEL_IDS, indirect=True)
@pytest.mark.parametrize("ts_data_base", MODEL_IDS, indirect=True)
def test_forecast_with_bogus_timestamps(
ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput
):
def test_forecast_with_bogus_timestamps(ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput):
input: ForecastingInferenceInput = copy.deepcopy(forecasting_input_base)
data, model_id = ts_data_base
# since we're sometimes generating non-sensible combinations
Expand All @@ -214,9 +198,7 @@ def test_forecast_with_bogus_timestamps(

@pytest.mark.parametrize("forecasting_input_base", MODEL_IDS, indirect=True)
@pytest.mark.parametrize("ts_data_base", MODEL_IDS, indirect=True)
def test_forecast_with_bogus_values(
ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput
):
def test_forecast_with_bogus_values(ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput):
input: ForecastingInferenceInput = copy.deepcopy(forecasting_input_base)
data, model_id = ts_data_base
# since we're sometimes generating non-sensible combinations
Expand All @@ -235,9 +217,7 @@ def test_forecast_with_bogus_values(

@pytest.mark.parametrize("forecasting_input_base", MODEL_IDS, indirect=True)
@pytest.mark.parametrize("ts_data_base", MODEL_IDS, indirect=True)
def test_forecast_with_bogus_model_id(
ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput
):
def test_forecast_with_bogus_model_id(ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput):
input: ForecastingInferenceInput = copy.deepcopy(forecasting_input_base)
data, model_id = ts_data_base
# since we're sometimes generating non-sensible combinations
Expand Down Expand Up @@ -277,9 +257,7 @@ def test_forecast_with_insufficient_context_length(
@pytest.mark.skip
@pytest.mark.parametrize("forecasting_input_base", MODEL_IDS, indirect=True)
@pytest.mark.parametrize("ts_data_base", MODEL_IDS, indirect=True)
def test_forecast_with_nan_data(
ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput
):
def test_forecast_with_nan_data(ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput):
input: ForecastingInferenceInput = copy.deepcopy(forecasting_input_base)
data, model_id = ts_data_base
# since we're sometimes generating non-sensible combinations
Expand All @@ -299,9 +277,7 @@ def test_forecast_with_nan_data(
# @pytest.mark.skip
@pytest.mark.parametrize("forecasting_input_base", MODEL_IDS, indirect=True)
@pytest.mark.parametrize("ts_data_base", MODEL_IDS, indirect=True)
def test_forecast_with_missing_row(
ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput
):
def test_forecast_with_missing_row(ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput):
input: ForecastingInferenceInput = copy.deepcopy(forecasting_input_base)
data, model_id = ts_data_base
# since we're sometimes generating non-sensible combinations
Expand Down

0 comments on commit ebe0f8e

Please sign in to comment.