diff --git a/services/inference/pyproject.toml b/services/inference/pyproject.toml index fec4291..cd75f5a 100644 --- a/services/inference/pyproject.toml +++ b/services/inference/pyproject.toml @@ -49,7 +49,7 @@ urllib3 = { version = ">=1.26.19,<2" } # see https://github.com/urllib3/urllib3/ aiohttp = { version = ">=3.10.11" } # ***********Chronos********* -chronos = { git = "https://github.com/amazon-science/chronos-forecasting.git" } +chronos-forecasting = { git = "https://github.com/amazon-science/chronos-forecasting.git" } [[tool.poetry.source]] name = "pytorch" diff --git a/services/inference/tests/test_inference_lib.py b/services/inference/tests/test_inference_lib.py index 03b7602..763a875 100644 --- a/services/inference/tests/test_inference_lib.py +++ b/services/inference/tests/test_inference_lib.py @@ -33,19 +33,14 @@ MODEL_IDS = [ os.path.basename(dirpath) for dirpath, _, _ in os.walk("./mytest-tsfm") - if ".git" not in dirpath - and "./mytest-tsfm" != dirpath - and "finetuned" not in dirpath - and "figures" not in dirpath + if ".git" not in dirpath and "./mytest-tsfm" != dirpath and "finetuned" not in dirpath and "figures" not in dirpath ] def min_context_length(model_id): model_path: Path = resolve_model_path(TSFM_MODEL_DIR, model_id) assert model_path.exists(), f"{model_path} does not exist!" 
- handler, e = ForecastingServiceHandler.load( - model_id=model_id, model_path=model_path - ) + handler, e = ForecastingServiceHandler.load(model_id=model_id, model_path=model_path) return handler.handler_config.minimum_context_length @@ -80,9 +75,7 @@ def forecasting_input_base(request: type[FixtureRequest]) -> ForecastingInferenc schema: ForecastingMetadataInput = ForecastingMetadataInput( timestamp_column="date", id_columns=["ID"], target_columns=["VAL"] ) - parameters: ForecastingParameters = ForecastingParameters( - prediction_length=FORECAST_LENGTH - ) + parameters: ForecastingParameters = ForecastingParameters(prediction_length=FORECAST_LENGTH) input: ForecastingInferenceInput = ForecastingInferenceInput( model_id=request.param, schema=schema, @@ -108,16 +101,12 @@ def _basic_result_checks(results: PredictOutput, df: pd.DataFrame): # expected start time assert results["date"].iloc[0] - df["date"].iloc[-1] == timedelta(hours=1) # expected end time - assert results["date"].iloc[-1] - df["date"].iloc[-1] == timedelta( - hours=FORECAST_LENGTH - ) + assert results["date"].iloc[-1] - df["date"].iloc[-1] == timedelta(hours=FORECAST_LENGTH) @pytest.mark.parametrize("forecasting_input_base", MODEL_IDS, indirect=True) @pytest.mark.parametrize("ts_data_base", MODEL_IDS, indirect=True) -def test_forecast_with_good_data( - ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput -): +def test_forecast_with_good_data(ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput): input = forecasting_input_base data, model_id = ts_data_base # since we're sometimes generating non-sensible combinations @@ -183,18 +172,13 @@ def test_forecast_with_integer_timestamps( po: PredictOutput = runtime.forecast(input=input) results = pd.DataFrame.from_dict(po.results[0]) assert results[timestamp_column].iloc[0] == series_length + 1 - assert ( - results[timestamp_column].iloc[-1] - df[timestamp_column].iloc[-1] - == FORECAST_LENGTH - ) + assert results[timestamp_column].iloc[-1] - df[timestamp_column].iloc[-1] == FORECAST_LENGTH assert results.dtypes[timestamp_column] == df.dtypes[timestamp_column] @pytest.mark.parametrize("forecasting_input_base", MODEL_IDS, indirect=True) @pytest.mark.parametrize("ts_data_base", MODEL_IDS, indirect=True) -def test_forecast_with_bogus_timestamps( - ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput -): +def test_forecast_with_bogus_timestamps(ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput): input: ForecastingInferenceInput = copy.deepcopy(forecasting_input_base) data, model_id = ts_data_base # since we're sometimes generating non-sensible combinations @@ -214,9 +198,7 @@ def test_forecast_with_bogus_timestamps( @pytest.mark.parametrize("forecasting_input_base", MODEL_IDS, indirect=True) @pytest.mark.parametrize("ts_data_base", MODEL_IDS, indirect=True) -def test_forecast_with_bogus_values( - ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput -): +def test_forecast_with_bogus_values(ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput): input: ForecastingInferenceInput = copy.deepcopy(forecasting_input_base) data, model_id = ts_data_base # since we're sometimes generating non-sensible combinations @@ -235,9 +217,7 @@ def test_forecast_with_bogus_values( @pytest.mark.parametrize("forecasting_input_base", MODEL_IDS, indirect=True) @pytest.mark.parametrize("ts_data_base", MODEL_IDS, indirect=True) -def test_forecast_with_bogus_model_id( - ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput -): +def test_forecast_with_bogus_model_id(ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput): input: ForecastingInferenceInput = copy.deepcopy(forecasting_input_base) data, model_id = ts_data_base # since we're sometimes generating non-sensible combinations @@ -277,9 +257,7 @@ def test_forecast_with_insufficient_context_length( 
@pytest.mark.skip @pytest.mark.parametrize("forecasting_input_base", MODEL_IDS, indirect=True) @pytest.mark.parametrize("ts_data_base", MODEL_IDS, indirect=True) -def test_forecast_with_nan_data( - ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput -): +def test_forecast_with_nan_data(ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput): input: ForecastingInferenceInput = copy.deepcopy(forecasting_input_base) data, model_id = ts_data_base # since we're sometimes generating non-sensible combinations @@ -299,9 +277,7 @@ def test_forecast_with_nan_data( # @pytest.mark.skip @pytest.mark.parametrize("forecasting_input_base", MODEL_IDS, indirect=True) @pytest.mark.parametrize("ts_data_base", MODEL_IDS, indirect=True) -def test_forecast_with_missing_row( - ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput -): +def test_forecast_with_missing_row(ts_data_base: pd.DataFrame, forecasting_input_base: ForecastingInferenceInput): input: ForecastingInferenceInput = copy.deepcopy(forecasting_input_base) data, model_id = ts_data_base # since we're sometimes generating non-sensible combinations