From b81af3ae57559b61dc2e85cbc7c53d01f944b071 Mon Sep 17 00:00:00 2001
From: Jose Javier <26491792+josejg@users.noreply.github.com>
Date: Sun, 18 Aug 2024 16:52:36 -0700
Subject: [PATCH] Correct error message for inference wrapper (#1459)

---
 llmfoundry/models/inference_api_wrapper/openai_causal_lm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llmfoundry/models/inference_api_wrapper/openai_causal_lm.py b/llmfoundry/models/inference_api_wrapper/openai_causal_lm.py
index 8253134bcd..4f1f2e6f04 100644
--- a/llmfoundry/models/inference_api_wrapper/openai_causal_lm.py
+++ b/llmfoundry/models/inference_api_wrapper/openai_causal_lm.py
@@ -102,7 +102,7 @@ def try_generate_completion(self, prompt: str, num_tokens: int):
                 break
             except RateLimitError as e:
                 if 'You exceeded your current quota' in str(
-                    e._message,
+                    e.message,
                 ):  # pyright: ignore
                     raise e
                 delay *= 2 * (1 + random.random())
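
For context, here is a minimal, self-contained sketch of the retry loop this one-line patch touches. It is an approximation, not the repository's exact code: the `MAX_RETRIES` value, the model name, and the `client.completions.create` call are assumptions for illustration. The patch itself swaps the private `e._message` attribute for the public `e.message` exposed by openai-python v1 error classes.

```python
import random
from time import sleep

from openai import APITimeoutError, OpenAI, RateLimitError

MAX_RETRIES = 10  # assumed constant; the real value lives in the repo


def try_generate_completion(client: OpenAI, prompt: str, num_tokens: int):
    """Retry completion requests with exponential backoff and jitter.

    Quota-exhaustion errors are re-raised immediately, since retrying
    cannot succeed; transient rate limits and timeouts are retried.
    """
    tries = 0
    delay = 1.0
    completion = None
    while tries < MAX_RETRIES:
        tries += 1
        try:
            completion = client.completions.create(
                model='gpt-3.5-turbo-instruct',  # hypothetical model choice
                prompt=prompt,
                max_tokens=num_tokens,
            )
            break
        except RateLimitError as e:
            # `e.message` is the public attribute on openai-python v1
            # errors; the patch fixes an access to the private `_message`.
            if 'You exceeded your current quota' in str(e.message):
                raise e
            delay *= 2 * (1 + random.random())  # backoff with jitter
            sleep(delay)
        except APITimeoutError:
            delay *= 2 * (1 + random.random())
            sleep(delay)
    return completion
```

The design distinction worth noting: a quota-exhaustion message means no amount of waiting will help, so it propagates immediately, while ordinary rate limits and timeouts back off and retry. The `2 * (1 + random.random())` factor randomizes the doubling so concurrent clients do not retry in lockstep.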