
better error messages
aspfohl committed Jan 23, 2024
1 parent 4c49b09 commit eee02d9
Showing 1 changed file with 9 additions and 8 deletions.
llmfoundry/models/inference_api_wrapper/fmapi.py (17 changes: 9 additions & 8 deletions)
@@ -23,27 +23,27 @@
 def block_until_ready(base_url: str):
     """Block until the endpoint is ready."""
     sleep_s = 5
-    remaining_s = 5 * 50  # At max, wait 5 minutes
+    timeout_s = 5 * 60  # At max, wait 5 minutes
 
     ping_url = f'{base_url}/ping'
 
+    waited_s = 0
     while True:
         try:
             requests.get(ping_url)
+            log.info(f'Endpoint {ping_url} is ready')
+            break
         except requests.exceptions.ConnectionError:
             log.debug(
                 f'Endpoint {ping_url} not ready yet. Sleeping {sleep_s} seconds'
             )
             time.sleep(sleep_s)
-            remaining_s -= sleep_s
-        else:
-            log.info(f'Endpoint {ping_url} is ready')
-            break
+            waited_s += sleep_s
 
-        if remaining_s <= 0:
+        if waited_s >= timeout_s:
             raise TimeoutError(
-                f'Endpoint {ping_url} never became ready, exiting')
+                f'Endpoint {ping_url} did not become ready after {waited_s:,} seconds, exiting'
+            )
 
 
 class FMAPIEvalInterface(OpenAIEvalInterface):
@@ -58,7 +58,8 @@ def __init__(self, model_cfg: Dict, tokenizer: AutoTokenizer):

         if 'base_url' not in model_cfg:
             raise ValueError(
-                'Must specify base_url in model_cfg for FMAPIsEvalWrapper')
+                'Must specify base_url or use local=True in model_cfg for FMAPIsEvalWrapper'
+            )
 
         super().__init__(model_cfg, tokenizer)

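For reference, here is the function as it reads after this commit, assembled into a self-contained sketch. The imports and logger setup are assumptions inferred from the diff's use of requests, time, and log; they do not appear in the hunk above, and the localhost URL in the usage line is purely illustrative.

import logging
import time

import requests

log = logging.getLogger(__name__)


def block_until_ready(base_url: str):
    """Block until the endpoint is ready."""
    # Poll the /ping route every few seconds until it answers,
    # giving up once the total wait crosses the timeout.
    sleep_s = 5
    timeout_s = 5 * 60  # At max, wait 5 minutes

    ping_url = f'{base_url}/ping'

    waited_s = 0
    while True:
        try:
            requests.get(ping_url)
            log.info(f'Endpoint {ping_url} is ready')
            break
        except requests.exceptions.ConnectionError:
            log.debug(
                f'Endpoint {ping_url} not ready yet. Sleeping {sleep_s} seconds'
            )
            time.sleep(sleep_s)
            waited_s += sleep_s

        if waited_s >= timeout_s:
            raise TimeoutError(
                f'Endpoint {ping_url} did not become ready after {waited_s:,} seconds, exiting'
            )


# Hypothetical usage: block until a locally served inference endpoint answers.
block_until_ready('http://localhost:8080')

Counting up with waited_s against timeout_s, instead of counting down from remaining_s, also lets the error message report exactly how long the caller waited before giving up.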
