diff --git a/src/marvin/bots/base.py b/src/marvin/bots/base.py
index c81c6d96f..57f932ace 100644
--- a/src/marvin/bots/base.py
+++ b/src/marvin/bots/base.py
@@ -5,6 +5,7 @@
 from typing import TYPE_CHECKING, Any, Callable
 
 import pendulum
+from openai.error import InvalidRequestError
 from pydantic import Field, validator
 
 import marvin
@@ -490,9 +491,20 @@ async def _call_llm(self, messages: list[Message]) -> str:
         if marvin.settings.verbose:
             messages_repr = "\n".join(repr(m) for m in langchain_messages)
             self.logger.debug(f"Sending messages to LLM: {messages_repr}")
-        result = await self.llm.agenerate(
-            messages=[langchain_messages], stop=["Plugin output:", "Plugin Output:"]
-        )
+        try:
+            result = await self.llm.agenerate(
+                messages=[langchain_messages], stop=["Plugin output:", "Plugin Output:"]
+            )
+        except InvalidRequestError as exc:
+            # Translate OpenAI's opaque "model does not exist" error into an
+            # actionable configuration hint; chain the original for debugging.
+            if "does not exist" in str(exc):
+                raise ValueError(
+                    "Please check your `openai_model_name` and that your OpenAI account"
+                    " has access to this model. You can select an OpenAI model by"
+                    " setting the `MARVIN_OPENAI_MODEL_NAME` env var."
+                    " Read more about settings in the docs: https://www.askmarvin.ai/guide/introduction/configuration/#settings"  # noqa: E501
+                ) from exc
+            raise
+
         return result.generations[0][0].text
 
     async def interactive_chat(self, first_message: str = None):