
Merge pull request #16 from kanak8278/model_configurable
chore: Set GPT_MODEL environment variable in mini_llm.py
sameersegal authored Jul 26, 2024
2 parents afc3352 + 394a894 commit b0a1580
Showing 2 changed files with 8 additions and 2 deletions.
env.template: 4 additions & 0 deletions
```diff
@@ -1,4 +1,8 @@
 OPENAI_API_KEY=
+FAST_MODEL=
+# Internally we use SLOW_MODEL for the DSL generation
+SLOW_MODEL=
+

 #AZURE_OPENAI_API_KEY=
 #AZURE_OPENAI_ENDPOINT=
```
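For reference, a minimal sketch of how a local `.env` built from this template could be loaded and validated before anything else runs. Nothing below comes from the repository: the use of python-dotenv and the fail-fast check are assumptions for illustration; only the variable names are taken from `env.template`.

```python
# Illustrative only: load the variables declared in env.template and fail fast
# if required ones are missing. python-dotenv is an assumed convenience here,
# not something this commit introduces.
import os

from dotenv import load_dotenv  # assumption: `pip install python-dotenv`

load_dotenv()  # reads a local .env created by copying env.template

settings = {
    "OPENAI_API_KEY": os.getenv("OPENAI_API_KEY"),
    "FAST_MODEL": os.getenv("FAST_MODEL"),
    # Per the template comment, SLOW_MODEL is the model used for DSL generation.
    "SLOW_MODEL": os.getenv("SLOW_MODEL"),
}

missing = [name for name, value in settings.items() if not value]
if missing:
    raise EnvironmentError(f"Missing environment variables: {', '.join(missing)}")
```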
nl2dsl/utils/mini_llm.py: 4 additions & 2 deletions
```diff
@@ -15,8 +15,10 @@
 else:
     client = OpenAI()

-GPT_MODEL = "gpt-4-turbo"
+SLOW_MODEL = os.getenv('SLOW_MODEL')

+if SLOW_MODEL is None:
+    raise EnvironmentError("SLOW_MODEL environment variable is not set.")

 @retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(10))
 def mini_llm(utterance, model="gpt-4-turbo", temperature=0.3):
@@ -33,7 +35,7 @@ def mini_llm(utterance, model="gpt-4-turbo", temperature=0.3):

 @retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(10))
 def chat_completion_request(
-    messages, tools=None, tool_choice=None, model=GPT_MODEL, response_format=None
+    messages, tools=None, tool_choice=None, model=SLOW_MODEL, response_format=None
 ):
     response = client.chat.completions.create(
         model=model,
```
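A hedged usage sketch of the reworked default: `SLOW_MODEL` is resolved once at import time, and `chat_completion_request` now defaults to it, so callers that omit `model` pick up whatever the environment specifies. The model name and the comments about the return value below are assumptions, not part of the diff (the diff truncates before the function's return statement).

```python
# Sketch, not repository code: exercise the new SLOW_MODEL default.
import os

# Must be set before importing mini_llm, otherwise the module raises
# EnvironmentError at import time. The value here is an arbitrary example.
os.environ.setdefault("SLOW_MODEL", "gpt-4-turbo")

from nl2dsl.utils.mini_llm import chat_completion_request

messages = [{"role": "user", "content": "Generate the DSL for a two-step survey flow."}]

# No model argument, so the call falls back to model=SLOW_MODEL.
response = chat_completion_request(messages)

# What chat_completion_request returns is cut off in the diff; if it returns the
# raw chat completion, the text would be at response.choices[0].message.content.
```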
