Skip to content

Commit

Permalink
Merge pull request #68 from AgentOps-AI/66-calculate_prompt_cost-with-gpt-4o-is-returning-a-warning-message-about-gpt-4
Browse files Browse the repository at this point in the history

Fixed 4o models getting counted as GPT-4 models
  • Loading branch information
areibman authored Aug 5, 2024
2 parents 33a5a28 + deebe95 commit ebbdb94
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 0 deletions.
3 changes: 3 additions & 0 deletions tests/test_costs.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@
("gpt-4-32k-0314", 15),
("gpt-4-1106-preview", 15),
("gpt-4-vision-preview", 15),
("gpt-4o", 15),
],
)
def test_count_message_tokens(model, expected_output):
Expand All @@ -69,6 +70,7 @@ def test_count_message_tokens(model, expected_output):
("gpt-4-32k-0314", 17),
("gpt-4-1106-preview", 17),
("gpt-4-vision-preview", 17),
("gpt-4o", 17),
],
)
def test_count_message_tokens_with_name(model, expected_output):
Expand Down Expand Up @@ -108,6 +110,7 @@ def test_count_message_tokens_invalid_model():
("gpt-4-1106-preview", 4),
("gpt-4-vision-preview", 4),
("text-embedding-ada-002", 4),
("gpt-4o", 4)
],
)
def test_count_string_tokens(model, expected_output):
Expand Down
8 changes: 8 additions & 0 deletions tokencost/costs.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,10 @@ def count_message_tokens(messages: List[Dict[str, str]], model: str) -> int:
"gpt-4-32k-0314",
"gpt-4-0613",
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4o",
"gpt-4o-2024-05-13",
}:
tokens_per_message = 3
tokens_per_name = 1
Expand All @@ -63,6 +67,10 @@ def count_message_tokens(messages: List[Dict[str, str]], model: str) -> int:
"gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613."
)
return count_message_tokens(messages, model="gpt-3.5-turbo-0613")
elif "gpt-4o" in model:
print(
"Warning: gpt-4o may update over time. Returning num tokens assuming gpt-4o-2024-05-13.")
return count_message_tokens(messages, model="gpt-4o-2024-05-13")
elif "gpt-4" in model:
logger.warning(
"gpt-4 may update over time. Returning num tokens assuming gpt-4-0613."
Expand Down

0 comments on commit ebbdb94

Please sign in to comment.