Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adds Support for OpenAI Reasoning Models #3530

Open
wants to merge 4 commits into
base: 0.2
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 19 additions & 0 deletions autogen/oai/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -374,6 +374,22 @@ def get_usage(response: Union[ChatCompletion, Completion]) -> Dict:
}


class OpenAI_O1(OpenAIClient):
    """OpenAI client wrapper for the o1 family of reasoning models.

    The o1 endpoints reject messages with the "system" role, so this client
    remaps them before delegating to the regular OpenAI client.

    NOTE(review): o1 models also do not support streaming, tools, or function
    calling — consider rejecting such params here explicitly. TODO confirm
    desired behavior with maintainers.
    """

    def __init__(self, **kwargs):
        """Build the underlying OpenAI SDK client and hand it to the base class."""
        super().__init__(OpenAI(**kwargs))

    def create(self, params: Dict[str, Any]) -> ChatCompletion:
        """Create a chat completion, rewriting unsupported "system" roles.

        Args:
            params: Request parameters; any message in ``params["messages"]``
                whose role is "system" is sent with role "assistant" instead.

        Returns:
            The ChatCompletion produced by the parent client.
        """
        # Remap "system" -> "assistant" to avoid API errors on o1 models.
        # Rebuild the messages list (and the affected message dicts) instead of
        # mutating the caller's objects in place; debug print removed.
        params = {
            **params,
            "messages": [
                {**message, "role": "assistant"}
                if message.get("role") == "system"
                else message
                for message in params["messages"]
            ],
        }

        # Delegate the (sanitized) request to the parent class.
        return super().create(params)


class OpenAIWrapper:
"""A wrapper class for openai client."""

Expand Down Expand Up @@ -532,6 +548,9 @@ def _register_default_client(self, config: Dict[str, Any], openai_config: Dict[s
raise ImportError("Please install `anthropic` to use Anthropic API.")
client = AnthropicClient(**openai_config)
self._clients.append(client)
elif api_type is not None and api_type.startswith("openai-o1"):
client = OpenAI_O1(**openai_config)
self._clients.append(client)
elif api_type is not None and api_type.startswith("mistral"):
if mistral_import_exception:
raise ImportError("Please install `mistralai` to use the Mistral.AI API.")
Expand Down
6 changes: 6 additions & 0 deletions autogen/oai/openai_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,12 @@
"gpt-4o": (0.005, 0.015),
"gpt-4o-2024-05-13": (0.005, 0.015),
"gpt-4o-2024-08-06": (0.0025, 0.01),
# o1 models
"o1-preview": (0.015, 0.060),
"o1-preview-2024-09-12": (0.015, 0.060),
# o1-mini models
"o1-mini": (0.003, 0.012),
"o1-mini-2024-09-12": (0.003, 0.012),
# gpt-4-turbo
"gpt-4-turbo-2024-04-09": (0.01, 0.03),
# gpt-4
Expand Down
6 changes: 6 additions & 0 deletions autogen/token_count_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,12 @@ def get_max_token_limit(model: str = "gpt-3.5-turbo-0613") -> int:
"gpt-4o-2024-08-06": 128000,
"gpt-4o-mini": 128000,
"gpt-4o-mini-2024-07-18": 128000,
# o1 models
"o1-preview": 128000,
"o1-preview-2024-09-12": 128000,
# o1-mini models
"o1-mini": 128000,
"o1-mini-2024-09-12": 128000,
}
return max_token_limit[model]

Expand Down
8 changes: 8 additions & 0 deletions test/twoagent-o1.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Smoke-test script: two-agent chat backed by an OpenAI o1 reasoning model.
from autogen import AssistantAgent, UserProxyAgent, config_list_from_json

# api_type "openai-o1" routes requests through the o1-specific client wrapper.
config_list = [{"api_type": "openai-o1", "model": "o1-mini"}]
assistant = AssistantAgent("assistant", llm_config={"config_list": config_list})
user_proxy = UserProxyAgent(
    "user_proxy", code_execution_config={"work_dir": "coding", "use_docker": False}
)  # IMPORTANT: set use_docker to True to run generated code inside Docker (recommended).
# Start the conversation; the assistant proposes code that the proxy executes locally.
user_proxy.initiate_chat(assistant, message="Save a chart of NVDA and TESLA stock price change YTD.")
Loading