diff --git a/config-template.py b/config-template.py
index 42e355a2..7b666863 100644
--- a/config-template.py
+++ b/config-template.py
@@ -248,7 +248,7 @@
 trace_function_calls = False
 
 # Whether to quote the original message when replying in a group
-quote_origin = True
+quote_origin = False
 
 # Whether to @ the sender when replying in a group
 at_sender = False
diff --git a/main.py b/main.py
index a5d3114b..4f6f4a19 100644
--- a/main.py
+++ b/main.py
@@ -191,13 +191,16 @@ def start(first_time_init=False):
     # Configure the OpenAI proxy
     import openai
-    openai.proxy = None  # Reset first; the proxy may need to be cleared after a reload
+    openai.proxies = None  # Reset first; the proxy may need to be cleared after a reload
 
     if "http_proxy" in config.openai_config and config.openai_config["http_proxy"] is not None:
-        openai.proxy = config.openai_config["http_proxy"]
+        openai.proxies = {
+            "http": config.openai_config["http_proxy"],
+            "https": config.openai_config["http_proxy"]
+        }
 
     # Configure the openai api_base
     if "reverse_proxy" in config.openai_config and config.openai_config["reverse_proxy"] is not None:
-        openai.api_base = config.openai_config["reverse_proxy"]
+        openai.base_url = config.openai_config["reverse_proxy"]
 
     # Main startup flow
     database = pkg.database.manager.DatabaseManager()
diff --git a/override-all.json b/override-all.json
index 96361fb9..cde08f7c 100644
--- a/override-all.json
+++ b/override-all.json
@@ -63,7 +63,7 @@
         "size": "256x256"
     },
     "trace_function_calls": false,
-    "quote_origin": true,
+    "quote_origin": false,
     "at_sender": false,
     "include_image_description": true,
     "process_message_timeout": 120,
diff --git a/pkg/openai/api/chat_completion.py b/pkg/openai/api/chat_completion.py
index 032e14bc..a8e5f175 100644
--- a/pkg/openai/api/chat_completion.py
+++ b/pkg/openai/api/chat_completion.py
@@ -1,4 +1,5 @@
 import openai
+from openai.types.chat import chat_completion_message
 import json
 import logging
 
@@ -13,13 +14,14 @@ class ChatCompletionRequest(RequestBase):
     This class guarantees that every returned message with role "assistant" has finish_reason "stop".
     If there is a function-call response, the return cascade of this class is: function-call request -> function-call result -> ... -> assistant message -> stop.
     """
+
     model: str
     messages: list[dict[str, str]]
     kwargs: dict
 
     stopped: bool = False
 
-    pending_func_call: dict = None
+    pending_func_call: chat_completion_message.FunctionCall = None
 
     pending_msg: str
 
@@ -46,16 +48,18 @@ def append_message(self, role: str, content: str, name: str=None, function_call:
 
     def __init__(
         self,
+        client: openai.Client,
         model: str,
         messages: list[dict[str, str]],
         **kwargs
    ):
+        self.client = client
         self.model = model
         self.messages = messages.copy()
 
         self.kwargs = kwargs
 
-        self.req_func = openai.ChatCompletion.acreate
+        self.req_func = self.client.chat.completions.create
 
         self.pending_func_call = None
 
@@ -84,39 +88,48 @@ def __next__(self) -> dict:
 
         # Merge in kwargs
         args = {**args, **self.kwargs}
+
+        from openai.types.chat import chat_completion
 
-        resp = self._req(**args)
+        resp: chat_completion.ChatCompletion = self._req(**args)
 
-        choice0 = resp["choices"][0]
+        choice0 = resp.choices[0]
 
         # If this is not a function call and finish_reason is stop, stop iterating
-        if choice0['finish_reason'] == 'stop':
+        if choice0.finish_reason == 'stop':
             self.stopped = True
 
-        if 'function_call' in choice0['message']:
-            self.pending_func_call = choice0['message']['function_call']
+        if hasattr(choice0.message, 'function_call') and choice0.message.function_call is not None:
+            self.pending_func_call = choice0.message.function_call
 
             self.append_message(
                 role="assistant",
-                content=choice0['message']['content'],
-                function_call=choice0['message']['function_call']
+                content=choice0.message.content,
+                function_call=choice0.message.function_call
             )
 
             return {
-                "id": resp["id"],
+                "id": resp.id,
                 "choices": [
                     {
-                        "index": choice0["index"],
+                        "index": choice0.index,
                         "message": {
                             "role": "assistant",
                             "type": "function_call",
-                            "content": choice0['message']['content'],
-                            "function_call": choice0['message']['function_call']
+                            "content": choice0.message.content,
+                            "function_call": {
+                                "name": choice0.message.function_call.name,
+                                "arguments": choice0.message.function_call.arguments
+                            }
                         },
                         "finish_reason": "function_call"
                     }
                 ],
-                "usage": resp["usage"]
+                "usage": {
+                    "prompt_tokens": resp.usage.prompt_tokens,
+                    "completion_tokens": resp.usage.completion_tokens,
+                    "total_tokens": resp.usage.total_tokens
+                }
             }
         else:
@@ -124,19 +137,23 @@ def __next__(self) -> dict:
             # A plain reply is always the final message, so it does not need to be appended to the internal messages
 
             return {
-                "id": resp["id"],
+                "id": resp.id,
                 "choices": [
                     {
-                        "index": choice0["index"],
+                        "index": choice0.index,
                         "message": {
                             "role": "assistant",
                             "type": "text",
-                            "content": choice0['message']['content']
+                            "content": choice0.message.content
                         },
-                        "finish_reason": choice0["finish_reason"]
+                        "finish_reason": choice0.finish_reason
                     }
                 ],
-                "usage": resp["usage"]
+                "usage": {
+                    "prompt_tokens": resp.usage.prompt_tokens,
+                    "completion_tokens": resp.usage.completion_tokens,
+                    "total_tokens": resp.usage.total_tokens
+                }
             }
         else:  # Handle the pending function-call request
@@ -144,20 +161,20 @@ def __next__(self) -> dict:
 
             self.pending_func_call = None
 
-            func_name = cp_pending_func_call['name']
+            func_name = cp_pending_func_call.name
             arguments = {}
 
             try:
                 try:
-                    arguments = json.loads(cp_pending_func_call['arguments'])
+                    arguments = json.loads(cp_pending_func_call.arguments)
                 # Fallback for arguments that are not valid JSON
                 except json.decoder.JSONDecodeError:
                     # Get the function's parameter list
                     func_schema = get_func_schema(func_name)
 
                     arguments = {
-                        func_schema['parameters']['required'][0]: cp_pending_func_call['arguments']
+                        func_schema['parameters']['required'][0]: cp_pending_func_call.arguments
                     }
 
                 logging.info("Executing function call: name={}, arguments={}".format(func_name, arguments))
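For context on the hunks above: the v1 SDK returns typed pydantic objects instead of plain dicts, which is why the patch replaces `resp["choices"]` indexing with `resp.choices` attribute access and rebuilds the `usage` dict by hand. A minimal sketch of the new access pattern, assuming a key in `OPENAI_API_KEY` (the model name and prompt here are illustrative only):

```python
import os
import openai

# v1-style client; replaces the removed module-level openai.ChatCompletion.acreate
client = openai.Client(api_key=os.environ["OPENAI_API_KEY"])

resp = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, who are you?"}],
)

choice0 = resp.choices[0]
print(choice0.finish_reason)    # "stop", or "function_call" when a function is requested
print(choice0.message.content)  # may be None when the model requests a function call

# function_call is a typed FunctionCall object, matching the new
# pending_func_call annotation in ChatCompletionRequest
if choice0.message.function_call is not None:
    print(choice0.message.function_call.name)
    print(choice0.message.function_call.arguments)  # JSON-encoded string

print(resp.usage.total_tokens)  # usage is an object, hence the manual dict rebuild above
```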
diff --git a/pkg/openai/api/completion.py b/pkg/openai/api/completion.py
index ee0d34e9..2c74de36 100644
--- a/pkg/openai/api/completion.py
+++ b/pkg/openai/api/completion.py
@@ -1,4 +1,5 @@
 import openai
+from openai.types import completion, completion_choice
 
 from .model import RequestBase
 
@@ -17,10 +18,12 @@ class CompletionRequest(RequestBase):
 
     def __init__(
         self,
+        client: openai.Client,
         model: str,
         messages: list[dict[str, str]],
         **kwargs
     ):
+        self.client = client
         self.model = model
         self.prompt = ""
 
@@ -31,7 +34,7 @@ def __init__(
 
         self.kwargs = kwargs
 
-        self.req_func = openai.Completion.acreate
+        self.req_func = self.client.completions.create
 
     def __iter__(self):
         return self
 
@@ -63,49 +66,35 @@ def __next__(self) -> dict:
         if self.stopped:
             raise StopIteration()
 
-        resp = self._req(
+        resp: completion.Completion = self._req(
             model=self.model,
             prompt=self.prompt,
             **self.kwargs
         )
 
-        if resp["choices"][0]["finish_reason"] == "stop":
+        if resp.choices[0].finish_reason == "stop":
             self.stopped = True
 
-        choice0 = resp["choices"][0]
+        choice0: completion_choice.CompletionChoice = resp.choices[0]
 
-        self.prompt += choice0["text"]
+        self.prompt += choice0.text
 
         return {
-            "id": resp["id"],
+            "id": resp.id,
             "choices": [
                 {
-                    "index": choice0["index"],
+                    "index": choice0.index,
                     "message": {
                         "role": "assistant",
                         "type": "text",
-                        "content": choice0["text"]
+                        "content": choice0.text
                     },
-                    "finish_reason": choice0["finish_reason"]
+                    "finish_reason": choice0.finish_reason
                 }
             ],
-            "usage": resp["usage"]
-        }
-
-if __name__ == "__main__":
-    import os
-
-    openai.api_key = os.environ["OPENAI_API_KEY"]
-
-    for resp in CompletionRequest(
-        model="text-davinci-003",
-        messages=[
-            {
-                "role": "user",
-                "content": "Hello, who are you?"
+            "usage": {
+                "prompt_tokens": resp.usage.prompt_tokens,
+                "completion_tokens": resp.usage.completion_tokens,
+                "total_tokens": resp.usage.total_tokens
             }
-        ]
-    ):
-        print(resp)
-        if resp["choices"][0]["finish_reason"] == "stop":
-            break
+        }
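The text-completion path follows the same pattern: the async `openai.Completion.acreate` becomes the synchronous `client.completions.create`, which is why the deleted `__main__` smoke test no longer applies. A small sketch of the endpoint under the v1 SDK; note that `text-davinci-003` (used by the removed test block) has since been retired, so `gpt-3.5-turbo-instruct` stands in here as an assumed replacement:

```python
import os
import openai

client = openai.Client(api_key=os.environ["OPENAI_API_KEY"])

# The legacy Completions endpoint survives in the v1 SDK under client.completions
resp = client.completions.create(
    model="gpt-3.5-turbo-instruct",  # stand-in; text-davinci-003 is retired
    prompt="user: Hello, who are you?\nassistant: ",
    max_tokens=64,
)

choice0 = resp.choices[0]
print(choice0.text)             # attribute access, as in the patched __next__
print(choice0.finish_reason)
print(resp.usage.total_tokens)
```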
model="text-davinci-003", - messages=[ - { - "role": "user", - "content": "Hello, who are you?" + "usage": { + "prompt_tokens": resp.usage.prompt_tokens, + "completion_tokens": resp.usage.completion_tokens, + "total_tokens": resp.usage.total_tokens } - ] - ): - print(resp) - if resp["choices"][0]["finish_reason"] == "stop": - break + } diff --git a/pkg/openai/api/model.py b/pkg/openai/api/model.py index 58f3e3ff..3a574cb3 100644 --- a/pkg/openai/api/model.py +++ b/pkg/openai/api/model.py @@ -8,6 +8,8 @@ class RequestBase: + client: openai.Client + req_func: callable def __init__(self, *args, **kwargs): @@ -17,41 +19,17 @@ def _next_key(self): import pkg.utils.context as context switched, name = context.get_openai_manager().key_mgr.auto_switch() logging.debug("切换api-key: switched={}, name={}".format(switched, name)) - openai.api_key = context.get_openai_manager().key_mgr.get_using_key() + self.client.api_key = context.get_openai_manager().key_mgr.get_using_key() def _req(self, **kwargs): """处理代理问题""" import config - ret: dict = {} - exception: Exception = None - - async def awrapper(**kwargs): - nonlocal ret, exception - - try: - ret = await self.req_func(**kwargs) - logging.debug("接口请求返回:%s", str(ret)) - - if config.switch_strategy == 'active': - self._next_key() - - return ret - except Exception as e: - exception = e - - loop = asyncio.new_event_loop() - - thr = threading.Thread( - target=loop.run_until_complete, - args=(awrapper(**kwargs),) - ) - - thr.start() - thr.join() + ret = self.req_func(**kwargs) + logging.debug("接口请求返回:%s", str(ret)) - if exception is not None: - raise exception + if config.switch_strategy == 'active': + self._next_key() return ret diff --git a/pkg/openai/manager.py b/pkg/openai/manager.py index 8281badf..b99371f6 100644 --- a/pkg/openai/manager.py +++ b/pkg/openai/manager.py @@ -24,6 +24,8 @@ class OpenAIInteract: "size": "256x256", } + client: openai.Client = None + def __init__(self, api_key: str): self.key_mgr = pkg.openai.keymgr.KeysManager(api_key) @@ -31,7 +33,9 @@ def __init__(self, api_key: str): # logging.info("文字总使用量:%d", self.audit_mgr.get_total_text_length()) - openai.api_key = self.key_mgr.get_using_key() + self.client = openai.Client( + api_key=self.key_mgr.get_using_key() + ) pkg.utils.context.set_openai_manager(self) @@ -48,7 +52,7 @@ def request_completion(self, messages: list): cp_parmas = config.completion_api_params.copy() del cp_parmas['model'] - request = select_request_cls(model, messages, cp_parmas) + request = select_request_cls(self.client, model, messages, cp_parmas) # 请求接口 for resp in request: diff --git a/pkg/openai/modelmgr.py b/pkg/openai/modelmgr.py index abeef87a..ead97647 100644 --- a/pkg/openai/modelmgr.py +++ b/pkg/openai/modelmgr.py @@ -5,9 +5,8 @@ Completion - text-davinci-003 等模型 此模块封装此两个接口的请求实现,为上层提供统一的调用方式 """ -import openai, logging, threading, asyncio -import openai.error as aiE import tiktoken +import openai from pkg.openai.api.model import RequestBase from pkg.openai.api.completion import CompletionRequest @@ -53,11 +52,11 @@ } -def select_request_cls(model_name: str, messages: list, args: dict) -> RequestBase: +def select_request_cls(client: openai.Client, model_name: str, messages: list, args: dict) -> RequestBase: if model_name in CHAT_COMPLETION_MODELS: - return ChatCompletionRequest(model_name, messages, **args) + return ChatCompletionRequest(client, model_name, messages, **args) elif model_name in COMPLETION_MODELS: - return CompletionRequest(model_name, messages, **args) + return CompletionRequest(client, 
diff --git a/pkg/openai/session.py b/pkg/openai/session.py
index 6bf13dca..6277f065 100644
--- a/pkg/openai/session.py
+++ b/pkg/openai/session.py
@@ -278,7 +278,7 @@ def query(self, text: str=None) -> tuple[str, str, list[str]]:
             if resp['choices'][0]['message']['role'] == "assistant" and resp['choices'][0]['message']['content'] != None:  # contains a plain-text response
 
                 if not trace_func_calls:
-                    res_text += resp['choices'][0]['message']['content'] + "\n"
+                    res_text += resp['choices'][0]['message']['content']
                 else:
                     res_text = resp['choices'][0]['message']['content']
                     pending_res_text = resp['choices'][0]['message']['content']
diff --git a/pkg/qqbot/message.py b/pkg/qqbot/message.py
index 8fe168ae..131805f2 100644
--- a/pkg/qqbot/message.py
+++ b/pkg/qqbot/message.py
@@ -65,14 +65,14 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
             if not event.is_prevented_default():
                 reply = [prefix + text]
 
-    except openai.error.APIConnectionError as e:
+    except openai.APIConnectionError as e:
         err_msg = str(e)
 
         if err_msg.__contains__('Error communicating with OpenAI'):
             reply = handle_exception("{} session API call failed: {}\nYour network cannot reach the OpenAI API, or the network proxy is not working properly".format(session_name, e),
                                      "[bot]err: API call failed, please retry, contact the administrator, or wait for a fix")
         else:
             reply = handle_exception("{} session API call failed: {}".format(session_name, e), "[bot]err: API call failed, please retry, contact the administrator, or wait for a fix")
-    except openai.error.RateLimitError as e:
+    except openai.RateLimitError as e:
         logging.debug(type(e))
         logging.debug(e.error['message'])
 
@@ -116,14 +116,14 @@ def process_normal_message(text_message: str, mgr, config, launcher_type: str,
         else:
             reply = handle_exception("{} session API call failed: {}".format(session_name, e), "[bot]err: RateLimitError, please retry, contact the author, or wait for a fix")
-    except openai.error.InvalidRequestError as e:
+    except openai.BadRequestError as e:
         if config.auto_reset and "This model's maximum context length is" in str(e):
             session.reset(persist=True)
             reply = [tips_custom.session_auto_reset_message]
         else:
             reply = handle_exception("{} API call parameter error: {}\n".format(
                 session_name, e), "[bot]err: API call parameter error, please contact the administrator, or wait for a fix")
-    except openai.error.ServiceUnavailableError as e:
+    except openai.APIStatusError as e:
         reply = handle_exception("{} API service unavailable: {}".format(session_name, e), "[bot]err: API service unavailable, please retry, contact the administrator, or wait for a fix")
     except Exception as e:
         logging.exception(e)
diff --git a/requirements.txt b/requirements.txt
index 8bd05f6f..1b1d63e8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 requests
-openai==0.28.1
+openai
 dulwich~=0.21.6
 colorlog~=6.6.0
 yiri-mirai
diff --git a/tests/proxy_test/forward_proxy_test.py b/tests/proxy_test/forward_proxy_test.py
new file mode 100644
index 00000000..dbe5399f
--- /dev/null
+++ b/tests/proxy_test/forward_proxy_test.py
@@ -0,0 +1,24 @@
+import os
+
+import openai
+
+client = openai.Client(
+    api_key=os.environ["OPENAI_API_KEY"],
+)
+
+openai.proxies = {
+    'http': 'http://127.0.0.1:7890',
+    'https': 'http://127.0.0.1:7890',
+}
+
+resp = client.chat.completions.create(
+    model="gpt-3.5-turbo",
+    messages=[
+        {
+            "role": "user",
+            "content": "Hello, how are you?",
+        }
+    ]
+)
+
+print(resp)
\ No newline at end of file
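On the exception mapping in pkg/qqbot/message.py above: the v1 SDK removed the `openai.error` module and re-exports the exception classes at package top level. `ServiceUnavailableError` has no direct successor, so the patch falls back to the broader `openai.APIStatusError`; since `RateLimitError` and `BadRequestError` are subclasses of `APIStatusError`, handler order matters. A minimal sketch (placeholder key, so this particular call would land in the last branch as an auth error):

```python
import openai

client = openai.Client(api_key="sk-placeholder")

try:
    client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "ping"}],
    )
except openai.APIConnectionError as e:  # network / proxy failures
    print("connection error:", e)
except openai.RateLimitError as e:      # HTTP 429
    print("rate limited:", e)
except openai.BadRequestError as e:     # HTTP 400; replaces InvalidRequestError
    print("bad request:", e)
except openai.APIStatusError as e:      # any other non-2xx response
    print("status", e.status_code, ":", e)
```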
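One caveat worth recording next to the new test: as far as I can tell, the v1 SDK does not consume a module-level `proxies` attribute, and an already-constructed `openai.Client` in particular will not pick it up. The documented route for forward proxies in v1 is a pre-configured `httpx` client. A sketch using the same placeholder proxy address as the test above, assuming an httpx version that still accepts `proxies=` (newer releases rename it to `proxy=`):

```python
import os

import httpx
import openai

# Route all SDK traffic through the local forward proxy via httpx
client = openai.Client(
    api_key=os.environ["OPENAI_API_KEY"],
    http_client=httpx.Client(proxies="http://127.0.0.1:7890"),
)
```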