Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Feature]: 希望后续增加兼容这个API方法 #928

Open
sanxianxiaohuntun opened this issue Nov 20, 2024 · 1 comment
Open

[Feature]: 希望后续增加兼容这个API方法 #928

sanxianxiaohuntun opened this issue Nov 20, 2024 · 1 comment
Labels
m: Provider OpenAI API 或其他 LLM 模型相关

Comments

@sanxianxiaohuntun
Copy link

sanxianxiaohuntun commented Nov 20, 2024

这是一个?

现有功能优化

详细描述

QChatGPT\pkg\provider\modelmgr\apis\chatcmpl.py

兼容后代码

from __future__ import annotations

import asyncio
import typing
import json
import base64
from typing import AsyncGenerator

import openai
import openai.types.chat.chat_completion as chat_completion
import httpx
import aiohttp
import async_lru

from .. import api, entities, errors
from ....core import entities as core_entities, app
from ... import entities as llm_entities
from ...tools import entities as tools_entities
from ....utils import image


@api.requester_class("openai-chat-completions")
class OpenAIChatCompletions(api.LLMAPIRequester):
    """OpenAI ChatCompletion API requester.

    Besides regular ``ChatCompletion`` objects, this requester also accepts
    raw string responses from OpenAI-compatible providers that return an
    SSE text body (``data: {...}`` chunks) or a plain JSON string.
    """

    # Async OpenAI SDK client; created in initialize().
    client: openai.AsyncClient

    # Config subtree: provider.requester.openai-chat-completions.
    requester_cfg: dict

    def __init__(self, ap: app.Application):
        self.ap = ap

        self.requester_cfg = self.ap.provider_cfg.data['requester']['openai-chat-completions']

    async def initialize(self):
        # api_key here is a placeholder; the real token is assigned per
        # request in _closure() from the model's token manager.
        self.client = openai.AsyncClient(
            api_key="",
            base_url=self.requester_cfg['base-url'],
            timeout=self.requester_cfg['timeout'],
            http_client=httpx.AsyncClient(
                proxies=self.ap.proxy_mgr.get_forward_proxies()
            )
        )

    async def _req(
        self,
        args: dict,
    ) -> chat_completion.ChatCompletion:
        """Send one chat-completions request with fully prepared kwargs."""
        return await self.client.chat.completions.create(**args)

    def _parse_sse_text(self, raw: str) -> dict:
        """Merge the delta chunks of a raw SSE text body into one message dict.

        Skips empty chunks and the ``[DONE]`` stream terminator instead of
        logging them as parse errors; malformed chunks are logged and skipped.
        """
        combined_message = ""
        for part in raw.split("data:")[1:]:
            part = part.strip()
            if not part or part == "[DONE]":
                # SSE terminator / padding, not a parse error.
                continue
            try:
                part_data = json.loads(part)
                if isinstance(part_data, dict) and 'choices' in part_data:
                    # Concatenate every delta content fragment in order.
                    for choice in part_data['choices']:
                        if 'delta' in choice and 'content' in choice['delta']:
                            combined_message += choice['delta']['content']
                else:
                    raise ValueError("Invalid response structure in part")
            except Exception as e:
                self.ap.logger.error(f"Failed to parse chat completion part: {e}")
                continue  # Skip this chunk and keep processing the rest.
        return {"content": combined_message, "role": "assistant"}

    async def _make_msg(
        self,
        chat_completion: typing.Union[chat_completion.ChatCompletion, str],
    ) -> llm_entities.Message:
        """Normalize an API response into a Message.

        Accepts a parsed ``ChatCompletion`` object, a raw SSE text body
        (``data: {...}`` chunks) or a plain JSON string.

        NOTE(review): the parameter name shadows the ``chat_completion``
        module import; kept as-is for backward compatibility.
        """
        if isinstance(chat_completion, str):
            if chat_completion.startswith("data:"):
                # Streamed SSE body: concatenate all delta contents.
                chatcmpl_message = self._parse_sse_text(chat_completion)
            else:
                # Plain JSON string body.
                try:
                    chat_completion = json.loads(chat_completion)
                    if isinstance(chat_completion, dict) and 'choices' in chat_completion:
                        chatcmpl_message = chat_completion['choices'][0]['message']
                    else:
                        raise ValueError("Invalid response structure: missing 'choices' key")
                except Exception as e:
                    self.ap.logger.error(f"Failed to parse chat completion string: {e}")
                    raise ValueError(f"Invalid response format: {chat_completion}")
        else:
            # Parsed ChatCompletion object from the SDK.
            chatcmpl_message = chat_completion.choices[0].message.dict()

        # Some providers omit 'role' (e.g. in merged stream chunks); default it.
        if 'role' not in chatcmpl_message or chatcmpl_message['role'] is None:
            chatcmpl_message['role'] = 'assistant'

        message = llm_entities.Message(**chatcmpl_message)

        return message

    async def _closure(
        self,
        req_messages: list[dict],
        use_model: entities.LLMModelInfo,
        use_funcs: list[tools_entities.LLMFunction] = None,
    ) -> llm_entities.Message:
        """Build the request kwargs, send the request and normalize the reply."""
        self.client.api_key = use_model.token_mgr.get_token()

        args = self.requester_cfg['args'].copy()
        args["model"] = use_model.name if use_model.model_name is None else use_model.model_name

        if use_funcs:
            tools = await self.ap.tool_mgr.generate_tools_for_openai(use_funcs)

            if tools:
                args["tools"] = tools

        # Messages for this request only; callers keep their own copies.
        messages = req_messages.copy()

        # Vision support: inline image URLs as base64 data URIs.
        for msg in messages:
            if 'content' in msg and isinstance(msg["content"], list):
                for me in msg["content"]:
                    if me["type"] == "image_url":
                        me["image_url"]['url'] = await self.get_base64_str(me["image_url"]['url'])

        args["messages"] = messages

        # Send the request and normalize the result.
        resp = await self._req(args)

        return await self._make_msg(resp)

    async def call(
        self,
        model: entities.LLMModelInfo,
        messages: typing.List[llm_entities.Message],
        funcs: typing.List[tools_entities.LLMFunction] = None,
    ) -> llm_entities.Message:
        """Public entry point: request a completion and map SDK errors.

        Raises:
            errors.RequesterError: on timeout and for all OpenAI API errors.
        """
        req_messages = []  # Internal only; external sync goes through query.messages.
        for m in messages:
            msg_dict = m.dict(exclude_none=True)
            content = msg_dict.get("content")
            if isinstance(content, list):
                # If every part is text, flatten the list into one string.
                if all(isinstance(part, dict) and part.get("type") == "text" for part in content):
                    msg_dict["content"] = "\n".join(part["text"] for part in content)
            req_messages.append(msg_dict)

        try:
            return await self._closure(req_messages, model, funcs)
        except asyncio.TimeoutError:
            raise errors.RequesterError('请求超时')
        except openai.BadRequestError as e:
            if 'context_length_exceeded' in e.message:
                raise errors.RequesterError(f'上文过长,请重置会话: {e.message}')
            else:
                raise errors.RequesterError(f'请求参数错误: {e.message}')
        except openai.AuthenticationError as e:
            raise errors.RequesterError(f'无效的 api-key: {e.message}')
        except openai.NotFoundError as e:
            raise errors.RequesterError(f'请求路径错误: {e.message}')
        except openai.RateLimitError as e:
            raise errors.RequesterError(f'请求过于频繁或余额不足: {e.message}')
        except openai.APIError as e:
            raise errors.RequesterError(f'请求错误: {e.message}')

    @async_lru.alru_cache(maxsize=128)
    async def get_base64_str(
        self,
        original_url: str,
    ) -> str:
        """Fetch *original_url* and return it as a base64 data URI (cached)."""
        base64_image, image_format = await image.qq_image_url_to_base64(original_url)
        return f"data:image/{image_format};base64,{base64_image}"

API返回

[11-20 18:40:45.099] chatcmpl.py (67) - [ERROR] : Failed to parse chat completion string: Extra data: line 3 column 1 (char 45)
[11-20 18:40:45.099] chat.py (94) - [ERROR] : 对话(0)请求失败: Invalid response format: {"choices":[{"delta":{"content":"呜~好舒服"}}]}

data:  {"choices":[{"delta":{"content":"的抚摸呢。"}}]}

data:  {"choices":[{"delta":{"content":"(蹭主人的"}}]}

data:  {"choices":[{"delta":{"content":"手)喵~~"}}]}
[11-20 18:40:47.677] controller.py (98) - [ERROR] : Invalid response format: {"choices":[{"delta":{"content":"呜~好舒服"}}]}

data:  {"choices":[{"delta":{"content":"的抚摸呢。"}}]}

data:  {"choices":[{"delta":{"content":"(蹭主人的"}}]}

data:  {"choices":[{"delta":{"content":"手)喵~~"}}]}

兼容后可正常运行得出结果

呜~好舒服的抚摸呢。(蹭主人的手)

@sanxianxiaohuntun
Copy link
Author

sanxianxiaohuntun commented Nov 20, 2024

主要修改代码

async def _make_msg(
    self,
    chat_completion: typing.Union[chat_completion.ChatCompletion, str],  # also accepts raw strings
) -> llm_entities.Message:
    """Normalize an API response (object, SSE text body or JSON string) into a Message.

    NOTE(review): the parameter name shadows the ``chat_completion`` module
    import; kept as-is for backward compatibility.
    """
    if isinstance(chat_completion, str):
        # Raw string body: either an SSE stream dump or a plain JSON string.
        if chat_completion.startswith("data:"):
            # Split into "data:" chunks and merge the delta contents in order.
            combined_message = ""

            for part in chat_completion.split("data:")[1:]:
                part = part.strip()
                if not part or part == "[DONE]":
                    # SSE stream terminator / padding — not a parse error.
                    continue
                try:
                    part_data = json.loads(part)
                    if isinstance(part_data, dict) and 'choices' in part_data:
                        for choice in part_data['choices']:
                            if 'delta' in choice and 'content' in choice['delta']:
                                combined_message += choice['delta']['content']
                    else:
                        raise ValueError("Invalid response structure in part")
                except Exception as e:
                    self.ap.logger.error(f"Failed to parse chat completion part: {e}")
                    continue  # Skip this chunk and keep processing the rest.

            # All chunks merged into one assistant message.
            chatcmpl_message = {"content": combined_message, "role": "assistant"}

        else:
            # Plain JSON string body.
            try:
                chat_completion = json.loads(chat_completion)
                if isinstance(chat_completion, dict) and 'choices' in chat_completion:
                    chatcmpl_message = chat_completion['choices'][0]['message']
                else:
                    raise ValueError("Invalid response structure: missing 'choices' key")
            except Exception as e:
                self.ap.logger.error(f"Failed to parse chat completion string: {e}")
                raise ValueError(f"Invalid response format: {chat_completion}")
    else:
        # Parsed ChatCompletion object from the SDK.
        chatcmpl_message = chat_completion.choices[0].message.dict()

    # Some providers omit 'role'; default to assistant.
    if 'role' not in chatcmpl_message or chatcmpl_message['role'] is None:
        chatcmpl_message['role'] = 'assistant'

    message = llm_entities.Message(**chatcmpl_message)

    return message

@sanxianxiaohuntun sanxianxiaohuntun changed the title [Feature]: 希望后续增加兼容Nalang电子魅魔API方法 [Feature]: 希望后续增加兼容这个API方法 Nov 21, 2024
@RockChinQ RockChinQ added the m: Provider OpenAI API 或其他 LLM 模型相关 label Nov 22, 2024
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
m: Provider OpenAI API 或其他 LLM 模型相关
Projects
None yet
Development

No branches or pull requests

2 participants