Skip to content

Commit

Permalink
Merge pull request #7 from RockChinQ/feat/more-adapters
Browse files Browse the repository at this point in the history
Feat: add more adapters
  • Loading branch information
RockChinQ authored Sep 26, 2023
2 parents befedd6 + c14e11b commit 58fe27d
Show file tree
Hide file tree
Showing 13 changed files with 699 additions and 38 deletions.
13 changes: 9 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,9 +27,14 @@

### 支持的 LLM 库

- [acheong08/ChatGPT](https://github.com/acheong08/ChatGPT) - ChatGPT 网页版逆向工程
- gpt-3.5-turbo
- gpt-4
|Adapter|Multi Round|Stream|Function Call|Status|Comment|
|---|---|---|---|---|---|
|[acheong08/ChatGPT](https://github.com/acheong08/ChatGPT)|||||ChatGPT 网页版|
|[KoushikNavuluri/Claude-API](https://github.com/KoushikNavuluri/Claude-API)|||||Claude 网页版|
|[dsdanielpark/Bard-API](https://github.com/dsdanielpark/Bard-API)|||||Google Bard 网页版|
|[xtekky/gpt4free](https://github.com/xtekky/gpt4free)|||||gpt4free 接入多个平台的破解|
|[Soulter/hugging-chat-api](https://github.com/Soulter/hugging-chat-api)|||||huggingface的对话模型|
|[xw5xr6/revTongYi](https://github.com/xw5xr6/revTongYi)|||||阿里云通义千问网页版|

### 支持的 API 路径

Expand Down Expand Up @@ -65,7 +70,7 @@ python main.py

1. 创建一个 channel,按照说明填写配置,然后创建一个新的 key。

![add_channel](assets/add_channel.png)
<img width="500" alt="image" src="assets/add_channel.png">

2. 将 url (e.g. http://localhost:3000/v1 ) 设置为 OpenAI 的 api_base ,将生成的 key 设置为 OpenAI api key。
3. 现在你可以使用 OpenAI API 来访问逆向工程的 LLM 库了。
Expand Down
13 changes: 9 additions & 4 deletions README_en.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,9 +27,14 @@ So other application supports OpenAI GPT API can use reverse engineered LLM libs

### Supported LLM libs

- [acheong08/ChatGPT](https://github.com/acheong08/ChatGPT)
- gpt-3.5-turbo
- gpt-4
|Adapter|Multi Round|Stream|Function Call|Status|Comment|
|---|---|---|---|---|---|
|[acheong08/ChatGPT](https://github.com/acheong08/ChatGPT)|||||ChatGPT Web Version|
|[KoushikNavuluri/Claude-API](https://github.com/KoushikNavuluri/Claude-API)|||||Claude Web Version|
|[dsdanielpark/Bard-API](https://github.com/dsdanielpark/Bard-API)|||||Google Bard Web Version|
|[xtekky/gpt4free](https://github.com/xtekky/gpt4free)|||||gpt4free, reverse-engineered access to multiple platforms|
|[Soulter/hugging-chat-api](https://github.com/Soulter/hugging-chat-api)|||||huggingface chat model|
|[xw5xr6/revTongYi](https://github.com/xw5xr6/revTongYi)|||||Aliyun TongYi QianWen Web Version|

### Supported API paths

Expand Down Expand Up @@ -65,7 +70,7 @@ then you can open the admin page at `http://localhost:3000/`.

1. Create channel on the admin page, create a new key.

![add_channel](assets/add_channel.png)
<img width="500" alt="image" src="assets/add_channel.png">

2. Set the url (e.g. http://localhost:3000/v1 ) as OpenAI endpoint, and set the generated key as OpenAI api key.
3. Then you can use the OpenAI API to access the reverse engineered LLM lib.
Expand Down
94 changes: 94 additions & 0 deletions free_one_api/impls/adapter/bard.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
import typing
import traceback
import uuid
import random

import bardapi as bard

from free_one_api.entities import request, response

from ...models import adapter
from ...models.adapter import llm
from ...entities import request, response, exceptions


@adapter.llm_adapter
class BardAdapter(llm.LLMLibAdapter):
    """Adapter exposing Google Bard (web edition, via dsdanielpark/Bard-API)
    through the OpenAI-compatible chat-completion interface.

    Non-stream only: the underlying library returns one full answer per call.
    """

    @classmethod
    def name(cls) -> str:
        """Unique adapter name shown in channel configuration."""
        return "dsdanielpark/Bard-API"

    @classmethod
    def description(cls) -> str:
        """Human-readable description of this adapter."""
        # fix: was `description(self)` — a @classmethod receives the class,
        # so the conventional first-parameter name is `cls`.
        return "Use dsdanielpark/Bard-API to access Google Bard web edition."

    def supported_models(self) -> list[str]:
        """Model names advertised to clients; requests for these route here."""
        return [
            "gpt-3.5-turbo",
            "gpt-4"
        ]

    def function_call_supported(self) -> bool:
        return False

    def stream_mode_supported(self) -> bool:
        return False

    def multi_round_supported(self) -> bool:
        return True

    @classmethod
    def config_comment(cls) -> str:
        """Help text shown when configuring a channel of this adapter type."""
        return \
"""Currently supports non stream mode only.
You should provide __Secure-1PSID as token extracted from cookies of Bard site.
{
    "token": "bQhxxxxxxxxxxx"
}
Method of getting __Secure-1PSID string, please refer to https://github.com/dsdanielpark/Bard-API
"""

    @classmethod
    def supported_path(cls) -> str:
        return "/v1/chat/completions"

    # Lazily created Bard client; class-level None means "not created yet".
    _chatbot: bard.Bard = None

    @property
    def chatbot(self) -> bard.Bard:
        """Create the Bard client on first access (lazy initialization)."""
        # fix: `is None` instead of `== None` — identity check is the Python
        # idiom and avoids invoking any custom __eq__.
        if self._chatbot is None:
            self._chatbot = bard.Bard(token=self.config['token'])
        return self._chatbot

    def __init__(self, config: dict):
        self.config = config

    async def test(self) -> tuple[bool, str]:
        """Probe the upstream service; returns (ok, error_message).

        fix: annotation was `(bool, str)`, a tuple of type objects rather than
        a type — `tuple[bool, str]` is the correct spelling.
        """
        try:
            # NOTE(review): get_answer() is a blocking HTTP call inside an
            # async method — it will stall the event loop while running.
            self.chatbot.get_answer("hello, please reply 'hi' only.")
            return True, ""
        except Exception as e:
            traceback.print_exc()
            return False, str(e)

    async def query(self, req: request.Request) -> typing.AsyncGenerator[response.Response, None]:
        """Flatten the chat history into one prompt and yield a single response."""
        prompt = ""

        for msg in req.messages:
            prompt += f"{msg['role']}: {msg['content']}\n"

        prompt += "assistant: "

        # Bard provides no completion id of its own, so fabricate one.
        random_int = random.randint(0, 1000000000)

        resp_text = self.chatbot.get_answer(prompt)['content']

        yield response.Response(
            id=random_int,
            finish_reason=response.FinishReason.STOP,
            normal_message=resp_text,
            function_call=None
        )
92 changes: 92 additions & 0 deletions free_one_api/impls/adapter/claude.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
import typing
import traceback
import uuid
import random

import claude_api as claude

from free_one_api.entities import request, response

from ...models import adapter
from ...models.adapter import llm
from ...entities import request, response, exceptions


@adapter.llm_adapter
class ClaudeAdapter(llm.LLMLibAdapter):
    """Adapter exposing Claude (web edition, via KoushikNavuluri/Claude-API)
    through the OpenAI-compatible chat-completion interface.

    Non-stream only: each query creates a throwaway conversation, sends the
    flattened prompt, and deletes the conversation afterwards.
    """

    @classmethod
    def name(cls) -> str:
        """Unique adapter name shown in channel configuration."""
        return "KoushikNavuluri/Claude-API"

    @classmethod
    def description(cls) -> str:
        """Human-readable description of this adapter."""
        # fix: was `description(self)` — a @classmethod receives the class,
        # so the conventional first-parameter name is `cls`.
        return "Use KoushikNavuluri/Claude-API to access Claude web edition."

    def supported_models(self) -> list[str]:
        """Model names advertised to clients; requests for these route here."""
        return [
            "gpt-3.5-turbo",
            "gpt-4"
        ]

    def function_call_supported(self) -> bool:
        return False

    def stream_mode_supported(self) -> bool:
        return False

    def multi_round_supported(self) -> bool:
        return True

    @classmethod
    def config_comment(cls) -> str:
        """Help text shown when configuring a channel of this adapter type."""
        return \
"""Currently supports non stream mode only.
You should provide cookie string as `cookie` in config:
{
    "cookie": "your cookie string"
}
Method of getting cookie string, please refer to https://github.com/KoushikNavuluri/Claude-API
"""

    @classmethod
    def supported_path(cls) -> str:
        return "/v1/chat/completions"

    # Claude web client, created eagerly in __init__ from the cookie string.
    chatbot: claude.Client

    def __init__(self, config: dict):
        self.config = config
        self.chatbot = claude.Client(self.config["cookie"])

    async def test(self) -> tuple[bool, str]:
        """Probe the upstream service; returns (ok, error_message).

        fix: annotation was `(bool, str)`, a tuple of type objects rather than
        a type — `tuple[bool, str]` is the correct spelling. Also fixed the
        local variable `response` shadowing the imported `response` module,
        and the test conversation leaking (never deleted).
        """
        try:
            conversation_id = self.chatbot.create_new_chat()['uuid']
            self.chatbot.send_message("Hello, Claude!", conversation_id)
            # Clean up the probe conversation, mirroring query().
            self.chatbot.delete_conversation(conversation_id)
            return True, ""
        except Exception as e:
            traceback.print_exc()
            return False, str(e)

    async def query(self, req: request.Request) -> typing.AsyncGenerator[response.Response, None]:
        """Flatten the chat history into one prompt and yield a single response."""
        prompt = ""

        for msg in req.messages:
            prompt += f"{msg['role']}: {msg['content']}\n"

        prompt += "assistant: "

        # Claude's web API has no OpenAI-style completion id, so fabricate one.
        random_int = random.randint(0, 1000000000)

        # One disposable conversation per request (multi-round context is
        # carried in the flattened prompt instead).
        conversation_id = self.chatbot.create_new_chat()['uuid']
        resp_text = self.chatbot.send_message(prompt, conversation_id)

        self.chatbot.delete_conversation(conversation_id)

        yield response.Response(
            id=random_int,
            finish_reason=response.FinishReason.STOP,
            normal_message=resp_text,
            function_call=None
        )
Loading

0 comments on commit 58fe27d

Please sign in to comment.