feat(g4f): Major provider updates and new model support (#2437)
* refactor(g4f/Provider/Airforce.py): Enhance Airforce provider with dynamic model fetching

* refactor(g4f/Provider/Blackbox.py): Enhance Blackbox AI provider configuration and streamline code

* feat(g4f/Provider/RobocodersAPI.py): Add new RobocodersAPI async chat provider

* refactor(g4f/client/__init__.py): Improve provider handling in async_generate method

* refactor(g4f/models.py): Update provider configurations for multiple models

* refactor(g4f/Provider/Blackbox.py): Streamline model configuration and improve response handling

* feat(g4f/Provider/DDG.py): Enhance model support and improve conversation handling

* refactor(g4f/Provider/Copilot.py): Enhance Copilot provider with model support

* refactor(g4f/Provider/AmigoChat.py): update models and improve code structure

* chore(g4f/Provider/not_working/AIUncensored.py): move AIUncensored to not_working directory

* chore(g4f/Provider/not_working/Allyfy.py): remove Allyfy provider

* Update (g4f/Provider/not_working/AIUncensored.py g4f/Provider/not_working/__init__.py)

* refactor(g4f/Provider/ChatGptEs.py): Implement format_prompt for message handling

* refactor(g4f/Provider/Blackbox.py): Update message formatting and improve code structure

* refactor(g4f/Provider/LLMPlayground.py): Enhance text generation and error handling

* refactor(g4f/Provider/needs_auth/PollinationsAI.py): move PollinationsAI to needs_auth directory

* refactor(g4f/Provider/Liaobots.py): Update Liaobots provider models and aliases

* feat(g4f/Provider/DeepInfraChat.py): Add new DeepInfra models and aliases

* Update (g4f/Provider/__init__.py)

* Update (g4f/models.py)

* g4f/models.py

* Update g4f/models.py

* Update g4f/Provider/LLMPlayground.py

* Update (g4f/models.py g4f/Provider/Airforce.py g4f/Provider/__init__.py g4f/Provider/LLMPlayground.py)

* Update g4f/Provider/__init__.py

* Update (g4f/Provider/Airforce.py)

---------

Co-authored-by: kqlio67 <[email protected]>
kqlio67 authored Nov 28, 2024
1 parent 2cf2f86 commit 8d5d522
Showing 15 changed files with 462 additions and 282 deletions.
89 changes: 60 additions & 29 deletions g4f/Provider/Airforce.py
@@ -14,6 +14,19 @@
from ..image import ImageResponse
from ..requests import StreamSession, raise_for_status

def split_message(message: str, max_length: int = 1000) -> list[str]:
    """Split a message into chunks of at most max_length characters, preferring word boundaries."""
    chunks = []
    while len(message) > max_length:
        split_point = message.rfind(' ', 0, max_length)
        if split_point == -1:
            split_point = max_length
        chunks.append(message[:split_point])
        message = message[split_point:].strip()
    if message:
        chunks.append(message)
    return chunks
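
For illustration, a quick standalone exercise of the new helper (the input text is made up):

message = "lorem ipsum " * 120                       # ~1440 characters of dummy text
chunks = split_message(message, max_length=1000)
assert all(len(chunk) <= 1000 for chunk in chunks)   # every chunk fits the limit
assert " ".join(chunks).split() == message.split()   # no words are lost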

class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://llmplayground.net"
    api_endpoint_completions = "https://api.airforce/chat/completions"
@@ -84,6 +97,7 @@ def get_models(cls):
        # HuggingFaceH4
        "zephyr-7b": "zephyr-7b-beta",


        ### imagine ###
        "sdxl": "stable-diffusion-xl-base",
        "sdxl": "stable-diffusion-xl-lightning",
@@ -125,7 +139,6 @@ async def _generate_image(
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"cache-control": "no-cache",
"origin": "https://llmplayground.net",
"user-agent": "Mozilla/5.0"
}
if seed is None:
@@ -167,35 +180,47 @@ async def _generate_text(
"content-type": "application/json",
"user-agent": "Mozilla/5.0"
}

full_message = "\n".join(
[f"{msg['role'].capitalize()}: {msg['content']}" for msg in messages]
)

message_chunks = split_message(full_message, max_length=1000)

async with StreamSession(headers=headers, proxy=proxy) as session:
data = {
"messages": messages,
"model": model,
"max_tokens": max_tokens,
"temperature": temperature,
"top_p": top_p,
"stream": stream
}
async with session.post(cls.api_endpoint_completions, json=data) as response:
await raise_for_status(response)
content_type = response.headers.get('Content-Type', '').lower()
if 'application/json' in content_type:
json_data = await response.json()
if json_data.get("model") == "error":
raise RuntimeError(json_data['choices'][0]['message'].get('content', ''))
if stream:
async for line in response.iter_lines():
if line:
line = line.decode('utf-8').strip()
if line.startswith("data: ") and line != "data: [DONE]":
json_data = json.loads(line[6:])
content = json_data['choices'][0]['delta'].get('content', '')
if content:
yield cls._filter_content(content)
else:
json_data = await response.json()
content = json_data['choices'][0]['message']['content']
yield cls._filter_content(content)
full_response = ""
for chunk in message_chunks:
data = {
"messages": [{"role": "user", "content": chunk}],
"model": model,
"max_tokens": max_tokens,
"temperature": temperature,
"top_p": top_p,
"stream": stream
}

async with session.post(cls.api_endpoint_completions, json=data) as response:
await raise_for_status(response)
content_type = response.headers.get('Content-Type', '').lower()

if 'application/json' in content_type:
json_data = await response.json()
if json_data.get("model") == "error":
raise RuntimeError(json_data['choices'][0]['message'].get('content', ''))
if stream:
async for line in response.iter_lines():
if line:
line = line.decode('utf-8').strip()
if line.startswith("data: ") and line != "data: [DONE]":
json_data = json.loads(line[6:])
content = json_data['choices'][0]['delta'].get('content', '')
if content:
yield cls._filter_content(content)
else:
content = json_data['choices'][0]['message']['content']
full_response += cls._filter_content(content)

yield full_response

@classmethod
def _filter_content(cls, part_response: str) -> str:
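
A minimal standalone sketch of the flattening-and-chunking flow this hunk introduces (dummy messages; split_message is the helper added at the top of this file):

import json

messages = [
    {"role": "system", "content": "Be concise."},
    {"role": "user", "content": "Hello!"},
]
full_message = "\n".join(
    f"{msg['role'].capitalize()}: {msg['content']}" for msg in messages
)
# -> "System: Be concise.\nUser: Hello!"
chunks = split_message(full_message, max_length=1000)

# Each streamed SSE line is still parsed the same way:
line = 'data: {"choices": [{"delta": {"content": "Hi"}}]}'
if line.startswith("data: ") and line != "data: [DONE]":
    payload = json.loads(line[6:])
    print(payload["choices"][0]["delta"].get("content", ""))  # "Hi"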
@@ -210,4 +235,10 @@ def _filter_content(cls, part_response: str) -> str:
            '',
            part_response
        )

        part_response = re.sub(
            r"\[ERROR\] '\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'",  # any-uncensored
            '',
            part_response
        )
        return part_response
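
The added pattern strips the provider's inline error markers; a standalone example with a made-up UUID:

import re

text = "Hello [ERROR] 'deadbeef-1234-abcd-5678-0123456789ab' world"
cleaned = re.sub(r"\[ERROR\] '\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'", '', text)
print(cleaned)  # "Hello  world" (a double space remains where the marker was)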
156 changes: 114 additions & 42 deletions g4f/Provider/AmigoChat.py
@@ -9,6 +9,69 @@
from ..requests import StreamSession, raise_for_status
from ..errors import ResponseStatusError

MODELS = {
    'chat': {
        'gpt-4o-2024-11-20': {'persona_id': "gpt"},
        'gpt-4o': {'persona_id': "summarizer"},
        'gpt-4o-mini': {'persona_id': "gemini-1-5-flash"},

        'o1-preview-': {'persona_id': "openai-o-one"},  # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
        'o1-preview-2024-09-12-': {'persona_id': "orion"},  # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
        'o1-mini-': {'persona_id': "openai-o-one-mini"},  # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan

        'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo': {'persona_id': "llama-three-point-one"},
        'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo': {'persona_id': "llama-3-2"},
        'codellama/CodeLlama-34b-Instruct-hf': {'persona_id': "codellama-CodeLlama-34b-Instruct-hf"},

        'gemini-1.5-pro': {'persona_id': "gemini-1-5-pro"},  # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
        'gemini-1.5-flash': {'persona_id': "amigo"},

        'claude-3-5-sonnet-20240620': {'persona_id': "claude"},
        'claude-3-5-sonnet-20241022': {'persona_id': "clude-claude-3-5-sonnet-20241022"},
        'claude-3-5-haiku-latest': {'persona_id': "3-5-haiku"},

        'Qwen/Qwen2.5-72B-Instruct-Turbo': {'persona_id': "qwen-2-5"},

        'google/gemma-2b-it': {'persona_id': "google-gemma-2b-it"},
        'google/gemma-7b': {'persona_id': "google-gemma-7b"},  # Error handling AIML chat completion stream

        'Gryphe/MythoMax-L2-13b': {'persona_id': "Gryphe-MythoMax-L2-13b"},

        'mistralai/Mistral-7B-Instruct-v0.3': {'persona_id': "mistralai-Mistral-7B-Instruct-v0.1"},
        'mistralai/mistral-tiny': {'persona_id': "mistralai-mistral-tiny"},
        'mistralai/mistral-nemo': {'persona_id': "mistralai-mistral-nemo"},

        'deepseek-ai/deepseek-llm-67b-chat': {'persona_id': "deepseek-ai-deepseek-llm-67b-chat"},

        'databricks/dbrx-instruct': {'persona_id': "databricks-dbrx-instruct"},

        'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO': {'persona_id': "NousResearch-Nous-Hermes-2-Mixtral-8x7B-DPO"},

        'x-ai/grok-beta': {'persona_id': "x-ai-grok-beta"},

        'anthracite-org/magnum-v4-72b': {'persona_id': "anthracite-org-magnum-v4-72b"},

        'cohere/command-r-plus': {'persona_id': "cohere-command-r-plus"},

        'ai21/jamba-1-5-mini': {'persona_id': "ai21-jamba-1-5-mini"},

        'zero-one-ai/Yi-34B': {'persona_id': "zero-one-ai-Yi-34B"}  # Error handling AIML chat completion stream
    },

    'image': {
        'flux-pro/v1.1': {'persona_id': "flux-1-1-pro"},  # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
        'flux-realism': {'persona_id': "flux-realism"},
        'flux-pro': {'persona_id': "flux-pro"},  # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
        'flux-pro/v1.1-ultra': {'persona_id': "flux-pro-v1.1-ultra"},  # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
        'flux-pro/v1.1-ultra-raw': {'persona_id': "flux-pro-v1.1-ultra-raw"},  # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
        'flux/dev': {'persona_id': "flux-dev"},

        'dalle-e-3': {'persona_id': "dalle-three"},

        'recraft-v3': {'persona_id': "recraft"}
    }
}

class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://amigochat.io/chat/"
    chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions"
@@ -17,58 +80,67 @@ class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'gpt-4o-mini'

    chat_models = [
        'gpt-4o',
        default_model,
        'o1-preview',
        'o1-mini',
        'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo',
        'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
        'claude-3-sonnet-20240229',
        'gemini-1.5-pro',
    ]

    image_models = [
        'flux-pro/v1.1',
        'flux-realism',
        'flux-pro',
        'dalle-e-3',
    ]

    models = [*chat_models, *image_models]

    chat_models = list(MODELS['chat'].keys())
    image_models = list(MODELS['image'].keys())
    models = chat_models + image_models

    model_aliases = {
        "o1": "o1-preview",
        ### chat ###
        "gpt-4o": "gpt-4o-2024-11-20",
        "gpt-4o-mini": "gpt-4o-mini",

        "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
        "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
        "claude-3.5-sonnet": "claude-3-sonnet-20240229",
        "gemini-pro": "gemini-1.5-pro",
        "codellama-34b": "codellama/CodeLlama-34b-Instruct-hf",

        "gemini-flash": "gemini-1.5-flash",

        "claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
        "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
        "claude-3.5-haiku": "claude-3-5-haiku-latest",

        "qwen-2.5-72b": "Qwen/Qwen2.5-72B-Instruct-Turbo",
        "gemma-2b": "google/gemma-2b-it",

        "mythomax-13b": "Gryphe/MythoMax-L2-13b",

        "mixtral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
        "mistral-tiny": "mistralai/mistral-tiny",
        "mistral-nemo": "mistralai/mistral-nemo",

        "deepseek-chat": "deepseek-ai/deepseek-llm-67b-chat",

        "dbrx-instruct": "databricks/dbrx-instruct",

        "mixtral-8x7b-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",

        "grok-beta": "x-ai/grok-beta",

        "magnum-72b": "anthracite-org/magnum-v4-72b",

        "command-r-plus": "cohere/command-r-plus",

        "jamba-mini": "ai21/jamba-1-5-mini",


        ### image ###
        "flux-realism": "flux-realism",
        "flux-dev": "flux/dev",

        "flux-pro": "flux-pro/v1.1",
        "dalle-3": "dalle-e-3",
    }
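
Assuming ProviderModelMixin.get_model resolves entries via model_aliases, as it does for other g4f providers, an aliased request would map roughly like this (illustrative only):

model = AmigoChat.get_model("claude-3.5-sonnet")
# expected -> "claude-3-5-sonnet-20241022"; with duplicate alias keys,
# only the last entry in the dict literal survives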

    persona_ids = {
        'gpt-4o': "gpt",
        'gpt-4o-mini': "amigo",
        'o1-preview': "openai-o-one",
        'o1-mini': "openai-o-one-mini",
        'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo': "llama-three-point-one",
        'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo': "llama-3-2",
        'claude-3-sonnet-20240229': "claude",
        'gemini-1.5-pro': "gemini-1-5-pro",
        'flux-pro/v1.1': "flux-1-1-pro",
        'flux-realism': "flux-realism",
        'flux-pro': "flux-pro",
        'dalle-e-3': "dalle-three",
    }

    @classmethod
    def get_personaId(cls, model: str) -> str:
        return cls.persona_ids[model]
        if model in cls.chat_models:
            return MODELS['chat'][model]['persona_id']
        elif model in cls.image_models:
            return MODELS['image'][model]['persona_id']
        else:
            raise ValueError(f"Unknown model: {model}")
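
With the new lookup, resolution goes through MODELS and unknown names fail loudly; for example (model names taken from the tables above):

AmigoChat.get_personaId('gemini-1.5-flash')   # -> "amigo"
AmigoChat.get_personaId('flux/dev')           # -> "flux-dev"
AmigoChat.get_personaId('no-such-model')      # raises ValueError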

    @classmethod
    async def create_async_generator(

@@ -110,7 +182,7 @@ async def create_async_generator(
            "x-device-language": "en-US",
            "x-device-platform": "web",
            "x-device-uuid": device_uuid,
            "x-device-version": "1.0.41"
            "x-device-version": "1.0.42"
        }

        async with StreamSession(headers=headers, proxy=proxy) as session:
9 changes: 7 additions & 2 deletions g4f/Provider/Blackbox.py
@@ -11,6 +11,8 @@
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse, to_data_uri

from .helper import format_prompt

class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
    label = "Blackbox AI"
    url = "https://www.blackbox.ai"
@@ -37,7 +39,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
"llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405"},
'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405"},
#
'Python Agent': {'mode': True, 'id': "Python Agent"},
'Java Agent': {'mode': True, 'id': "Java Agent"},
@@ -62,7 +64,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
        'Go Agent': {'mode': True, 'id': "Go Agent"},
        'Gitlab Agent': {'mode': True, 'id': "Gitlab Agent"},
        'Git Agent': {'mode': True, 'id': "Git Agent"},
        'Flask Agent': {'mode': True, 'id': "Flask Agent"},
        'Firebase Agent': {'mode': True, 'id': "Firebase Agent"},
        'FastAPI Agent': {'mode': True, 'id': "FastAPI Agent"},
        'Erlang Agent': {'mode': True, 'id': "Erlang Agent"},
@@ -165,6 +167,9 @@ async def create_async_generator(
        message_id = cls.generate_id()
        messages = cls.add_prefix_to_messages(messages, model)
        validated_value = await cls.fetch_validated()
        formatted_message = format_prompt(messages)

        messages = [{"id": message_id, "content": formatted_message, "role": "user"}]

        if image is not None:
            messages[-1]['data'] = {
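
Here format_prompt (imported from .helper) collapses the whole history into a single user message; a rough standalone sketch, assuming the usual g4f behavior of joining role-prefixed lines:

messages = [
    {"role": "system", "content": "Be brief."},
    {"role": "user", "content": "Hi"},
]
formatted_message = format_prompt(messages)
# e.g. "System: Be brief.\nUser: Hi\nAssistant:" (exact shape is defined in .helper)
message_id = "abc1234"  # in the provider this comes from cls.generate_id()
messages = [{"id": message_id, "content": formatted_message, "role": "user"}]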
4 changes: 3 additions & 1 deletion g4f/Provider/ChatGptEs.py
@@ -56,6 +56,8 @@ async def create_async_generator(
        nonce_ = re.findall(r'data-nonce="(.+?)"', await initial_response.text())[0]
        post_id = re.findall(r'data-post-id="(.+?)"', await initial_response.text())[0]

        formatted_prompt = format_prompt(messages)

        conversation_history = [
            "Human: You are a helpful AI assistant. Please respond in the same language that the user uses in their message. Provide accurate, relevant and helpful information while maintaining a friendly and professional tone. If you're not sure about something, please acknowledge that and provide the best information you can while noting any uncertainties. Focus on being helpful while respecting the user's choice of language."
        ]

@@ -71,7 +73,7 @@ async def create_async_generator(
            'post_id': post_id,
            'url': cls.url,
            'action': 'wpaicg_chat_shortcode_message',
            'message': messages[-1]['content'],
            'message': formatted_prompt,
            'bot_id': '0',
            'chatbot_identity': 'shortcode',
            'wpaicg_chat_client_id': os.urandom(5).hex(),
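
The scraped nonce and post id feed the payload above; a standalone sketch against a hypothetical page snippet:

import re

html = '<div data-nonce="abc123" data-post-id="42">'    # made-up markup
nonce_ = re.findall(r'data-nonce="(.+?)"', html)[0]     # 'abc123'
post_id = re.findall(r'data-post-id="(.+?)"', html)[0]  # '42'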
