
Merge pull request #2220 from kqlio67/main

Provider updates and optimizations across multiple modules
Tekky committed 1 month ago
commit cc80f2d315

+ 0 - 2
g4f/Provider/AiChatOnline.py

@@ -12,10 +12,8 @@ class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://aichatonlineorg.erweima.ai"
     url = "https://aichatonlineorg.erweima.ai"
     api_endpoint = "/aichatonline/api/chat/gpt"
     api_endpoint = "/aichatonline/api/chat/gpt"
     working = True
     working = True
-    supports_gpt_35_turbo = True
     supports_gpt_4 = True
     supports_gpt_4 = True
     default_model = 'gpt-4o-mini'
     default_model = 'gpt-4o-mini'
-    supports_message_history = False
 
 
     @classmethod
     @classmethod
     async def grab_token(
     async def grab_token(

+ 255 - 0
g4f/Provider/Airforce.py

@@ -0,0 +1,255 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession, ClientResponseError
+from urllib.parse import urlencode
+import json
+import io
+import asyncio
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse, is_accepted_format
+from .helper import format_prompt
+
+class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://api.airforce"
+    text_api_endpoint = "https://api.airforce/chat/completions"
+    image_api_endpoint = "https://api.airforce/v1/imagine2"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+    default_model = 'llama-3-70b-chat'
+    text_models = [
+        # Open source models
+        'llama-2-13b-chat',
+        
+        'llama-3-70b-chat',
+        'llama-3-70b-chat-turbo',
+        'llama-3-70b-chat-lite',
+        
+        'llama-3-8b-chat',
+        'llama-3-8b-chat-turbo',
+        'llama-3-8b-chat-lite',
+        
+        'llama-3.1-405b-turbo',
+        'llama-3.1-70b-turbo',
+        'llama-3.1-8b-turbo',
+        
+        'LlamaGuard-2-8b',
+        'Llama-Guard-7b',
+        'Meta-Llama-Guard-3-8B',
+
+        'Mixtral-8x7B-Instruct-v0.1',
+        'Mixtral-8x22B-Instruct-v0.1',
+        'Mistral-7B-Instruct-v0.1',
+        'Mistral-7B-Instruct-v0.2',
+        'Mistral-7B-Instruct-v0.3',
+        
+        'Qwen1.5-72B-Chat',
+        'Qwen1.5-110B-Chat',
+        'Qwen2-72B-Instruct',
+        
+        'gemma-2b-it',
+        'gemma-2-9b-it',
+        'gemma-2-27b-it',
+        
+        'dbrx-instruct',
+
+        'deepseek-llm-67b-chat',
+        
+        'Nous-Hermes-2-Mixtral-8x7B-DPO',
+        'Nous-Hermes-2-Yi-34B',
+        
+        'WizardLM-2-8x22B',
+        
+        'SOLAR-10.7B-Instruct-v1.0',
+        
+        'StripedHyena-Nous-7B',      
+        
+        'sparkdesk',
+        
+        
+        # Other models
+        'chatgpt-4o-latest',
+        'gpt-4',
+        'gpt-4-turbo',
+        'gpt-4o-mini-2024-07-18',
+        'gpt-4o-mini',
+        'gpt-4o',
+        'gpt-3.5-turbo',
+        'gpt-3.5-turbo-0125',
+        'gpt-3.5-turbo-1106',
+        'gpt-3.5-turbo-16k',
+        'gpt-3.5-turbo-0613',
+        'gpt-3.5-turbo-16k-0613',
+        
+        'gemini-1.5-flash',
+        'gemini-1.5-pro',
+    ]
+    image_models = [
+        'flux',
+        'flux-realism',
+        'flux-anime',
+        'flux-3d',
+        'flux-disney',
+        'flux-pixel',
+        'any-dark',
+    ]
+    
+    models = [
+        *text_models,
+        *image_models
+    ]
+    model_aliases = {
+        # Open source models
+        "llama-2-13b": "llama-2-13b-chat",
+        
+        "llama-3-70b": "llama-3-70b-chat",
+        "llama-3-70b": "llama-3-70b-chat-turbo",
+        "llama-3-70b": "llama-3-70b-chat-lite",
+        
+        "llama-3-8b": "llama-3-8b-chat",
+        "llama-3-8b": "llama-3-8b-chat-turbo",
+        "llama-3-8b": "llama-3-8b-chat-lite",
+        
+        "llama-3.1-405b": "llama-3.1-405b-turbo",
+        "llama-3.1-70b": "llama-3.1-70b-turbo",
+        "llama-3.1-8b": "llama-3.1-8b-turbo",
+        
+        "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
+        "mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1",
+        "mistral-7b": "Mistral-7B-Instruct-v0.1",
+        "mistral-7b": "Mistral-7B-Instruct-v0.2",
+        "mistral-7b": "Mistral-7B-Instruct-v0.3",
+        
+        "mixtral-8x7b-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
+        
+        "qwen-1-5-72b": "Qwen1.5-72B-Chat",
+        "qwen-1_5-110b": "Qwen1.5-110B-Chat",
+        "qwen-2-72b": "Qwen2-72B-Instruct",
+        
+        "gemma-2b": "gemma-2b-it",
+        "gemma-2b-9b": "gemma-2-9b-it",
+        "gemma-2b-27b": "gemma-2-27b-it",
+        
+        "deepseek": "deepseek-llm-67b-chat",
+        
+        "yi-34b": "Nous-Hermes-2-Yi-34B",
+        
+        "wizardlm-2-8x22b": "WizardLM-2-8x22B",
+        
+        "solar-10-7b": "SOLAR-10.7B-Instruct-v1.0",
+        
+        "sh-n-7b": "StripedHyena-Nous-7B",
+        
+        "sparkdesk-v1.1": "sparkdesk",
+        
+        
+        # Other models
+        "gpt-4o": "chatgpt-4o-latest",
+        "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+        
+        "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
+        "gpt-3.5-turbo": "gpt-3.5-turbo-1106",
+        "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
+        "gpt-3.5-turbo": "gpt-3.5-turbo-0613",
+        "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
+        
+        
+        "gemini-flash": "gemini-1.5-flash",
+        "gemini-pro": "gemini-1.5-pro",
+    }
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+        
+        headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "origin": "https://api.airforce",
+            "sec-ch-ua": '"Chromium";v="128", "Not(A:Brand";v="24"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "cross-site",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
+        }
+
+        
+        if model in cls.image_models:
+            async for item in cls.generate_image(model, messages, headers, proxy, **kwargs):
+                yield item
+        else:
+            async for item in cls.generate_text(model, messages, headers, proxy, **kwargs):
+                yield item
+
+    @classmethod
+    async def generate_text(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "messages": [{"role": "user", "content": format_prompt(messages)}],
+                "model": model,
+                "temperature": kwargs.get('temperature', 1),
+                "top_p": kwargs.get('top_p', 1),
+                "stream": True
+            }
+            
+            async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for line in response.content:
+                    if line:
+                        line = line.decode('utf-8').strip()
+                        if line.startswith("data: "):
+                            try:
+                                data = json.loads(line[6:])
+                                if 'choices' in data and len(data['choices']) > 0:
+                                    delta = data['choices'][0].get('delta', {})
+                                    if 'content' in delta:
+                                        yield delta['content']
+                            except json.JSONDecodeError:
+                                continue
+                        elif line == "data: [DONE]":
+                            break
+
+    @classmethod
+    async def generate_image(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
+        prompt = messages[-1]['content'] if messages else ""
+        params = {
+            "prompt": prompt,
+            "size": kwargs.get("size", "1:1"),
+            "seed": kwargs.get("seed"),
+            "model": model
+        }
+        params = {k: v for k, v in params.items() if v is not None}
+
+        try:
+            async with ClientSession(headers=headers) as session:
+                async with session.get(cls.image_api_endpoint, params=params, proxy=proxy) as response:
+                    response.raise_for_status()
+                    content = await response.read()
+                    
+                    if response.content_type.startswith('image/'):
+                        image_url = str(response.url)
+                        yield ImageResponse(image_url, prompt)
+                    else:
+                        try:
+                            text = content.decode('utf-8', errors='ignore')
+                            yield f"Error: {text}"
+                        except Exception as decode_error:
+                            yield f"Error: Unable to decode response - {str(decode_error)}"
+        except ClientResponseError as e:
+            yield f"Error: HTTP {e.status}: {e.message}"
+        except Exception as e:
+            yield f"Unexpected error: {str(e)}"

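Not part of the diff: a minimal usage sketch for the new Airforce provider, assuming the module is importable as g4f.Provider.Airforce and the upstream api.airforce endpoints are reachable. create_async_generator streams plain text chunks for text models and yields ImageResponse objects for the flux-* image models.

import asyncio
from g4f.Provider.Airforce import Airforce

async def main() -> None:
    messages = [{"role": "user", "content": "Say hello in one word."}]
    # Text models stream OpenAI-style "data: ..." lines, parsed into content deltas.
    async for chunk in Airforce.create_async_generator("llama-3-70b-chat", messages):
        print(chunk, end="", flush=True)

asyncio.run(main())
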
+ 6 - 1
g4f/Provider/Bixin123.py

@@ -2,6 +2,7 @@ from __future__ import annotations
 
 from aiohttp import ClientSession
 import json
+import random
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..typing import AsyncResult, Messages
 from .helper import format_prompt
@@ -30,6 +31,10 @@ class Bixin123(AsyncGeneratorProvider, ProviderModelMixin):
         else:
             return cls.default_model
 
+    @classmethod
+    def generate_fingerprint(cls) -> str:
+        return str(random.randint(100000000, 999999999))
+
     @classmethod
     async def create_async_generator(
         cls,
@@ -45,7 +50,7 @@ class Bixin123(AsyncGeneratorProvider, ProviderModelMixin):
             "accept-language": "en-US,en;q=0.9",
             "cache-control": "no-cache",
             "content-type": "application/json",
-            "fingerprint": "988148794",
+            "fingerprint": cls.generate_fingerprint(),
             "origin": cls.url,
             "pragma": "no-cache",
             "priority": "u=1, i",

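The net effect of the Bixin123 change: every request now carries a freshly generated nine-digit fingerprint header instead of the hard-coded "988148794". A standalone sketch of the same idea, outside the class:

import random

def generate_fingerprint() -> str:
    # Mirrors cls.generate_fingerprint() above: any nine-digit numeric string.
    return str(random.randint(100000000, 999999999))

headers = {"content-type": "application/json", "fingerprint": generate_fingerprint()}
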
+ 73 - 102
g4f/Provider/Blackbox.py

@@ -1,43 +1,40 @@
 from __future__ import annotations
 
-import uuid
-import secrets
 import re
-import base64
+import json
+import random
+import string
 from aiohttp import ClientSession
-from typing import AsyncGenerator, Optional
 
 from ..typing import AsyncResult, Messages, ImageType
-from ..image import to_data_uri, ImageResponse
+from ..image import ImageResponse, to_data_uri
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
 class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.blackbox.ai"
+    api_endpoint = "https://www.blackbox.ai/api/chat"
     working = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+    
     default_model = 'blackbox'
     models = [
-        default_model,
-        "gemini-1.5-flash",
+        'blackbox',
+        'gemini-1.5-flash',
         "llama-3.1-8b",
         'llama-3.1-70b',
         'llama-3.1-405b',
-        'ImageGeneration',
+        'ImageGenerationLV45LJp'
     ]
-    
-    model_aliases = {
-        "gemini-flash": "gemini-1.5-flash",
-    }
-    
-    agent_mode_map = {
-        'ImageGeneration': {"mode": True, "id": "ImageGenerationLV45LJp", "name": "Image Generation"},
-    }
 
-    model_id_map = {
+    model_config = {
         "blackbox": {},
         "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
         "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
         'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
-        'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}
+        'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
+        'ImageGenerationLV45LJp': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
     }
 
     @classmethod
@@ -49,108 +46,82 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         else:
             return cls.default_model
 
-    @classmethod
-    async def download_image_to_base64_url(cls, url: str) -> str:
-        async with ClientSession() as session:
-            async with session.get(url) as response:
-                image_data = await response.read()
-                base64_data = base64.b64encode(image_data).decode('utf-8')
-                mime_type = response.headers.get('Content-Type', 'image/jpeg')
-                return f"data:{mime_type};base64,{base64_data}"
-
     @classmethod
     async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
-        proxy: Optional[str] = None,
-        image: Optional[ImageType] = None,
-        image_name: Optional[str] = None,
+        proxy: str = None,
+        image: ImageType = None,
+        image_name: str = None,
         **kwargs
-    ) -> AsyncGenerator[AsyncResult, None]:
-        if image is not None:
-            messages[-1]["data"] = {
-                "fileText": image_name,
-                "imageBase64": to_data_uri(image),
-                "title": str(uuid.uuid4())
-            }
-
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+        
         headers = {
-            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
-            "Accept": "*/*",
-            "Accept-Language": "en-US,en;q=0.5",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Referer": cls.url,
-            "Content-Type": "application/json",
-            "Origin": cls.url,
-            "DNT": "1",
-            "Sec-GPC": "1",
-            "Alt-Used": "www.blackbox.ai",
-            "Connection": "keep-alive",
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "origin": cls.url,
+            "pragma": "no-cache",
+            "referer": f"{cls.url}/",
+            "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
        }
-
+        
         async with ClientSession(headers=headers) as session:
-            random_id = secrets.token_hex(16)
-            random_user_id = str(uuid.uuid4())
-            
-            model = cls.get_model(model)  # Resolve the model alias
+            if image is not None:
+                messages[-1]["data"] = {
+                    "fileText": image_name,
+                    "imageBase64": to_data_uri(image)
+                }
             
+            random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))
+
             data = {
                 "messages": messages,
                 "id": random_id,
-                "userId": random_user_id,
+                "previewToken": None,
+                "userId": None,
                 "codeModelMode": True,
-                "agentMode": cls.agent_mode_map.get(model, {}),
+                "agentMode": {},
                 "trendingAgentMode": {},
                 "isMicMode": False,
+                "maxTokens": None,
                 "isChromeExt": False,
-                "playgroundMode": False,
-                "webSearchMode": False,
-                "userSystemPrompt": "",
                 "githubToken": None,
-                "trendingAgentModel": cls.model_id_map.get(model, {}),
-                "maxTokens": None
+                "clickedAnswer2": False,
+                "clickedAnswer3": False,
+                "clickedForceWebSearch": False,
+                "visitFromDelta": False,
+                "mobileClient": False
             }
 
-            async with session.post(
-                f"{cls.url}/api/chat", json=data, proxy=proxy
-            ) as response:
+            if model == 'ImageGenerationLV45LJp':
+                data["agentMode"] = cls.model_config[model]
+            else:
+                data["trendingAgentMode"] = cls.model_config[model]
+            
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                 response.raise_for_status()
-                full_response = ""
-                buffer = ""
-                image_base64_url = None
-                async for chunk in response.content.iter_any():
-                    if chunk:
-                        decoded_chunk = chunk.decode()
-                        cleaned_chunk = re.sub(r'\$@\$.+?\$@\$|\$@\$', '', decoded_chunk)
-                        
-                        buffer += cleaned_chunk
-                        
-                        # Check if there's a complete image line in the buffer
-                        image_match = re.search(r'!\[Generated Image\]\((https?://[^\s\)]+)\)', buffer)
-                        if image_match:
-                            image_url = image_match.group(1)
-                            # Download the image and convert to base64 URL
-                            image_base64_url = await cls.download_image_to_base64_url(image_url)
-                            
-                            # Remove the image line from the buffer
-                            buffer = re.sub(r'!\[Generated Image\]\(https?://[^\s\)]+\)', '', buffer)
-                        
-                        # Send text line by line
-                        lines = buffer.split('\n')
-                        for line in lines[:-1]:
-                            if line.strip():
-                                full_response += line + '\n'
-                                yield line + '\n'
-                        buffer = lines[-1]  # Keep the last incomplete line in the buffer
-
-                # Send the remaining buffer if it's not empty
-                if buffer.strip():
-                    full_response += buffer
-                    yield buffer
-
-                # If an image was found, send it as ImageResponse
-                if image_base64_url:
-                    alt_text = "Generated Image"
-                    image_response = ImageResponse(image_base64_url, alt=alt_text)
-                    yield image_response
+                if model == 'ImageGenerationLV45LJp':
+                    response_text = await response.text()
+                    url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text)
+                    if url_match:
+                        image_url = url_match.group(0)
+                        yield ImageResponse(image_url, alt=messages[-1]['content'])
+                    else:
+                        raise Exception("Image URL not found in the response")
+                else:
+                    async for chunk in response.content:
+                        if chunk:
+                            decoded_chunk = chunk.decode()
+                            if decoded_chunk.startswith('$@$v=undefined-rv1$@$'):
+                                decoded_chunk = decoded_chunk[len('$@$v=undefined-rv1$@$'):]
+                            yield decoded_chunk
+ 0 - 82
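Not part of the diff: a sketch exercising both branches of the reworked Blackbox provider, assuming the usual g4f import paths. The image model id yields a single ImageResponse wrapping a storage.googleapis.com URL; text models stream chunks with the '$@$v=undefined-rv1$@$' prefix stripped.

import asyncio
from g4f.Provider.Blackbox import Blackbox
from g4f.image import ImageResponse

async def main() -> None:
    # Text branch: trendingAgentMode carries the model config.
    async for chunk in Blackbox.create_async_generator(
        "llama-3.1-70b", [{"role": "user", "content": "One-line haiku about code."}]
    ):
        print(chunk, end="")

    # Image branch: agentMode carries the config and the reply is scanned for a URL.
    async for item in Blackbox.create_async_generator(
        "ImageGenerationLV45LJp", [{"role": "user", "content": "a watercolor fox"}]
    ):
        if isinstance(item, ImageResponse):
            print(item)

asyncio.run(main())
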
g4f/Provider/FluxAirforce.py

@@ -1,82 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession, ClientResponseError
-from urllib.parse import urlencode
-import io
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..image import ImageResponse, is_accepted_format
-
-class FluxAirforce(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://flux.api.airforce/"
-    api_endpoint = "https://api.airforce/v1/imagine2"
-    working = True
-    default_model = 'flux-realism'
-    models = [
-        'flux',
-        'flux-realism',
-        'flux-anime',
-        'flux-3d',
-        'flux-disney'
-    ]
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "origin": "https://flux.api.airforce",
-            "priority": "u=1, i",
-            "referer": "https://flux.api.airforce/",
-            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-site",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
-        }
-
-        prompt = messages[-1]['content'] if messages else ""
-
-        params = {
-            "prompt": prompt,
-            "size": kwargs.get("size", "1:1"),
-            "seed": kwargs.get("seed"),
-            "model": model
-        }
-
-        params = {k: v for k, v in params.items() if v is not None}
-
-        try:
-            async with ClientSession(headers=headers) as session:
-                async with session.get(f"{cls.api_endpoint}", params=params, proxy=proxy) as response:
-                    response.raise_for_status()
-                    
-                    content = await response.read()
-                    
-                    if response.content_type.startswith('image/'):
-                        image_url = str(response.url)
-                        yield ImageResponse(image_url, prompt)
-                    else:
-                        try:
-                            text = content.decode('utf-8', errors='ignore')
-                            yield f"Error: {text}"
-                        except Exception as decode_error:
-                            yield f"Error: Unable to decode response - {str(decode_error)}"
-
-        except ClientResponseError as e:
-            yield f"Error: HTTP {e.status}: {e.message}"
-        except Exception as e:
-            yield f"Unexpected error: {str(e)}"
-
-        finally:
-            if not session.closed:
-                await session.close()

+ 5 - 8
g4f/Provider/HuggingChat.py

@@ -12,24 +12,21 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
     working = True
     supports_stream = True
     default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
+    
     models = [
         'meta-llama/Meta-Llama-3.1-70B-Instruct',
-        'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
-        'CohereForAI/c4ai-command-r-plus',
+        'CohereForAI/c4ai-command-r-plus-08-2024',
         'mistralai/Mixtral-8x7B-Instruct-v0.1',
         'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
-        '01-ai/Yi-1.5-34B-Chat',
         'mistralai/Mistral-7B-Instruct-v0.3',
         'microsoft/Phi-3-mini-4k-instruct',
     ]
     
     model_aliases = {
         "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
-        "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
-        "command-r-plus": "CohereForAI/c4ai-command-r-plus",
+        "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
         "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-        "mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
-        "yi-1.5-34b": "01-ai/Yi-1.5-34B-Chat",
+        "mixtral-8x7b-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
         "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
         "phi-3-mini-4k": "microsoft/Phi-3-mini-4k-instruct",
     }
@@ -80,7 +77,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
             response = session.post('https://huggingface.co/chat/conversation', json=json_data)
             conversationId = response.json()['conversationId']
 
-            response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=01',)
+            response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=11',)
 
             data: list = (response.json())["nodes"][1]["data"]
             keys: list[int] = data[data[0]["messages"]]

+ 5 - 22
g4f/Provider/HuggingFace.py

@@ -9,33 +9,16 @@ from .helper import get_connector
 from ..errors import RateLimitError, ModelNotFoundError
 from ..requests.raise_for_status import raise_for_status
 
+from .HuggingChat import HuggingChat
+
 class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://huggingface.co/chat"
     working = True
     needs_auth = True
     supports_message_history = True
-    default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
-    models = [
-        'meta-llama/Meta-Llama-3.1-70B-Instruct',
-        'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8',
-        'CohereForAI/c4ai-command-r-plus',
-        'mistralai/Mixtral-8x7B-Instruct-v0.1',
-        'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
-        '01-ai/Yi-1.5-34B-Chat',
-        'mistralai/Mistral-7B-Instruct-v0.3',
-        'microsoft/Phi-3-mini-4k-instruct',
-    ]
-    
-    model_aliases = {
-        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
-        "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
-        "command-r-plus": "CohereForAI/c4ai-command-r-plus",
-        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-        "mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
-        "yi-1.5-34b": "01-ai/Yi-1.5-34B-Chat",
-        "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
-        "phi-3-mini-4k": "microsoft/Phi-3-mini-4k-instruct",
-    }
+    default_model = HuggingChat.default_model
+    models = HuggingChat.models
+    model_aliases = HuggingChat.model_aliases
 
     @classmethod
     def get_model(cls, model: str) -> str:

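Since HuggingFace now aliases its model registry to HuggingChat instead of keeping a copy, the two lists can no longer drift apart. A quick sketch of the invariant this buys (assuming both modules import cleanly):

from g4f.Provider.HuggingChat import HuggingChat
from g4f.Provider.HuggingFace import HuggingFace

# Class-level assignment shares the same objects, so these are identities,
# not mere equalities: any later edit to HuggingChat.models is visible here too.
assert HuggingFace.models is HuggingChat.models
assert HuggingFace.model_aliases is HuggingChat.model_aliases
assert HuggingFace.default_model == HuggingChat.default_model
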
+ 6 - 5
g4f/Provider/Koala.py

@@ -10,7 +10,8 @@ from .helper import get_random_string, get_connector
 from ..requests import raise_for_status
 
 class Koala(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://koala.sh"
+    url = "https://koala.sh/chat"
+    api_endpoint = "https://koala.sh/api/gpt/"
     working = True
     supports_message_history = True
     supports_gpt_4 = True
@@ -26,17 +27,17 @@ class Koala(AsyncGeneratorProvider, ProviderModelMixin):
         **kwargs: Any
     ) -> AsyncGenerator[Dict[str, Union[str, int, float, List[Dict[str, Any]], None]], None]:
         if not model:
-            model = "gpt-3.5-turbo"
+            model = "gpt-4o-mini"
 
         headers = {
             "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
             "Accept": "text/event-stream",
             "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
             "Accept-Encoding": "gzip, deflate, br",
-            "Referer": f"{cls.url}/chat",
+            "Referer": f"{cls.url}",
             "Flag-Real-Time-Data": "false",
             "Visitor-ID": get_random_string(20),
-            "Origin": cls.url,
+            "Origin": "https://koala.sh",
             "Alt-Used": "koala.sh",
             "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
@@ -67,7 +68,7 @@ class Koala(AsyncGeneratorProvider, ProviderModelMixin):
                 "model": model,
             }
 
-            async with session.post(f"{cls.url}/api/gpt/", json=data, proxy=proxy) as response:
+            async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
                 await raise_for_status(response)
                 async for chunk in cls._parse_event_stream(response):
                     yield chunk

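A small sketch of the Koala fallback after this change (not part of the diff): an empty model string now resolves to gpt-4o-mini, and requests go to the new api_endpoint constant.

import asyncio
from g4f.Provider.Koala import Koala

async def main() -> None:
    # model="" falls back to "gpt-4o-mini" per the branch above.
    async for chunk in Koala.create_async_generator("", [{"role": "user", "content": "ping"}]):
        print(chunk, end="")

asyncio.run(main())
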
+ 0 - 91
g4f/Provider/Llama.py

@@ -1,91 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from ..requests.raise_for_status import raise_for_status
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-
-
-class Llama(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://www.llama2.ai"
-    working = False
-    supports_message_history = True
-    default_model = "meta/meta-llama-3-70b-instruct"
-    models = [
-        "meta/llama-2-7b-chat",
-        "meta/llama-2-13b-chat",
-        "meta/llama-2-70b-chat",
-        "meta/meta-llama-3-8b-instruct",
-        "meta/meta-llama-3-70b-instruct",
-    ]
-    model_aliases = {
-        "meta-llama/Meta-Llama-3-8B-Instruct": "meta/meta-llama-3-8b-instruct",
-        "meta-llama/Meta-Llama-3-70B-Instruct": "meta/meta-llama-3-70b-instruct",
-        "meta-llama/Llama-2-7b-chat-hf": "meta/llama-2-7b-chat",
-        "meta-llama/Llama-2-13b-chat-hf": "meta/llama-2-13b-chat",
-        "meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat",
-    }
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        system_message: str = "You are a helpful assistant.",
-        temperature: float = 0.75,
-        top_p: float = 0.9,
-        max_tokens: int = 8000,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
-            "Accept": "*/*",
-            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Referer": f"{cls.url}/",
-            "Content-Type": "text/plain;charset=UTF-8",
-            "Origin": cls.url,
-            "Connection": "keep-alive",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "Pragma": "no-cache",
-            "Cache-Control": "no-cache",
-            "TE": "trailers"
-        }
-        async with ClientSession(headers=headers) as session:
-            system_messages = [message["content"] for message in messages if message["role"] == "system"]
-            if system_messages:
-                system_message = "\n".join(system_messages)
-                messages = [message for message in messages if message["role"] != "system"] 
-            prompt = format_prompt(messages)
-            data = {
-                "prompt": prompt,
-                "model": cls.get_model(model),
-                "systemPrompt": system_message,
-                "temperature": temperature,
-                "topP": top_p,
-                "maxTokens": max_tokens,
-                "image": None
-            }
-            started = False
-            async with session.post(f"{cls.url}/api", json=data, proxy=proxy) as response:
-                await raise_for_status(response)
-                async for chunk in response.content.iter_any():
-                    if not chunk:
-                        continue
-                    if not started:
-                        chunk = chunk.lstrip()
-                        started = True
-                    yield chunk.decode(errors="ignore")
-            
-def format_prompt(messages: Messages):
-    messages = [
-        f"[INST] {message['content']} [/INST]"
-        if message["role"] == "user"
-        else message["content"]
-        for message in messages
-    ]
-    return "\n".join(messages) + "\n"

+ 43 - 108
g4f/Provider/Nexra.py

@@ -1,40 +1,32 @@
 from __future__ import annotations
-
 import json
-import base64
 from aiohttp import ClientSession
-from typing import AsyncGenerator
 
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..image import ImageResponse
 from .helper import format_prompt
+from ..image import ImageResponse
 
 class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://nexra.aryahcr.cc"
-    api_endpoint_text = "https://nexra.aryahcr.cc/api/chat/gpt"
-    api_endpoint_image = "https://nexra.aryahcr.cc/api/image/complements"
+    chat_api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt"
+    image_api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
     working = True
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
-    supports_stream = True
     supports_system_message = True
     supports_message_history = True
     
     default_model = 'gpt-3.5-turbo'
-    models = [
-        # Text models
+    text_models = [
         'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-0314', 'gpt-4-32k-0314',
         'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
         'gpt-3', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002',
         'text-curie-001', 'text-babbage-001', 'text-ada-001',
         'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002',
-        # Image models
-        'dalle', 'dalle-mini', 'emi'
     ]
-    
-    image_models = {"dalle", "dalle-mini", "emi"}
-    text_models = set(models) - image_models
+    image_models = ['dalle', 'dalle2', 'dalle-mini', 'emi']
+    models = [*text_models, *image_models]
     
     model_aliases = {
         "gpt-4": "gpt-4-0613",
@@ -60,11 +52,13 @@ class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
         "gpt-3": "ada",
         "gpt-3": "babbage-002",
         "gpt-3": "davinci-002",
+        
+        "dalle-2": "dalle2",
     }
-
+    
     @classmethod
     def get_model(cls, model: str) -> str:
-        if model in cls.models:
+        if model in cls.text_models or model in cls.image_models:
             return model
         elif model in cls.model_aliases:
             return cls.model_aliases[model]
@@ -78,104 +72,45 @@ class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
         messages: Messages,
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator[str | ImageResponse, None]:
+    ) -> AsyncResult:
         model = cls.get_model(model)
         
-        if model in cls.image_models:
-            async for result in cls.create_image_async_generator(model, messages, proxy, **kwargs):
-                yield result
-        else:
-            async for result in cls.create_text_async_generator(model, messages, proxy, **kwargs):
-                yield result
-
-    @classmethod
-    async def create_text_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncGenerator[str, None]:
         headers = {
             "Content-Type": "application/json",
         }
+        
         async with ClientSession(headers=headers) as session:
-            data = {
-                "messages": messages,
-                "prompt": format_prompt(messages),
-                "model": model,
-                "markdown": False,
-                "stream": False,
-            }
-            async with session.post(cls.api_endpoint_text, json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                result = await response.text()
-                json_result = json.loads(result)
-                yield json_result["gpt"]
-
-    @classmethod
-    async def create_image_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncGenerator[ImageResponse | str, None]:
-        headers = {
-            "Content-Type": "application/json"
-        }
-
-        prompt = messages[-1]['content'] if messages else ""
-
-        data = {
-            "prompt": prompt,
-            "model": model
-        }
-
-        async def process_response(response_text: str) -> ImageResponse | None:
-            json_start = response_text.find('{')
-            if json_start != -1:
-                json_data = response_text[json_start:]
-                try:
-                    response_data = json.loads(json_data)
-                    image_data = response_data.get('images', [])[0]
+            if model in cls.image_models:
+                # Image generation
+                prompt = messages[-1]['content'] if messages else ""
+                data = {
+                    "prompt": prompt,
+                    "model": model,
+                    "response": "url"
+                }
+                async with session.post(cls.image_api_endpoint, json=data, proxy=proxy) as response:
+                    response.raise_for_status()
+                    result = await response.text()
+                    result_json = json.loads(result.strip('_'))
+                    image_url = result_json['images'][0] if result_json['images'] else None
                     
-                    if image_data.startswith('data:image/'):
-                        return ImageResponse([image_data], "Generated image")
+                    if image_url:
+                        yield ImageResponse(images=image_url, alt=prompt)
+            else:
+                # Text completion
+                data = {
+                    "messages": messages,
+                    "prompt": format_prompt(messages),
+                    "model": model,
+                    "markdown": False
+                }
+                async with session.post(cls.chat_api_endpoint, json=data, proxy=proxy) as response:
+                    response.raise_for_status()
+                    result = await response.text()
                     
                     try:
-                        base64.b64decode(image_data)
-                        data_uri = f"data:image/jpeg;base64,{image_data}"
-                        return ImageResponse([data_uri], "Generated image")
-                    except:
-                        print("Invalid base64 data")
-                        return None
-                except json.JSONDecodeError:
-                    print("Failed to parse JSON.")
-            else:
-                print("No JSON data found in the response.")
-            return None
-
-        async with ClientSession(headers=headers) as session:
-            async with session.post(cls.api_endpoint_image, json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                response_text = await response.text()
-                
-                image_response = await process_response(response_text)
-                if image_response:
-                    yield image_response
-                else:
-                    yield "Failed to process image data."
-
-    @classmethod
-    async def create_async(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> str:
-        async for response in cls.create_async_generator(model, messages, proxy, **kwargs):
-            if isinstance(response, ImageResponse):
-                return response.images[0]
-            return response
+                        json_response = json.loads(result)
+                        gpt_response = json_response.get('gpt', '')
+                        yield gpt_response
+                    except json.JSONDecodeError:
+                        yield result

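Not part of the diff: a sketch of the consolidated Nexra entry point, which now routes inside a single generator. Image models post to image_api_endpoint with "response": "url" and yield an ImageResponse; everything else posts to chat_api_endpoint and yields the "gpt" field of the JSON reply.

import asyncio
from g4f.Provider.Nexra import Nexra
from g4f.image import ImageResponse

async def main() -> None:
    # "dalle-2" resolves through model_aliases to "dalle2", an image model.
    async for item in Nexra.create_async_generator(
        "dalle-2", [{"role": "user", "content": "a lighthouse at dusk"}]
    ):
        if isinstance(item, ImageResponse):
            print(item)  # wraps the URL returned by the API

asyncio.run(main())
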
+ 10 - 1
g4f/Provider/PerplexityLabs.py

@@ -13,7 +13,7 @@ WS_URL = "wss://www.perplexity.ai/socket.io/"
 class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://labs.perplexity.ai"
     working = True
-    default_model = "llama-3.1-8b-instruct"
+    default_model = "llama-3.1-70b-instruct"
     models = [
         "llama-3.1-sonar-large-128k-online",
         "llama-3.1-sonar-small-128k-online",
@@ -22,6 +22,15 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
         "llama-3.1-8b-instruct",
         "llama-3.1-70b-instruct",
     ]
+    
+    model_aliases = {
+        "llama-3.1-8b": "llama-3.1-sonar-large-128k-online",
+        "llama-3.1-8b": "sonar-small-128k-online",
+        "llama-3.1-8b": "llama-3.1-sonar-large-128k-chat",
+        "llama-3.1-8b": "llama-3.1-sonar-small-128k-chat",
+        "llama-3.1-8b": "llama-3.1-8b-instruct",
+        "llama-3.1-70b": "llama-3.1-70b-instruct",
+    }
 
     @classmethod
     async def create_async_generator(

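Worth noting about the added alias table: a Python dict literal keeps only the last occurrence of a repeated key, so the five "llama-3.1-8b" entries collapse to a single mapping when the class body executes. A sketch of what get_model actually sees:

aliases = {
    "llama-3.1-8b": "llama-3.1-sonar-large-128k-online",
    "llama-3.1-8b": "llama-3.1-8b-instruct",  # last duplicate wins
    "llama-3.1-70b": "llama-3.1-70b-instruct",
}
assert aliases == {
    "llama-3.1-8b": "llama-3.1-8b-instruct",
    "llama-3.1-70b": "llama-3.1-70b-instruct",
}
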
+ 149 - 0
g4f/Provider/Prodia.py

@@ -0,0 +1,149 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import time
+import asyncio
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse
+
+class Prodia(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://app.prodia.com"
+    api_endpoint = "https://api.prodia.com/generate"
+    working = True
+    
+    default_model = 'absolutereality_v181.safetensors [3d9d4d2b]'
+    models = [
+        '3Guofeng3_v34.safetensors [50f420de]',
+        'absolutereality_V16.safetensors [37db0fc3]',
+        'absolutereality_v181.safetensors [3d9d4d2b]',
+        'amIReal_V41.safetensors [0a8a2e61]',
+        'analog-diffusion-1.0.ckpt [9ca13f02]',
+        'aniverse_v30.safetensors [579e6f85]',
+        'anythingv3_0-pruned.ckpt [2700c435]',
+        'anything-v4.5-pruned.ckpt [65745d25]',
+        'anythingV5_PrtRE.safetensors [893e49b9]',
+        'AOM3A3_orangemixs.safetensors [9600da17]',
+        'blazing_drive_v10g.safetensors [ca1c1eab]',
+        'breakdomain_I2428.safetensors [43cc7d2f]',
+        'breakdomain_M2150.safetensors [15f7afca]',
+        'cetusMix_Version35.safetensors [de2f2560]',
+        'childrensStories_v13D.safetensors [9dfaabcb]',
+        'childrensStories_v1SemiReal.safetensors [a1c56dbb]',
+        'childrensStories_v1ToonAnime.safetensors [2ec7b88b]',
+        'Counterfeit_v30.safetensors [9e2a8f19]',
+        'cuteyukimixAdorable_midchapter3.safetensors [04bdffe6]',
+        'cyberrealistic_v33.safetensors [82b0d085]',
+        'dalcefo_v4.safetensors [425952fe]',
+        'deliberate_v2.safetensors [10ec4b29]',
+        'deliberate_v3.safetensors [afd9d2d4]',
+        'dreamlike-anime-1.0.safetensors [4520e090]',
+        'dreamlike-diffusion-1.0.safetensors [5c9fd6e0]',
+        'dreamlike-photoreal-2.0.safetensors [fdcf65e7]',
+        'dreamshaper_6BakedVae.safetensors [114c8abb]',
+        'dreamshaper_7.safetensors [5cf5ae06]',
+        'dreamshaper_8.safetensors [9d40847d]',
+        'edgeOfRealism_eorV20.safetensors [3ed5de15]',
+        'EimisAnimeDiffusion_V1.ckpt [4f828a15]',
+        'elldreths-vivid-mix.safetensors [342d9d26]',
+        'epicphotogasm_xPlusPlus.safetensors [1a8f6d35]',
+        'epicrealism_naturalSinRC1VAE.safetensors [90a4c676]',
+        'epicrealism_pureEvolutionV3.safetensors [42c8440c]',
+        'ICantBelieveItsNotPhotography_seco.safetensors [4e7a3dfd]',
+        'indigoFurryMix_v75Hybrid.safetensors [91208cbb]',
+        'juggernaut_aftermath.safetensors [5e20c455]',
+        'lofi_v4.safetensors [ccc204d6]',
+        'lyriel_v16.safetensors [68fceea2]',
+        'majicmixRealistic_v4.safetensors [29d0de58]',
+        'mechamix_v10.safetensors [ee685731]',
+        'meinamix_meinaV9.safetensors [2ec66ab0]',
+        'meinamix_meinaV11.safetensors [b56ce717]',
+        'neverendingDream_v122.safetensors [f964ceeb]',
+        'openjourney_V4.ckpt [ca2f377f]',
+        'pastelMixStylizedAnime_pruned_fp16.safetensors [793a26e8]',
+        'portraitplus_V1.0.safetensors [1400e684]',
+        'protogenx34.safetensors [5896f8d5]',
+        'Realistic_Vision_V1.4-pruned-fp16.safetensors [8d21810b]',
+        'Realistic_Vision_V2.0.safetensors [79587710]',
+        'Realistic_Vision_V4.0.safetensors [29a7afaa]',
+        'Realistic_Vision_V5.0.safetensors [614d1063]',
+        'Realistic_Vision_V5.1.safetensors [a0f13c83]',
+        'redshift_diffusion-V10.safetensors [1400e684]',
+        'revAnimated_v122.safetensors [3f4fefd9]',
+        'rundiffusionFX25D_v10.safetensors [cd12b0ee]',
+        'rundiffusionFX_v10.safetensors [cd4e694d]',
+        'sdv1_4.ckpt [7460a6fa]',
+        'v1-5-pruned-emaonly.safetensors [d7049739]',
+        'v1-5-inpainting.safetensors [21c7ab71]',
+        'shoninsBeautiful_v10.safetensors [25d8c546]',
+        'theallys-mix-ii-churned.safetensors [5d9225a4]',
+        'timeless-1.0.ckpt [7c4971d4]',
+        'toonyou_beta6.safetensors [980f6b15]',
+    ]
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+        
+        headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "origin": cls.url,
+            "referer": f"{cls.url}/",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
+        }
+        
+        async with ClientSession(headers=headers) as session:
+            prompt = messages[-1]['content'] if messages else ""
+            
+            params = {
+                "new": "true",
+                "prompt": prompt,
+                "model": model,
+                "negative_prompt": kwargs.get("negative_prompt", ""),
+                "steps": kwargs.get("steps", 20),
+                "cfg": kwargs.get("cfg", 7),
+                "seed": kwargs.get("seed", int(time.time())),
+                "sampler": kwargs.get("sampler", "DPM++ 2M Karras"),
+                "aspect_ratio": kwargs.get("aspect_ratio", "square")
+            }
+            
+            async with session.get(cls.api_endpoint, params=params, proxy=proxy) as response:
+                response.raise_for_status()
+                job_data = await response.json()
+                job_id = job_data["job"]
+                
+                image_url = await cls._poll_job(session, job_id, proxy)
+                yield ImageResponse(image_url, alt=prompt)
+
+    @classmethod
+    async def _poll_job(cls, session: ClientSession, job_id: str, proxy: str, max_attempts: int = 30, delay: int = 2) -> str:
+        for _ in range(max_attempts):
+            async with session.get(f"https://api.prodia.com/job/{job_id}", proxy=proxy) as response:
+                response.raise_for_status()
+                job_status = await response.json()
+
+                if job_status["status"] == "succeeded":
+                    return f"https://images.prodia.xyz/{job_id}.png"
+                elif job_status["status"] == "failed":
+                    raise Exception("Image generation failed")
+
+            await asyncio.sleep(delay)
+
+        raise Exception("Timeout waiting for image generation")

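Not part of the diff: a minimal sketch of generating an image through the new Prodia provider. steps, cfg, seed, sampler, negative_prompt, and aspect_ratio are the kwargs the code above reads, and the generator yields once _poll_job reports "succeeded".

import asyncio
from g4f.Provider.Prodia import Prodia

async def main() -> None:
    messages = [{"role": "user", "content": "isometric voxel castle at sunset"}]
    async for image in Prodia.create_async_generator(
        Prodia.default_model, messages, steps=25, cfg=7, aspect_ratio="portrait"
    ):
        print(image)  # ImageResponse pointing at https://images.prodia.xyz/<job_id>.png

asyncio.run(main())
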
+ 98 - 115
g4f/Provider/ReplicateHome.py

@@ -1,66 +1,60 @@
 from __future__ import annotations
 from __future__ import annotations
-from typing import Generator, Optional, Dict, Any, Union, List
-import random
+
+import json
 import asyncio
 import asyncio
-import base64
+from aiohttp import ClientSession, ContentTypeError
 
 
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..typing import AsyncResult, Messages
 from ..typing import AsyncResult, Messages
-from ..requests import StreamSession, raise_for_status
-from ..errors import ResponseError
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
 from ..image import ImageResponse
 from ..image import ImageResponse
 
 
 class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
 class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://replicate.com"
     url = "https://replicate.com"
-    parent = "Replicate"
+    api_endpoint = "https://homepage.replicate.com/api/prediction"
     working = True
     working = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+    
     default_model = 'meta/meta-llama-3-70b-instruct'
     default_model = 'meta/meta-llama-3-70b-instruct'
-    models = [
-        # Models for image generation
-        'stability-ai/stable-diffusion-3',
-        'bytedance/sdxl-lightning-4step',
-        'playgroundai/playground-v2.5-1024px-aesthetic',
-        
-        # Models for image generation
+    
+    text_models = [
         'meta/meta-llama-3-70b-instruct',
         'meta/meta-llama-3-70b-instruct',
         'mistralai/mixtral-8x7b-instruct-v0.1',
         'mistralai/mixtral-8x7b-instruct-v0.1',
         'google-deepmind/gemma-2b-it',
         'google-deepmind/gemma-2b-it',
+        'yorickvp/llava-13b',
     ]
     ]
 
 
-    versions = {
-        # Model versions for generating images
-        'stability-ai/stable-diffusion-3': [
-            "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f"
-        ],
-        'bytedance/sdxl-lightning-4step': [
-            "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f"
-        ],
-        'playgroundai/playground-v2.5-1024px-aesthetic': [
-            "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24"
-        ],
-        
-        # Model versions for text generation
-        'meta/meta-llama-3-70b-instruct': [
-            "dp-cf04fe09351e25db628e8b6181276547"
-        ],
-        'mistralai/mixtral-8x7b-instruct-v0.1': [
-            "dp-89e00f489d498885048e94f9809fbc76"
-        ],
-        'google-deepmind/gemma-2b-it': [
-            "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626"
-        ]
-    }
-
-    image_models = {"stability-ai/stable-diffusion-3", "bytedance/sdxl-lightning-4step", "playgroundai/playground-v2.5-1024px-aesthetic"}
-    text_models = {"meta/meta-llama-3-70b-instruct", "mistralai/mixtral-8x7b-instruct-v0.1", "google-deepmind/gemma-2b-it"}
+    image_models = [
+        'black-forest-labs/flux-schnell',
+        'stability-ai/stable-diffusion-3',
+        'bytedance/sdxl-lightning-4step',
+        'playgroundai/playground-v2.5-1024px-aesthetic',
+    ]
 
+    models = text_models + image_models
+    
     model_aliases = {
+        "flux-schnell": "black-forest-labs/flux-schnell",
         "sd-3": "stability-ai/stable-diffusion-3",
         "sdxl": "bytedance/sdxl-lightning-4step",
         "playground-v2.5": "playgroundai/playground-v2.5-1024px-aesthetic",
         "llama-3-70b": "meta/meta-llama-3-70b-instruct",
         "mixtral-8x7b": "mistralai/mixtral-8x7b-instruct-v0.1",
         "gemma-2b": "google-deepmind/gemma-2b-it",
+        "llava-13b": "yorickvp/llava-13b",
+    }
+
+    model_versions = {
+        "meta/meta-llama-3-70b-instruct": "fbfb20b472b2f3bdd101412a9f70a0ed4fc0ced78a77ff00970ee7a2383c575d",
+        "mistralai/mixtral-8x7b-instruct-v0.1": "5d78bcd7a992c4b793465bcdcf551dc2ab9668d12bb7aa714557a21c1e77041c",
+        "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626",
+        "yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb",
+        'black-forest-labs/flux-schnell': "f2ab8a5bfe79f02f0789a146cf5e73d2a4ff2684a98c2b303d1e1ff3814271db",
+        'stability-ai/stable-diffusion-3': "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f",
+        'bytedance/sdxl-lightning-4step': "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f",
+        'playgroundai/playground-v2.5-1024px-aesthetic': "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24",
     }
 
     @classmethod
@@ -77,84 +71,73 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
         cls,
         model: str,
         messages: Messages,
-        **kwargs: Any
-    ) -> Generator[Union[str, ImageResponse], None, None]:
-        yield await cls.create_async(messages[-1]["content"], model, **kwargs)
-
-    @classmethod
-    async def create_async(
-        cls,
-        prompt: str,
-        model: str,
-        api_key: Optional[str] = None,
-        proxy: Optional[str] = None,
-        timeout: int = 180,
-        version: Optional[str] = None,
-        extra_data: Dict[str, Any] = {},
-        **kwargs: Any
-    ) -> Union[str, ImageResponse]:
-        model = cls.get_model(model)  # Use the get_model method to resolve model name
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+        
         headers = {
-            'Accept-Encoding': 'gzip, deflate, br',
-            'Accept-Language': 'en-US',
-            'Connection': 'keep-alive',
-            'Origin': cls.url,
-            'Referer': f'{cls.url}/',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-site',
-            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"',
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "origin": "https://replicate.com",
+            "pragma": "no-cache",
+            "priority": "u=1, i",
+            "referer": "https://replicate.com/",
+            "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
         }
-
-        if version is None:
-            version = random.choice(cls.versions.get(model, []))
-        if api_key is not None:
-            headers["Authorization"] = f"Bearer {api_key}"
-
-        async with StreamSession(
-            proxies={"all": proxy},
-            headers=headers,
-            timeout=timeout
-        ) as session:
+        
+        async with ClientSession(headers=headers) as session:
+            if model in cls.image_models:
+                prompt = messages[-1]['content'] if messages else ""
+            else:
+                prompt = format_prompt(messages)
+            
             data = {
-                "input": {
-                    "prompt": prompt,
-                    **extra_data
-                },
-                "version": version
+                "model": model,
+                "version": cls.model_versions[model],
+                "input": {"prompt": prompt},
             }
-            if api_key is None:
-                data["model"] = model
-                url = "https://homepage.replicate.com/api/prediction"
-            else:
-                url = "https://api.replicate.com/v1/predictions"
-            async with session.post(url, json=data) as response:
-                await raise_for_status(response)
+            
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
                 result = await response.json()
-            if "id" not in result:
-                raise ResponseError(f"Invalid response: {result}")
+                prediction_id = result['id']
+            
+            poll_url = f"https://homepage.replicate.com/api/poll?id={prediction_id}"
+            max_attempts = 30
+            delay = 5
+            for _ in range(max_attempts):
+                async with session.get(poll_url, proxy=proxy) as response:
+                    response.raise_for_status()
+                    try:
+                        result = await response.json()
+                    except ContentTypeError:
+                        text = await response.text()
+                        try:
+                            result = json.loads(text)
+                        except json.JSONDecodeError:
+                            raise ValueError(f"Unexpected response format: {text}")
 
-            while True:
-                if api_key is None:
-                    url = f"https://homepage.replicate.com/api/poll?id={result['id']}"
-                else:
-                    url = f"https://api.replicate.com/v1/predictions/{result['id']}"
-                async with session.get(url) as response:
-                    await raise_for_status(response)
-                    result = await response.json()
-                    if "status" not in result:
-                        raise ResponseError(f"Invalid response: {result}")
-                    if result["status"] == "succeeded":
-                        output = result['output']
-                        if model in cls.text_models:
-                            return ''.join(output) if isinstance(output, list) else output
-                        elif model in cls.image_models:
-                            images: List[Any] = output
-                            images = images[0] if len(images) == 1 else images
-                            return ImageResponse(images, prompt)
-                    elif result["status"] == "failed":
-                        raise ResponseError(f"Prediction failed: {result}")
-                    await asyncio.sleep(0.5)
+                    if result['status'] == 'succeeded':
+                        if model in cls.image_models:
+                            image_url = result['output'][0]
+                            yield ImageResponse(image_url, "Generated image")
+                            return
+                        else:
+                            for chunk in result['output']:
+                                yield chunk
+                        break
+                    elif result['status'] == 'failed':
+                        raise Exception(f"Prediction failed: {result.get('error')}")
+                await asyncio.sleep(delay)
+            
+            if result['status'] != 'succeeded':
+                raise Exception("Prediction timed out")
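
Usage note (not part of the diff): a sketch of calling the reworked provider, assuming ProviderModelMixin.get_model resolves the aliases declared above. Text models stream chunks from the poll loop; image models yield a single ImageResponse.

import asyncio
from g4f.Provider import ReplicateHome

async def main():
    # "llama-3-70b" resolves to meta/meta-llama-3-70b-instruct via model_aliases.
    async for chunk in ReplicateHome.create_async_generator(
        model="llama-3-70b",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
    ):
        print(chunk, end="")

asyncio.run(main())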

+ 0 - 70
g4f/Provider/Rocks.py

@@ -1,70 +0,0 @@
-import asyncio
-import json
-from aiohttp import ClientSession
-from ..typing import Messages, AsyncResult
-from .base_provider import AsyncGeneratorProvider
-
-class Rocks(AsyncGeneratorProvider):
-    url = "https://api.airforce"
-    api_endpoint = "/chat/completions"
-    supports_message_history = True
-    supports_gpt_35_turbo = True
-    supports_gpt_4 = True
-    supports_stream = True
-    supports_system_message = True
-    working = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        payload = {"messages":messages,"model":model,"max_tokens":4096,"temperature":1,"top_p":1,"stream":True}
-
-        headers = {
-            "Accept": "application/json",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": "en-US,en;q=0.9",
-            "Authorization": "Bearer missing api key",
-            "Origin": "https://llmplayground.net",
-            "Referer": "https://llmplayground.net/",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
-        }
-
-        async with ClientSession() as session:
-            async with session.post(
-                f"{cls.url}{cls.api_endpoint}",
-                json=payload,
-                proxy=proxy,
-                headers=headers
-            ) as response:
-                response.raise_for_status()
-                last_chunk_time = asyncio.get_event_loop().time()
-                
-                async for line in response.content:
-                    current_time = asyncio.get_event_loop().time()
-                    if current_time - last_chunk_time > 5:
-                        return
-                    
-                    if line.startswith(b"\n"):
-                        pass
-                    elif "discord.com/invite/" in line.decode() or "discord.gg/" in line.decode():
-                        pass # trolled
-                    elif line.startswith(b"data: "):
-                        try:
-                            line = json.loads(line[6:])
-                        except json.JSONDecodeError:
-                            continue
-                        chunk = line["choices"][0]["delta"].get("content")
-                        if chunk:
-                            yield chunk
-                            last_chunk_time = current_time
-                    else:
-                        raise Exception(f"Unexpected line: {line}")
-                return
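
Note: Rocks targeted the same https://api.airforce backend, so the Airforce provider added in this pull request supersedes it. A rough equivalent of the old usage, assuming the standard async-generator interface:

import asyncio
from g4f.Provider import Airforce

async def main():
    async for chunk in Airforce.create_async_generator(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Hello!"}],
    ):
        print(chunk, end="")

asyncio.run(main())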

+ 1 - 3
g4f/Provider/Snova.py

@@ -24,10 +24,9 @@ class Snova(AsyncGeneratorProvider, ProviderModelMixin):
         'Meta-Llama-3.1-70B-Instruct',
         'Meta-Llama-3.1-405B-Instruct',
         'Samba-CoE',
-        'ignos/Mistral-T5-7B-v1',
+        'ignos/Mistral-T5-7B-v1', # known issue: returns erroneous answers
         'v1olet/v1olet_merged_dpo_7B',
         'macadeliccc/WestLake-7B-v2-laser-truthy-dpo',
-        'cookinai/DonutLM-v1',
     ]
     
     model_aliases = {
@@ -40,7 +39,6 @@ class Snova(AsyncGeneratorProvider, ProviderModelMixin):
         "samba-coe-v0.1": "Samba-CoE",
         "v1olet-merged-7b": "v1olet/v1olet_merged_dpo_7B",
         "westlake-7b-v2": "macadeliccc/WestLake-7B-v2-laser-truthy-dpo",
-        "donutlm-v1": "cookinai/DonutLM-v1",
     }
 
     @classmethod
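
Usage note (not part of the diff): the aliases above are resolved by ProviderModelMixin.get_model, so callers can pass either form. A short sketch of the lookup:

from g4f.Provider import Snova

# Short aliases map onto the full checkpoint names in `models`.
print(Snova.get_model("samba-coe-v0.1"))  # -> "Samba-CoE"
print(Snova.get_model("westlake-7b-v2"))  # -> "macadeliccc/WestLake-7B-v2-laser-truthy-dpo"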

+ 2 - 3
g4f/Provider/__init__.py

@@ -13,6 +13,7 @@ from .AI365VIP         import AI365VIP
 from .Allyfy           import Allyfy
 from .AiChatOnline     import AiChatOnline
 from .AiChats          import AiChats
+from .Airforce         import Airforce
 from .Aura             import Aura
 from .Bing             import Bing
 from .BingCreateImages import BingCreateImages
@@ -28,7 +29,6 @@ from .DDG              import DDG
 from .DeepInfra        import DeepInfra
 from .DeepInfraImage   import DeepInfraImage
 from .FlowGpt          import FlowGpt
-from .FluxAirforce     import FluxAirforce
 from .Free2GPT         import Free2GPT
 from .FreeChatgpt      import FreeChatgpt
 from .FreeGpt          import FreeGpt
@@ -41,7 +41,6 @@ from .HuggingFace      import HuggingFace
 from .Koala            import Koala
 from .Liaobots         import Liaobots
 from .LiteIcoding      import LiteIcoding
-from .Llama            import Llama
 from .Local            import Local
 from .MagickPen        import MagickPen
 from .MetaAI           import MetaAI
@@ -51,11 +50,11 @@ from .Ollama           import Ollama
 from .PerplexityLabs   import PerplexityLabs
 from .Pi               import Pi
 from .Pizzagpt         import Pizzagpt
+from .Prodia           import Prodia
 from .Reka             import Reka
 from .Snova            import Snova
 from .Replicate        import Replicate
 from .ReplicateHome    import ReplicateHome
-from .Rocks            import Rocks
 from .TeachAnything    import TeachAnything
 from .TwitterBio       import TwitterBio
 from .Upstage          import Upstage

+ 3 - 3
g4f/Provider/bing/conversation.py

@@ -33,9 +33,9 @@ async def create_conversation(session: StreamSession, headers: dict, tone: str)
     Conversation: An instance representing the created conversation.
     """
     if tone == "Copilot":
-        url = "https://copilot.microsoft.com/turing/conversation/create?bundleVersion=1.1690.0"
+        url = "https://copilot.microsoft.com/turing/conversation/create?bundleVersion=1.1809.0"
     else:
-        url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1690.0"
+        url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1809.0"
     async with session.get(url, headers=headers) as response:
         if response.status == 404:
             raise RateLimitError("Response 404: Do less requests and reuse conversations")
@@ -90,4 +90,4 @@ async def delete_conversation(session: StreamSession, conversation: Conversation
             response = await response.json()
             return response["result"]["value"] == "Success"
     except:
-        return False
+        return False

+ 0 - 116
g4f/Provider/selenium/AItianhuSpace.py

@@ -1,116 +0,0 @@
-from __future__ import annotations
-
-import time
-import random
-
-from ...typing import CreateResult, Messages
-from ..base_provider import AbstractProvider
-from ..helper import format_prompt, get_random_string
-from ...webdriver import WebDriver, WebDriverSession, element_send_text
-from ... import debug
-
-class AItianhuSpace(AbstractProvider):
-    url = "https://chat3.aiyunos.top/"
-    working = True
-    supports_stream = True
-    supports_gpt_35_turbo = True
-    _domains = ["aitianhu.com", "aitianhu1.top"]
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool,
-        domain: str = None,
-        proxy: str = None,
-        timeout: int = 120,
-        webdriver: WebDriver = None,
-        headless: bool = True,
-        **kwargs
-    ) -> CreateResult:
-        if not model:
-            model = "gpt-3.5-turbo"
-        if not domain:
-            rand = get_random_string(6)
-            domain = random.choice(cls._domains)
-            domain = f"{rand}.{domain}"
-        if debug.logging:
-            print(f"AItianhuSpace | using domain: {domain}")
-        url = f"https://{domain}"
-        prompt = format_prompt(messages)
-
-        with WebDriverSession(webdriver, "", headless=headless, proxy=proxy) as driver:
-            from selenium.webdriver.common.by import By
-            from selenium.webdriver.support.ui import WebDriverWait
-            from selenium.webdriver.support import expected_conditions as EC
-
-            wait = WebDriverWait(driver, timeout)
-
-            # Bypass devtools detection
-            driver.get("https://blank.page/")
-            wait.until(EC.visibility_of_element_located((By.ID, "sheet")))
-            driver.execute_script(f"""
-    document.getElementById('sheet').addEventListener('click', () => {{
-        window.open(arguments[0]);
-    }});
-            """, url)
-            driver.find_element(By.ID, "sheet").click()
-            time.sleep(10)
-
-            original_window = driver.current_window_handle
-            for window_handle in driver.window_handles:
-                if window_handle != original_window:
-                    driver.close()
-                    driver.switch_to.window(window_handle)
-                    break
-
-            # Wait for page load
-            wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea.n-input__textarea-el")))
-
-            # Register hook in XMLHttpRequest
-            script = """
-const _http_request_open = XMLHttpRequest.prototype.open;
-window._last_message = window._message = "";
-window._loadend = false;
-XMLHttpRequest.prototype.open = function(method, url) {
-    if (url == "/api/chat-process") {
-        this.addEventListener("progress", (event) => {
-            const lines = this.responseText.split("\\n");
-            try {
-                window._message = JSON.parse(lines[lines.length-1])["text"];
-            } catch(e) { }
-        });
-        this.addEventListener("loadend", (event) => {
-            window._loadend = true;
-        });
-    }
-    return _http_request_open.call(this, method, url);
-}
-"""
-            driver.execute_script(script)
-
-            # Submit prompt
-            element_send_text(driver.find_element(By.CSS_SELECTOR, "textarea.n-input__textarea-el"), prompt)
-
-            # Read response
-            while True:
-                chunk = driver.execute_script("""
-if (window._message && window._message != window._last_message) {
-    try {
-        return window._message.substring(window._last_message.length);
-    } finally {
-        window._last_message = window._message;
-    }
-}
-if (window._loadend) {
-    return null;
-}
-return "";
-""")
-                if chunk:
-                    yield chunk
-                elif chunk != "":
-                    break
-                else:
-                    time.sleep(0.1)

+ 0 - 1
g4f/Provider/selenium/__init__.py

@@ -1,4 +1,3 @@
-from .AItianhuSpace import AItianhuSpace
 from .MyShell import MyShell
 from .PerplexityAi import PerplexityAi
 from .Phind import Phind

+ 184 - 59
g4f/models.py

@@ -5,6 +5,7 @@ from dataclasses import dataclass
 from .Provider import IterListProvider, ProviderType
 from .Provider import (
     AiChatOnline,
+    Airforce,
     Allyfy,
     Bing,
     Binjie,
@@ -18,7 +19,6 @@ from .Provider import (
     DDG,
     DeepInfra,
     DeepInfraImage,
-    FluxAirforce,
     Free2GPT,
     FreeChatgpt,
     FreeGpt,
@@ -105,7 +105,7 @@ gpt_35_turbo = Model(
     name          = 'gpt-3.5-turbo',
     base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        Allyfy, TwitterBio, Nexra, Bixin123, CodeNews,
+        Allyfy, TwitterBio, Nexra, Bixin123, CodeNews, Airforce,
     ])
 )
 
@@ -114,7 +114,8 @@ gpt_4o = Model(
     name          = 'gpt-4o',
     base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        Liaobots, Chatgpt4o, OpenaiChat,
+        Liaobots, Chatgpt4o, Airforce, 
+        OpenaiChat,
     ])
 )
 
@@ -122,8 +123,8 @@ gpt_4o_mini = Model(
     name          = 'gpt-4o-mini',
     base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        DDG, Liaobots, You, FreeNetfly, Pizzagpt, ChatgptFree, AiChatOnline, CodeNews, 
-        MagickPen, OpenaiChat, Koala,       
+        DDG, Liaobots, You, FreeNetfly, Pizzagpt, ChatgptFree, AiChatOnline, CodeNews, MagickPen, Airforce,  
+        OpenaiChat, Koala,       
     ])
 )
 
@@ -131,7 +132,7 @@ gpt_4_turbo = Model(
     name          = 'gpt-4-turbo',
     base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        Nexra, Bixin123, Liaobots, Bing
+        Nexra, Bixin123, Liaobots, Airforce, Bing
     ])
 )
 
@@ -139,7 +140,7 @@ gpt_4 = Model(
     name          = 'gpt-4',
     base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        Chatgpt4Online, Nexra, Binjie, Bing,
+        Chatgpt4Online, Nexra, Binjie, Airforce, Bing,
         gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider
     ])
 )
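
Usage note (not part of the diff): since Airforce now appears in several fallback lists, it can also be pinned explicitly via g4f's provider keyword, bypassing the IterListProvider rotation above. A minimal sketch:

import g4f
from g4f.Provider import Airforce

# Call the newly added provider directly instead of the fallback chain.
response = g4f.ChatCompletion.create(
    model="gpt-4o-mini",
    provider=Airforce,
    messages=[{"role": "user", "content": "Ping"}],
)
print(response)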
@@ -159,54 +160,74 @@ meta = Model(
     best_provider = MetaAI
 )
 
+llama_2_13b = Model(
+    name          = "llama-2-13b",
+    base_provider = "Meta",
+    best_provider = IterListProvider([Airforce])
+)
+
 llama_3_8b = Model(
     name          = "llama-3-8b",
     base_provider = "Meta",
-    best_provider = IterListProvider([DeepInfra, Replicate])
+    best_provider = IterListProvider([Airforce, DeepInfra, Replicate])
 )
 
 llama_3_70b = Model(
     name          = "llama-3-70b",
     base_provider = "Meta",
-    best_provider = IterListProvider([ReplicateHome, DeepInfra, PerplexityLabs, Replicate])
+    best_provider = IterListProvider([ReplicateHome, Airforce, DeepInfra, Replicate])
 )
 
 llama_3_1_8b = Model(
     name          = "llama-3.1-8b",
     base_provider = "Meta",
-    best_provider = IterListProvider([Blackbox])
+    best_provider = IterListProvider([Blackbox, Airforce, PerplexityLabs])
 )
 
 llama_3_1_70b = Model(
     name          = "llama-3.1-70b",
     base_provider = "Meta",
-    best_provider = IterListProvider([DDG, HuggingChat, FreeGpt, Blackbox, TeachAnything, Free2GPT, HuggingFace])
+    best_provider = IterListProvider([DDG, HuggingChat, FreeGpt, Blackbox, TeachAnything, Free2GPT, Airforce, HuggingFace, PerplexityLabs])
 )
 
 llama_3_1_405b = Model(
     name          = "llama-3.1-405b",
     base_provider = "Meta",
-    best_provider = IterListProvider([HuggingChat, Blackbox, HuggingFace])
+    best_provider = IterListProvider([Blackbox, Airforce])
 )
 
+
 ### Mistral ###
+mistral_7b = Model(
+    name          = "mistral-7b",
+    base_provider = "Mistral",
+    best_provider = IterListProvider([HuggingChat, Airforce, HuggingFace, DeepInfra])
+)
+
 mixtral_8x7b = Model(
     name          = "mixtral-8x7b",
     base_provider = "Mistral",
-    best_provider = IterListProvider([HuggingChat, DDG, ReplicateHome, TwitterBio, DeepInfra, HuggingFace,])
+    best_provider = IterListProvider([HuggingChat, DDG, ReplicateHome, TwitterBio, Airforce, DeepInfra, HuggingFace])
 )
 
-mistral_7b = Model(
-    name          = "mistral-7b",
+mixtral_8x22b = Model(
+    name          = "mixtral-8x22b",
     base_provider = "Mistral",
-    best_provider = IterListProvider([HuggingChat, HuggingFace, DeepInfra])
+    best_provider = IterListProvider([Airforce])
 )
 
-### 01-ai ###
-yi_1_5_34b = Model(
-    name          = "yi-1.5-34b",
-    base_provider = "01-ai",
-    best_provider = IterListProvider([HuggingChat, HuggingFace])
+
+### NousResearch ###
+mixtral_8x7b_dpo = Model(
+    name          = "mixtral-8x7b-dpo",
+    base_provider = "NousResearch",
+    best_provider = IterListProvider([HuggingChat, Airforce, HuggingFace])
+)
+
+yi_34b = Model(
+    name = 'yi-34b',
+    base_provider = 'NousResearch',
+    best_provider = IterListProvider([Airforce])
 )
 
 
@@ -214,35 +235,51 @@ yi_1_5_34b = Model(
 phi_3_mini_4k = Model(
     name          = "phi-3-mini-4k",
     base_provider = "Microsoft",
-    best_provider = IterListProvider([HuggingFace, HuggingChat])
+    best_provider = IterListProvider([HuggingChat, HuggingFace])
 )
 
 
 ### Google ###
 # gemini
-gemini = Model(
-    name          = 'gemini',
-    base_provider = 'Google',
-    best_provider = Gemini
-)
-
 gemini_pro = Model(
     name          = 'gemini-pro',
     base_provider = 'Google',
-    best_provider = IterListProvider([GeminiPro, ChatGot, Liaobots])
+    best_provider = IterListProvider([GeminiPro, ChatGot, Liaobots, Airforce])
 )
 
 gemini_flash = Model(
     name          = 'gemini-flash',
     base_provider = 'Google',
-    best_provider = IterListProvider([Liaobots, Blackbox])
+    best_provider = IterListProvider([Blackbox, Liaobots, Airforce])
+)
+
+gemini = Model(
+    name          = 'gemini',
+    base_provider = 'Google',
+    best_provider = IterListProvider([
+        Gemini, 
+        gemini_flash.best_provider, gemini_pro.best_provider
+    ])
 )
 
+
 # gemma
 gemma_2b = Model(
     name          = 'gemma-2b',
     base_provider = 'Google',
-    best_provider = IterListProvider([ReplicateHome])
+    best_provider = IterListProvider([ReplicateHome, Airforce])
+)
+
+gemma_2b_9b = Model(
+    name          = 'gemma-2b-9b',
+    base_provider = 'Google',
+    best_provider = IterListProvider([Airforce])
+)
+
+gemma_2b_27b = Model(
+    name          = 'gemma-2b-27b',
+    base_provider = 'Google',
+    best_provider = IterListProvider([Airforce])
 )
 
 ### Anthropic ###
@@ -309,7 +346,7 @@ blackbox = Model(
 dbrx_instruct = Model(
     name = 'dbrx-instruct',
     base_provider = 'Databricks',
-    best_provider = IterListProvider([DeepInfra])
+    best_provider = IterListProvider([Airforce, DeepInfra])
 )
 
 
@@ -325,7 +362,7 @@ command_r_plus = Model(
 sparkdesk_v1_1 = Model(
     name = 'sparkdesk-v1.1',
     base_provider = 'iFlytek',
-    best_provider = IterListProvider([FreeChatgpt])
+    best_provider = IterListProvider([FreeChatgpt, Airforce])
 )
 
 ### Qwen ###
@@ -335,6 +372,24 @@ qwen_1_5_14b = Model(
     best_provider = IterListProvider([FreeChatgpt])
 )
 
+qwen_1_5_72b = Model(
+    name = 'qwen-1.5-72b',
+    base_provider = 'Qwen',
+    best_provider = IterListProvider([Airforce])
+)
+
+qwen_1_5_110b = Model(
+    name = 'qwen-1.5-110b',
+    base_provider = 'Qwen',
+    best_provider = IterListProvider([Airforce])
+)
+
+qwen_2_72b = Model(
+    name = 'qwen-2-72b',
+    base_provider = 'Qwen',
+    best_provider = IterListProvider([Airforce])
+)
+
 qwen_turbo = Model(
     name = 'qwen-turbo',
     base_provider = 'Qwen',
@@ -358,7 +413,10 @@ glm_4_9b = Model(
 glm_4 = Model(
     name = 'glm-4',
     base_provider = 'Zhipu AI',
-    best_provider = IterListProvider([CodeNews, glm_4_9b.best_provider,])
+    best_provider = IterListProvider([
+        CodeNews, 
+        glm_3_6b.best_provider, glm_4_9b.best_provider
+    ])
 )
 
 ### 01-ai ###
@@ -368,14 +426,20 @@ yi_1_5_9b = Model(
     best_provider = IterListProvider([FreeChatgpt])
 )
 
-
-### Pi ###
+### Upstage ###
 solar_1_mini = Model(
     name = 'solar-1-mini',
     base_provider = 'Upstage',
     best_provider = IterListProvider([Upstage])
 )
 
+solar_10_7b = Model(
+    name = 'solar-10-7b',
+    base_provider = 'Upstage',
+    best_provider = Airforce
+)
+
+
 ### Pi ###
 pi = Model(
     name = 'pi',
@@ -404,21 +468,33 @@ westlake_7b_v2 = Model(
     best_provider = Snova
 )
 
-### CookinAI ###
-donutlm_v1 = Model(
-    name = 'donutlm-v1',
-    base_provider = 'CookinAI',
-    best_provider = Snova
-)
-
 ### DeepSeek ###
 deepseek = Model(
     name = 'deepseek',
     base_provider = 'DeepSeek',
-    best_provider = CodeNews
+    best_provider = IterListProvider([CodeNews, Airforce])
+)
+
+### WizardLM ###
+wizardlm_2_8x22b = Model(
+    name = 'wizardlm-2-8x22b',
+    base_provider = 'WizardLM',
+    best_provider = Airforce
 )
 
+### Together ###
+sh_n_7b = Model(
+    name = 'sh-n-7b',
+    base_provider = 'Together',
+    best_provider = Airforce
+)
 
+### Yorickvp ###
+llava_13b = Model(
+    name = 'llava-13b',
+    base_provider = 'Yorickvp',
+    best_provider = ReplicateHome
+)
 
 #############
 ### Image ###
@@ -451,35 +527,49 @@ playground_v2_5 = Model(
 flux = Model(
     name = 'flux',
     base_provider = 'Flux AI',
-    best_provider = IterListProvider([FluxAirforce])
+    best_provider = IterListProvider([Airforce])
     
 )
 
 flux_realism = Model(
     name = 'flux-realism',
     base_provider = 'Flux AI',
-    best_provider = IterListProvider([FluxAirforce])
+    best_provider = IterListProvider([Airforce])
     
 )
 
 flux_anime = Model(
     name = 'flux-anime',
     base_provider = 'Flux AI',
-    best_provider = IterListProvider([FluxAirforce])
+    best_provider = IterListProvider([Airforce])
     
 )
 
 flux_3d = Model(
     name = 'flux-3d',
     base_provider = 'Flux AI',
-    best_provider = IterListProvider([FluxAirforce])
+    best_provider = IterListProvider([Airforce])
     
 )
 
 flux_disney = Model(
     name = 'flux-disney',
     base_provider = 'Flux AI',
-    best_provider = IterListProvider([FluxAirforce])
+    best_provider = IterListProvider([Airforce])
+    
+)
+
+flux_pixel = Model(
+    name = 'flux-pixel',
+    base_provider = 'Flux AI',
+    best_provider = IterListProvider([Airforce])
+    
+)
+
+flux_schnell = Model(
+    name = 'flux-schnell',
+    base_provider = 'Flux AI',
+    best_provider = IterListProvider([ReplicateHome])
     
 )
 
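Usage note (not part of the diff): the flux-* entries above route to Airforce's image endpoint, while flux-schnell routes to ReplicateHome. A sketch of generating an image through the async-generator interface:

import asyncio
from g4f.Provider import Airforce

async def main():
    # For image models the provider yields an ImageResponse rather than text chunks.
    async for item in Airforce.create_async_generator(
        model="flux-realism",
        messages=[{"role": "user", "content": "an isometric city at dusk"}],
    ):
        print(item)

asyncio.run(main())
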
@@ -491,6 +581,13 @@ dalle = Model(
     
 )
 
+dalle_2 = Model(
+    name = 'dalle-2',
+    base_provider = '',
+    best_provider = IterListProvider([Nexra])
+    
+)
+
 dalle_mini = Model(
     name = 'dalle-mini',
     base_provider = '',
@@ -505,6 +602,13 @@ emi = Model(
     
 )
 
+any_dark = Model(
+    name = 'any-dark',
+    base_provider = '',
+    best_provider = IterListProvider([Airforce])
+    
+)
+
 class ModelUtils:
     """
     Utility class for mapping string identifiers to Model instances.
@@ -535,6 +639,9 @@ class ModelUtils:
 ### Meta ###
 "meta-ai": meta,
 
+# llama-2
+'llama-2-13b': llama_2_13b,
+
 # llama-3
 'llama-3-8b': llama_3_8b,
 'llama-3-70b': llama_3_70b,
@@ -543,15 +650,18 @@ class ModelUtils:
 'llama-3.1-8b': llama_3_1_8b,
 'llama-3.1-70b': llama_3_1_70b,
 'llama-3.1-405b': llama_3_1_405b,
-        
+      
         
 ### Mistral ###
-'mixtral-8x7b': mixtral_8x7b,
 'mistral-7b': mistral_7b,
-
-
-### 01-ai ###
-'yi-1.5-34b': yi_1_5_34b,
+'mixtral-8x7b': mixtral_8x7b,
+'mixtral-8x22b': mixtral_8x22b,
+     
+     
+### NousResearch ###
+'mixtral-8x7b-dpo': mixtral_8x7b_dpo,  
+ 
+'yi-34b': yi_34b,   
         
         
 ### Microsoft ###
@@ -566,6 +676,8 @@ class ModelUtils:
         
 # gemma
 'gemma-2b': gemma_2b,
+'gemma-2b-9b': gemma_2b_9b,
+'gemma-2b-27b': gemma_2b_27b,
 
 
 ### Anthropic ###
@@ -575,8 +687,8 @@ class ModelUtils:
         
 'claude-3-opus': claude_3_opus,
 'claude-3-sonnet': claude_3_sonnet,
-'claude-3-5-sonnet': claude_3_5_sonnet,
 'claude-3-haiku': claude_3_haiku,
+'claude-3-5-sonnet': claude_3_5_sonnet,
         
         
 ### Reka AI ###
@@ -605,6 +717,9 @@ class ModelUtils:
         
 ### Qwen ###
 'qwen-1.5-14b': qwen_1_5_14b,
+'qwen-1.5-72b': qwen_1_5_72b,
+'qwen-1.5-110b': qwen_1_5_110b,
+'qwen-2-72b': qwen_2_72b,
 'qwen-turbo': qwen_turbo,
         
         
@@ -620,6 +735,7 @@ class ModelUtils:
         
 ### Upstage ###
 'solar-1-mini': solar_1_mini,
+'solar-10-7b': solar_10_7b,
 
 
 ### Pi ###
@@ -638,11 +754,16 @@ class ModelUtils:
 'westlake-7b-v2': westlake_7b_v2,
 
 
-### CookinAI ###
-'donutlm-v1': donutlm_v1,
-
 ### DeepSeek ###
 'deepseek': deepseek,
+
+
+### Together ###
+'sh-n-7b': sh_n_7b,
+      
+        
+### Yorickvp ###
+'llava-13b': llava_13b,
         
         
         
@@ -665,12 +786,16 @@ class ModelUtils:
 'flux-anime': flux_anime,
 'flux-3d': flux_3d,
 'flux-disney': flux_disney,
+'flux-pixel': flux_pixel,
+'flux-schnell': flux_schnell,
 
 
 ###  ###
 'dalle': dalle,
+'dalle-2': dalle_2,
 'dalle-mini': dalle_mini,
 'emi': emi,
+'any-dark': any_dark,
     }
 
 _all_models = list(ModelUtils.convert.keys())
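
Usage note (not part of the diff): with the registry updated, any key in ModelUtils.convert is a valid model string, and IterListProvider tries the listed providers in order until one answers. For example:

import g4f

response = g4f.ChatCompletion.create(
    model="llama-3.1-70b",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response)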