
Merge pull request #2207 from kqlio67/main

Enhance and expand provider support, update models, and improve overall functionality
Tekky, 1 month ago
Parent
Commit 07fa87b4d1
59 files changed, with 2007 insertions and 2152 deletions
  1. etc/testing/_providers.py (+1 -2)
  2. etc/testing/test_all.py (+2 -12)
  3. etc/testing/test_chat_completion.py (+2 -2)
  4. etc/tool/create_provider.py (+1 -1)
  5. etc/tool/improve_code.py (+2 -2)
  6. g4f/Provider/AiChats.py (+106 -0)
  7. g4f/Provider/Binjie.py (+65 -0)
  8. g4f/Provider/Bixin123.py (+89 -0)
  9. g4f/Provider/Blackbox.py (+79 -14)
  10. g4f/Provider/ChatgptFree.py (+9 -2)
  11. g4f/Provider/CodeNews.py (+94 -0)
  12. g4f/Provider/Cohere.py (+0 -106)
  13. g4f/Provider/DDG.py (+0 -2)
  14. g4f/Provider/FluxAirforce.py (+82 -0)
  15. g4f/Provider/GeminiProChat.py (+16 -14)
  16. g4f/Provider/FreeChatgpt.py (+10 -12)
  17. g4f/Provider/HuggingChat.py (+11 -5)
  18. g4f/Provider/HuggingFace.py (+8 -4)
  19. g4f/Provider/Koala.py (+5 -4)
  20. g4f/Provider/Liaobots.py (+193 -77)
  21. g4f/Provider/LiteIcoding.py (+7 -2)
  22. g4f/Provider/MagickPen.py (+130 -0)
  23. g4f/Provider/MagickPenAsk.py (+0 -51)
  24. g4f/Provider/MagickPenChat.py (+0 -50)
  25. g4f/Provider/Marsyoo.py (+0 -64)
  26. g4f/Provider/Nexra.py (+181 -0)
  27. g4f/Provider/Pizzagpt.py (+1 -1)
  28. g4f/Provider/ReplicateHome.py (+23 -5)
  29. g4f/Provider/Snova.py (+133 -0)
  30. g4f/Provider/TeachAnything.py (+17 -3)
  31. g4f/Provider/TwitterBio.py (+103 -0)
  32. g4f/Provider/Upstage.py (+74 -0)
  33. g4f/Provider/You.py (+0 -1)
  34. g4f/Provider/__init__.py (+12 -8)
  35. g4f/Provider/not_working/AItianhu.py (+0 -79)
  36. g4f/Provider/not_working/Aichatos.py (+0 -56)
  37. g4f/Provider/not_working/Bestim.py (+0 -56)
  38. g4f/Provider/not_working/ChatBase.py (+0 -61)
  39. g4f/Provider/not_working/ChatForAi.py (+0 -66)
  40. g4f/Provider/not_working/ChatgptAi.py (+0 -88)
  41. g4f/Provider/not_working/ChatgptDemo.py (+0 -70)
  42. g4f/Provider/not_working/ChatgptDemoAi.py (+0 -56)
  43. g4f/Provider/not_working/ChatgptLogin.py (+0 -78)
  44. g4f/Provider/not_working/ChatgptNext.py (+0 -66)
  45. g4f/Provider/not_working/ChatgptX.py (+0 -106)
  46. g4f/Provider/not_working/Chatxyz.py (+0 -60)
  47. g4f/Provider/not_working/Cnote.py (+0 -58)
  48. g4f/Provider/not_working/Feedough.py (+0 -78)
  49. g4f/Provider/not_working/Gpt6.py (+0 -54)
  50. g4f/Provider/not_working/GptChatly.py (+0 -35)
  51. g4f/Provider/not_working/GptForLove.py (+0 -91)
  52. g4f/Provider/not_working/GptGo.py (+0 -66)
  53. g4f/Provider/not_working/GptGod.py (+0 -61)
  54. g4f/Provider/not_working/OnlineGpt.py (+0 -57)
  55. g4f/Provider/not_working/__init__.py (+0 -21)
  56. g4f/client/client.py (+170 -41)
  57. g4f/client/image_models.py (+8 -11)
  58. g4f/gui/server/api.py (+28 -13)
  59. g4f/models.py (+345 -280)

+ 1 - 2
etc/testing/_providers.py

@@ -35,7 +35,6 @@ def get_providers() -> list[ProviderType]:
         provider
         for provider in __providers__
         if provider.__name__ not in dir(Provider.deprecated)
-        and provider.__name__ not in dir(Provider.unfinished)
         and provider.url is not None
     ]
 
@@ -59,4 +58,4 @@ def test(provider: ProviderType) -> bool:
 
 if __name__ == "__main__":
     main()
-    
+    

+ 2 - 12
etc/testing/test_all.py

@@ -38,21 +38,11 @@ async def test(model: g4f.Model):
 
 async def start_test():
     models_to_test = [
-        # GPT-3.5 4K Context
+        # GPT-3.5
         g4f.models.gpt_35_turbo,
-        g4f.models.gpt_35_turbo_0613,
 
-        # GPT-3.5 16K Context
-        g4f.models.gpt_35_turbo_16k,
-        g4f.models.gpt_35_turbo_16k_0613,
-
-        # GPT-4 8K Context
+        # GPT-4
         g4f.models.gpt_4,
-        g4f.models.gpt_4_0613,
-
-        # GPT-4 32K Context
-        g4f.models.gpt_4_32k,
-        g4f.models.gpt_4_32k_0613,
     ]
 
     models_working = []

+ 2 - 2
etc/testing/test_chat_completion.py

@@ -8,7 +8,7 @@ import g4f, asyncio
 print("create:", end=" ", flush=True)
 for response in g4f.ChatCompletion.create(
     model=g4f.models.default,
-    provider=g4f.Provider.Bing,
+    #provider=g4f.Provider.Bing,
     messages=[{"role": "user", "content": "write a poem about a tree"}],
     stream=True
 ):
@@ -18,7 +18,7 @@ print()
 async def run_async():
     response = await g4f.ChatCompletion.create_async(
         model=g4f.models.default,
-        provider=g4f.Provider.Bing,
+        #provider=g4f.Provider.Bing,
         messages=[{"role": "user", "content": "hello!"}],
     )
     print("create_async:", response)

+ 1 - 1
etc/tool/create_provider.py

@@ -90,7 +90,7 @@ And replace "gpt-3.5-turbo" with `model`.
     print("Create code...")
     response = []
     for chunk in g4f.ChatCompletion.create(
-        model=g4f.models.gpt_35_long,
+        model=g4f.models.default,
         messages=[{"role": "user", "content": prompt}],
         timeout=300,
         stream=True,

+ 2 - 2
etc/tool/improve_code.py

@@ -30,7 +30,7 @@ Don't remove license comments.
 print("Create code...")
 response = []
 for chunk in g4f.ChatCompletion.create(
-    model=g4f.models.gpt_35_long,
+    model=g4f.models.default,
     messages=[{"role": "user", "content": prompt}],
     timeout=300,
     stream=True
@@ -42,4 +42,4 @@ response = "".join(response)
 
 if code := read_code(response):
     with open(path, "w") as file:
-        file.write(code)
+        file.write(code)

+ 106 - 0
g4f/Provider/AiChats.py

@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+import json
+import base64
+from aiohttp import ClientSession
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse
+from .helper import format_prompt
+
+class AiChats(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://ai-chats.org"
+    api_endpoint = "https://ai-chats.org/chat/send2/"
+    working = True
+    supports_gpt_4 = True
+    supports_message_history = True
+    default_model = 'gpt-4'
+    models = ['gpt-4', 'dalle']
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "accept": "application/json, text/event-stream",
+            "accept-language": "en-US,en;q=0.9",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "origin": cls.url,
+            "pragma": "no-cache",
+            "referer": f"{cls.url}/{'image' if model == 'dalle' else 'chat'}/",
+            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+            'cookie': 'muVyak=LSFNvUWqdgKkGprbDBsfieIoEMzjOQ; LSFNvUWqdgKkGprbDBsfieIoEMzjOQ=ac28831b98143847e83dbe004404e619-1725548624-1725548621; muVyak_hits=9; ai-chat-front=9d714d5dc46a6b47607c9a55e7d12a95; _csrf-front=76c23dc0a013e5d1e21baad2e6ba2b5fdab8d3d8a1d1281aa292353f8147b057a%3A2%3A%7Bi%3A0%3Bs%3A11%3A%22_csrf-front%22%3Bi%3A1%3Bs%3A32%3A%22K9lz0ezsNPMNnfpd_8gT5yEeh-55-cch%22%3B%7D',
+        }
+
+        async with ClientSession(headers=headers) as session:
+            if model == 'dalle':
+                prompt = messages[-1]['content'] if messages else ""
+            else:
+                prompt = format_prompt(messages)
+
+            data = {
+                "type": "image" if model == 'dalle' else "chat",
+                "messagesHistory": [
+                    {
+                        "from": "you",
+                        "content": prompt
+                    }
+                ]
+            }
+
+            try:
+                async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                    response.raise_for_status()
+
+                    if model == 'dalle':
+                        response_json = await response.json()
+
+                        if 'data' in response_json and response_json['data']:
+                            image_url = response_json['data'][0].get('url')
+                            if image_url:
+                                async with session.get(image_url) as img_response:
+                                    img_response.raise_for_status()
+                                    image_data = await img_response.read()
+
+                                base64_image = base64.b64encode(image_data).decode('utf-8')
+                                base64_url = f"data:image/png;base64,{base64_image}"
+                                yield ImageResponse(base64_url, prompt)
+                            else:
+                                yield f"Error: No image URL found in the response. Full response: {response_json}"
+                        else:
+                            yield f"Error: Unexpected response format. Full response: {response_json}"
+                    else:
+                        full_response = await response.text()
+                        message = ""
+                        for line in full_response.split('\n'):
+                            if line.startswith('data: ') and line != 'data: ':
+                                message += line[6:]
+
+                        message = message.strip()
+                        yield message
+            except Exception as e:
+                yield f"Error occurred: {str(e)}"
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> str:
+        async for response in cls.create_async_generator(model, messages, proxy, **kwargs):
+            if isinstance(response, ImageResponse):
+                return response.images[0]
+            return response
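
AiChats sends both chat and image requests to the same send2 endpoint, switching on the model name; image results come back wrapped in an ImageResponse carrying a base64 data URI. A minimal consumption sketch, importing the module directly (the package-level export depends on the g4f/Provider/__init__.py change, which is not shown here):

```python
import asyncio

from g4f.Provider.AiChats import AiChats
from g4f.image import ImageResponse

async def main():
    # "gpt-4" keeps the request in chat mode; the text arrives as one SSE-joined string.
    async for part in AiChats.create_async_generator(
        model="gpt-4",
        messages=[{"role": "user", "content": "write a poem about a tree"}],
    ):
        print(part)

    # "dalle" switches the request type to image generation.
    async for part in AiChats.create_async_generator(
        model="dalle",
        messages=[{"role": "user", "content": "a tree in autumn"}],
    ):
        if isinstance(part, ImageResponse):
            print("received image as a base64 data URI")
        else:
            print(part)  # provider-formatted error string

asyncio.run(main())
```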

+ 65 - 0
g4f/Provider/Binjie.py

@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+import random
+from ..requests import StreamSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, format_prompt
+
+
+class Binjie(AsyncGeneratorProvider):
+    url = "https://chat18.aichatos8.com"
+    working = True
+    supports_gpt_4 = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+
+    @staticmethod
+    async def create_async_generator(
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        timeout: int = 120,
+        **kwargs,
+    ) -> AsyncResult:
+        async with StreamSession(
+            headers=_create_header(), proxies={"https": proxy}, timeout=timeout
+        ) as session:
+            payload = _create_payload(messages, **kwargs)
+            async with session.post("https://api.binjie.fun/api/generateStream", json=payload) as response:
+                response.raise_for_status()
+                async for chunk in response.iter_content():
+                    if chunk:
+                        chunk = chunk.decode()
+                        if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
+                            raise RuntimeError("IP address is blocked by abuse detection.")
+                        yield chunk
+
+
+def _create_header():
+    return {
+        "accept"        : "application/json, text/plain, */*",
+        "content-type"  : "application/json",
+        "origin"        : "https://chat18.aichatos8.com",
+        "referer"       : "https://chat18.aichatos8.com/"
+    }
+
+
+def _create_payload(
+    messages: Messages,
+    system_message: str = "",
+    user_id: int = None,
+    **kwargs
+):
+    if not user_id:
+        user_id = random.randint(1690000544336, 2093025544336)
+    return {
+        "prompt": format_prompt(messages),
+        "network": True,
+        "system": system_message,
+        "withoutContext": False,
+        "stream": True,
+        "userId": f"#/chat/{user_id}"
+    }
+
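
Binjie streams plain text chunks and raises a RuntimeError if the upstream IP-block message appears in the stream. A streaming sketch through the high-level API, assuming the updated g4f/Provider/__init__.py exports the class:

```python
import g4f

# Binjie never inspects the model argument, so the library default is fine;
# the prompt is flattened with format_prompt and streamed back chunk by chunk.
for chunk in g4f.ChatCompletion.create(
    model=g4f.models.default,
    provider=g4f.Provider.Binjie,
    messages=[{"role": "user", "content": "hello!"}],
    stream=True,
):
    print(chunk, end="", flush=True)
print()
```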

+ 89 - 0
g4f/Provider/Bixin123.py

@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..typing import AsyncResult, Messages
+from .helper import format_prompt
+
+class Bixin123(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://chat.bixin123.com"
+    api_endpoint = "https://chat.bixin123.com/api/chatgpt/chat-process"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+
+    default_model = 'gpt-3.5-turbo-0125'
+    models = ['gpt-3.5-turbo-0125', 'gpt-3.5-turbo-16k-0613', 'gpt-4-turbo', 'qwen-turbo']
+    
+    model_aliases = {
+        "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
+        "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+
+        headers = {
+            "accept": "application/json, text/plain, */*",
+            "accept-language": "en-US,en;q=0.9",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "fingerprint": "988148794",
+            "origin": cls.url,
+            "pragma": "no-cache",
+            "priority": "u=1, i",
+            "referer": f"{cls.url}/chat",
+            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+            "x-website-domain": "chat.bixin123.com",
+        }
+
+        async with ClientSession(headers=headers) as session:
+            prompt = format_prompt(messages)
+            data = {
+                "prompt": prompt,
+                "options": {
+                    "usingNetwork": False,
+                    "file": ""
+                }
+            }
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                response_text = await response.text()
+                
+                lines = response_text.strip().split("\n")
+                last_json = None
+                for line in reversed(lines):
+                    try:
+                        last_json = json.loads(line)
+                        break
+                    except json.JSONDecodeError:
+                        pass
+                
+                if last_json:
+                    text = last_json.get("text", "")
+                    yield text
+                else:
+                    yield ""
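
Note that the alias table is a plain dict literal with a duplicated "gpt-3.5-turbo" key, so the later entry wins and that alias resolves to the 16k snapshot. A small sketch of the resolution logic:

```python
from g4f.Provider.Bixin123 import Bixin123

# Exact model names pass through, aliases are looked up, and anything else
# falls back to the provider default.
print(Bixin123.get_model("gpt-4-turbo"))         # "gpt-4-turbo"
print(Bixin123.get_model("gpt-3.5-turbo"))       # "gpt-3.5-turbo-16k-0613" (last duplicate key wins)
print(Bixin123.get_model("some-unknown-model"))  # "gpt-3.5-turbo-0125"
```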

+ 79 - 14
g4f/Provider/Blackbox.py

@@ -3,11 +3,12 @@ from __future__ import annotations
 import uuid
 import secrets
 import re
-from aiohttp import ClientSession, ClientResponse
+import base64
+from aiohttp import ClientSession
 from typing import AsyncGenerator, Optional
 
 from ..typing import AsyncResult, Messages, ImageType
-from ..image import to_data_uri
+from ..image import to_data_uri, ImageResponse
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
 class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
@@ -20,7 +21,43 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         "llama-3.1-8b",
         'llama-3.1-70b',
         'llama-3.1-405b',
+        'ImageGeneration',
     ]
+    
+    model_aliases = {
+        "gemini-flash": "gemini-1.5-flash",
+    }
+    
+    agent_mode_map = {
+        'ImageGeneration': {"mode": True, "id": "ImageGenerationLV45LJp", "name": "Image Generation"},
+    }
+
+    model_id_map = {
+        "blackbox": {},
+        "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
+        "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
+        'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
+        'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def download_image_to_base64_url(cls, url: str) -> str:
+        async with ClientSession() as session:
+            async with session.get(url) as response:
+                image_data = await response.read()
+                base64_data = base64.b64encode(image_data).decode('utf-8')
+                mime_type = response.headers.get('Content-Type', 'image/jpeg')
+                return f"data:{mime_type};base64,{base64_data}"
+
     @classmethod
     async def create_async_generator(
         cls,
@@ -30,7 +67,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         image: Optional[ImageType] = None,
         image_name: Optional[str] = None,
         **kwargs
-    ) -> AsyncGenerator[str, None]:
+    ) -> AsyncGenerator[AsyncResult, None]:
         if image is not None:
             messages[-1]["data"] = {
                 "fileText": image_name,
@@ -55,19 +92,15 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         async with ClientSession(headers=headers) as session:
             random_id = secrets.token_hex(16)
             random_user_id = str(uuid.uuid4())
-            model_id_map = {
-                "blackbox": {},
-                "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
-                "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
-                'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
-                'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}
-            }
+            
+            model = cls.get_model(model)  # Resolve the model alias
+            
             data = {
                 "messages": messages,
                 "id": random_id,
                 "userId": random_user_id,
                 "codeModelMode": True,
-                "agentMode": {},
+                "agentMode": cls.agent_mode_map.get(model, {}),
                 "trendingAgentMode": {},
                 "isMicMode": False,
                 "isChromeExt": False,
@@ -75,7 +108,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                 "webSearchMode": False,
                 "userSystemPrompt": "",
                 "githubToken": None,
-                "trendingAgentModel": model_id_map[model], # if you actually test this on the site, just ask each model "yo", weird behavior imo
+                "trendingAgentModel": cls.model_id_map.get(model, {}),
                 "maxTokens": None
             }
 
@@ -83,9 +116,41 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                 f"{cls.url}/api/chat", json=data, proxy=proxy
             ) as response:
                 response.raise_for_status()
+                full_response = ""
+                buffer = ""
+                image_base64_url = None
                 async for chunk in response.content.iter_any():
                     if chunk:
-                        # Decode the chunk and clean up unwanted prefixes using a regex
                         decoded_chunk = chunk.decode()
                         cleaned_chunk = re.sub(r'\$@\$.+?\$@\$|\$@\$', '', decoded_chunk)
-                        yield cleaned_chunk
+                        
+                        buffer += cleaned_chunk
+                        
+                        # Check if there's a complete image line in the buffer
+                        image_match = re.search(r'!\[Generated Image\]\((https?://[^\s\)]+)\)', buffer)
+                        if image_match:
+                            image_url = image_match.group(1)
+                            # Download the image and convert to base64 URL
+                            image_base64_url = await cls.download_image_to_base64_url(image_url)
+                            
+                            # Remove the image line from the buffer
+                            buffer = re.sub(r'!\[Generated Image\]\(https?://[^\s\)]+\)', '', buffer)
+                        
+                        # Send text line by line
+                        lines = buffer.split('\n')
+                        for line in lines[:-1]:
+                            if line.strip():
+                                full_response += line + '\n'
+                                yield line + '\n'
+                        buffer = lines[-1]  # Keep the last incomplete line in the buffer
+
+                # Send the remaining buffer if it's not empty
+                if buffer.strip():
+                    full_response += buffer
+                    yield buffer
+
+                # If an image was found, send it as ImageResponse
+                if image_base64_url:
+                    alt_text = "Generated Image"
+                    image_response = ImageResponse(image_base64_url, alt=alt_text)
+                    yield image_response
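
The reworked streaming loop buffers text, strips any `![Generated Image](url)` markdown it finds, downloads that URL, and emits the picture at the end as an ImageResponse holding a data URI. A consumption sketch using the new "ImageGeneration" agent mode:

```python
import asyncio

from g4f.Provider.Blackbox import Blackbox
from g4f.image import ImageResponse

async def main():
    # Text chunks and the final ImageResponse arrive on the same generator.
    async for part in Blackbox.create_async_generator(
        model="ImageGeneration",
        messages=[{"role": "user", "content": "a watercolor fox"}],
    ):
        if isinstance(part, ImageResponse):
            print("\n[image received as a base64 data URI]")
        else:
            print(part, end="")

asyncio.run(main())
```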

+ 9 - 2
g4f/Provider/ChatgptFree.py

@@ -43,6 +43,7 @@ class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
             'sec-fetch-site': 'same-origin',
             'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
         }
+
         async with StreamSession(
                 headers=headers,
                 cookies=cookies,
@@ -55,6 +56,12 @@ class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
                 async with session.get(f"{cls.url}/") as response:
                     await raise_for_status(response)
                     response = await response.text()
+
+                    result = re.search(r'data-post-id="([0-9]+)"', response)
+                    if not result:
+                        raise RuntimeError("No post id found")
+                    cls._post_id = result.group(1)
+
                     result = re.search(r'data-nonce="(.*?)"', response)
                     if result:
                         cls._nonce = result.group(1)
@@ -70,7 +77,7 @@ class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
                 "message": prompt,
                 "bot_id": "0"
             }
-
+            
             async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
                 await raise_for_status(response)
                 buffer = ""
@@ -96,4 +103,4 @@ class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
                         if 'data' in json_response:
                             yield json_response['data']
                     except json.JSONDecodeError:
-                        print(f"Failed to decode final JSON. Buffer content: {buffer}")
+                        print(f"Failed to decode final JSON. Buffer content: {buffer}")

+ 94 - 0
g4f/Provider/CodeNews.py

@@ -0,0 +1,94 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+from asyncio import sleep
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class CodeNews(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://codenews.cc"
+    api_endpoint = "https://codenews.cc/chatxyz13"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = False
+    supports_stream = True
+    supports_system_message = False
+    supports_message_history = False
+    
+    default_model = 'free_gpt'
+    models = ['free_gpt', 'gpt-4o-mini', 'deepseek-coder', 'chatpdf']
+    
+    model_aliases = {
+        "glm-4": "free_gpt",
+        "gpt-3.5-turbo": "chatpdf",
+        "deepseek": "deepseek-coder",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+        
+        headers = {
+            "accept": "application/json, text/javascript, */*; q=0.01",
+            "accept-language": "en-US,en;q=0.9",
+            "cache-control": "no-cache",
+            "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
+            "origin": cls.url,
+            "pragma": "no-cache",
+            "priority": "u=1, i",
+            "referer": f"{cls.url}/chatgpt",
+            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+            "x-requested-with": "XMLHttpRequest",
+        }
+        async with ClientSession(headers=headers) as session:
+            prompt = format_prompt(messages)
+            data = {
+                "chatgpt_input": prompt,
+                "qa_type2": model,
+                "chatgpt_version_value": "20240804",
+                "enable_web_search": "0",
+                "enable_agent": "0",
+                "dy_video_text_extract": "0",
+                "enable_summary": "0",
+            }
+            async with session.post(cls.api_endpoint, data=data, proxy=proxy) as response:
+                response.raise_for_status()
+                json_data = await response.json()
+                chat_id = json_data["data"]["id"]
+
+            headers["content-type"] = "application/x-www-form-urlencoded; charset=UTF-8"
+            data = {"current_req_count": "2"}
+            
+            while True:
+                async with session.post(f"{cls.url}/chat_stream", headers=headers, data=data, proxy=proxy) as response:
+                    response.raise_for_status()
+                    json_data = await response.json()
+                    if json_data["data"]:
+                        yield json_data["data"]
+                        break
+                    else:
+                        await sleep(1)  # Delay before the next request
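
CodeNews first posts the prompt to obtain a chat id, then polls /chat_stream once per second until data arrives. A usage sketch through the high-level API, assuming the class is exported by the updated g4f/Provider/__init__.py:

```python
import g4f

# "glm-4" is an alias for the site's "free_gpt" backend; unknown model
# names fall back to that same default.
response = g4f.ChatCompletion.create(
    model="glm-4",
    provider=g4f.Provider.CodeNews,
    messages=[{"role": "user", "content": "Explain what an HTTP 429 status means."}],
)
print(response)
```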

+ 0 - 106
g4f/Provider/Cohere.py

@@ -1,106 +0,0 @@
-from __future__ import annotations
-
-import json, random, requests, threading
-from aiohttp import ClientSession
-
-from ..typing import CreateResult, Messages
-from .base_provider import AbstractProvider
-from .helper import format_prompt
-
-class Cohere(AbstractProvider):
-    url                   = "https://cohereforai-c4ai-command-r-plus.hf.space"
-    working               = False
-    supports_gpt_35_turbo = False
-    supports_gpt_4        = False
-    supports_stream       = True
-    
-    @staticmethod
-    def create_completion(
-        model: str,
-        messages: Messages,
-        stream: bool,
-        proxy: str = None,
-        max_retries: int = 6,
-        **kwargs
-    ) -> CreateResult:
-        
-        prompt = format_prompt(messages)
-        
-        headers = {
-            'accept': 'text/event-stream',
-            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'cache-control': 'no-cache',
-            'pragma': 'no-cache',
-            'referer': 'https://cohereforai-c4ai-command-r-plus.hf.space/?__theme=light',
-            'sec-ch-ua': '"Google Chrome";v="123", "Not:A-Brand";v="8", "Chromium";v="123"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36',
-        }
-        
-        session_hash = ''.join(random.choices("abcdefghijklmnopqrstuvwxyz0123456789", k=11))
-
-        params = {
-            'fn_index': '1',
-            'session_hash': session_hash,
-        }
-
-        response = requests.get(
-            'https://cohereforai-c4ai-command-r-plus.hf.space/queue/join',
-            params=params,
-            headers=headers,
-            stream=True
-        )
-        
-        completion = ''
-
-        for line in response.iter_lines():
-            if line:
-                json_data = json.loads(line[6:])
-                
-                if b"send_data" in (line):
-                    event_id = json_data["event_id"]
-                    
-                    threading.Thread(target=send_data, args=[session_hash, event_id, prompt]).start()
-                
-                if b"process_generating" in line or b"process_completed" in line:
-                    token = (json_data['output']['data'][0][0][1])
-                    
-                    yield (token.replace(completion, ""))
-                    completion = token
-
-def send_data(session_hash, event_id, prompt):
-    headers = {
-        'accept': '*/*',
-        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-        'cache-control': 'no-cache',
-        'content-type': 'application/json',
-        'origin': 'https://cohereforai-c4ai-command-r-plus.hf.space',
-        'pragma': 'no-cache',
-        'referer': 'https://cohereforai-c4ai-command-r-plus.hf.space/?__theme=light',
-        'sec-ch-ua': '"Google Chrome";v="123", "Not:A-Brand";v="8", "Chromium";v="123"',
-        'sec-ch-ua-mobile': '?0',
-        'sec-ch-ua-platform': '"macOS"',
-        'sec-fetch-dest': 'empty',
-        'sec-fetch-mode': 'cors',
-        'sec-fetch-site': 'same-origin',
-        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36',
-    }
-
-    json_data = {
-        'data': [
-            prompt,
-            '',
-            [],
-        ],
-        'event_data': None,
-        'fn_index': 1,
-        'session_hash': session_hash,
-        'event_id': event_id
-    }
-    
-    requests.post('https://cohereforai-c4ai-command-r-plus.hf.space/queue/data',
-                    json = json_data, headers=headers)

+ 0 - 2
g4f/Provider/DDG.py

@@ -21,8 +21,6 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
     default_model = "gpt-4o-mini"
     models = ["gpt-4o-mini", "claude-3-haiku-20240307", "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "mistralai/Mixtral-8x7B-Instruct-v0.1"]
     model_aliases = {
-        "gpt-4": "gpt-4o-mini",
-        "gpt-4o": "gpt-4o-mini",
         "claude-3-haiku": "claude-3-haiku-20240307",
         "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1"

+ 82 - 0
g4f/Provider/FluxAirforce.py

@@ -0,0 +1,82 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession, ClientResponseError
+from urllib.parse import urlencode
+import io
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse, is_accepted_format
+
+class FluxAirforce(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://flux.api.airforce/"
+    api_endpoint = "https://api.airforce/v1/imagine2"
+    working = True
+    default_model = 'flux-realism'
+    models = [
+        'flux',
+        'flux-realism',
+        'flux-anime',
+        'flux-3d',
+        'flux-disney'
+    ]
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "origin": "https://flux.api.airforce",
+            "priority": "u=1, i",
+            "referer": "https://flux.api.airforce/",
+            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+        }
+
+        prompt = messages[-1]['content'] if messages else ""
+
+        params = {
+            "prompt": prompt,
+            "size": kwargs.get("size", "1:1"),
+            "seed": kwargs.get("seed"),
+            "model": model
+        }
+
+        params = {k: v for k, v in params.items() if v is not None}
+
+        try:
+            async with ClientSession(headers=headers) as session:
+                async with session.get(f"{cls.api_endpoint}", params=params, proxy=proxy) as response:
+                    response.raise_for_status()
+                    
+                    content = await response.read()
+                    
+                    if response.content_type.startswith('image/'):
+                        image_url = str(response.url)
+                        yield ImageResponse(image_url, prompt)
+                    else:
+                        try:
+                            text = content.decode('utf-8', errors='ignore')
+                            yield f"Error: {text}"
+                        except Exception as decode_error:
+                            yield f"Error: Unable to decode response - {str(decode_error)}"
+
+        except ClientResponseError as e:
+            yield f"Error: HTTP {e.status}: {e.message}"
+        except Exception as e:
+            yield f"Unexpected error: {str(e)}"
+
+        finally:
+            if not session.closed:
+                await session.close()
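
FluxAirforce is image-only: the prompt plus optional size and seed are sent as query parameters, and a successful response is yielded as an ImageResponse pointing at the generated image URL (anything else comes back as an error string). A direct sketch, assuming ImageResponse exposes the value it was constructed with via its images attribute, as the AiChats helper above relies on:

```python
import asyncio

from g4f.Provider.FluxAirforce import FluxAirforce
from g4f.image import ImageResponse

async def main():
    async for result in FluxAirforce.create_async_generator(
        model="flux-realism",
        messages=[{"role": "user", "content": "a lighthouse at dusk"}],
        size="1:1",   # forwarded as the "size" query parameter
        seed=42,      # forwarded as the "seed" query parameter
    ):
        if isinstance(result, ImageResponse):
            print("image url:", result.images)
        else:
            print(result)  # error text

asyncio.run(main())
```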

+ 16 - 14
g4f/Provider/GeminiProChat.py

@@ -12,11 +12,11 @@ from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
 
-class GeminiProChat(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://gemini-pro.chat/"
+class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://chat10.free2gpt.xyz"
     working = True
     supports_message_history = True
-    default_model = 'gemini-pro'
+    default_model = 'llama-3.1-70b'
 
     @classmethod
     async def create_async_generator(
@@ -28,9 +28,9 @@ class GeminiProChat(AsyncGeneratorProvider, ProviderModelMixin):
         **kwargs,
     ) -> AsyncResult:
         headers = {
-            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
+            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
             "Accept": "*/*",
-            "Accept-Language": "en-US,en;q=0.5",
+            "Accept-Language": "en-US,en;q=0.9",
             "Accept-Encoding": "gzip, deflate, br",
             "Content-Type": "text/plain;charset=UTF-8",
             "Referer": f"{cls.url}/",
@@ -38,21 +38,23 @@ class GeminiProChat(AsyncGeneratorProvider, ProviderModelMixin):
             "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-origin",
-            "Connection": "keep-alive",
-            "TE": "trailers",
+            "Sec-Ch-Ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "Sec-Ch-Ua-Mobile": "?0",
+            "Sec-Ch-Ua-Platform": '"Linux"',
+            "Cache-Control": "no-cache",
+            "Pragma": "no-cache",
+            "Priority": "u=1, i",
         }
         async with ClientSession(
             connector=get_connector(connector, proxy), headers=headers
         ) as session:
             timestamp = int(time.time() * 1e3)
+            system_message = {
+                "role": "system",
+                "content": ""
+            }
             data = {
-                "messages": [
-                    {
-                        "role": "model" if message["role"] == "assistant" else "user",
-                        "parts": [{"text": message["content"]}],
-                    }
-                    for message in messages
-                ],
+                "messages": [system_message] + messages,
                 "time": timestamp,
                 "pass": None,
                 "sign": generate_signature(timestamp, messages[-1]["content"]),

+ 10 - 12
g4f/Provider/FreeChatgpt.py

@@ -10,25 +10,22 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://chat.chatgpt.org.uk"
     api_endpoint = "/api/openai/v1/chat/completions"
     working = True
-    supports_gpt_35_turbo = True
-    default_model = 'gpt-3.5-turbo'
+    default_model = '@cf/qwen/qwen1.5-14b-chat-awq'
     models = [
-        'gpt-3.5-turbo',
-        'SparkDesk-v1.1',
-        'deepseek-coder',
         '@cf/qwen/qwen1.5-14b-chat-awq',
-        'deepseek-chat',
+        'SparkDesk-v1.1',
         'Qwen2-7B-Instruct',
         'glm4-9B-chat',
         'chatglm3-6B',
         'Yi-1.5-9B-Chat',
     ]
+    
     model_aliases = {
         "qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
         "sparkdesk-v1.1": "SparkDesk-v1.1",
-        "qwen2-7b": "Qwen2-7B-Instruct",
-        "glm4-9b": "glm4-9B-chat",
-        "chatglm3-6b": "chatglm3-6B",
+        "qwen-2-7b": "Qwen2-7B-Instruct",
+        "glm-4-9b": "glm4-9B-chat",
+        "glm-3-6b": "chatglm3-6B",
         "yi-1.5-9b": "Yi-1.5-9B-Chat",
     }
 
@@ -40,7 +37,7 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
             return cls.model_aliases[model.lower()]
         else:
             return cls.default_model
-        
+
     @classmethod
     async def create_async_generator(
         cls,
@@ -49,6 +46,8 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
         proxy: str = None,
         **kwargs
     ) -> AsyncResult:
+        model = cls.get_model(model)
+        
         headers = {
             "accept": "application/json, text/event-stream",
             "accept-language": "en-US,en;q=0.9",
@@ -64,7 +63,6 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
             "sec-fetch-site": "same-origin",
             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
         }
-        model = cls.get_model(model)
         async with ClientSession(headers=headers) as session:
             prompt = format_prompt(messages)
             data = {
@@ -93,6 +91,6 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
                                 chunk = json.loads(line_str[6:])
                                 delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
                                 accumulated_text += delta_content
-                                yield delta_content
+                                yield delta_content  # Yield each chunk of content
                             except json.JSONDecodeError:
                                 pass

+ 11 - 5
g4f/Provider/HuggingChat.py

@@ -22,7 +22,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
         'mistralai/Mistral-7B-Instruct-v0.3',
         'microsoft/Phi-3-mini-4k-instruct',
     ]
-
+    
     model_aliases = {
         "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
         "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
@@ -42,7 +42,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
             return cls.model_aliases[model]
         else:
             return cls.default_model
-
+            
     @classmethod
     def create_completion(
         cls,
@@ -52,7 +52,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
         **kwargs
     ) -> CreateResult:
         model = cls.get_model(model)
-
+        
         if model in cls.models:
             session = cf_reqs.Session()
             session.headers = {
@@ -71,12 +71,17 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
                 'sec-fetch-site': 'same-origin',
                 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
             }
+
+            print(model)
             json_data = {
                 'model': model,
             }
+
             response = session.post('https://huggingface.co/chat/conversation', json=json_data)
             conversationId = response.json()['conversationId']
+
             response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=01',)
+
             data: list = (response.json())["nodes"][1]["data"]
             keys: list[int] = data[data[0]["messages"]]
             message_keys: dict = data[keys[0]]
@@ -117,6 +122,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
                 headers=headers,
                 files=files,
             )
+
             first_token = True
             for line in response.iter_lines():
                 line = json.loads(line)
@@ -133,6 +139,6 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
                         token = token.replace('\u0000', '')
 
                     yield token
-
+                
                 elif line["type"] == "finalAnswer":
-                    break
+                    break

+ 8 - 4
g4f/Provider/HuggingFace.py

@@ -1,11 +1,14 @@
 from __future__ import annotations
+
 import json
 from aiohttp import ClientSession, BaseConnector
+
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import get_connector
 from ..errors import RateLimitError, ModelNotFoundError
 from ..requests.raise_for_status import raise_for_status
+
 class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://huggingface.co/chat"
     working = True
@@ -22,7 +25,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
         'mistralai/Mistral-7B-Instruct-v0.3',
         'microsoft/Phi-3-mini-4k-instruct',
     ]
-
+    
     model_aliases = {
         "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
         "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
@@ -76,7 +79,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
         }
         if api_key is not None:
             headers["Authorization"] = f"Bearer {api_key}"
-
+        
         params = {
             "return_full_text": False,
             "max_new_tokens": max_new_tokens,
@@ -84,7 +87,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
             **kwargs
         }
         payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream}
-
+        
         async with ClientSession(
             headers=headers,
             connector=get_connector(connector, proxy)
@@ -106,6 +109,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
                                 yield chunk
                 else:
                     yield (await response.json())[0]["generated_text"].strip()
+
 def format_prompt(messages: Messages) -> str:
     system_messages = [message["content"] for message in messages if message["role"] == "system"]
     question = " ".join([messages[-1]["content"], *system_messages])
@@ -114,4 +118,4 @@ def format_prompt(messages: Messages) -> str:
         for idx, message in enumerate(messages)
         if message["role"] == "assistant"
     ])
-    return f"{history}<s>[INST] {question} [/INST]"
+    return f"{history}<s>[INST] {question} [/INST]"

+ 5 - 4
g4f/Provider/Koala.py

@@ -4,16 +4,17 @@ import json
 from typing import AsyncGenerator, Optional, List, Dict, Union, Any
 from aiohttp import ClientSession, BaseConnector, ClientResponse
 
-from ..typing import Messages
-from .base_provider import AsyncGeneratorProvider
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import get_random_string, get_connector
 from ..requests import raise_for_status
 
-class Koala(AsyncGeneratorProvider):
+class Koala(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://koala.sh"
     working = True
-    supports_gpt_35_turbo = True
     supports_message_history = True
+    supports_gpt_4 = True
+    default_model = 'gpt-4o-mini'
 
     @classmethod
     async def create_async_generator(

+ 193 - 77
g4f/Provider/Liaobots.py

@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 import uuid
-import requests
 from aiohttp import ClientSession, BaseConnector
 
 from ..typing import AsyncResult, Messages
@@ -9,22 +8,170 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import get_connector
 from ..requests import raise_for_status
 
+models = {
+    "gpt-4o-mini-free": {
+        "id": "gpt-4o-mini-free",
+        "name": "GPT-4o-Mini-Free",
+        "model": "ChatGPT",
+        "provider": "OpenAI",
+        "maxLength": 31200,
+        "tokenLimit": 7800,
+        "context": "8K",
+    },
+    "gpt-4o-mini": {
+        "id": "gpt-4o-mini",
+        "name": "GPT-4o-Mini",
+        "model": "ChatGPT",
+        "provider": "OpenAI",
+        "maxLength": 260000,
+        "tokenLimit": 126000,
+        "context": "128K",
+    },
+    "gpt-4o-free": {
+        "id": "gpt-4o-free",
+        "name": "GPT-4o-free",
+        "model": "ChatGPT",
+        "provider": "OpenAI",
+        "maxLength": 31200,
+        "tokenLimit": 7800,
+        "context": "8K",
+    },
+    "gpt-4-turbo-2024-04-09": {
+        "id": "gpt-4-turbo-2024-04-09",
+        "name": "GPT-4-Turbo",
+        "model": "ChatGPT",
+        "provider": "OpenAI",
+        "maxLength": 260000,
+        "tokenLimit": 126000,
+        "context": "128K",
+    },
+    "gpt-4o-2024-08-06": {
+        "id": "gpt-4o-2024-08-06",
+        "name": "GPT-4o",
+        "model": "ChatGPT",
+        "provider": "OpenAI",
+        "maxLength": 260000,
+        "tokenLimit": 126000,
+        "context": "128K",
+    },
+    "gpt-4-0613": {
+        "id": "gpt-4-0613",
+        "name": "GPT-4-0613",
+        "model": "ChatGPT",
+        "provider": "OpenAI",
+        "maxLength": 32000,
+        "tokenLimit": 7600,
+        "context": "8K",
+    },
+    "claude-3-opus-20240229": {
+        "id": "claude-3-opus-20240229",
+        "name": "Claude-3-Opus",
+        "model": "Claude",
+        "provider": "Anthropic",
+        "maxLength": 800000,
+        "tokenLimit": 200000,
+        "context": "200K",
+    },
+    "claude-3-opus-20240229-aws": {
+        "id": "claude-3-opus-20240229-aws",
+        "name": "Claude-3-Opus-Aws",
+        "model": "Claude",
+        "provider": "Anthropic",
+        "maxLength": 800000,
+        "tokenLimit": 200000,
+        "context": "200K",
+    },
+    "claude-3-opus-20240229-gcp": {
+        "id": "claude-3-opus-20240229-gcp",
+        "name": "Claude-3-Opus-Gcp",
+        "model": "Claude",
+        "provider": "Anthropic",
+        "maxLength": 800000,
+        "tokenLimit": 200000,
+        "context": "200K",
+    },
+    "claude-3-sonnet-20240229": {
+        "id": "claude-3-sonnet-20240229",
+        "name": "Claude-3-Sonnet",
+        "model": "Claude",
+        "provider": "Anthropic",
+        "maxLength": 800000,
+        "tokenLimit": 200000,
+        "context": "200K",
+    },
+    "claude-3-5-sonnet-20240620": {
+        "id": "claude-3-5-sonnet-20240620",
+        "name": "Claude-3.5-Sonnet",
+        "model": "Claude",
+        "provider": "Anthropic",
+        "maxLength": 800000,
+        "tokenLimit": 200000,
+        "context": "200K",
+    },
+    "claude-3-haiku-20240307": {
+        "id": "claude-3-haiku-20240307",
+        "name": "Claude-3-Haiku",
+        "model": "Claude",
+        "provider": "Anthropic",
+        "maxLength": 800000,
+        "tokenLimit": 200000,
+        "context": "200K",
+    },
+    "claude-2.1": {
+        "id": "claude-2.1",
+        "name": "Claude-2.1-200k",
+        "model": "Claude",
+        "provider": "Anthropic",
+        "maxLength": 800000,
+        "tokenLimit": 200000,
+        "context": "200K",
+    },
+    "gemini-1.0-pro-latest": {
+        "id": "gemini-1.0-pro-latest",
+        "name": "Gemini-Pro",
+        "model": "Gemini",
+        "provider": "Google",
+        "maxLength": 120000,
+        "tokenLimit": 30000,
+        "context": "32K",
+    },
+    "gemini-1.5-flash-latest": {
+        "id": "gemini-1.5-flash-latest",
+        "name": "Gemini-1.5-Flash-1M",
+        "model": "Gemini",
+        "provider": "Google",
+        "maxLength": 4000000,
+        "tokenLimit": 1000000,
+        "context": "1024K",
+    },
+    "gemini-1.5-pro-latest": {
+        "id": "gemini-1.5-pro-latest",
+        "name": "Gemini-1.5-Pro-1M",
+        "model": "Gemini",
+        "provider": "Google",
+        "maxLength": 4000000,
+        "tokenLimit": 1000000,
+        "context": "1024K",
+    },
+}
+
+
 class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://liaobots.site"
     working = True
     supports_message_history = True
     supports_system_message = True
-    supports_gpt_35_turbo = True
     supports_gpt_4 = True
     default_model = "gpt-4o"
-    models = None
+    models = list(models.keys())
+    
     model_aliases = {
         "gpt-4o-mini": "gpt-4o-mini-free",
         "gpt-4o": "gpt-4o-free",
         "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
         "gpt-4o": "gpt-4o-2024-08-06",
         "gpt-4": "gpt-4-0613",
-
+        
         "claude-3-opus": "claude-3-opus-20240229",
         "claude-3-opus": "claude-3-opus-20240229-aws",
         "claude-3-opus": "claude-3-opus-20240229-gcp",
@@ -32,50 +179,32 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
         "claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
         "claude-3-haiku": "claude-3-haiku-20240307",
         "claude-2.1": "claude-2.1",
-
+        
         "gemini-pro": "gemini-1.0-pro-latest",
         "gemini-flash": "gemini-1.5-flash-latest",
         "gemini-pro": "gemini-1.5-pro-latest",
     }
+    
     _auth_code = ""
     _cookie_jar = None
 
     @classmethod
-    def get_models(cls):
-        if cls.models is None:
-            url = 'https://liaobots.work/api/models'
-            headers = {
-                'accept': '/',
-                'accept-language': 'en-US,en;q=0.9',
-                'content-type': 'application/json',
-                'cookie': 'gkp2=ehnhUPJtkCgMmod8Sbxn',
-                'origin': 'https://liaobots.work',
-                'priority': 'u=1, i',
-                'referer': 'https://liaobots.work/',
-                'sec-ch-ua': '"Chromium";v="127", "Not)A;Brand";v="99"',
-                'sec-ch-ua-mobile': '?0',
-                'sec-ch-ua-platform': '"Linux"',
-                'sec-fetch-dest': 'empty',
-                'sec-fetch-mode': 'cors',
-                'sec-fetch-site': 'same-origin',
-                'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
-            }
-            data = {'key': ''}
-
-            response = requests.post(url, headers=headers, json=data)
-
-            if response.status_code == 200:
-                try:
-                    models_data = response.json()
-                    cls.models = {model['id']: model for model in models_data}
-                except (ValueError, KeyError) as e:
-                    print(f"Error processing JSON response: {e}")
-                    cls.models = {}
-            else:
-                print(f"Request failed with status code: {response.status_code}")
-                cls.models = {}
+    def get_model(cls, model: str) -> str:
+        """
+        Retrieve the internal model identifier based on the provided model name or alias.
+        """
+        if model in cls.model_aliases:
+            model = cls.model_aliases[model]
+        if model not in models:
+            raise ValueError(f"Model '{model}' is not supported.")
+        return model
 
-        return cls.models
+    @classmethod
+    def is_supported(cls, model: str) -> bool:
+        """
+        Check if the given model is supported.
+        """
+        return model in models or model in cls.model_aliases
 
     @classmethod
     async def create_async_generator(
@@ -87,6 +216,8 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
         connector: BaseConnector = None,
         **kwargs
     ) -> AsyncResult:
+        model = cls.get_model(model)
+        
         headers = {
             "authority": "liaobots.com",
             "content-type": "application/json",
@@ -99,10 +230,9 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
             cookie_jar=cls._cookie_jar,
             connector=get_connector(connector, proxy, True)
         ) as session:
-            models = cls.get_models()
             data = {
                 "conversationId": str(uuid.uuid4()),
-                "model": models[cls.get_model(model)],
+                "model": models[model],
                 "messages": messages,
                 "key": "",
                 "prompt": kwargs.get("system_message", "You are a helpful assistant."),
@@ -115,11 +245,20 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
                 ) as response:
                     await raise_for_status(response)
             try:
-                await cls.ensure_auth_code(session)
+                async with session.post(
+                    "https://liaobots.work/api/user",
+                    json={"authcode": cls._auth_code},
+                    verify_ssl=False
+                ) as response:
+                    await raise_for_status(response)
+                    cls._auth_code = (await response.json(content_type=None))["authCode"]
+                    if not cls._auth_code:
+                        raise RuntimeError("Empty auth code")
+                    cls._cookie_jar = session.cookie_jar
                 async with session.post(
                     "https://liaobots.work/api/chat",
                     json=data,
-                        headers={"x-auth-code": cls._auth_code},
+                    headers={"x-auth-code": cls._auth_code},
                     verify_ssl=False
                 ) as response:
                     await raise_for_status(response)
@@ -129,7 +268,16 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
                         if chunk:
                             yield chunk.decode(errors="ignore")
             except:
-                await cls.initialize_auth_code(session)
+                async with session.post(
+                    "https://liaobots.work/api/user",
+                    json={"authcode": "pTIQr4FTnVRfr"},
+                    verify_ssl=False
+                ) as response:
+                    await raise_for_status(response)
+                    cls._auth_code = (await response.json(content_type=None))["authCode"]
+                    if not cls._auth_code:
+                        raise RuntimeError("Empty auth code")
+                    cls._cookie_jar = session.cookie_jar
                 async with session.post(
                     "https://liaobots.work/api/chat",
                     json=data,
@@ -142,24 +290,6 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
                             raise RuntimeError("Invalid session")
                         if chunk:
                             yield chunk.decode(errors="ignore")
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        """
-        Retrieve the internal model identifier based on the provided model name or alias.
-        """
-        if model in cls.model_aliases:
-            model = cls.model_aliases[model]
-        models = cls.get_models()
-        if model not in models:
-            raise ValueError(f"Model '{model}' is not supported.")
-        return model
-    @classmethod
-    def is_supported(cls, model: str) -> bool:
-        """
-        Check if the given model is supported.
-        """
-        models = cls.get_models()
-        return model in models or model in cls.model_aliases
 
     @classmethod
     async def initialize_auth_code(cls, session: ClientSession) -> None:
@@ -176,6 +306,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
             if not cls._auth_code:
                 raise RuntimeError("Empty auth code")
             cls._cookie_jar = session.cookie_jar
+
     @classmethod
     async def ensure_auth_code(cls, session: ClientSession) -> None:
         """
@@ -183,18 +314,3 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
         """
         if not cls._auth_code:
             await cls.initialize_auth_code(session)
-
-    @classmethod
-    async def refresh_auth_code(cls, session: ClientSession) -> None:
-        """
-        Refresh the auth code by making a new request.
-        """
-        await cls.initialize_auth_code(session)
-
-    @classmethod
-    async def get_auth_code(cls, session: ClientSession) -> str:
-        """
-        Get the current auth code, initializing it if necessary.
-        """
-        await cls.ensure_auth_code(session)
-        return cls._auth_code

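The two inlined blocks above implement a retry-with-refresh pattern: the cached auth code is exchanged via /api/user before the chat request, and any failure triggers a second exchange seeded with the hard-coded default code before retrying /api/chat. A minimal standalone sketch of that flow, assuming a plain aiohttp session and the same endpoints (the helper name fetch_auth_code is illustrative, not part of the provider):

import aiohttp

DEFAULT_CODE = "pTIQr4FTnVRfr"  # fallback seed code taken from the diff above

async def fetch_auth_code(session: aiohttp.ClientSession, seed: str) -> str:
    # Exchange a seed code for a working auth code, as the provider does inline.
    async with session.post("https://liaobots.work/api/user",
                            json={"authcode": seed}, ssl=False) as resp:
        resp.raise_for_status()
        code = (await resp.json(content_type=None))["authCode"]
        if not code:
            raise RuntimeError("Empty auth code")
        return code

async def chat(session: aiohttp.ClientSession, data: dict, cached_code: str = "") -> str:
    try:
        code = await fetch_auth_code(session, cached_code)
        async with session.post("https://liaobots.work/api/chat", json=data,
                                headers={"x-auth-code": code}, ssl=False) as resp:
            resp.raise_for_status()
            return await resp.text()
    except Exception:
        # On any failure, re-seed with the default code and retry once.
        code = await fetch_auth_code(session, DEFAULT_CODE)
        async with session.post("https://liaobots.work/api/chat", json=data,
                                headers={"x-auth-code": code}, ssl=False) as resp:
            resp.raise_for_status()
            return await resp.text()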
+ 7 - 2
g4f/Provider/LiteIcoding.py

@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 from aiohttp import ClientSession, ClientResponseError
+import re
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt
@@ -31,7 +32,7 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
         headers = {
             "Accept": "*/*",
             "Accept-Language": "en-US,en;q=0.9",
-            "Authorization": "Bearer b3b2712cf83640a5acfdc01e78369930",
+            "Authorization": "Bearer aa3020ee873e40cb8b3f515a0708ebc4",
             "Connection": "keep-alive",
             "Content-Type": "application/json;charset=utf-8",
             "DNT": "1",
@@ -97,7 +98,11 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
                                   .replace('\\"', '"')
                                   .strip()
                     )
-                    yield full_response.strip()
+                    # Add filter to remove unwanted text
+                    filtered_response = re.sub(r'\n---\n.*', '', full_response, flags=re.DOTALL)
+                    # Remove extra quotes at the beginning and end
+                    cleaned_response = filtered_response.strip().strip('"')
+                    yield cleaned_response
 
             except ClientResponseError as e:
                 raise RuntimeError(

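The new post-processing strips everything after a "\n---\n" separator and trims wrapping quotes, assuming the upstream reply appends a "---"-delimited footer. A tiny illustration of the same two steps on a made-up response string:

import re

raw = '"The capital of France is Paris.\n---\nRelated questions: ..."'  # illustrative payload
filtered = re.sub(r'\n---\n.*', '', raw, flags=re.DOTALL)  # drop the footer after the separator
print(filtered.strip().strip('"'))  # -> The capital of France is Paris.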
+ 130 - 0
g4f/Provider/MagickPen.py

@@ -0,0 +1,130 @@
+from __future__ import annotations
+
+import time
+import random
+import hashlib
+import re
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+class MagickPen(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://magickpen.com"
+    api_endpoint_free = "https://api.magickpen.com/chat/free"
+    api_endpoint_ask = "https://api.magickpen.com/ask"
+    working = True
+    supports_gpt_4 = True
+    supports_stream = False
+    
+    default_model = 'free'
+    models = ['free', 'ask']
+    
+    # Both endpoints serve gpt-4o-mini; the alias resolves to the default "free" endpoint.
+    model_aliases = {
+        "gpt-4o-mini": "free",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def get_secrets(cls):
+        url = 'https://magickpen.com/_nuxt/02c76dc.js'
+        async with ClientSession() as session:
+            async with session.get(url) as response:
+                if response.status == 200:
+                    text = await response.text()
+                    x_api_secret_match = re.search(r'"X-API-Secret":"([^"]+)"', text)
+                    secret_match = re.search(r'secret:\s*"([^"]+)"', text)
+                    
+                    x_api_secret = x_api_secret_match.group(1) if x_api_secret_match else None
+                    secret = secret_match.group(1) if secret_match else None
+                    
+                    # Generate timestamp and nonce dynamically
+                    timestamp = str(int(time.time() * 1000))
+                    nonce = str(random.random())
+                    
+                    # Generate signature
+                    signature_parts = ["TGDBU9zCgM", timestamp, nonce]
+                    signature_string = "".join(sorted(signature_parts))
+                    signature = hashlib.md5(signature_string.encode()).hexdigest()
+                    
+                    return {
+                        'X-API-Secret': x_api_secret,
+                        'signature': signature,
+                        'timestamp': timestamp,
+                        'nonce': nonce,
+                        'secret': secret
+                    }
+                else:
+                    print(f"Error while fetching the file: {response.status}")
+                    return None
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+        
+        secrets = await cls.get_secrets()
+        if not secrets:
+            raise Exception("Failed to obtain necessary secrets")
+
+        headers = {
+            "accept": "application/json, text/plain, */*",
+            "accept-language": "en-US,en;q=0.9",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "nonce": secrets['nonce'],
+            "origin": "https://magickpen.com",
+            "pragma": "no-cache",
+            "priority": "u=1, i",
+            "referer": "https://magickpen.com/",
+            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site",
+            "secret": secrets['secret'],
+            "signature": secrets['signature'],
+            "timestamp": secrets['timestamp'],
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+            "x-api-secret": secrets['X-API-Secret']
+        }
+        
+        async with ClientSession(headers=headers) as session:
+            if model == 'free':
+                data = {
+                    "history": [{"role": "user", "content": format_prompt(messages)}]
+                }
+                async with session.post(cls.api_endpoint_free, json=data, proxy=proxy) as response:
+                    response.raise_for_status()
+                    result = await response.text()
+                    yield result
+            
+            elif model == 'ask':
+                data = {
+                    "query": format_prompt(messages),
+                    "plan": "Pay as you go"
+                }
+                async with session.post(cls.api_endpoint_ask, json=data, proxy=proxy) as response:
+                    response.raise_for_status()
+                    async for chunk in response.content:
+                        if chunk:
+                            yield chunk.decode()
+            
+            else:
+                raise ValueError(f"Unknown model: {model}")

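get_secrets scrapes the X-API-Secret and secret values from the site's bundled JavaScript and then signs each request with an MD5 over the sorted concatenation of a fixed key, a millisecond timestamp and a random nonce. A standalone sketch of just the signing step (the "TGDBU9zCgM" key is the one used above; the rest mirrors the diff):

import time
import random
import hashlib

def make_signature(key: str = "TGDBU9zCgM") -> dict:
    timestamp = str(int(time.time() * 1000))   # millisecond timestamp
    nonce = str(random.random())               # random nonce per request
    # Sort the three parts lexicographically, concatenate, then MD5-hash the result.
    signature = hashlib.md5("".join(sorted([key, timestamp, nonce])).encode()).hexdigest()
    return {"timestamp": timestamp, "nonce": nonce, "signature": signature}

print(make_signature())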
+ 0 - 51
g4f/Provider/MagickPenAsk.py

@@ -1,51 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class MagickPenAsk(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://magickpen.com/ask"
-    api_endpoint = "https://api.magickpen.com/ask"
-    working = True
-    supports_gpt_4 = True
-    default_model = "gpt-4o-mini"
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "accept": "application/json, text/plain, */*",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/json",
-            "dnt": "1",
-            "origin": "https://magickpen.com",
-            "priority": "u=1, i",
-            "referer": "https://magickpen.com/",
-            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-site",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
-            'X-API-Secret': 'W252GY255JVYBS9NAM'
-        }
-        async with ClientSession(headers=headers) as session:
-            data = {
-                "query": format_prompt(messages),
-                "plan": "Pay as you go"
-            }
-            async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for chunk in response.content:
-                    if chunk:
-                        yield chunk.decode()

+ 0 - 50
g4f/Provider/MagickPenChat.py

@@ -1,50 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class MagickPenChat(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://magickpen.com/chat"
-    api_endpoint = "https://api.magickpen.com/chat/free"
-    working = True
-    supports_gpt_4 = True
-    default_model = "gpt-4o-mini"
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "accept": "application/json, text/plain, */*",
-            "accept-language": "en-US,en;q=0.9",
-            "access-control-allow-origin": "*",
-            "content-type": "application/json",
-            "dnt": "1",
-            "origin": "https://magickpen.com",
-            "priority": "u=1, i",
-            "referer": "https://magickpen.com/",
-            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-site",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
-            'X-Api-Secret': 'W252GY255JVYBS9NAM'
-        }
-        async with ClientSession(headers=headers) as session:
-            data = {
-                "history": [{"role": "user", "content": format_prompt(messages)}]
-            }
-            async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for chunk in response.content:
-                    if chunk:
-                        yield chunk.decode()

+ 0 - 64
g4f/Provider/Marsyoo.py

@@ -1,64 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession, ClientResponseError
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
-
-class Marsyoo(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://aiagent.marsyoo.com"
-    api_endpoint = "/api/chat-messages"
-    working = True
-    supports_gpt_4 = True
-    default_model = 'gpt-4o'
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "Accept": "*/*",
-            "Accept-Language": "en-US,en;q=0.9",
-            "Connection": "keep-alive",
-            "DNT": "1",
-            "Origin": cls.url,
-            "Referer": f"{cls.url}/chat",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
-            "authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI0MWNkOTE3MS1mNTg1LTRjMTktOTY0Ni01NzgxMTBjYWViNTciLCJzdWIiOiJXZWIgQVBJIFBhc3Nwb3J0IiwiYXBwX2lkIjoiNDFjZDkxNzEtZjU4NS00YzE5LTk2NDYtNTc4MTEwY2FlYjU3IiwiYXBwX2NvZGUiOiJMakhzdWJqNjhMTXZCT0JyIiwiZW5kX3VzZXJfaWQiOiI4YjE5YjY2Mi05M2E1LTRhYTktOGNjNS03MDhmNWE0YmQxNjEifQ.pOzdQ4wTrQjjRlEv1XY9TZitkW5KW1K-wbcUJAoBJ5I",
-            "content-type": "application/json",
-            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": "Linux",
-        }
-        async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
-            data = {
-                "response_mode": "streaming",
-                "query": prompt,
-                "inputs": {},
-            }
-            try:
-                async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
-                    response.raise_for_status()
-                    async for line in response.content:
-                        if line:
-                            try:
-                                json_data = json.loads(line.decode('utf-8').strip().lstrip('data: '))
-                                if json_data['event'] == 'message':
-                                    yield json_data['answer']
-                                elif json_data['event'] == 'message_end':
-                                    return
-                            except json.JSONDecodeError:
-                                continue
-            except ClientResponseError as e:
-                yield f"Error: HTTP {e.status}: {e.message}"

+ 181 - 0
g4f/Provider/Nexra.py

@@ -0,0 +1,181 @@
+from __future__ import annotations
+
+import json
+import base64
+from aiohttp import ClientSession
+from typing import AsyncGenerator
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..image import ImageResponse
+from .helper import format_prompt
+
+class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://nexra.aryahcr.cc"
+    api_endpoint_text = "https://nexra.aryahcr.cc/api/chat/gpt"
+    api_endpoint_image = "https://nexra.aryahcr.cc/api/image/complements"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+    
+    default_model = 'gpt-3.5-turbo'
+    models = [
+        # Text models
+        'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-0314', 'gpt-4-32k-0314',
+        'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
+        'gpt-3', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002',
+        'text-curie-001', 'text-babbage-001', 'text-ada-001',
+        'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002',
+        # Image models
+        'dalle', 'dalle-mini', 'emi'
+    ]
+    
+    image_models = {"dalle", "dalle-mini", "emi"}
+    text_models = set(models) - image_models
+    
+    # Each alias resolves to one canonical model id; the remaining variants are selectable directly via `models`.
+    model_aliases = {
+        "gpt-4": "gpt-4-0613",
+        "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
+        "gpt-3": "text-davinci-003",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncGenerator[str | ImageResponse, None]:
+        model = cls.get_model(model)
+        
+        if model in cls.image_models:
+            async for result in cls.create_image_async_generator(model, messages, proxy, **kwargs):
+                yield result
+        else:
+            async for result in cls.create_text_async_generator(model, messages, proxy, **kwargs):
+                yield result
+
+    @classmethod
+    async def create_text_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncGenerator[str, None]:
+        headers = {
+            "Content-Type": "application/json",
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "messages": messages,
+                "prompt": format_prompt(messages),
+                "model": model,
+                "markdown": False,
+                "stream": False,
+            }
+            async with session.post(cls.api_endpoint_text, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                result = await response.text()
+                json_result = json.loads(result)
+                yield json_result["gpt"]
+
+    @classmethod
+    async def create_image_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncGenerator[ImageResponse | str, None]:
+        headers = {
+            "Content-Type": "application/json"
+        }
+
+        prompt = messages[-1]['content'] if messages else ""
+
+        data = {
+            "prompt": prompt,
+            "model": model
+        }
+
+        async def process_response(response_text: str) -> ImageResponse | None:
+            json_start = response_text.find('{')
+            if json_start != -1:
+                json_data = response_text[json_start:]
+                try:
+                    response_data = json.loads(json_data)
+                    image_data = response_data.get('images', [])[0]
+                    
+                    if image_data.startswith('data:image/'):
+                        return ImageResponse([image_data], "Generated image")
+                    
+                    try:
+                        base64.b64decode(image_data)
+                        data_uri = f"data:image/jpeg;base64,{image_data}"
+                        return ImageResponse([data_uri], "Generated image")
+                    except:
+                        print("Invalid base64 data")
+                        return None
+                except json.JSONDecodeError:
+                    print("Failed to parse JSON.")
+            else:
+                print("No JSON data found in the response.")
+            return None
+
+        async with ClientSession(headers=headers) as session:
+            async with session.post(cls.api_endpoint_image, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                response_text = await response.text()
+                
+                image_response = await process_response(response_text)
+                if image_response:
+                    yield image_response
+                else:
+                    yield "Failed to process image data."
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> str:
+        async for response in cls.create_async_generator(model, messages, proxy, **kwargs):
+            if isinstance(response, ImageResponse):
+                return response.images[0]
+            return response

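Nexra routes image models ('dalle', 'dalle-mini', 'emi') to a separate endpoint and wraps their output in an ImageResponse, while text models return the "gpt" field of a JSON reply; the create_async helper above flattens both cases to a string. A minimal usage sketch (the prompts are illustrative; the class and its create_async signature come from the code above):

import asyncio
from g4f.Provider import Nexra

async def main():
    # Text model: returns the reply text.
    text = await Nexra.create_async("gpt-3.5-turbo",
                                    [{"role": "user", "content": "Say hello in one word"}])
    print(text)

    # Image model: returns the first image as a URL or data URI.
    image = await Nexra.create_async("dalle",
                                     [{"role": "user", "content": "a red cube on a table"}])
    print(image[:60], "...")

asyncio.run(main())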
+ 1 - 1
g4f/Provider/Pizzagpt.py

@@ -47,4 +47,4 @@ class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin):
                 response.raise_for_status()
                 response_json = await response.json()
                 content = response_json.get("answer", {}).get("content", "")
-                yield content
+                yield content

+ 23 - 5
g4f/Provider/ReplicateHome.py

@@ -14,9 +14,9 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://replicate.com"
     parent = "Replicate"
     working = True
-    default_model = 'stability-ai/stable-diffusion-3'
+    default_model = 'meta/meta-llama-3-70b-instruct'
     models = [
-		# Models for image generation
+        # Models for image generation
         'stability-ai/stable-diffusion-3',
         'bytedance/sdxl-lightning-4step',
         'playgroundai/playground-v2.5-1024px-aesthetic',
@@ -28,7 +28,7 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
     ]
 
     versions = {
-		# Model versions for generating images
+        # Model versions for generating images
         'stability-ai/stable-diffusion-3': [
             "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f"
         ],
@@ -39,7 +39,6 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
             "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24"
         ],
         
-        
         # Model versions for text generation
         'meta/meta-llama-3-70b-instruct': [
             "dp-cf04fe09351e25db628e8b6181276547"
@@ -55,6 +54,24 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
     image_models = {"stability-ai/stable-diffusion-3", "bytedance/sdxl-lightning-4step", "playgroundai/playground-v2.5-1024px-aesthetic"}
     text_models = {"meta/meta-llama-3-70b-instruct", "mistralai/mixtral-8x7b-instruct-v0.1", "google-deepmind/gemma-2b-it"}
 
+    model_aliases = {
+        "sd-3": "stability-ai/stable-diffusion-3",
+        "sdxl": "bytedance/sdxl-lightning-4step",
+        "playground-v2.5": "playgroundai/playground-v2.5-1024px-aesthetic",
+        "llama-3-70b": "meta/meta-llama-3-70b-instruct",
+        "mixtral-8x7b": "mistralai/mixtral-8x7b-instruct-v0.1",
+        "gemma-2b": "google-deepmind/gemma-2b-it",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
     @classmethod
     async def create_async_generator(
         cls,
@@ -76,6 +93,7 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
         extra_data: Dict[str, Any] = {},
         **kwargs: Any
     ) -> Union[str, ImageResponse]:
+        model = cls.get_model(model)  # Use the get_model method to resolve model name
         headers = {
             'Accept-Encoding': 'gzip, deflate, br',
             'Accept-Language': 'en-US',
@@ -109,7 +127,7 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
                 "version": version
             }
             if api_key is None:
-                data["model"] = cls.get_model(model)
+                data["model"] = model
                 url = "https://homepage.replicate.com/api/prediction"
             else:
                 url = "https://api.replicate.com/v1/predictions"

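The new model_aliases table lets callers use short names that get_model resolves to the full Replicate slugs, falling back to the default meta/meta-llama-3-70b-instruct for anything unknown. For example:

from g4f.Provider import ReplicateHome

print(ReplicateHome.get_model("sd-3"))           # stability-ai/stable-diffusion-3
print(ReplicateHome.get_model("llama-3-70b"))    # meta/meta-llama-3-70b-instruct
print(ReplicateHome.get_model("no-such-model"))  # falls back to the default model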
+ 133 - 0
g4f/Provider/Snova.py

@@ -0,0 +1,133 @@
+from __future__ import annotations
+
+import json
+from typing import AsyncGenerator
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Snova(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://fast.snova.ai"
+    api_endpoint = "https://fast.snova.ai/api/completion"
+    working = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+    
+    default_model = 'Meta-Llama-3.1-8B-Instruct'
+    models = [
+        'Meta-Llama-3.1-8B-Instruct',
+        'Meta-Llama-3.1-70B-Instruct',
+        'Meta-Llama-3.1-405B-Instruct',
+        'Samba-CoE',
+        'ignos/Mistral-T5-7B-v1',
+        'v1olet/v1olet_merged_dpo_7B',
+        'macadeliccc/WestLake-7B-v2-laser-truthy-dpo',
+        'cookinai/DonutLM-v1',
+    ]
+    
+    model_aliases = {
+        "llama-3.1-8b": "Meta-Llama-3.1-8B-Instruct",
+        "llama-3.1-70b": "Meta-Llama-3.1-70B-Instruct",
+        "llama-3.1-405b": "Meta-Llama-3.1-405B-Instruct",
+        
+        "mistral-7b": "ignos/Mistral-T5-7B-v1",
+        
+        "samba-coe-v0.1": "Samba-CoE",
+        "v1olet-merged-7b": "v1olet/v1olet_merged_dpo_7B",
+        "westlake-7b-v2": "macadeliccc/WestLake-7B-v2-laser-truthy-dpo",
+        "donutlm-v1": "cookinai/DonutLM-v1",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncGenerator[str, None]:
+        model = cls.get_model(model)
+        
+        headers = {
+            "accept": "text/event-stream",
+            "accept-language": "en-US,en;q=0.9",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "origin": cls.url,
+            "pragma": "no-cache",
+            "priority": "u=1, i",
+            "referer": f"{cls.url}/",
+            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "body": {
+                    "messages": [
+                        {
+                            "role": "system",
+                            "content": "You are a helpful assistant."
+                        },
+                        {
+                            "role": "user",
+                            "content": format_prompt(messages),
+                            "id": "1-id",
+                            "ref": "1-ref",
+                            "revision": 1,
+                            "draft": False,
+                            "status": "done",
+                            "enableRealTimeChat": False,
+                            "meta": None
+                        }
+                    ],
+                    "max_tokens": 1000,
+                    "stop": ["<|eot_id|>"],
+                    "stream": True,
+                    "stream_options": {"include_usage": True},
+                    "model": model
+                },
+                "env_type": "tp16"
+            }
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                full_response = ""
+                async for line in response.content:
+                    line = line.decode().strip()
+                    if line.startswith("data: "):
+                        data = line[6:]
+                        if data == "[DONE]":
+                            break
+                        try:
+                            json_data = json.loads(data)
+                            choices = json_data.get("choices", [])
+                            if choices:
+                                delta = choices[0].get("delta", {})
+                                content = delta.get("content", "")
+                                full_response += content
+                        except json.JSONDecodeError:
+                            continue
+                        except Exception as e:
+                            print(f"Error processing chunk: {e}")
+                            print(f"Problematic data: {data}")
+                            continue
+                
+                yield full_response.strip()

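Snova posts the whole conversation to its completion endpoint, reads the SSE stream, and accumulates the delta contents before yielding a single final string. A minimal consumption sketch (the prompt and asyncio wrapper are illustrative; the class and call signature come from the code above):

import asyncio
from g4f.Provider import Snova

async def main():
    # "llama-3.1-70b" is resolved to "Meta-Llama-3.1-70B-Instruct" via model_aliases.
    async for answer in Snova.create_async_generator(
            "llama-3.1-70b", [{"role": "user", "content": "One fun fact about otters"}]):
        print(answer)  # the provider buffers the SSE deltas and yields one complete string

asyncio.run(main())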
+ 17 - 3
g4f/Provider/TeachAnything.py

@@ -13,7 +13,7 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.teach-anything.com"
     api_endpoint = "/api/generate"
     working = True
-    default_model = "llama-3-70b-instruct"
+    default_model = "llama-3.1-70b"
 
     @classmethod
     async def create_async_generator(
@@ -38,9 +38,23 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin):
                 timeout=timeout
             ) as response:
                 response.raise_for_status()
+                buffer = b""
                 async for chunk in response.content.iter_any():
-                    if chunk:
-                        yield chunk.decode()
+                    buffer += chunk
+                    try:
+                        decoded = buffer.decode('utf-8')
+                        yield decoded
+                        buffer = b""
+                    except UnicodeDecodeError:
+                        # If we can't decode, we'll wait for more data
+                        continue
+                
+                # Handle any remaining data in the buffer
+                if buffer:
+                    try:
+                        yield buffer.decode('utf-8', errors='replace')
+                    except Exception as e:
+                        print(f"Error decoding final buffer: {e}")
 
     @staticmethod
     def _get_headers() -> Dict[str, str]:

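The buffering added above exists because a multi-byte UTF-8 character can be split across network chunks, and decoding each chunk on its own would raise UnicodeDecodeError. A tiny self-contained illustration of the failure mode and the buffered fix:

text = "héllo wörld".encode("utf-8")
chunks = [text[:2], text[2:]]          # the 2-byte "é" is split across the two chunks

buffer = b""
for chunk in chunks:
    buffer += chunk
    try:
        print(buffer.decode("utf-8"))  # succeeds only once the full character has arrived
        buffer = b""
    except UnicodeDecodeError:
        continue                       # wait for more data, as the provider does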
+ 103 - 0
g4f/Provider/TwitterBio.py

@@ -0,0 +1,103 @@
+from __future__ import annotations
+
+import json
+import re
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+class TwitterBio(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://www.twitterbio.io"
+    api_endpoint_mistral = "https://www.twitterbio.io/api/mistral"
+    api_endpoint_openai = "https://www.twitterbio.io/api/openai"
+    working = True
+    supports_gpt_35_turbo = True
+    
+    default_model = 'gpt-3.5-turbo'
+    models = [
+        'mistralai/Mixtral-8x7B-Instruct-v0.1',
+        'gpt-3.5-turbo',
+    ]
+    
+    model_aliases = {
+        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        return cls.default_model
+
+    @staticmethod
+    def format_text(text: str) -> str:
+        text = re.sub(r'\s+', ' ', text.strip())
+        text = re.sub(r'\s+([,.!?])', r'\1', text)
+        return text
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+        
+        headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "origin": cls.url,
+            "pragma": "no-cache",
+            "priority": "u=1, i",
+            "referer": f"{cls.url}/",
+            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+        }
+        async with ClientSession(headers=headers) as session:
+            prompt = format_prompt(messages)
+            data = {
+                "prompt": f'{prompt}.'
+            }
+            
+            if model == 'mistralai/Mixtral-8x7B-Instruct-v0.1':
+                api_endpoint = cls.api_endpoint_mistral
+            elif model == 'gpt-3.5-turbo':
+                api_endpoint = cls.api_endpoint_openai
+            else:
+                raise ValueError(f"Unsupported model: {model}")
+            
+            async with session.post(api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                buffer = ""
+                async for line in response.content:
+                    line = line.decode('utf-8').strip()
+                    if line == 'data: [DONE]':
+                        break
+                    elif line.startswith('data: '):
+                        try:
+                            json_data = json.loads(line[6:])
+                            if model == 'mistralai/Mixtral-8x7B-Instruct-v0.1':
+                                if 'choices' in json_data and len(json_data['choices']) > 0:
+                                    text = json_data['choices'][0].get('text', '')
+                                    if text:
+                                        buffer += text
+                            elif model == 'gpt-3.5-turbo':
+                                text = json_data.get('text', '')
+                                if text:
+                                    buffer += text
+                        except json.JSONDecodeError:
+                            continue
+                
+                if buffer:
+                    yield cls.format_text(buffer)

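Responses are buffered and passed through the new format_text helper, which collapses runs of whitespace and removes stray spaces before punctuation. For example:

from g4f.Provider import TwitterBio

print(TwitterBio.format_text("Hello ,   world !\n  How are   you ?"))
# -> "Hello, world! How are you?"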
+ 74 - 0
g4f/Provider/Upstage.py

@@ -0,0 +1,74 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Upstage(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://console.upstage.ai/playground/chat"
+    api_endpoint = "https://ap-northeast-2.apistage.ai/v1/web/demo/chat/completions"
+    working = True
+    default_model = 'upstage/solar-1-mini-chat'
+    models = [
+        'upstage/solar-1-mini-chat',
+        'upstage/solar-1-mini-chat-ja',
+    ]
+    model_aliases = {
+        "solar-1-mini": "upstage/solar-1-mini-chat",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+        
+        headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "origin": "https://console.upstage.ai",
+            "priority": "u=1, i",
+            "referer": "https://console.upstage.ai/",
+            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "cross-site",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "stream": True,
+                "messages": [{"role": "user", "content": format_prompt(messages)}],
+                "model": model
+            }
+            async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for line in response.content:
+                    if line:
+                        line = line.decode('utf-8').strip()
+                        if line.startswith("data: ") and line != "data: [DONE]":
+                            data = json.loads(line[6:])
+                            content = data['choices'][0]['delta'].get('content', '')
+                            if content:
+                                yield content

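Upstage streams OpenAI-style SSE chunks and yields each non-empty delta content as it arrives. A minimal consumption sketch (prompt and wrapper are illustrative; the alias "solar-1-mini" resolves through model_aliases):

import asyncio
from g4f.Provider import Upstage

async def main():
    async for delta in Upstage.create_async_generator(
            "solar-1-mini", [{"role": "user", "content": "Introduce yourself briefly"}]):
        print(delta, end="", flush=True)  # streamed pieces from the SSE "delta" fields
    print()

asyncio.run(main())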
+ 0 - 1
g4f/Provider/You.py

@@ -24,7 +24,6 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
     image_models = ["dall-e"]
     models = [
         default_model,
-        "gpt-4o-mini",
         "gpt-4o",
         "gpt-4-turbo",
         "gpt-4",

+ 12 - 8
g4f/Provider/__init__.py

@@ -6,55 +6,59 @@ from ..providers.base_provider  import AsyncProvider, AsyncGeneratorProvider
 from ..providers.create_images  import CreateImagesProvider
 
 from .deprecated      import *
-from .not_working     import *
 from .selenium        import *
 from .needs_auth      import *
 
 from .AI365VIP         import AI365VIP
 from .Allyfy           import Allyfy
 from .AiChatOnline     import AiChatOnline
+from .AiChats          import AiChats
 from .Aura             import Aura
 from .Bing             import Bing
 from .BingCreateImages import BingCreateImages
+from .Binjie           import Binjie
+from .Bixin123         import Bixin123
 from .Blackbox         import Blackbox
 from .ChatGot          import ChatGot
-from .Chatgpt4o        import Chatgpt4o
 from .Chatgpt4Online   import Chatgpt4Online
+from .Chatgpt4o        import Chatgpt4o
 from .ChatgptFree      import ChatgptFree
-from .Cohere           import Cohere
+from .CodeNews         import CodeNews
 from .DDG              import DDG
 from .DeepInfra        import DeepInfra
 from .DeepInfraImage   import DeepInfraImage
 from .FlowGpt          import FlowGpt
+from .FluxAirforce     import FluxAirforce
+from .Free2GPT         import Free2GPT
 from .FreeChatgpt      import FreeChatgpt
 from .FreeGpt          import FreeGpt
 from .FreeNetfly       import FreeNetfly
 from .GeminiPro        import GeminiPro
-from .GeminiProChat    import GeminiProChat
 from .GigaChat         import GigaChat
 from .GptTalkRu        import GptTalkRu
 from .HuggingChat      import HuggingChat
 from .HuggingFace      import HuggingFace
-from .HuggingFace      import HuggingFace
 from .Koala            import Koala
 from .Liaobots         import Liaobots
 from .LiteIcoding      import LiteIcoding
 from .Llama            import Llama
 from .Local            import Local
-from .MagickPenAsk     import MagickPenAsk
-from .MagickPenChat    import MagickPenChat
-from .Marsyoo          import Marsyoo
+from .MagickPen        import MagickPen
 from .MetaAI           import MetaAI
 from .MetaAIAccount    import MetaAIAccount
+from .Nexra            import Nexra
 from .Ollama           import Ollama
 from .PerplexityLabs   import PerplexityLabs
 from .Pi               import Pi
 from .Pizzagpt         import Pizzagpt
 from .Reka             import Reka
+from .Snova            import Snova
 from .Replicate        import Replicate
 from .ReplicateHome    import ReplicateHome
 from .Rocks            import Rocks
 from .TeachAnything    import TeachAnything
+from .TwitterBio       import TwitterBio
+from .Upstage          import Upstage
 from .Vercel           import Vercel
 from .WhiteRabbitNeo   import WhiteRabbitNeo
 from .You              import You

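With the registrations above, the new providers can be selected explicitly through the usual top-level API. A hedged sketch, assuming the package's long-standing g4f.ChatCompletion.create entry point (model name and prompt are illustrative):

import g4f
from g4f.Provider import MagickPen

response = g4f.ChatCompletion.create(
    model="gpt-4o-mini",              # resolved by the provider's model_aliases
    provider=MagickPen,               # Nexra, Snova, TwitterBio or Upstage work the same way
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)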
+ 0 - 79
g4f/Provider/not_working/AItianhu.py

@@ -1,79 +0,0 @@
-from __future__ import annotations
-
-import json
-
-from ...typing import AsyncResult, Messages
-from ...requests import StreamSession
-from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
-
-
-class AItianhu(AsyncGeneratorProvider):
-    url = "https://www.aitianhu.com"
-    working = False
-    supports_gpt_35_turbo = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        cookies: dict = None,
-        timeout: int = 120, **kwargs) -> AsyncResult:
-        
-        if not cookies:
-            cookies = get_cookies(domain_name='www.aitianhu.com')
-        if not cookies:
-            raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies [refresh https://www.aitianhu.com on chrome]")
-
-        data = {
-            "prompt": format_prompt(messages),
-            "options": {},
-            "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
-            "temperature": 0.8,
-            "top_p": 1,
-            **kwargs
-        }
-
-        headers = {
-            'authority': 'www.aitianhu.com',
-            'accept': 'application/json, text/plain, */*',
-            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'content-type': 'application/json',
-            'origin': 'https://www.aitianhu.com',
-            'referer': 'https://www.aitianhu.com/',
-            'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
-        }
-
-        async with StreamSession(headers=headers,
-                                        cookies=cookies,
-                                        timeout=timeout,
-                                        proxies={"https": proxy},
-                                        impersonate="chrome107", verify=False) as session:
-            
-            async with session.post(f"{cls.url}/api/chat-process", json=data) as response:
-                response.raise_for_status()
-
-                async for line in response.iter_lines():
-                    if line == b"<script>":
-                        raise RuntimeError("Solve challenge and pass cookies")
-
-                    if b"platform's risk control" in line:
-                        raise RuntimeError("Platform's Risk Control")
-
-                    line = json.loads(line)
-
-                    if "detail" not in line:
-                        raise RuntimeError(f"Response: {line}")
-
-                    content = line["detail"]["choices"][0]["delta"].get(
-                        "content"
-                    )
-                    if content:
-                        yield content

+ 0 - 56
g4f/Provider/not_working/Aichatos.py

@@ -1,56 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider
-from ..helper import format_prompt
-
-import random
-
-class Aichatos(AsyncGeneratorProvider):
-    url = "https://chat10.aichatos.xyz"
-    api = "https://api.binjie.fun"
-    working = False
-    supports_gpt_35_turbo = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
-            "Accept": "application/json, text/plain, */*",
-            "Accept-Language": "en-US,en;q=0.5",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Content-Type": "application/json",
-            "Origin": "https://chat10.aichatos.xyz",
-            "DNT": "1",
-            "Sec-GPC": "1",
-            "Connection": "keep-alive",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "cross-site",
-            "TE": "trailers",
-        }
-        async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
-            userId = random.randint(1000000000000, 9999999999999)
-            system_message: str = "",
-            data = {
-                "prompt": prompt,
-                "userId": "#/chat/{userId}",
-                "network": True,
-                "system": system_message,
-                "withoutContext": False,
-                "stream": True,
-            }
-            async with session.post(f"{cls.api}/api/generateStream", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for chunk in response.content:
-                    if chunk:
-                        yield chunk.decode()

+ 0 - 56
g4f/Provider/not_working/Bestim.py

@@ -1,56 +0,0 @@
-from __future__         import annotations
-
-from ...typing           import Messages
-from ..base_provider     import BaseProvider, CreateResult
-from ...requests         import get_session_from_browser
-from uuid               import uuid4
-
-class Bestim(BaseProvider):
-    url = "https://chatgpt.bestim.org"
-    working = False
-    supports_gpt_35_turbo = True
-    supports_message_history = True
-    supports_stream = True
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: Messages,
-        stream: bool,
-        proxy: str = None, 
-        **kwargs
-    ) -> CreateResult:
-        session = get_session_from_browser(cls.url, proxy=proxy)
-        headers = {
-            'Accept': 'application/json, text/event-stream',
-        }
-        data = {
-            "messagesHistory": [{
-                "id": str(uuid4()),
-                "content": m["content"],
-                "from": "you" if m["role"] == "user" else "bot"
-            } for m in messages],
-            "type": "chat",
-        }
-        response = session.post(
-            url="https://chatgpt.bestim.org/chat/send2/",
-            json=data,
-            headers=headers,
-            stream=True
-        )
-        response.raise_for_status()
-        for line in response.iter_lines():
-            if not line.startswith(b"event: trylimit"):
-                yield line.decode().removeprefix("data: ")
-
-
-
-
-
-
-            
-
-
-
-

File diff is too large to display
+ 0 - 61
g4f/Provider/not_working/ChatBase.py


+ 0 - 66
g4f/Provider/not_working/ChatForAi.py

@@ -1,66 +0,0 @@
-from __future__ import annotations
-
-import time
-import hashlib
-import uuid
-
-from ...typing import AsyncResult, Messages
-from ...requests import StreamSession, raise_for_status
-from ...errors import RateLimitError
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-
-class ChatForAi(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://chatforai.store"
-    working = False
-    default_model = "gpt-3.5-turbo"
-    supports_message_history = True
-    supports_gpt_35_turbo = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        timeout: int = 120,
-        temperature: float = 0.7,
-        top_p: float = 1,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-        headers = {
-            "Content-Type": "text/plain;charset=UTF-8",
-            "Origin": cls.url,
-            "Referer": f"{cls.url}/?r=b",
-        }
-        async with StreamSession(impersonate="chrome", headers=headers, proxies={"https": proxy}, timeout=timeout) as session:
-            timestamp = int(time.time() * 1e3)
-            conversation_id = str(uuid.uuid4())
-            data = {
-                "conversationId": conversation_id,
-                "conversationType": "chat_continuous",
-                "botId": "chat_continuous",
-                "globalSettings":{
-                    "baseUrl": "https://api.openai.com",
-                    "model": model,
-                    "messageHistorySize": 5,
-                    "temperature": temperature,
-                    "top_p": top_p,
-                    **kwargs
-                },
-                "prompt": "",
-                "messages": messages,
-                "timestamp": timestamp,
-                "sign": generate_signature(timestamp, "", conversation_id)
-            }
-            async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
-                await raise_for_status(response)
-                async for chunk in response.iter_content():
-                    if b"https://chatforai.store" in chunk:
-                        raise RuntimeError(f"Response: {chunk.decode(errors='ignore')}")
-                    yield chunk.decode(errors="ignore")
-
-    
-def generate_signature(timestamp: int, message: str, id: str):
-    buffer = f"{id}:{timestamp}:{message}:h496Jd6b"
-    return hashlib.sha256(buffer.encode()).hexdigest()

+ 0 - 88
g4f/Provider/not_working/ChatgptAi.py

@@ -1,88 +0,0 @@
-from __future__ import annotations
-
-import re, html, json, string, random
-from aiohttp import ClientSession
-
-from ...typing import Messages, AsyncResult
-from ...errors import RateLimitError
-from ..base_provider import AsyncGeneratorProvider
-from ..helper import get_random_string
-
-class ChatgptAi(AsyncGeneratorProvider):
-    url = "https://chatgpt.ai"
-    working = False
-    supports_message_history = True
-    supports_system_message = True,
-    supports_gpt_4 = True,
-    _system = None
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "authority"          : "chatgpt.ai",
-            "accept"             : "*/*",
-            "accept-language"    : "en-US",
-            "cache-control"      : "no-cache",
-            "origin"             : cls.url,
-            "pragma"             : "no-cache",
-            "referer"            : f"{cls.url}/",
-            "sec-ch-ua"          : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-            "sec-ch-ua-mobile"   : "?0",
-            "sec-ch-ua-platform" : '"Windows"',
-            "sec-fetch-dest"     : "empty",
-            "sec-fetch-mode"     : "cors",
-            "sec-fetch-site"     : "same-origin",
-            "user-agent"         : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
-        }
-        async with ClientSession(
-                headers=headers
-            ) as session:
-            if not cls._system:
-                async with session.get(cls.url, proxy=proxy) as response:
-                    response.raise_for_status()
-                    text = await response.text()
-                result = re.search(r"data-system='(.*?)'", text)
-                if result :
-                    cls._system = json.loads(html.unescape(result.group(1)))
-            if not cls._system:
-                raise RuntimeError("System args not found")
-
-            data = {
-                "botId": cls._system["botId"],
-                "customId": cls._system["customId"],
-                "session": cls._system["sessionId"],
-                "chatId": get_random_string(),
-                "contextId": cls._system["contextId"],
-                "messages": messages[:-1],
-                "newMessage": messages[-1]["content"],
-                "newFileId": None,
-                "stream":True
-            }
-            async with session.post(
-               "https://chatgate.ai/wp-json/mwai-ui/v1/chats/submit",
-                proxy=proxy,
-                json=data,
-                headers={"X-Wp-Nonce": cls._system["restNonce"]}
-            ) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    if line.startswith(b"data: "):
-                        try:
-                            line = json.loads(line[6:])
-                            assert "type" in line
-                        except:
-                            raise RuntimeError(f"Broken line: {line.decode()}")
-                        if line["type"] == "error":
-                            if "https://chatgate.ai/login" in line["data"]:
-                                raise RateLimitError("Rate limit reached")
-                            raise RuntimeError(line["data"])
-                        if line["type"] == "live":
-                            yield line["data"]
-                        elif line["type"] == "end":
-                            break

+ 0 - 70
g4f/Provider/not_working/ChatgptDemo.py

@@ -1,70 +0,0 @@
-from __future__ import annotations
-
-import time, json, re, asyncio
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ...errors import RateLimitError
-from ..base_provider import AsyncGeneratorProvider
-from ..helper import format_prompt
-
-class ChatgptDemo(AsyncGeneratorProvider):
-    url = "https://chatgptdemo.info/chat"
-    working = False
-    supports_gpt_35_turbo = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "authority": "chatgptdemo.info",
-            "accept-language": "en-US",
-            "origin": "https://chatgptdemo.info",
-            "referer": "https://chatgptdemo.info/chat/",
-            "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
-        }
-        async with ClientSession(headers=headers) as session:
-            async with session.get(f"{cls.url}/", proxy=proxy) as response:
-                response.raise_for_status()
-                text = await response.text()
-            result = re.search(
-                r'<div id="USERID" style="display: none">(.*?)<\/div>',
-                text,
-            )
-            if result:
-                user_id = result.group(1)
-            else:
-                raise RuntimeError("No user id found")
-            async with session.post(f"https://chatgptdemo.info/chat/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
-                response.raise_for_status()
-                chat_id = (await response.json())["id_"]
-            if not chat_id:
-                raise RuntimeError("Could not create new chat")
-            await asyncio.sleep(10)
-            data = {
-                "question": format_prompt(messages),
-                "chat_id": chat_id,
-                "timestamp": int((time.time())*1e3),
-            }
-            async with session.post(f"https://chatgptdemo.info/chat/chat_api_stream", json=data, proxy=proxy) as response:
-                if response.status == 429:
-                    raise RateLimitError("Rate limit reached")
-                response.raise_for_status()
-                async for line in response.content:
-                    if line.startswith(b"data: "):
-                        line = json.loads(line[6:-1])
-
-                        chunk = line["choices"][0]["delta"].get("content")
-                        if chunk:
-                            yield chunk

+ 0 - 56
g4f/Provider/not_working/ChatgptDemoAi.py

@@ -1,56 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider
-from ..helper import get_random_string
-
-class ChatgptDemoAi(AsyncGeneratorProvider):
-    url = "https://chat.chatgptdemo.ai"
-    working = False
-    supports_gpt_35_turbo = True
-    supports_message_history = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
-            "Accept": "*/*",
-            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Referer": f"{cls.url}/",
-            "Content-Type": "application/json",
-            "Origin": cls.url,
-            "Connection": "keep-alive",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "TE": "trailers"
-        }
-        async with ClientSession(headers=headers) as session:
-            data = {
-                "botId": "default",
-                "customId": "8824fe9bdb323a5d585a3223aaa0cb6e",
-                "session": "N/A",
-                "chatId": get_random_string(12),
-                "contextId": 2,
-                "messages": messages,
-                "newMessage": messages[-1]["content"],
-                "stream": True
-            }
-            async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for chunk in response.content:
-                    response.raise_for_status()
-                    if chunk.startswith(b"data: "):
-                        data = json.loads(chunk[6:])
-                        if data["type"] == "live":
-                            yield data["data"]

+ 0 - 78
g4f/Provider/not_working/ChatgptLogin.py

@@ -1,78 +0,0 @@
-from __future__ import annotations
-
-import re
-import time
-import json
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider
-from ..helper import format_prompt
-
-
-class ChatgptLogin(AsyncGeneratorProvider):
-    url                   = "https://chatgptlogin.ai"
-    working               = False
-    supports_gpt_35_turbo = True
-    _user_id              = None
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
-            "Accept": "*/*",
-            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Referer": f"{cls.url}/chat/",
-            "Content-Type": "application/json",
-            "Origin": cls.url,
-            "Alt-Used": "chatgptlogin.ai",
-            "Connection": "keep-alive",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "Pragma": "no-cache",
-            "Cache-Control": "no-cache"
-        }
-        async with ClientSession(headers=headers) as session:
-            if not cls._user_id:
-                async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
-                    response.raise_for_status()
-                    response = await response.text()
-                result = re.search(
-                    r'<div id="USERID" style="display: none">(.*?)<\/div>',
-                    response,
-                )
-
-                if result:
-                    cls._user_id = result.group(1)
-                else:
-                    raise RuntimeError("No user id found")
-            async with session.post(f"{cls.url}/chat/new_chat", json={"user_id": cls._user_id}, proxy=proxy) as response:
-                response.raise_for_status()
-                chat_id = (await response.json())["id_"]
-            if not chat_id:
-                raise RuntimeError("Could not create new chat")
-            prompt = format_prompt(messages)
-            data = {
-                "question": prompt,
-                "chat_id": chat_id,
-                "timestamp": int(time.time() * 1e3),
-            }
-            async with session.post(f"{cls.url}/chat/chat_api_stream", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    if line.startswith(b"data: "):
-                        
-                        content = json.loads(line[6:])["choices"][0]["delta"].get("content")
-                        if content:
-                            yield content
-                        
-            async with session.post(f"{cls.url}/chat/delete_chat", json={"chat_id": chat_id}, proxy=proxy) as response:
-                response.raise_for_status()

+ 0 - 66
g4f/Provider/not_working/ChatgptNext.py

@@ -1,66 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider
-
-class ChatgptNext(AsyncGeneratorProvider):
-    url = "https://www.chatgpt-free.cc"
-    working = False
-    supports_gpt_35_turbo = True
-    supports_message_history = True
-    supports_system_message = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        max_tokens: int = None,
-        temperature: float = 0.7,
-        top_p: float = 1,
-        presence_penalty: float = 0,
-        frequency_penalty: float = 0,
-        **kwargs
-    ) -> AsyncResult:
-        if not model:
-            model = "gpt-3.5-turbo"
-        headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
-            "Accept": "text/event-stream",
-            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Content-Type": "application/json",
-            "Referer": "https://chat.fstha.com/",
-            "x-requested-with": "XMLHttpRequest",
-            "Origin": "https://chat.fstha.com",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "Authorization": "Bearer ak-chatgpt-nice",
-            "Connection": "keep-alive",
-            "Alt-Used": "chat.fstha.com",
-        }
-        async with ClientSession(headers=headers) as session:
-            data = {
-                "messages": messages,
-                "stream": True,
-                "model": model,
-                "temperature": temperature,
-                "presence_penalty": presence_penalty,
-                "frequency_penalty": frequency_penalty,
-                "top_p": top_p,
-                "max_tokens": max_tokens,
-            }
-            async with session.post(f"https://chat.fstha.com/api/openai/v1/chat/completions", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for chunk in response.content:
-                    if chunk.startswith(b"data: [DONE]"):
-                        break
-                    if chunk.startswith(b"data: "):
-                        content = json.loads(chunk[6:])["choices"][0]["delta"].get("content")
-                        if content:
-                            yield content

+ 0 - 106
g4f/Provider/not_working/ChatgptX.py

@@ -1,106 +0,0 @@
-from __future__ import annotations
-
-import re
-import json
-
-from aiohttp import ClientSession
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider
-from ..helper import format_prompt
-from ...errors import RateLimitError
-
-class ChatgptX(AsyncGeneratorProvider):
-    url = "https://chatgptx.de"
-    supports_gpt_35_turbo = True
-    working               = False
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US',
-            'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': 'Linux',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
-        }
-        async with ClientSession(headers=headers) as session:
-            async with session.get(f"{cls.url}/", proxy=proxy) as response:
-                response = await response.text()
-
-                result = re.search(
-                    r'<meta name="csrf-token" content="(.*?)"', response
-                )
-                if result:
-                    csrf_token = result.group(1)
-
-                result = re.search(r"openconversions\('(.*?)'\)", response)
-                if result:
-                    chat_id = result.group(1)
-
-                result = re.search(
-                    r'<input type="hidden" id="user_id" value="(.*?)"', response
-                )
-                if result:
-                    user_id = result.group(1)
-
-            if not csrf_token or not chat_id or not user_id:
-                raise RuntimeError("Missing csrf_token, chat_id or user_id")
-
-            data = {
-                '_token': csrf_token,
-                'user_id': user_id,
-                'chats_id': chat_id,
-                'prompt': format_prompt(messages),
-                'current_model': "gpt3"
-            }
-            headers = {
-                'authority': 'chatgptx.de',
-                'accept': 'application/json, text/javascript, */*; q=0.01',
-                'origin': cls.url,
-                'referer': f'{cls.url}/',
-                'x-csrf-token': csrf_token,
-                'x-requested-with': 'XMLHttpRequest'
-            }
-            async with session.post(f'{cls.url}/sendchat', data=data, headers=headers, proxy=proxy) as response:
-                response.raise_for_status()
-                chat = await response.json()
-                if "messages" in  chat and "Anfragelimit" in chat["messages"]:
-                    raise RateLimitError("Rate limit reached")
-                if "response" not in chat or not chat["response"]:
-                    raise RuntimeError(f'Response: {chat}')
-            headers = {
-                'authority': 'chatgptx.de',
-                'accept': 'text/event-stream',
-                'referer': f'{cls.url}/',
-                'x-csrf-token': csrf_token,
-                'x-requested-with': 'XMLHttpRequest'
-            }
-            data = {
-                "user_id": user_id,
-                "chats_id": chat_id,
-                "current_model": "gpt3",
-                "conversions_id": chat["conversions_id"],
-                "ass_conversions_id": chat["ass_conversions_id"],
-            }
-            async with session.get(f'{cls.url}/chats_stream', params=data, headers=headers, proxy=proxy) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    if line.startswith(b"data: "):
-                        row = line[6:-1]
-                        if row == b"[DONE]":
-                            break
-                        try:
-                            content = json.loads(row)["choices"][0]["delta"].get("content")
-                        except:
-                            raise RuntimeError(f"Broken line: {line.decode()}")
-                        if content:
-                            yield content

+ 0 - 60
g4f/Provider/not_working/Chatxyz.py

@@ -1,60 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider
-
-class Chatxyz(AsyncGeneratorProvider):
-    url = "https://chat.3211000.xyz"
-    working = False
-    supports_gpt_35_turbo = True
-    supports_message_history = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            'Accept': 'text/event-stream',
-            'Accept-Encoding': 'gzip, deflate, br',
-            'Accept-Language': 'en-US,en;q=0.5',
-            'Alt-Used': 'chat.3211000.xyz',
-            'Content-Type': 'application/json',
-            'Host': 'chat.3211000.xyz',
-            'Origin': 'https://chat.3211000.xyz',
-            'Referer': 'https://chat.3211000.xyz/',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-origin',
-            'TE': 'trailers',
-            'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:121.0) Gecko/20100101 Firefox/121.0',
-            'x-requested-with': 'XMLHttpRequest'
-        }   
-        async with ClientSession(headers=headers) as session:
-            data = {
-                "messages": messages,
-                "stream": True,
-                "model": "gpt-3.5-turbo",
-                "temperature": 0.5,
-                "presence_penalty": 0,
-                "frequency_penalty": 0,
-                "top_p": 1,
-                **kwargs
-            }    
-            async with session.post(f'{cls.url}/api/openai/v1/chat/completions', json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for chunk in response.content:
-                    line = chunk.decode() 
-                    if line.startswith("data: [DONE]"):
-                            break
-                    elif line.startswith("data: "):
-                            line = json.loads(line[6:])
-                            chunk = line["choices"][0]["delta"].get("content")
-                            if(chunk):
-                                yield chunk

+ 0 - 58
g4f/Provider/not_working/Cnote.py

@@ -1,58 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider
-from ..helper import format_prompt
-
-
-class Cnote(AsyncGeneratorProvider):
-    url = "https://f1.cnote.top"
-    api_url = "https://p1api.xjai.pro/freeapi/chat-process"
-    working = False
-    supports_gpt_35_turbo = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
-            "Accept": "application/json, text/plain, */*",
-            "Accept-Language": "en-US,en;q=0.5",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Content-Type": "application/json",
-            "Origin": cls.url,
-            "DNT": "1",
-            "Sec-GPC": "1",
-            "Connection": "keep-alive",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "cross-site",
-            "TE": "trailers",
-        }
-        async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
-            system_message: str = "",
-            data = {
-                "prompt": prompt,
-                "systemMessage": system_message,
-                "temperature": 0.8,
-                "top_p": 1,
-            }
-            async with session.post(cls.api_url, json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for chunk in response.content:
-                    if chunk:
-                        try:
-                            data = json.loads(chunk.decode().split("&KFw6loC9Qvy&")[-1])
-                            text = data.get("text", "")
-                            yield text
-                        except (json.JSONDecodeError, IndexError):
-                            pass

+ 0 - 78
g4f/Provider/not_working/Feedough.py

@@ -1,78 +0,0 @@
-from __future__ import annotations
-
-import json
-import asyncio
-from aiohttp import ClientSession, TCPConnector
-from urllib.parse import urlencode
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-
-
-class Feedough(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://www.feedough.com"
-    api_endpoint = "/wp-admin/admin-ajax.php"
-    working = False
-    default_model = ''
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
-            "dnt": "1",
-            "origin": cls.url,
-            "referer": f"{cls.url}/ai-prompt-generator/",
-            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
-        }
-
-        connector = TCPConnector(ssl=False)
-
-        async with ClientSession(headers=headers, connector=connector) as session:
-            data = {
-                "action": "aixg_generate",
-                "prompt": format_prompt(messages),
-                "aixg_generate_nonce": "110c021031"
-            }
-
-            try:
-                async with session.post(
-                    f"{cls.url}{cls.api_endpoint}",
-                    data=urlencode(data),
-                    proxy=proxy
-                ) as response:
-                    response.raise_for_status()
-                    response_text = await response.text()
-                    try:
-                        response_json = json.loads(response_text)
-                        if response_json.get("success") and "data" in response_json:
-                            message = response_json["data"].get("message", "")
-                            yield message
-                    except json.JSONDecodeError:
-                        yield response_text
-            except Exception as e:
-                print(f"An error occurred: {e}")
-
-    @classmethod
-    async def run(cls, *args, **kwargs):
-        async for item in cls.create_async_generator(*args, **kwargs):
-            yield item
-
-        tasks = asyncio.all_tasks()
-        for task in tasks:
-            if not task.done():
-                await task

+ 0 - 54
g4f/Provider/not_working/Gpt6.py

@@ -1,54 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider
-
-class Gpt6(AsyncGeneratorProvider):
-    url = "https://gpt6.ai"
-    working = False
-    supports_gpt_35_turbo = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
-            "Accept": "*/*",
-            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Content-Type": "application/json",
-            "Origin": "https://gpt6.ai",
-            "Connection": "keep-alive",
-            "Referer": "https://gpt6.ai/",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "cross-site",
-            "TE": "trailers",
-        }
-        async with ClientSession(headers=headers) as session:
-            data = {
-                "prompts":messages,
-                "geoInfo":{"ip":"100.90.100.222","hostname":"ip-100-090-100-222.um36.pools.vodafone-ip.de","city":"Muenchen","region":"North Rhine-Westphalia","country":"DE","loc":"44.0910,5.5827","org":"AS3209 Vodafone GmbH","postal":"41507","timezone":"Europe/Berlin"},
-                "paid":False,
-                "character":{"textContent":"","id":"52690ad6-22e4-4674-93d4-1784721e9944","name":"GPT6","htmlContent":""}
-            }
-            async with session.post(f"https://seahorse-app-d29hu.ondigitalocean.app/api/v1/query", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    print(line)
-                    if line.startswith(b"data: [DONE]"):
-                        break
-                    elif line.startswith(b"data: "):
-                        line = json.loads(line[6:-1])
-
-                        chunk = line["choices"][0]["delta"].get("content")
-                        if chunk:
-                            yield chunk

+ 0 - 35
g4f/Provider/not_working/GptChatly.py

@@ -1,35 +0,0 @@
-from __future__ import annotations
-
-from ...requests import Session, get_session_from_browser
-from ...typing       import Messages
-from ..base_provider import AsyncProvider
-
-
-class GptChatly(AsyncProvider):
-    url = "https://gptchatly.com"
-    working = False
-    supports_message_history = True
-    supports_gpt_35_turbo = True
-
-    @classmethod
-    async def create_async(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        timeout: int = 120,
-        session: Session = None,
-        **kwargs
-    ) -> str:
-        if not session:
-            session = get_session_from_browser(cls.url, proxy=proxy, timeout=timeout)
-        if model.startswith("gpt-4"):
-            chat_url = f"{cls.url}/fetch-gpt4-response"
-        else:
-            chat_url = f"{cls.url}/felch-response"
-        data = {
-            "past_conversations": messages
-        }
-        response = session.post(chat_url, json=data)
-        response.raise_for_status()
-        return response.json()["chatGPTResponse"]

+ 0 - 91
g4f/Provider/not_working/GptForLove.py

@@ -1,91 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import os
-import json
-try:
-    import execjs
-    has_requirements = True
-except ImportError:
-    has_requirements = False
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider
-from ..helper import format_prompt
-from ...errors import MissingRequirementsError
-
-class GptForLove(AsyncGeneratorProvider):
-    url = "https://ai18.gptforlove.com"
-    working = False
-    supports_gpt_35_turbo = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        if not has_requirements:
-            raise MissingRequirementsError('Install "PyExecJS" package')
-        if not model:
-            model = "gpt-3.5-turbo"
-        headers = {
-            "authority": "api.gptplus.one",
-            "accept": "application/json, text/plain, */*",
-            "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6,nl;q=0.5,zh-CN;q=0.4,zh-TW;q=0.3,zh;q=0.2",
-            "content-type": "application/json",
-            "origin": cls.url,
-            "referer": f"{cls.url}/",
-            "sec-ch-ua": "\"Google Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": "Linux",
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "cross-site",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
-        }
-        async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
-            data = {
-                "prompt": prompt,
-                "options": {},
-                "systemMessage": kwargs.get("system_message", "You are ChatGPT, the version is GPT3.5, a large language model trained by OpenAI. Follow the user's instructions carefully."),
-                "temperature": kwargs.get("temperature", 0.8),
-                "top_p": kwargs.get("top_p", 1),
-                "secret": get_secret(),
-            }
-            async with session.post("https://api.gptplus.one/chat-process", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    try:
-                        line = json.loads(line)
-                    except:
-                        raise RuntimeError(f"Broken line: {line}")
-                    if "detail" in line:
-                        content = line["detail"]["choices"][0]["delta"].get("content")
-                        if content:
-                            yield content
-                    elif "10分钟内提问超过了5次" in line:
-                        raise RuntimeError("Rate limit reached")
-                    else:
-                        raise RuntimeError(f"Response: {line}")
-
-
-def get_secret() -> str:
-    dir = os.path.dirname(__file__)
-    include = f'{dir}/npm/node_modules/crypto-js/crypto-js'
-    source = """
-CryptoJS = require({include})
-var k = 'fjfsdwiuhfwf'
-    , e = Math.floor(new Date().getTime() / 1e3);
-var t = CryptoJS.enc.Utf8.parse(e)
-    , o = CryptoJS.AES.encrypt(t, k, {
-    mode: CryptoJS.mode.ECB,
-    padding: CryptoJS.pad.Pkcs7
-});
-return o.toString()
-"""
-    source = source.replace('{include}', json.dumps(include))
-    return execjs.compile(source).call('')

+ 0 - 66
g4f/Provider/not_working/GptGo.py

@@ -1,66 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import json
-import base64
-
-from ...typing       import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, format_prompt
-
-
-class GptGo(AsyncGeneratorProvider):
-    url                   = "https://gptgo.ai"
-    working               = False
-    supports_gpt_35_turbo = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
-            "Accept": "*/*",
-            "Accept-language": "en-US",
-            "Origin": cls.url,
-            "Referer": f"{cls.url}/",
-            "sec-ch-ua": '"Google Chrome";v="116", "Chromium";v="116", "Not?A_Brand";v="24"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-        }
-        async with ClientSession(
-                headers=headers
-            ) as session:
-            async with session.post(
-                "https://gptgo.ai/get_token.php",
-                data={"ask": format_prompt(messages)},
-                proxy=proxy
-            ) as response:
-                response.raise_for_status()
-                token = await response.text();
-                if token == "error token":
-                    raise RuntimeError(f"Response: {token}")
-                token = base64.b64decode(token[10:-20]).decode()
-
-            async with session.get(
-                "https://api.gptgo.ai/web.php",
-                params={"array_chat": token},
-                proxy=proxy
-            ) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    if line.startswith(b"data: [DONE]"):
-                        break
-                    if line.startswith(b"data: "):
-                        line = json.loads(line[6:])
-                        if "choices" not in line:
-                            raise RuntimeError(f"Response: {line}")
-                        content = line["choices"][0]["delta"].get("content")
-                        if content and content != "\n#GPTGO ":
-                            yield content

+ 0 - 61
g4f/Provider/not_working/GptGod.py

@@ -1,61 +0,0 @@
-from __future__ import annotations
-
-import secrets
-import json
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider
-from ..helper import format_prompt
-
-class GptGod(AsyncGeneratorProvider):
-    url = "https://gptgod.site"
-    working = False
-    supports_gpt_35_turbo = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        
-        headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
-            "Accept": "text/event-stream",
-            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Alt-Used": "gptgod.site",
-            "Connection": "keep-alive",
-            "Referer": f"{cls.url}/",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "Pragma": "no-cache",
-            "Cache-Control": "no-cache",
-        }
-
-        async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
-            data = {
-                "content": prompt,
-                "id": secrets.token_hex(16).zfill(32)
-            }
-            async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data, proxy=proxy) as response:
-                response.raise_for_status()
-                event = None
-                async for line in response.content:
-                   # print(line)
-
-                    if line.startswith(b'event: '):
-                        event = line[7:-1]
-                    
-                    elif event == b"data" and line.startswith(b"data: "):
-                        data = json.loads(line[6:-1])
-                        if data:
-                            yield data
-                    
-                    elif event == b"done":
-                        break

+ 0 - 57
g4f/Provider/not_working/OnlineGpt.py

@@ -1,57 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider
-from ..helper import get_random_string
-
-class OnlineGpt(AsyncGeneratorProvider):
-    url = "https://onlinegpt.org"
-    working = False
-    supports_gpt_35_turbo = True
-    supports_message_history = False
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
-            "Accept": "text/event-stream",
-            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Referer": f"{cls.url}/chat/",
-            "Content-Type": "application/json",
-            "Origin": cls.url,
-            "Alt-Used": "onlinegpt.org",
-            "Connection": "keep-alive",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "TE": "trailers"
-        }
-        async with ClientSession(headers=headers) as session:
-            data = {
-                "botId": "default",
-                "customId": None,
-                "session": get_random_string(12),
-                "chatId": get_random_string(),
-                "contextId": 9,
-                "messages": messages,
-                "newMessage": messages[-1]["content"],
-                "newImageId": None,
-                "stream": True
-            }
-            async with session.post(f"{cls.url}/chatgpt/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for chunk in response.content:
-                    if chunk.startswith(b"data: "):
-                        data = json.loads(chunk[6:])
-                        if data["type"] == "live":
-                            yield data["data"]

+ 0 - 21
g4f/Provider/not_working/__init__.py

@@ -1,21 +0,0 @@
-
-from .AItianhu        import AItianhu
-from .Aichatos        import Aichatos
-from .Bestim          import Bestim
-from .ChatBase        import ChatBase
-from .ChatForAi       import ChatForAi
-from .ChatgptAi       import ChatgptAi
-from .ChatgptDemo     import ChatgptDemo
-from .ChatgptDemoAi   import ChatgptDemoAi
-from .ChatgptLogin    import ChatgptLogin
-from .ChatgptNext     import ChatgptNext
-from .ChatgptX        import ChatgptX
-from .Chatxyz         import Chatxyz
-from .Cnote           import Cnote
-from .Feedough        import Feedough
-from .Gpt6            import Gpt6
-from .GptChatly       import GptChatly
-from .GptForLove      import GptForLove
-from .GptGo           import GptGo
-from .GptGod          import GptGod
-from .OnlineGpt       import OnlineGpt

+ 170 - 41
g4f/client/client.py

@@ -1,9 +1,14 @@
 from __future__ import annotations
 
+import os
 import time
 import random
 import string
-
+import logging
+import asyncio
+from typing import Union
+from ..providers.base_provider import AsyncGeneratorProvider
+from ..image import ImageResponse, to_image, to_data_uri
 from ..typing import Union, Iterator, Messages, ImageType
 from ..providers.types import BaseProvider, ProviderType, FinishReason
 from ..providers.conversation import BaseConversation
@@ -15,9 +20,12 @@ from .types import IterResponse, ImageProvider
 from .types import Client as BaseClient
 from .service import get_model_and_provider, get_last_provider
 from .helper import find_stop, filter_json, filter_none
+from ..models import ModelUtils
+from ..Provider import IterListProvider
+
 
 def iter_response(
-    response: iter[str],
+    response: Iterator[str],
     stream: bool,
     response_format: dict = None,
     max_tokens: int = None,
@@ -26,6 +34,7 @@ def iter_response(
     content = ""
     finish_reason = None
     completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
+    
     for idx, chunk in enumerate(response):
         if isinstance(chunk, FinishReason):
             finish_reason = chunk.reason
@@ -33,17 +42,25 @@ def iter_response(
         elif isinstance(chunk, BaseConversation):
             yield chunk
             continue
+        
         content += str(chunk)
+        
         if max_tokens is not None and idx + 1 >= max_tokens:
             finish_reason = "length"
+        
         first, content, chunk = find_stop(stop, content, chunk if stream else None)
+        
         if first != -1:
             finish_reason = "stop"
+        
         if stream:
             yield ChatCompletionChunk(chunk, None, completion_id, int(time.time()))
+        
         if finish_reason is not None:
             break
+    
     finish_reason = "stop" if finish_reason is None else finish_reason
+    
     if stream:
         yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time()))
     else:
@@ -52,14 +69,16 @@ def iter_response(
                 content = filter_json(content)
         yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))
 
+
 def iter_append_model_and_provider(response: IterResponse) -> IterResponse:
     last_provider = None
     for chunk in response:
         last_provider = get_last_provider(True) if last_provider is None else last_provider
         chunk.model = last_provider.get("model")
-        chunk.provider =  last_provider.get("name")
+        chunk.provider = last_provider.get("name")
         yield chunk
 
+
 class Client(BaseClient):
     def __init__(
         self,
@@ -69,9 +88,17 @@ class Client(BaseClient):
     ) -> None:
         super().__init__(**kwargs)
         self.chat: Chat = Chat(self, provider)
-        self.images: Images = Images(self, image_provider)
+        self._images: Images = Images(self, image_provider)
 
-class Completions():
+    @property
+    def images(self) -> Images:
+        return self._images
+
+    async def async_images(self) -> Images:
+        return self._images
+
+
+class Completions:
     def __init__(self, client: Client, provider: ProviderType = None):
         self.client: Client = client
         self.provider: ProviderType = provider
@@ -87,7 +114,7 @@ class Completions():
         max_tokens: int = None,
         stop: Union[list[str], str] = None,
         api_key: str = None,
-        ignored  : list[str] = None,
+        ignored: list[str] = None,
         ignore_working: bool = False,
         ignore_stream: bool = False,
         **kwargs
@@ -100,11 +127,13 @@ class Completions():
             ignore_working,
             ignore_stream,
         )
-        
+
         stop = [stop] if isinstance(stop, str) else stop
+        
         response = provider.create_completion(
-            model, messages,
-            stream=stream,            
+            model,
+            messages,
+            stream=stream,
             **filter_none(
                 proxy=self.client.get_proxy() if proxy is None else proxy,
                 max_tokens=max_tokens,
@@ -113,66 +142,166 @@ class Completions():
             ),
             **kwargs
         )
+        
         response = iter_response(response, stream, response_format, max_tokens, stop)
         response = iter_append_model_and_provider(response)
+        
         return response if stream else next(response)
 
-class Chat():
+
+class Chat:
     completions: Completions
 
     def __init__(self, client: Client, provider: ProviderType = None):
         self.completions = Completions(client, provider)
 
+
 def iter_image_response(response: Iterator) -> Union[ImagesResponse, None]:
-    for chunk in list(response):
+    logging.info("Starting iter_image_response")
+    response_list = list(response)
+    logging.info(f"Response list: {response_list}")
+    
+    for chunk in response_list:
+        logging.info(f"Processing chunk: {chunk}")
         if isinstance(chunk, ImageProviderResponse):
+            logging.info("Found ImageProviderResponse")
             return ImagesResponse([Image(image) for image in chunk.get_list()])
-
-def create_image(client: Client, provider: ProviderType, prompt: str, model: str = "", **kwargs) -> Iterator:
+    
+    logging.warning("No ImageProviderResponse found in the response")
+    return None
 
 
+def create_image(client: Client, provider: ProviderType, prompt: str, model: str = "", **kwargs) -> Iterator:
+    logging.info(f"Creating image with provider: {provider}, model: {model}, prompt: {prompt}")
+    
     if isinstance(provider, type) and provider.__name__ == "You":
         kwargs["chat_mode"] = "create"
     else:
-        prompt = f"create a image with: {prompt}"
-    return provider.create_completion(
+        prompt = f"create an image with: {prompt}"
+    
+    response = provider.create_completion(
         model,
         [{"role": "user", "content": prompt}],
         stream=True,
         proxy=client.get_proxy(),
         **kwargs
     )
+    
+    logging.info(f"Response from create_completion: {response}")
+    return response
 
-class Images():
-    def __init__(self, client: Client, provider: ImageProvider = None):
-        self.client: Client = client
+
+class Images:
+    def __init__(self, client: 'Client', provider: ImageProvider = None):
+        self.client: 'Client' = client
         self.provider: ImageProvider = provider
         self.models: ImageModels = ImageModels(client)
 
-    def generate(self, prompt, model: str = None, **kwargs) -> ImagesResponse:
+    def generate(self, prompt: str, model: str = None, **kwargs) -> ImagesResponse:
+        logging.info(f"Starting synchronous image generation for model: {model}, prompt: {prompt}")
+        new_loop = False
+        try:
+            loop = asyncio.get_event_loop()
+        except RuntimeError:
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+            new_loop = True
+        
+        try:
+            result = loop.run_until_complete(self.async_generate(prompt, model, **kwargs))
+            logging.info(f"Synchronous image generation completed. Result: {result}")
+            return result
+        except Exception as e:
+            logging.error(f"Error in synchronous image generation: {str(e)}")
+            raise
+        finally:
+            # Closing a running or shared loop would raise and break later calls;
+            # only close a loop this method created itself.
+            if new_loop and not loop.is_running():
+                loop.close()
+
+    async def async_generate(self, prompt: str, model: str = None, **kwargs) -> ImagesResponse:
+        logging.info(f"Generating image for model: {model}, prompt: {prompt}")
         provider = self.models.get(model, self.provider)
-        if isinstance(provider, type) and issubclass(provider, BaseProvider):
-            response = create_image(self.client, provider, prompt, **kwargs)
+        if provider is None:
+            raise ValueError(f"Unknown model: {model}")
+        
+        logging.info(f"Provider: {provider}")
+        
+        if isinstance(provider, IterListProvider):
+            if provider.providers:
+                provider = provider.providers[0]
+                logging.info(f"Using first provider from IterListProvider: {provider}")
+            else:
+                raise ValueError(f"IterListProvider for model {model} has no providers")
+
+        if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
+            logging.info("Using AsyncGeneratorProvider")
+            messages = [{"role": "user", "content": prompt}]
+            async for response in provider.create_async_generator(model, messages, **kwargs):
+                if isinstance(response, ImageResponse):
+                    return self._process_image_response(response)
+                elif isinstance(response, str):
+                    image_response = ImageResponse([response], prompt)
+                    return self._process_image_response(image_response)
+        elif hasattr(provider, 'create'):
+            logging.info("Using provider's create method")
+            if asyncio.iscoroutinefunction(provider.create):
+                response = await provider.create(prompt)
+            else:
+                response = provider.create(prompt)
+            
+            if isinstance(response, ImageResponse):
+                return self._process_image_response(response)
+            elif isinstance(response, str):
+                image_response = ImageResponse([response], prompt)
+                return self._process_image_response(image_response)
         else:
-            response = list(provider.create(prompt))
-        image = iter_image_response(response)
-        if image is None:
-            raise NoImageResponseError()
-        return image
+            raise ValueError(f"Provider {provider} does not support image generation")
+        
+        logging.error(f"Unexpected response type: {type(response)}")
+        raise NoImageResponseError(f"Unexpected response type: {type(response)}")
+
+    def _process_image_response(self, response: ImageResponse) -> ImagesResponse:
+        processed_images = []
+        for image_data in response.get_list():
+            if image_data.startswith('http://') or image_data.startswith('https://'):
+                processed_images.append(Image(url=image_data))
+            else:
+                image = to_image(image_data)
+                file_name = self._save_image(image)
+                processed_images.append(Image(url=file_name))
+        return ImagesResponse(processed_images)
+
+    def _save_image(self, image: 'PILImage') -> str:
+        os.makedirs('generated_images', exist_ok=True)
+        file_name = f"generated_images/image_{int(time.time())}.png"
+        image.save(file_name)
+        return file_name
 
-    def create_variation(self, image: ImageType, model: str = None, **kwargs):
+    async def create_variation(self, image: Union[str, bytes], model: str = None, **kwargs):
         provider = self.models.get(model, self.provider)
-        result = None
-        if isinstance(provider, type) and issubclass(provider, BaseProvider):
-            response = provider.create_completion(
-                "",
-                [{"role": "user", "content": "create a image like this"}],
-                True,
-                image=image,
-                proxy=self.client.get_proxy(),
-                **kwargs
-            )
-            result = iter_image_response(response)
-        if result is None:
-            raise NoImageResponseError()
-        return result
+        if provider is None:
+            raise ValueError(f"Unknown model: {model}")
+
+        if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
+            messages = [{"role": "user", "content": "create a variation of this image"}]
+            image_data = to_data_uri(image)
+            async for response in provider.create_async_generator(model, messages, image=image_data, **kwargs):
+                if isinstance(response, ImageResponse):
+                    return self._process_image_response(response)
+                elif isinstance(response, str):
+                    image_response = ImageResponse([response], "Image variation")
+                    return self._process_image_response(image_response)
+        elif hasattr(provider, 'create_variation'):
+            if asyncio.iscoroutinefunction(provider.create_variation):
+                response = await provider.create_variation(image, **kwargs)
+            else:
+                response = provider.create_variation(image, **kwargs)
+            
+            if isinstance(response, ImageResponse):
+                return self._process_image_response(response)
+            elif isinstance(response, str):
+                image_response = ImageResponse([response], "Image variation")
+                return self._process_image_response(image_response)
+        else:
+            raise ValueError(f"Provider {provider} does not support image variation")
+
+        raise NoImageResponseError("Failed to create image variation")
+

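For reference, a minimal usage sketch of the reworked Images client above. The model name and prompt are placeholders, the resolved provider depends on what ModelUtils maps the name to, and the data/url layout of ImagesResponse is assumed to follow the OpenAI-style stubs used elsewhere in the client:

from g4f.client import Client

client = Client()
# Synchronous path: generate() drives async_generate() on an event loop internally.
result = client.images.generate("a watercolor fox", model="sdxl")  # "sdxl" is illustrative
print(result.data[0].url)  # a remote URL, or a local path under generated_images/
# Note that create_variation() is now a coroutine and must be awaited from async code.
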
+ 8 - 11
g4f/client/image_models.py

@@ -2,18 +2,15 @@ from __future__ import annotations
 
 from .types import Client, ImageProvider
 
-from ..Provider.BingCreateImages import BingCreateImages
-from ..Provider.needs_auth import Gemini, OpenaiChat
-from ..Provider.You import You
+from ..models import ModelUtils
 
 class ImageModels():
-    gemini = Gemini
-    openai = OpenaiChat
-    you = You
-
-    def __init__(self, client: Client) -> None:
+    def __init__(self, client):
         self.client = client
-        self.default = BingCreateImages(proxy=self.client.get_proxy())
+        self.models = ModelUtils.convert
 
-    def get(self, name: str, default: ImageProvider = None) -> ImageProvider:
-        return getattr(self, name) if hasattr(self, name) else default or self.default
+    def get(self, name, default=None):
+        model = self.models.get(name)
+        if model and model.best_provider:
+            return model.best_provider
+        return default

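A quick sketch of how the new lookup above behaves; the model names are illustrative, and resolution depends entirely on what ModelUtils.convert contains:

image_models = ImageModels(client)
provider = image_models.get("flux")           # ModelUtils.convert["flux"].best_provider, if that entry exists
fallback = image_models.get("no-such-model")  # returns the supplied default (None here) for unknown names
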
+ 28 - 13
g4f/gui/server/api.py

@@ -6,6 +6,7 @@ import os.path
 import uuid
 import asyncio
 import time
+import base64
 from aiohttp import ClientSession
 from typing import Iterator, Optional
 from flask import send_from_directory
@@ -195,18 +196,32 @@ class Api():
                             cookies=cookies
                         ) as session:
                             async def copy_image(image):
-                                async with session.get(image) as response:
-                                    target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}")
-                                    with open(target, "wb") as f:
-                                        async for chunk in response.content.iter_any():
-                                            f.write(chunk)
-                                    with open(target, "rb") as f:
-                                        extension = is_accepted_format(f.read(12)).split("/")[-1]
-                                        extension = "jpg" if extension == "jpeg" else extension
-                                    new_target = f"{target}.{extension}"
-                                    os.rename(target, new_target)
-                                    return f"/images/{os.path.basename(new_target)}"
-                            return await asyncio.gather(*[copy_image(image) for image in images])                                
+                                if image.startswith("data:"):
+                                    # Handle a data: URL
+                                    data_uri_parts = image.split(",")
+                                    if len(data_uri_parts) == 2:
+                                        content_type, base64_data = data_uri_parts
+                                        extension = content_type.split("/")[-1].split(";")[0]
+                                        target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}.{extension}")
+                                        with open(target, "wb") as f:
+                                            f.write(base64.b64decode(base64_data))
+                                        return f"/images/{os.path.basename(target)}"
+                                    else:
+                                        return None
+                                else:
+                                    # Handle a regular URL
+                                    async with session.get(image) as response:
+                                        target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}")
+                                        with open(target, "wb") as f:
+                                            async for chunk in response.content.iter_any():
+                                                f.write(chunk)
+                                        with open(target, "rb") as f:
+                                            extension = is_accepted_format(f.read(12)).split("/")[-1]
+                                            extension = "jpg" if extension == "jpeg" else extension
+                                        new_target = f"{target}.{extension}"
+                                        os.rename(target, new_target)
+                                        return f"/images/{os.path.basename(new_target)}"
+                            return await asyncio.gather(*[copy_image(image) for image in images])
                     images = asyncio.run(copy_images(chunk.get_list(), chunk.options.get("cookies")))
                     yield self._format_json("content", str(ImageResponse(images, chunk.alt)))
                 elif not isinstance(chunk, FinishReason):
@@ -245,4 +260,4 @@ def get_error_message(exception: Exception) -> str:
     provider = get_last_provider()
     if provider is None:
         return message
-    return f"{provider.__name__}: {message}"
+    return f"{provider.__name__}: {message}"

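The new data-URI branch in copy_image splits the URI into its content-type header and base64 payload before writing the decoded bytes to disk. A standalone sketch of that parsing (the payload below is a dummy string, not a real image):

import base64

data_uri = "data:image/png;base64," + base64.b64encode(b"not-a-real-image").decode()
content_type, b64_data = data_uri.split(",", 1)
extension = content_type.split("/")[-1].split(";")[0]  # -> "png"
raw_bytes = base64.b64decode(b64_data)                 # bytes that copy_image writes to the target file
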
+ 345 - 280
g4f/models.py

@@ -4,33 +4,35 @@ from dataclasses import dataclass
 
 from .Provider import IterListProvider, ProviderType
 from .Provider import (
-    AI365VIP,
+    AiChatOnline,
     Allyfy,
     Bing,
+    Binjie,
+    Bixin123,
     Blackbox,
     ChatGot,
-    Chatgpt4o,
     Chatgpt4Online,
+    Chatgpt4o,
     ChatgptFree,
+    CodeNews,
     DDG,
     DeepInfra,
     DeepInfraImage,
+    FluxAirforce,
+    Free2GPT,
     FreeChatgpt,
     FreeGpt,
     FreeNetfly,
     Gemini,
     GeminiPro,
-    GeminiProChat,
     GigaChat,
     HuggingChat,
     HuggingFace,
     Koala,
     Liaobots,
-    LiteIcoding,
-    MagickPenAsk,
-    MagickPenChat,
-    Marsyoo,
+    MagickPen,
     MetaAI,
+    Nexra,
     OpenaiChat,
     PerplexityLabs,
     Pi,
@@ -38,7 +40,10 @@ from .Provider import (
     Reka,
     Replicate,
     ReplicateHome,
+    Snova,
     TeachAnything,
+    TwitterBio,
+    Upstage,
     You,
 )
 
@@ -66,33 +71,18 @@ default = Model(
     name          = "",
     base_provider = "",
     best_provider = IterListProvider([
-        Bing,
-        You,
-        OpenaiChat,
-        FreeChatgpt,
-        AI365VIP,
-        Chatgpt4o,
         DDG,
-        ChatgptFree,
-        Koala,
-        Pizzagpt,
-    ])
-)
-
-# GPT-3.5 too, but all providers supports long requests and responses
-gpt_35_long = Model(
-    name          = 'gpt-3.5-turbo',
-    base_provider = 'openai',
-    best_provider = IterListProvider([
-        FreeGpt,
-        You,
-        Koala,
-        ChatgptFree,
         FreeChatgpt,
-        DDG,
-        AI365VIP,
+        HuggingChat,
         Pizzagpt,
-        Allyfy,
+        ChatgptFree,
+        ReplicateHome,
+        Upstage,
+        Blackbox,
+        Bixin123,
+        Binjie,
+        Free2GPT,
+        MagickPen,
     ])
 )
 
@@ -101,56 +91,59 @@ gpt_35_long = Model(
 ############
 
 ### OpenAI ###
-### GPT-3.5 / GPT-4 ###
+# gpt-3
+gpt_3 = Model(
+    name          = 'gpt-3',
+    base_provider = 'OpenAI',
+    best_provider = IterListProvider([
+        Nexra,
+    ])
+)
+
 # gpt-3.5
 gpt_35_turbo = Model(
     name          = 'gpt-3.5-turbo',
-    base_provider = 'openai',
+    base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        FreeGpt,
-        You,
-        Koala,
-        ChatgptFree,
-        FreeChatgpt,
-        AI365VIP,
-        Pizzagpt,
-        Allyfy,
+        Allyfy, TwitterBio, Nexra, Bixin123, CodeNews,
     ])
 )
 
 # gpt-4
-gpt_4 = Model(
-    name          = 'gpt-4',
-    base_provider = 'openai',
+gpt_4o = Model(
+    name          = 'gpt-4o',
+    base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        Bing, Chatgpt4Online
+        Liaobots, Chatgpt4o, OpenaiChat,
     ])
 )
 
-gpt_4_turbo = Model(
-    name          = 'gpt-4-turbo',
-    base_provider = 'openai',
+gpt_4o_mini = Model(
+    name          = 'gpt-4o-mini',
+    base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        Bing, Liaobots, LiteIcoding
+        DDG, Liaobots, You, FreeNetfly, Pizzagpt, ChatgptFree, AiChatOnline, CodeNews, 
+        MagickPen, OpenaiChat, Koala,       
     ])
 )
-gpt_4o = Model(
-    name          = 'gpt-4o',
-    base_provider = 'openai',
+
+gpt_4_turbo = Model(
+    name          = 'gpt-4-turbo',
+    base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        You, Liaobots, Chatgpt4o, AI365VIP, OpenaiChat, Marsyoo, LiteIcoding, MagickPenAsk,
+        Nexra, Bixin123, Liaobots, Bing
     ])
 )
 
-gpt_4o_mini = Model(
-    name          = 'gpt-4o-mini',
-    base_provider = 'openai',
+gpt_4 = Model(
+    name          = 'gpt-4',
+    base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        DDG, Liaobots, OpenaiChat, You, FreeNetfly, MagickPenChat,
+        Chatgpt4Online, Nexra, Binjie, Bing,
+        gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider
     ])
 )
 
-
 ### GigaChat ###
 gigachat = Model(
     name          = 'GigaChat:latest',
@@ -161,81 +154,65 @@ gigachat = Model(
 
 ### Meta ###
 meta = Model(
-    name          = "meta",
-    base_provider = "meta",
+    name          = "meta-ai",
+    base_provider = "Meta",
     best_provider = MetaAI
 )
 
-llama_3_8b_instruct = Model(
-    name          = "meta-llama/Meta-Llama-3-8B-Instruct",
-    base_provider = "meta",
-    best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
+llama_3_8b = Model(
+    name          = "llama-3-8b",
+    base_provider = "Meta",
+    best_provider = IterListProvider([DeepInfra, Replicate])
 )
 
-llama_3_70b_instruct = Model(
-    name          = "meta-llama/Meta-Llama-3-70B-Instruct",
-    base_provider = "meta",
-    best_provider = IterListProvider([DeepInfra, PerplexityLabs, Replicate])
+llama_3_70b = Model(
+    name          = "llama-3-70b",
+    base_provider = "Meta",
+    best_provider = IterListProvider([ReplicateHome, DeepInfra, PerplexityLabs, Replicate])
 )
 
-llama_3_70b_instruct = Model(
-    name          = "meta/meta-llama-3-70b-instruct",
-    base_provider = "meta",
-    best_provider = IterListProvider([ReplicateHome, TeachAnything])
+llama_3_1_8b = Model(
+    name          = "llama-3.1-8b",
+    base_provider = "Meta",
+    best_provider = IterListProvider([Blackbox])
 )
 
-llama_3_70b_chat_hf = Model(
-    name          = "meta-llama/Llama-3-70b-chat-hf",
-    base_provider = "meta",
-    best_provider = IterListProvider([DDG])
+llama_3_1_70b = Model(
+    name          = "llama-3.1-70b",
+    base_provider = "Meta",
+    best_provider = IterListProvider([DDG, HuggingChat, FreeGpt, Blackbox, TeachAnything, Free2GPT, HuggingFace])
 )
 
-llama_3_1_70b_instruct = Model(
-    name          = "meta-llama/Meta-Llama-3.1-70B-Instruct",
-    base_provider = "meta",
-    best_provider = IterListProvider([HuggingChat, HuggingFace])
+llama_3_1_405b = Model(
+    name          = "llama-3.1-405b",
+    base_provider = "Meta",
+    best_provider = IterListProvider([HuggingChat, Blackbox, HuggingFace])
 )
 
-llama_3_1_405b_instruct_FP8 = Model(
-    name          = "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
-    base_provider = "meta",
-    best_provider = IterListProvider([HuggingChat, HuggingFace])
-)
-
-
 ### Mistral ###
 mixtral_8x7b = Model(
-    name          = "mistralai/Mixtral-8x7B-Instruct-v0.1",
-    base_provider = "huggingface",
-    best_provider = IterListProvider([DeepInfra, HuggingFace, PerplexityLabs, HuggingChat, DDG, ReplicateHome])
-)
-
-mistral_7b_v02 = Model(
-    name          = "mistralai/Mistral-7B-Instruct-v0.2",
-    base_provider = "huggingface",
-    best_provider = IterListProvider([DeepInfra, HuggingFace, HuggingChat])
+    name          = "mixtral-8x7b",
+    base_provider = "Mistral",
+    best_provider = IterListProvider([HuggingChat, DDG, ReplicateHome, TwitterBio, DeepInfra, HuggingFace,])
 )
 
-
-### NousResearch ###
-Nous_Hermes_2_Mixtral_8x7B_DPO = Model(
-    name          = "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
-    base_provider = "NousResearch",
-    best_provider = IterListProvider([HuggingFace, HuggingChat])
+mistral_7b = Model(
+    name          = "mistral-7b",
+    base_provider = "Mistral",
+    best_provider = IterListProvider([HuggingChat, HuggingFace, DeepInfra])
 )
 
-
 ### 01-ai ###
-Yi_1_5_34B_chat = Model(
-    name          = "01-ai/Yi-1.5-34B-Chat",
+yi_1_5_34b = Model(
+    name          = "yi-1.5-34b",
     base_provider = "01-ai",
-    best_provider = IterListProvider([HuggingFace, HuggingChat])
+    best_provider = IterListProvider([HuggingChat, HuggingFace])
 )
 
 
 ### Microsoft ###
-Phi_3_mini_4k_instruct = Model(
-    name          = "microsoft/Phi-3-mini-4k-instruct",
+phi_3_mini_4k = Model(
+    name          = "phi-3-mini-4k",
     base_provider = "Microsoft",
     best_provider = IterListProvider([HuggingFace, HuggingChat])
 )
@@ -252,41 +229,22 @@ gemini = Model(
 gemini_pro = Model(
     name          = 'gemini-pro',
     base_provider = 'Google',
-    best_provider = IterListProvider([GeminiPro, You, ChatGot, GeminiProChat, Liaobots, LiteIcoding])
+    best_provider = IterListProvider([GeminiPro, ChatGot, Liaobots])
 )
 
 gemini_flash = Model(
     name          = 'gemini-flash',
     base_provider = 'Google',
-    best_provider = IterListProvider([Liaobots])
-)
-
-gemini_1_5 = Model(
-    name          = 'gemini-1.5',
-    base_provider = 'Google',
-    best_provider = IterListProvider([LiteIcoding])
+    best_provider = IterListProvider([Liaobots, Blackbox])
 )
 
 # gemma
-gemma_2b_it = Model(
-    name          = 'gemma-2b-it',
+gemma_2b = Model(
+    name          = 'gemma-2b',
     base_provider = 'Google',
     best_provider = IterListProvider([ReplicateHome])
 )
 
-gemma_2_9b_it = Model(
-    name          = 'gemma-2-9b-it',
-    base_provider = 'Google',
-    best_provider = IterListProvider([PerplexityLabs])
-)
-
-gemma_2_27b_it = Model(
-    name          = 'gemma-2-27b-it',
-    base_provider = 'Google',
-    best_provider = IterListProvider([PerplexityLabs])
-)
-
-
 ### Anthropic ###
 claude_2 = Model(
     name          = 'claude-2',
@@ -309,13 +267,13 @@ claude_2_1 = Model(
 claude_3_opus = Model(
     name          = 'claude-3-opus',
     base_provider = 'Anthropic',
-    best_provider = IterListProvider([You, Liaobots])
+    best_provider = IterListProvider([Liaobots])
 )
 
 claude_3_sonnet = Model(
     name          = 'claude-3-sonnet',
     base_provider = 'Anthropic',
-    best_provider = IterListProvider([You, Liaobots])
+    best_provider = IterListProvider([Liaobots])
 )
 
 claude_3_5_sonnet = Model(
@@ -327,19 +285,7 @@ claude_3_5_sonnet = Model(
 claude_3_haiku = Model(
     name          = 'claude-3-haiku',
     base_provider = 'Anthropic',
-    best_provider = IterListProvider([DDG, AI365VIP, Liaobots])
-)
-
-claude_3 = Model(
-    name          = 'claude-3',
-    base_provider = 'Anthropic',
-    best_provider = IterListProvider([LiteIcoding])
-)
-
-claude_3_5 = Model(
-    name          = 'claude-3.5',
-    base_provider = 'Anthropic',
-    best_provider = IterListProvider([LiteIcoding])
+    best_provider = IterListProvider([DDG, Liaobots])
 )
 
 
@@ -351,14 +297,6 @@ reka_core = Model(
 )
 
 
-### NVIDIA ###
-nemotron_4_340b_instruct = Model(
-    name = 'nemotron-4-340b-instruct',
-    base_provider = 'NVIDIA',
-    best_provider = IterListProvider([PerplexityLabs])
-)
-
-
 ### Blackbox ###
 blackbox = Model(
     name = 'blackbox',
@@ -369,7 +307,7 @@ blackbox = Model(
 
 ### Databricks ###
 dbrx_instruct = Model(
-    name = 'databricks/dbrx-instruct',
+    name = 'dbrx-instruct',
     base_provider = 'Databricks',
     best_provider = IterListProvider([DeepInfra])
 )
@@ -377,71 +315,110 @@ dbrx_instruct = Model(
 
 ### CohereForAI ###
 command_r_plus = Model(
-    name = 'CohereForAI/c4ai-command-r-plus',
+    name = 'command-r-plus',
     base_provider = 'CohereForAI',
     best_provider = IterListProvider([HuggingChat])
 )
 
 
 ### iFlytek ###
-SparkDesk_v1_1 = Model(
-    name = 'SparkDesk-v1.1',
+sparkdesk_v1_1 = Model(
+    name = 'sparkdesk-v1.1',
     base_provider = 'iFlytek',
     best_provider = IterListProvider([FreeChatgpt])
 )
 
-
-### DeepSeek ###
-deepseek_coder = Model(
-    name = 'deepseek-coder',
-    base_provider = 'DeepSeek',
-    best_provider = IterListProvider([FreeChatgpt])
-)
-
-deepseek_chat = Model(
-    name = 'deepseek-chat',
-    base_provider = 'DeepSeek',
+### Qwen ###
+qwen_1_5_14b = Model(
+    name = 'qwen-1.5-14b',
+    base_provider = 'Qwen',
     best_provider = IterListProvider([FreeChatgpt])
 )
 
-
-### Qwen ###
-Qwen2_7B_instruct = Model(
-    name = 'Qwen2-7B-Instruct',
+qwen_turbo = Model(
+    name = 'qwen-turbo',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([FreeChatgpt])
+    best_provider = IterListProvider([Bixin123])
 )
 
 
 ### Zhipu AI ###
-glm4_9B_chat = Model(
-    name = 'glm4-9B-chat',
+glm_3_6b = Model(
+    name = 'glm-3-6b',
     base_provider = 'Zhipu AI',
     best_provider = IterListProvider([FreeChatgpt])
 )
 
-chatglm3_6B = Model(
-    name = 'chatglm3-6B',
+glm_4_9b = Model(
+    name = 'glm-4-9B',
     base_provider = 'Zhipu AI',
     best_provider = IterListProvider([FreeChatgpt])
 )
 
+glm_4 = Model(
+    name = 'glm-4',
+    base_provider = 'Zhipu AI',
+    best_provider = IterListProvider([CodeNews, glm_4_9b.best_provider,])
+)
 
 ### 01-ai ###
-Yi_1_5_9B_chat = Model(
-    name = 'Yi-1.5-9B-Chat',
+yi_1_5_9b = Model(
+    name = 'yi-1.5-9b',
     base_provider = '01-ai',
     best_provider = IterListProvider([FreeChatgpt])
 )
 
 
-### Other ###
+### Upstage ###
+solar_1_mini = Model(
+    name = 'solar-1-mini',
+    base_provider = 'Upstage',
+    best_provider = IterListProvider([Upstage])
+)
+
+### Pi ###
 pi = Model(
     name = 'pi',
     base_provider = 'inflection',
     best_provider = Pi
 )
 
+### SambaNova ###
+samba_coe_v0_1 = Model(
+    name = 'samba-coe-v0.1',
+    base_provider = 'SambaNova',
+    best_provider = Snova
+)
+
+### Trong-Hieu Nguyen-Mau ###
+v1olet_merged_7b = Model(
+    name = 'v1olet-merged-7b',
+    base_provider = 'Trong-Hieu Nguyen-Mau',
+    best_provider = Snova
+)
+
+### Macadeliccc ###
+westlake_7b_v2 = Model(
+    name = 'westlake-7b-v2',
+    base_provider = 'Macadeliccc',
+    best_provider = Snova
+)
+
+### CookinAI ###
+donutlm_v1 = Model(
+    name = 'donutlm-v1',
+    base_provider = 'CookinAI',
+    best_provider = Snova
+)
+
+### DeepSeek ###
+deepseek = Model(
+    name = 'deepseek',
+    base_provider = 'DeepSeek',
+    best_provider = CodeNews
+)
+
+
 
 #############
 ### Image ###
@@ -449,30 +426,82 @@ pi = Model(
 
 ### Stability AI ###
 sdxl = Model(
-    name = 'stability-ai/sdxl',
+    name = 'sdxl',
     base_provider = 'Stability AI',
-    best_provider = IterListProvider([DeepInfraImage])
+    best_provider = IterListProvider([ReplicateHome, DeepInfraImage])
     
 )
 
-stable_diffusion_3 = Model(
-    name = 'stability-ai/stable-diffusion-3',
+sd_3 = Model(
+    name = 'sd-3',
     base_provider = 'Stability AI',
     best_provider = IterListProvider([ReplicateHome])
     
 )
 
-sdxl_lightning_4step = Model(
-    name = 'bytedance/sdxl-lightning-4step',
+### Playground ###
+playground_v2_5 = Model(
+    name = 'playground-v2.5',
     base_provider = 'Stability AI',
     best_provider = IterListProvider([ReplicateHome])
     
 )
 
-playground_v2_5_1024px_aesthetic = Model(
-    name = 'playgroundai/playground-v2.5-1024px-aesthetic',
-    base_provider = 'Stability AI',
-    best_provider = IterListProvider([ReplicateHome])
+### Flux AI ###
+flux = Model(
+    name = 'flux',
+    base_provider = 'Flux AI',
+    best_provider = IterListProvider([FluxAirforce])
+    
+)
+
+flux_realism = Model(
+    name = 'flux-realism',
+    base_provider = 'Flux AI',
+    best_provider = IterListProvider([FluxAirforce])
+    
+)
+
+flux_anime = Model(
+    name = 'flux-anime',
+    base_provider = 'Flux AI',
+    best_provider = IterListProvider([FluxAirforce])
+    
+)
+
+flux_3d = Model(
+    name = 'flux-3d',
+    base_provider = 'Flux AI',
+    best_provider = IterListProvider([FluxAirforce])
+    
+)
+
+flux_disney = Model(
+    name = 'flux-disney',
+    base_provider = 'Flux AI',
+    best_provider = IterListProvider([FluxAirforce])
+    
+)
+
+### ###
+dalle = Model(
+    name = 'dalle',
+    base_provider = '',
+    best_provider = IterListProvider([Nexra])
+    
+)
+
+dalle_mini = Model(
+    name = 'dalle-mini',
+    base_provider = '',
+    best_provider = IterListProvider([Nexra])
+    
+)
+
+emi = Model(
+    name = 'emi',
+    base_provider = '',
+    best_provider = IterListProvider([Nexra])
     
 )
 
@@ -485,127 +514,163 @@ class ModelUtils:
     """
     convert: dict[str, Model] = {
     
-        ############
-        ### Text ###
-        ############
+############
+### Text ###
+############
         
-        ### OpenAI ###
-        ### GPT-3.5 / GPT-4 ###
-        # gpt-3.5
-        'gpt-3.5-turbo': gpt_35_turbo,
-        'gpt-3.5-long': gpt_35_long,
+### OpenAI ###
+# gpt-3
+'gpt-3': gpt_3,
+
+# gpt-3.5
+'gpt-3.5-turbo': gpt_35_turbo,
 
-        # gpt-4
-        'gpt-4o'         : gpt_4o,
-        'gpt-4o-mini'    : gpt_4o_mini,
-        'gpt-4'          : gpt_4,
-        'gpt-4-turbo'    : gpt_4_turbo,
+# gpt-4
+'gpt-4o'         : gpt_4o,
+'gpt-4o-mini'    : gpt_4o_mini,
+'gpt-4'          : gpt_4,
+'gpt-4-turbo'    : gpt_4_turbo,
         
-        ### Meta ###
-        "meta-ai": meta,
         
-        'llama-3-8b-instruct': llama_3_8b_instruct,
-        'llama-3-70b-instruct': llama_3_70b_instruct,
-        'llama-3-70b-chat': llama_3_70b_chat_hf, 
-        'llama-3-70b-instruct': llama_3_70b_instruct, 
+### Meta ###
+"meta-ai": meta,
+
+# llama-3
+'llama-3-8b': llama_3_8b,
+'llama-3-70b': llama_3_70b,
         
-        'llama-3.1-70b': llama_3_1_70b_instruct,
-        'llama-3.1-405b': llama_3_1_405b_instruct_FP8,
-        'llama-3.1-70b-instruct': llama_3_1_70b_instruct,
-        'llama-3.1-405b-instruct': llama_3_1_405b_instruct_FP8,
+# llama-3.1
+'llama-3.1-8b': llama_3_1_8b,
+'llama-3.1-70b': llama_3_1_70b,
+'llama-3.1-405b': llama_3_1_405b,
         
-        ### Mistral (Opensource) ###
-        'mixtral-8x7b': mixtral_8x7b,
-        'mistral-7b-v02': mistral_7b_v02,
         
-        ### NousResearch ###
-        'Nous-Hermes-2-Mixtral-8x7B-DPO': Nous_Hermes_2_Mixtral_8x7B_DPO,
+### Mistral ###
+'mixtral-8x7b': mixtral_8x7b,
+'mistral-7b': mistral_7b,
+
 
-        ### 01-ai ###
-        'Yi-1.5-34b-chat': Yi_1_5_34B_chat,
+### 01-ai ###
+'yi-1.5-34b': yi_1_5_34b,
+        
         
-        ### Microsoft ###
-        'Phi-3-mini-4k-instruct': Phi_3_mini_4k_instruct,
+### Microsoft ###
+'phi-3-mini-4k': phi_3_mini_4k,
 
-        ### Google ###
-        # gemini
-        'gemini': gemini,
-        'gemini-pro': gemini_pro,
-        'gemini-pro': gemini_1_5,
-        'gemini-flash': gemini_flash,
+
+### Google ###
+# gemini
+'gemini': gemini,
+'gemini-pro': gemini_pro,
+'gemini-flash': gemini_flash,
         
-        # gemma
-        'gemma-2b': gemma_2b_it,
-        'gemma-2-9b': gemma_2_9b_it,
-        'gemma-2-27b': gemma_2_27b_it,
+# gemma
+'gemma-2b': gemma_2b,
+
 
-        ### Anthropic ###
-        'claude-2': claude_2,
-        'claude-2.0': claude_2_0,
-        'claude-2.1': claude_2_1,
+### Anthropic ###
+'claude-2': claude_2,
+'claude-2.0': claude_2_0,
+'claude-2.1': claude_2_1,
         
-        'claude-3-opus': claude_3_opus,
-        'claude-3-sonnet': claude_3_sonnet,
-        'claude-3-5-sonnet': claude_3_5_sonnet,
-        'claude-3-haiku': claude_3_haiku,
+'claude-3-opus': claude_3_opus,
+'claude-3-sonnet': claude_3_sonnet,
+'claude-3-5-sonnet': claude_3_5_sonnet,
+'claude-3-haiku': claude_3_haiku,
         
-        'claude-3-opus': claude_3,
-        'claude-3-5-sonnet': claude_3_5,
         
+### Reka AI ###
+'reka-core': reka_core,
+      
+        
+### Blackbox ###
+'blackbox': blackbox,
         
+        
+### CohereForAI ###
+'command-r+': command_r_plus,
+        
+        
+### Databricks ###
+'dbrx-instruct': dbrx_instruct,
 
-        ### Reka AI ###
-        'reka': reka_core,
 
-        ### NVIDIA ###
-        'nemotron-4-340b-instruct': nemotron_4_340b_instruct,
+### GigaChat ###
+'gigachat': gigachat,
         
-        ### Blackbox ###
-        'blackbox': blackbox,
         
-        ### CohereForAI ###
-        'command-r+': command_r_plus,
+### iFlytek ###
+'sparkdesk-v1.1': sparkdesk_v1_1,
         
-        ### Databricks ###
-        'dbrx-instruct': dbrx_instruct,
-
-        ### GigaChat ###
-        'gigachat': gigachat,
         
-        ### iFlytek ###
-        'SparkDesk-v1.1': SparkDesk_v1_1,
+### Qwen ###
+'qwen-1.5-14b': qwen_1_5_14b,
+'qwen-turbo': qwen_turbo,
         
-        ### DeepSeek ###
-        'deepseek-coder': deepseek_coder,
-        'deepseek-chat': deepseek_chat,
         
-        ### Qwen ###
-        'Qwen2-7b-instruct': Qwen2_7B_instruct,
+### Zhipu AI ###
+'glm-3-6b': glm_3_6b,
+'glm-4-9b': glm_4_9b,
+'glm-4': glm_4,
         
-        ### Zhipu AI ###
-        'glm4-9b-chat': glm4_9B_chat,
-        'chatglm3-6b': chatglm3_6B,
         
-        ### 01-ai ###
-        'Yi-1.5-9b-chat': Yi_1_5_9B_chat,
+### 01-ai ###
+'yi-1.5-9b': yi_1_5_9b,
         
-        # Other
-        'pi': pi,
         
-        #############
-        ### Image ###
-        #############
+### Upstage ###
+'solar-1-mini': solar_1_mini,
+
+
+### Pi ###
+'pi': pi,
+
+
+### SambaNova ###
+'samba-coe-v0.1': samba_coe_v0_1,
+
+
+### Trong-Hieu Nguyen-Mau ###
+'v1olet-merged-7b': v1olet_merged_7b,
+
+
+### Macadeliccc ###
+'westlake-7b-v2': westlake_7b_v2,
+
+
+### CookinAI ###
+'donutlm-v1': donutlm_v1,
+
+### DeepSeek ###
+'deepseek': deepseek,
+        
         
-        ### Stability AI ###
-        'sdxl': sdxl,
-        'stable-diffusion-3': stable_diffusion_3,
         
-        ### ByteDance ###
-        'sdxl-lightning': sdxl_lightning_4step,
+#############
+### Image ###
+#############
         
-        ### Playground ###
-        'playground-v2.5': playground_v2_5_1024px_aesthetic,
+### Stability AI ###
+'sdxl': sdxl,
+'sd-3': sd_3,
+        
+        
+### Playground ###
+'playground-v2.5': playground_v2_5,
+
+
+### Flux AI ###
+'flux': flux,
+'flux-realism': flux_realism,
+'flux-anime': flux_anime,
+'flux-3d': flux_3d,
+'flux-disney': flux_disney,
+
 
+###  ###
+'dalle': dalle,
+'dalle-mini': dalle_mini,
+'emi': emi,
     }
 
 _all_models = list(ModelUtils.convert.keys())
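
A minimal usage sketch (not part of this diff, and it assumes the OpenAI-style client interface in g4f/client/client.py is unchanged by this PR): it looks up one of the renamed lowercase aliases from ModelUtils.convert and runs a completion; the chosen alias and prompt are illustrative only.

from g4f.client import Client
from g4f.models import ModelUtils, _all_models

# The renamed aliases exposed by this PR, e.g. 'gpt-3', 'gpt-3.5-turbo', 'gpt-4o', ...
print(_all_models[:5])

# Each alias maps to a Model dataclass carrying its IterListProvider fallback chain.
model = ModelUtils.convert['gpt-4o-mini']

client = Client()
response = client.chat.completions.create(
    model=model.name,
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)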