create_provider.py
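# Generate a new g4f provider module from a pasted cURL command by asking a
# g4f chat model to write the code, using the reference provider below as an example.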

import sys, re
from pathlib import Path
from os import path

sys.path.append(str(Path(__file__).parent.parent.parent))

import g4f

g4f.debug.logging = True

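# Return the contents of the first fenced code block (```python, ```py or bare ```)
# found in the model's reply, or None if there is none.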
def read_code(text):
    if match := re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text):
        return match.group("code")

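# Read a multi-line cURL command from stdin until EOF (Ctrl-D, or Ctrl-Z on Windows).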
def input_command():
    print("Enter/Paste the cURL command. Ctrl-D or Ctrl-Z (Windows) to save it.")
    contents = []
    while True:
        try:
            line = input()
        except EOFError:
            break
        contents.append(line)
    return "\n".join(contents)

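# The provider class name doubles as the module file name under g4f/Provider/.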
name = input("Name: ")
provider_path = f"g4f/Provider/{name}.py"

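# Hand-written reference provider passed to the model as an example of the expected structure.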
example = """
from __future__ import annotations

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt


class ChatGpt(AsyncGeneratorProvider):
    url = "https://chat-gpt.com"
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "authority": "chat-gpt.com",
            "accept": "application/json",
            "origin": cls.url,
            "referer": f"{cls.url}/chat",
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "prompt": prompt,
                "purpose": "",
            }
            async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content:
                    if chunk:
                        yield chunk.decode()
"""

if not path.isfile(provider_path):
    command = input_command()

    prompt = f"""
Create a provider from a cURL command. The command is:
```bash
{command}
```
An example of a provider:
```py
{example}
```
The name for the provider class:
{name}
Replace "hello" with `format_prompt(messages)`.
And replace "gpt-3.5-turbo" with `model`.
"""

    print("Creating code...")
    response = []
    for chunk in g4f.ChatCompletion.create(
        model=g4f.models.default,
        messages=[{"role": "user", "content": prompt}],
        timeout=300,
        stream=True,
    ):
        print(chunk, end="", flush=True)
        response.append(chunk)
    print()
    response = "".join(response)

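    # Extract the fenced code block from the answer, save it as the new provider module,
    # and register the class in g4f/Provider/__init__.py so it can be imported.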
    if code := read_code(response):
        with open(provider_path, "w") as file:
            file.write(code)
        print("Saved at:", provider_path)
        with open("g4f/Provider/__init__.py", "a") as file:
            file.write(f"\nfrom .{name} import {name}")
else:
    # The provider file already exists, so load its current source instead of generating it.
    with open(provider_path, "r") as file:
        code = file.read()
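# Example session (hypothetical values; the script location is an assumption inferred
# from the sys.path.append(...parent.parent.parent) call above, which places it three
# directories below the repository root, e.g. etc/tool/create_provider.py):
#
#   $ python etc/tool/create_provider.py
#   Name: ChatGptClone
#   Enter/Paste the cURL command. Ctrl-D or Ctrl-Z (Windows) to save it.
#   curl 'https://chat-gpt.com/api/chat' -H 'accept: application/json' --data-raw '{"prompt": "hello", "purpose": ""}'
#   ^D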