Airforce.py

from __future__ import annotations

import random
import json
import re
from urllib.parse import urlencode

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse


def split_long_message(message: str, max_length: int = 4000) -> list[str]:
    """Split a message into chunks of at most max_length characters."""
    return [message[i:i + max_length] for i in range(0, len(message), max_length)]
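
# Example: split_long_message("a" * 9000) returns three chunks of lengths
# 4000, 4000 and 1000, so each request below stays under the API's
# per-message character limit.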

class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://api.airforce"
    image_api_endpoint = "https://api.airforce/imagine2"
    text_api_endpoint = "https://api.airforce/chat/completions"
    working = True

    default_model = 'llama-3-70b-chat'
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    text_models = [
        'claude-3-haiku-20240307',
        'claude-3-sonnet-20240229',
        'claude-3-5-sonnet-20240620',
        'claude-3-opus-20240229',
        'chatgpt-4o-latest',
        'gpt-4',
        'gpt-4-turbo',
        'gpt-4o-mini-2024-07-18',
        'gpt-4o-mini',
        'gpt-3.5-turbo',
        'gpt-3.5-turbo-0125',
        'gpt-3.5-turbo-1106',
        default_model,
        'llama-3-70b-chat-turbo',
        'llama-3-8b-chat',
        'llama-3-8b-chat-turbo',
        'llama-3-70b-chat-lite',
        'llama-3-8b-chat-lite',
        'llama-2-13b-chat',
        'llama-3.1-405b-turbo',
        'llama-3.1-70b-turbo',
        'llama-3.1-8b-turbo',
        'LlamaGuard-2-8b',
        'Llama-Guard-7b',
        'Llama-3.2-90B-Vision-Instruct-Turbo',
        'Mixtral-8x7B-Instruct-v0.1',
        'Mixtral-8x22B-Instruct-v0.1',
        'Mistral-7B-Instruct-v0.1',
        'Mistral-7B-Instruct-v0.2',
        'Mistral-7B-Instruct-v0.3',
        'Qwen1.5-7B-Chat',
        'Qwen1.5-14B-Chat',
        'Qwen1.5-72B-Chat',
        'Qwen1.5-110B-Chat',
        'Qwen2-72B-Instruct',
        'gemma-2b-it',
        'gemma-2-9b-it',
        'gemma-2-27b-it',
        'gemini-1.5-flash',
        'gemini-1.5-pro',
        'deepseek-llm-67b-chat',
        'Nous-Hermes-2-Mixtral-8x7B-DPO',
        'Nous-Hermes-2-Yi-34B',
        'WizardLM-2-8x22B',
        'SOLAR-10.7B-Instruct-v1.0',
        'MythoMax-L2-13b',
        'cosmosrp',
    ]

    image_models = [
        'flux',
        'flux-realism',
        'flux-anime',
        'flux-3d',
        'flux-disney',
        'flux-pixel',
        'flux-4o',
        'any-dark',
        'dall-e-3',
    ]

    models = [
        *text_models,
        *image_models,
    ]

    model_aliases = {
        "claude-3-haiku": "claude-3-haiku-20240307",
        "claude-3-sonnet": "claude-3-sonnet-20240229",
        "gpt-4o": "chatgpt-4o-latest",
        "llama-3-70b": "llama-3-70b-chat",
        "llama-3-8b": "llama-3-8b-chat",
        "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
        "qwen-1.5-7b": "Qwen1.5-7B-Chat",
        "gemma-2b": "gemma-2b-it",
        "gemini-flash": "gemini-1.5-flash",
        "mythomax-l2-13b": "MythoMax-L2-13b",
        "solar-10.7b": "SOLAR-10.7B-Instruct-v1.0",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        if model in cls.models:
            return model
        # Resolve known aliases; otherwise fall back to the default model.
        return cls.model_aliases.get(model, cls.default_model)
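
    # Example resolution: get_model("claude-3-haiku") returns
    # "claude-3-haiku-20240307" via model_aliases, while an unrecognized
    # name falls back to default_model ('llama-3-70b-chat').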

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        seed: int = None,
        size: str = "1:1",
        stream: bool = False,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        if model in cls.image_models:
            async for result in cls._generate_image(model, messages, proxy, seed, size):
                yield result
        elif model in cls.text_models:
            async for result in cls._generate_text(model, messages, proxy, stream):
                yield result

    @classmethod
    async def _generate_image(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        seed: int = None,
        size: str = "1:1",
        **kwargs
    ) -> AsyncResult:
        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "origin": "https://llmplayground.net",
            "user-agent": "Mozilla/5.0"
        }
        if seed is None:
            seed = random.randint(0, 100000)
        # Use the latest message as the image prompt.
        prompt = messages[-1]['content']

        async with ClientSession(headers=headers) as session:
            params = {
                "model": model,
                "prompt": prompt,
                "size": size,
                "seed": seed
            }
            async with session.get(cls.image_api_endpoint, params=params, proxy=proxy) as response:
                response.raise_for_status()
                content_type = response.headers.get('Content-Type', '').lower()

                if 'application/json' in content_type:
                    # The API returned JSON (usually an error); pass it through as text.
                    async for chunk in response.content.iter_chunked(1024):
                        if chunk:
                            yield chunk.decode('utf-8')
                elif 'image' in content_type:
                    # Drain the body, then yield a properly encoded URL that
                    # repeats the same request parameters, so the client can
                    # fetch the image itself.
                    async for chunk in response.content.iter_chunked(1024):
                        pass
                    image_url = f"{cls.image_api_endpoint}?{urlencode(params)}"
                    yield ImageResponse(images=image_url, alt=f"Generated image for prompt: {prompt}")
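
    # Example: model="flux", prompt="a red fox", size="1:1", seed=42 yields an
    # ImageResponse whose URL is
    # https://api.airforce/imagine2?model=flux&prompt=a+red+fox&size=1%3A1&seed=42
    # (urlencode quotes the space in the prompt and the ":" in the size).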

    @classmethod
    async def _generate_text(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        stream: bool = False,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            # Placeholder token; the endpoint is called without a real API key here.
            "authorization": "Bearer missing api key",
            "content-type": "application/json",
            "user-agent": "Mozilla/5.0"
        }

        async with ClientSession(headers=headers) as session:
            formatted_prompt = cls._format_messages(messages)
            # Send the prompt in chunks that respect the per-message limit.
            prompt_parts = split_long_message(formatted_prompt)
            full_response = ""

            for part in prompt_parts:
                data = {
                    "messages": [{"role": "user", "content": part}],
                    "model": model,
                    "max_tokens": 4096,
                    "temperature": 1,
                    "top_p": 1,
                    "stream": stream
                }
                async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
                    response.raise_for_status()
                    part_response = ""
                    if stream:
                        # Accumulate server-sent events ("data: {...}" lines).
                        async for line in response.content:
                            if line:
                                line = line.decode('utf-8').strip()
                                if line.startswith("data: ") and line != "data: [DONE]":
                                    json_data = json.loads(line[6:])
                                    content = json_data['choices'][0]['delta'].get('content', '')
                                    part_response += content
                    else:
                        json_data = await response.json()
                        part_response = json_data['choices'][0]['message']['content']

                    # Strip the API's character-limit and rate-limit notices.
                    part_response = re.sub(
                        r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
                        '',
                        part_response
                    )
                    part_response = re.sub(
                        r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
                        '',
                        part_response
                    )

                    full_response += part_response
                    yield full_response
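
    # Example: the first re.sub above removes a banner such as
    # "One message exceeds the 4000chars per message limit. Join our discord https://discord.com/invite/..."
    # from part_response, leaving only the model's own output.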

    @classmethod
    def _format_messages(cls, messages: Messages) -> str:
        # Flatten the conversation into a single string (roles are dropped).
        return " ".join([msg['content'] for msg in messages])
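
# Minimal usage sketch (assumes this module is imported from within its parent
# package, since the relative imports above will not resolve standalone):
#
#     import asyncio
#
#     async def main():
#         async for chunk in Airforce.create_async_generator(
#             model="llama-3-70b-chat",
#             messages=[{"role": "user", "content": "Hello!"}],
#         ):
#             print(chunk)
#
#     asyncio.run(main())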