Airforce.py

import json
import random
import re
import requests
from aiohttp import ClientSession
from typing import List
from requests.packages.urllib3.exceptions import InsecureRequestWarning

from ..typing import AsyncResult, Messages
from ..image import ImageResponse
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .. import debug

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def split_message(message: str, max_length: int = 1000) -> List[str]:
    """Split a message into chunks of at most max_length characters,
    breaking on the last space before the limit where possible."""
    chunks = []
    while len(message) > max_length:
        split_point = message.rfind(' ', 0, max_length)
        if split_point == -1:
            split_point = max_length
        chunks.append(message[:split_point])
        message = message[split_point:].strip()
    if message:
        chunks.append(message)
    return chunks
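
# Illustrative example (not part of the original source): splitting breaks on
# the last space before the limit, so words stay intact.
#
#     >>> split_message("hello world foo", max_length=11)
#     ['hello', 'world foo']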
class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://llmplayground.net"
    api_endpoint_completions = "https://api.airforce/chat/completions"
    api_endpoint_imagine2 = "https://api.airforce/imagine2"

    working = True
    needs_auth = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "gpt-4o-mini"
    default_image_model = "flux"

    models: List[str] = []        # populated lazily by get_models()
    image_models: List[str] = []  # populated lazily by get_models()

    hidden_models = {"Flux-1.1-Pro"}
    additional_models_imagine = ["flux-1.1-pro", "midjourney", "dall-e-3"]

    model_aliases = {
        # Alias mappings for models
        "gpt-4": "gpt-4o",
        "openchat-3.5": "openchat-3.5-0106",
        "deepseek-coder": "deepseek-coder-6.7b-instruct",
        "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
        "hermes-2-pro": "hermes-2-pro-mistral-7b",
        "openhermes-2.5": "openhermes-2.5-mistral-7b",
        "lfm-40b": "lfm-40b-moe",
        "german-7b": "discolm-german-7b-v1",
        "llama-2-7b": "llama-2-7b-chat-int8",
        "llama-3.1-70b": "llama-3.1-70b-turbo",
        "neural-7b": "neural-chat-7b-v3-1",
        "zephyr-7b": "zephyr-7b-beta",
        "evil": "any-uncensored",
        "sdxl": "stable-diffusion-xl-base",
        "flux-pro": "flux-1.1-pro",
        "llama-3.1-8b": "llama-3.1-8b-chat",
    }
    @classmethod
    def get_models(cls):
        if not cls.image_models:
            try:
                url = "https://api.airforce/imagine2/models"
                response = requests.get(url, verify=False)
                response.raise_for_status()
                cls.image_models = response.json()
                cls.image_models.extend(cls.additional_models_imagine)
            except Exception as e:
                debug.log(f"Error fetching image models: {e}")

        if not cls.models:
            try:
                url = "https://api.airforce/models"
                response = requests.get(url, verify=False)
                response.raise_for_status()
                data = response.json()
                cls.models = [model['id'] for model in data['data']]
                cls.models.extend(cls.image_models)
                cls.models = [model for model in cls.models if model not in cls.hidden_models]
            except Exception as e:
                debug.log(f"Error fetching text models: {e}")

        return cls.models
    @classmethod
    async def check_api_key(cls, api_key: str) -> bool:
        """
        Return True when no key is supplied (unrestricted access);
        otherwise verify that the key is a sponsor or premium key.
        """
        if not api_key or api_key == "null":
            return True  # No restrictions if no key.

        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
            "Accept": "*/*",
        }
        try:
            async with ClientSession(headers=headers) as session:
                async with session.get(f"https://api.airforce/check?key={api_key}") as response:
                    if response.status == 200:
                        data = await response.json()
                        return data.get('info') in ['Sponsor key', 'Premium key']
                    return False
        except Exception as e:
            debug.log(f"Error checking API key: {e}")
            return False
    @classmethod
    def _filter_content(cls, part_response: str) -> str:
        """
        Filters out unwanted content from the partial response.
        """
        part_response = re.sub(
            r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+",
            '',
            part_response
        )
        part_response = re.sub(
            r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+",
            '',
            part_response
        )
        return part_response

    @classmethod
    def _filter_response(cls, response: str) -> str:
        """
        Filters the full response to remove system errors and other unwanted text.
        """
        filtered_response = re.sub(r"\[ERROR\] '\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'", '', response)  # any-uncensored
        filtered_response = re.sub(r'<\|im_end\|>', '', filtered_response)  # remove <|im_end|> token
        filtered_response = re.sub(r'</s>', '', filtered_response)  # neural-chat-7b-v3-1
        filtered_response = re.sub(r'^(Assistant: |AI: |ANSWER: |Output: )', '', filtered_response)  # phi-2
        filtered_response = cls._filter_content(filtered_response)
        return filtered_response
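
    # Illustrative example (not part of the original source): role prefixes
    # and end-of-sequence tokens are stripped from the final text.
    #
    #     >>> Airforce._filter_response("Assistant: Hi there</s>")
    #     'Hi there'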
    @classmethod
    async def generate_image(
        cls,
        model: str,
        prompt: str,
        api_key: str,
        size: str,
        seed: int,
        proxy: str = None
    ) -> AsyncResult:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:133.0) Gecko/20100101 Firefox/133.0",
            "Accept": "image/avif,image/webp,image/png,image/svg+xml,image/*;q=0.8,*/*;q=0.5",
            "Accept-Language": "en-US,en;q=0.5",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        }
        params = {"model": model, "prompt": prompt, "size": size, "seed": seed}

        async with ClientSession(headers=headers) as session:
            async with session.get(cls.api_endpoint_imagine2, params=params, proxy=proxy) as response:
                if response.status == 200:
                    image_url = str(response.url)
                    yield ImageResponse(images=image_url, alt=prompt)
                else:
                    error_text = await response.text()
                    raise RuntimeError(f"Image generation failed: {response.status} - {error_text}")
    @classmethod
    async def generate_text(
        cls,
        model: str,
        messages: Messages,
        max_tokens: int,
        temperature: float,
        top_p: float,
        stream: bool,
        api_key: str,
        proxy: str = None
    ) -> AsyncResult:
        """
        Generates text, buffers the response, filters it, and returns the final result.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:133.0) Gecko/20100101 Firefox/133.0",
            "Accept": "application/json, text/event-stream",
            "Accept-Language": "en-US,en;q=0.5",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        }

        full_message = "\n".join([msg['content'] for msg in messages])
        message_chunks = split_message(full_message, max_length=1000)

        data = {
            "messages": [{"role": "user", "content": chunk} for chunk in message_chunks],
            "model": model,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "stream": stream,
        }

        async with ClientSession(headers=headers) as session:
            async with session.post(cls.api_endpoint_completions, json=data, proxy=proxy) as response:
                response.raise_for_status()

                if stream:
                    buffer = []  # Buffer to collect partial responses
                    async for line in response.content:
                        line = line.decode('utf-8').strip()
                        if line.startswith('data: '):
                            try:
                                json_str = line[6:]  # Remove 'data: ' prefix
                                chunk = json.loads(json_str)
                                if 'choices' in chunk and chunk['choices']:
                                    delta = chunk['choices'][0].get('delta', {})
                                    if 'content' in delta:
                                        buffer.append(delta['content'])
                            except json.JSONDecodeError:
                                continue
                    # Combine the buffered response and filter it
                    filtered_response = cls._filter_response(''.join(buffer))
                    yield filtered_response
                else:
                    # Non-streaming response
                    result = await response.json()
                    if 'choices' in result and result['choices']:
                        message = result['choices'][0].get('message', {})
                        content = message.get('content', '')
                        filtered_response = cls._filter_response(content)
                        yield filtered_response
    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        prompt: str = None,
        proxy: str = None,
        max_tokens: int = 4096,
        temperature: float = 1,
        top_p: float = 1,
        stream: bool = True,
        api_key: str = None,
        size: str = "1:1",
        seed: int = None,
        **kwargs
    ) -> AsyncResult:
        # The key check result is deliberately ignored: requests proceed
        # either way, and the API itself enforces any key restrictions.
        await cls.check_api_key(api_key)

        model = cls.get_model(model)

        if model in cls.image_models:
            if prompt is None:
                prompt = messages[-1]['content']
            if seed is None:
                seed = random.randint(0, 10000)
            async for result in cls.generate_image(model, prompt, api_key, size, seed, proxy):
                yield result
        else:
            async for result in cls.generate_text(model, messages, max_tokens, temperature, top_p, stream, api_key, proxy):
                yield result
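
# Minimal usage sketch (illustrative, not part of the original source): it
# assumes this module is imported as part of its provider package, so that
# the relative imports resolve, and that the api.airforce endpoints are
# reachable. Run from a synchronous context:
#
#     import asyncio
#
#     async def demo():
#         async for chunk in Airforce.create_async_generator(
#             model="gpt-4o-mini",
#             messages=[{"role": "user", "content": "Hello!"}],
#         ):
#             print(chunk)
#
#     asyncio.run(demo())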