  1. from __future__ import annotations
  2. from aiohttp import ClientSession
  3. import random
  4. import string
  5. import json
  6. import re
  7. import aiohttp
  8. import json
  9. from pathlib import Path
  10. from ..typing import AsyncResult, Messages, ImagesType
  11. from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
  12. from ..image import ImageResponse, to_data_uri
  13. from ..cookies import get_cookies_dir
  14. from .helper import format_prompt
  15. class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
  16. label = "Blackbox AI"
  17. url = "https://www.blackbox.ai"
  18. api_endpoint = "https://www.blackbox.ai/api/chat"
  19. working = True
  20. supports_stream = True
  21. supports_system_message = True
  22. supports_message_history = True
  23. default_model = 'blackboxai'
  24. default_vision_model = default_model
  25. default_image_model = 'flux'
  26. image_models = ['ImageGeneration', 'repomap']
  27. vision_models = [default_model, 'gpt-4o', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
  28. userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']
  29. agentMode = {
  30. 'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
  31. 'meta-llama/Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
  32. 'mistralai/Mistral-7B-Instruct-v0.2': {'mode': True, 'id': "mistralai/Mistral-7B-Instruct-v0.2", 'name': "Mistral-(7B)-Instruct-v0.2"},
  33. 'deepseek-ai/deepseek-llm-67b-chat': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"},
  34. 'databricks/dbrx-instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
  35. 'meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro': {'mode': True, 'id': "meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro", 'name': "Meta-Llama-3.1-405B-Instruct-Turbo"}, #
  36. 'Qwen/QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
  37. 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO': {'mode': True, 'id': "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 'name': "Nous-Hermes-2-Mixtral-8x7B-DPO"}
  38. }
  39. trendingAgentMode = {
  40. "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
  41. "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
  42. 'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
  43. 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405"},
  44. #
  45. 'Python Agent': {'mode': True, 'id': "Python Agent"},
  46. 'Java Agent': {'mode': True, 'id': "Java Agent"},
  47. 'JavaScript Agent': {'mode': True, 'id': "JavaScript Agent"},
  48. 'HTML Agent': {'mode': True, 'id': "HTML Agent"},
  49. 'Google Cloud Agent': {'mode': True, 'id': "Google Cloud Agent"},
  50. 'Android Developer': {'mode': True, 'id': "Android Developer"},
  51. 'Swift Developer': {'mode': True, 'id': "Swift Developer"},
  52. 'Next.js Agent': {'mode': True, 'id': "Next.js Agent"},
  53. 'MongoDB Agent': {'mode': True, 'id': "MongoDB Agent"},
  54. 'PyTorch Agent': {'mode': True, 'id': "PyTorch Agent"},
  55. 'React Agent': {'mode': True, 'id': "React Agent"},
  56. 'Xcode Agent': {'mode': True, 'id': "Xcode Agent"},
  57. 'AngularJS Agent': {'mode': True, 'id': "AngularJS Agent"},
  58. #
  59. 'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
  60. #
  61. 'repomap': {'mode': True, 'id': "repomap"},
  62. #
  63. 'Heroku Agent': {'mode': True, 'id': "Heroku Agent"},
  64. 'Godot Agent': {'mode': True, 'id': "Godot Agent"},
  65. 'Go Agent': {'mode': True, 'id': "Go Agent"},
  66. 'Gitlab Agent': {'mode': True, 'id': "Gitlab Agent"},
  67. 'Git Agent': {'mode': True, 'id': "Git Agent"},
  68. 'Flask Agent': {'mode': True, 'id': "Flask Agent"},
  69. 'Firebase Agent': {'mode': True, 'id': "Firebase Agent"},
  70. 'FastAPI Agent': {'mode': True, 'id': "FastAPI Agent"},
  71. 'Erlang Agent': {'mode': True, 'id': "Erlang Agent"},
  72. 'Electron Agent': {'mode': True, 'id': "Electron Agent"},
  73. 'Docker Agent': {'mode': True, 'id': "Docker Agent"},
  74. 'DigitalOcean Agent': {'mode': True, 'id': "DigitalOcean Agent"},
  75. 'Bitbucket Agent': {'mode': True, 'id': "Bitbucket Agent"},
  76. 'Azure Agent': {'mode': True, 'id': "Azure Agent"},
  77. 'Flutter Agent': {'mode': True, 'id': "Flutter Agent"},
  78. 'Youtube Agent': {'mode': True, 'id': "Youtube Agent"},
  79. 'builder Agent': {'mode': True, 'id': "builder Agent"},
  80. }
  81. additional_prefixes = {
  82. 'gpt-4o': '@GPT-4o',
  83. 'gemini-pro': '@Gemini-PRO',
  84. 'claude-sonnet-3.5': '@Claude-Sonnet-3.5'
  85. }
  86. model_prefixes = {
  87. **{
  88. mode: f"@{value['id']}" for mode, value in trendingAgentMode.items()
  89. if mode not in ["gemini-1.5-flash", "llama-3.1-8b", "llama-3.1-70b", "llama-3.1-405b", "repomap"]
  90. },
  91. **additional_prefixes
  92. }
  93. models = list(dict.fromkeys([default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))
  94. model_aliases = {
  95. ### chat ###
  96. "gpt-4": "gpt-4o",
  97. "gemini-flash": "gemini-1.5-flash",
  98. "claude-3.5-sonnet": "claude-sonnet-3.5",
  99. "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
  100. "mixtral-7b": "mistralai/Mistral-7B-Instruct-v0.2",
  101. "deepseek-chat": "deepseek-ai/deepseek-llm-67b-chat",
  102. "dbrx-instruct": "databricks/dbrx-instruct",
  103. "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro",
  104. "qwq-32b": "Qwen/QwQ-32B-Preview",
  105. "hermes-2-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
  106. ### image ###
  107. "flux": "ImageGeneration",
  108. }
  109. @classmethod
  110. def _get_cache_file(cls) -> Path:
  111. dir = Path(get_cookies_dir())
  112. dir.mkdir(exist_ok=True)
  113. return dir / 'blackbox.json'
  114. @classmethod
  115. def _load_cached_value(cls) -> str | None:
  116. cache_file = cls._get_cache_file()
  117. if cache_file.exists():
  118. try:
  119. with open(cache_file, 'r') as f:
  120. data = json.load(f)
  121. return data.get('validated_value')
  122. except Exception as e:
  123. print(f"Error reading cache file: {e}")
  124. return None
  125. @classmethod
  126. def _save_cached_value(cls, value: str):
  127. cache_file = cls._get_cache_file()
  128. try:
  129. with open(cache_file, 'w') as f:
  130. json.dump({'validated_value': value}, f)
  131. except Exception as e:
  132. print(f"Error writing to cache file: {e}")
  133. @classmethod
  134. async def fetch_validated(cls):
  135. cached_value = cls._load_cached_value()
  136. if cached_value:
  137. return cached_value
  138. async with aiohttp.ClientSession() as session:
  139. try:
  140. async with session.get(cls.url) as response:
  141. if response.status != 200:
  142. print("Failed to load the page.")
  143. return cached_value
  144. page_content = await response.text()
  145. js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content)
  146. uuid_format = r'["\']([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})["\']'
  147. def is_valid_context(text_around):
  148. return any(char + '=' in text_around for char in 'abcdefghijklmnopqrstuvwxyz')
  149. for js_file in js_files:
  150. js_url = f"{cls.url}/_next/{js_file}"
  151. async with session.get(js_url) as js_response:
  152. if js_response.status == 200:
  153. js_content = await js_response.text()
  154. for match in re.finditer(uuid_format, js_content):
  155. start = max(0, match.start() - 10)
  156. end = min(len(js_content), match.end() + 10)
  157. context = js_content[start:end]
  158. if is_valid_context(context):
  159. validated_value = match.group(1)
  160. cls._save_cached_value(validated_value)
  161. return validated_value
  162. except Exception as e:
  163. print(f"Error fetching validated value: {e}")
  164. return cached_value
  165. @staticmethod
  166. def generate_id(length=7):
  167. characters = string.ascii_letters + string.digits
  168. return ''.join(random.choice(characters) for _ in range(length))
  169. @classmethod
  170. def add_prefix_to_messages(cls, messages: Messages, model: str) -> Messages:
  171. prefix = cls.model_prefixes.get(model, "")
  172. if not prefix:
  173. return messages
  174. new_messages = []
  175. for message in messages:
  176. new_message = message.copy()
  177. if message['role'] == 'user':
  178. new_message['content'] = (prefix + " " + message['content']).strip()
  179. new_messages.append(new_message)
  180. return new_messages
  181. @classmethod
  182. async def create_async_generator(
  183. cls,
  184. model: str,
  185. messages: Messages,
  186. prompt: str = None,
  187. proxy: str = None,
  188. web_search: bool = False,
  189. images: ImagesType = None,
  190. top_p: float = None,
  191. temperature: float = None,
  192. max_tokens: int = None,
  193. **kwargs
  194. ) -> AsyncResult:
  195. message_id = cls.generate_id()
  196. messages = cls.add_prefix_to_messages(messages, model)
  197. validated_value = await cls.fetch_validated()
  198. formatted_message = format_prompt(messages)
  199. model = cls.get_model(model)
  200. messages = [{"id": message_id, "content": formatted_message, "role": "user"}]
  201. if images is not None:
  202. messages[-1]['data'] = {
  203. "imagesData": [
  204. {
  205. "filePath": f"MultipleFiles/{image_name}",
  206. "contents": to_data_uri(image)
  207. }
  208. for image, image_name in images
  209. ],
  210. "fileText": "",
  211. "title": ""
  212. }
  213. headers = {
  214. 'accept': '*/*',
  215. 'accept-language': 'en-US,en;q=0.9',
  216. 'content-type': 'application/json',
  217. 'origin': cls.url,
  218. 'referer': f'{cls.url}/',
  219. 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
  220. }
  221. data = {
  222. "agentMode": cls.agentMode.get(model, {}) if model in cls.agentMode else {},
  223. "clickedAnswer2": False,
  224. "clickedAnswer3": False,
  225. "clickedForceWebSearch": False,
  226. "codeModelMode": True,
  227. "deepSearchMode": False,
  228. "githubToken": None,
  229. "id": message_id,
  230. "imageGenerationMode": False,
  231. "isChromeExt": False,
  232. "isMicMode": False,
  233. "maxTokens": max_tokens,
  234. "messages": messages,
  235. "mobileClient": False,
  236. "playgroundTemperature": temperature,
  237. "playgroundTopP": top_p,
  238. "previewToken": None,
  239. "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {},
  240. "userId": None,
  241. "userSelectedModel": model if model in cls.userSelectedModel else None,
  242. "userSystemPrompt": None,
  243. "validated": validated_value,
  244. "visitFromDelta": False,
  245. "webSearchModePrompt": False,
  246. "webSearchMode": web_search
  247. }
  248. async with ClientSession(headers=headers) as session:
  249. async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
  250. response.raise_for_status()
  251. response_text = await response.text()
  252. if model in cls.image_models:
  253. image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text)
  254. if image_matches:
  255. image_url = image_matches[0]
  256. yield ImageResponse(image_url, prompt)
  257. return
  258. response_text = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', response_text, flags=re.DOTALL)
  259. response_text = re.sub(r'and for API requests replace https://www.blackbox.ai with https://api.blackbox.ai', '', response_text, flags=re.DOTALL)
  260. json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL)
  261. if json_match:
  262. search_results = json.loads(json_match.group(1))
  263. answer = response_text.split('$~~~$')[-1].strip()
  264. formatted_response = f"{answer}\n\n**Source:**"
  265. for i, result in enumerate(search_results, 1):
  266. formatted_response += f"\n{i}. {result['title']}: {result['link']}"
  267. yield formatted_response
  268. else:
  269. yield response_text.strip()