# Blackbox.py
  1. from __future__ import annotations
  2. from aiohttp import ClientSession
  3. import re
  4. import json
  5. import random
  6. import string
  7. from pathlib import Path
  8. from ..typing import AsyncResult, Messages, ImagesType
  9. from ..requests.raise_for_status import raise_for_status
  10. from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
  11. from ..image import ImageResponse, to_data_uri
  12. from ..cookies import get_cookies_dir
  13. from .helper import format_prompt
  14. from ..providers.response import FinishReason, JsonConversation
  15. class Conversation(JsonConversation):
  16. validated_value: str = None
  17. chat_id: str = None
  18. message_history: Messages = []
  19. def __init__(self, model: str):
  20. self.model = model
  21. class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
  22. label = "Blackbox AI"
  23. url = "https://www.blackbox.ai"
  24. api_endpoint = "https://www.blackbox.ai/api/chat"
  25. working = True
  26. supports_stream = True
  27. supports_system_message = True
  28. supports_message_history = True
  29. default_model = "blackboxai"
  30. default_vision_model = default_model
  31. default_image_model = 'ImageGeneration'
  32. image_models = [default_image_model, "ImageGeneration2"]
  33. vision_models = [default_vision_model, 'gpt-4o', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b', 'deepseek-r1']
  34. userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']
  35. agentMode = {
  36. 'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
  37. #
  38. 'Meta-Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
  39. 'Mistral-(7B)-Instruct-v0.': {'mode': True, 'id': "mistralai/Mistral-7B-Instruct-v0.2", 'name': "Mistral-(7B)-Instruct-v0.2"},
  40. 'DeepSeek-LLM-Chat-(67B)': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"},
  41. 'DBRX-Instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
  42. 'Qwen-QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
  43. 'Nous-Hermes-2-Mixtral-8x7B-DPO': {'mode': True, 'id': "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 'name': "Nous-Hermes-2-Mixtral-8x7B-DPO"},
  44. 'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"}
  45. }
  46. trendingAgentMode = {
  47. "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
  48. "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
  49. 'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
  50. 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405"},
  51. #
  52. 'Python Agent': {'mode': True, 'id': "Python Agent"},
  53. 'Java Agent': {'mode': True, 'id': "Java Agent"},
  54. 'JavaScript Agent': {'mode': True, 'id': "JavaScript Agent"},
  55. 'HTML Agent': {'mode': True, 'id': "HTML Agent"},
  56. 'Google Cloud Agent': {'mode': True, 'id': "Google Cloud Agent"},
  57. 'Android Developer': {'mode': True, 'id': "Android Developer"},
  58. 'Swift Developer': {'mode': True, 'id': "Swift Developer"},
  59. 'Next.js Agent': {'mode': True, 'id': "Next.js Agent"},
  60. 'MongoDB Agent': {'mode': True, 'id': "MongoDB Agent"},
  61. 'PyTorch Agent': {'mode': True, 'id': "PyTorch Agent"},
  62. 'React Agent': {'mode': True, 'id': "React Agent"},
  63. 'Xcode Agent': {'mode': True, 'id': "Xcode Agent"},
  64. 'AngularJS Agent': {'mode': True, 'id': "AngularJS Agent"},
  65. #
  66. 'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
  67. #
  68. 'repomap': {'mode': True, 'id': "repomap"},
  69. #
  70. 'Heroku Agent': {'mode': True, 'id': "Heroku Agent"},
  71. 'Godot Agent': {'mode': True, 'id': "Godot Agent"},
  72. 'Go Agent': {'mode': True, 'id': "Go Agent"},
  73. 'Gitlab Agent': {'mode': True, 'id': "Gitlab Agent"},
  74. 'Git Agent': {'mode': True, 'id': "Git Agent"},
  75. 'Flask Agent': {'mode': True, 'id': "Flask Agent"},
  76. 'Firebase Agent': {'mode': True, 'id': "Firebase Agent"},
  77. 'FastAPI Agent': {'mode': True, 'id': "FastAPI Agent"},
  78. 'Erlang Agent': {'mode': True, 'id': "Erlang Agent"},
  79. 'Electron Agent': {'mode': True, 'id': "Electron Agent"},
  80. 'Docker Agent': {'mode': True, 'id': "Docker Agent"},
  81. 'DigitalOcean Agent': {'mode': True, 'id': "DigitalOcean Agent"},
  82. 'Bitbucket Agent': {'mode': True, 'id': "Bitbucket Agent"},
  83. 'Azure Agent': {'mode': True, 'id': "Azure Agent"},
  84. 'Flutter Agent': {'mode': True, 'id': "Flutter Agent"},
  85. 'Youtube Agent': {'mode': True, 'id': "Youtube Agent"},
  86. 'builder Agent': {'mode': True, 'id': "builder Agent"},
  87. }
  88. models = list(dict.fromkeys([default_model, *userSelectedModel, *image_models, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))
  89. model_aliases = {
  90. ### chat ###
  91. "gpt-4": "gpt-4o",
  92. "gemini-1.5-flash": "gemini-1.5-flash",
  93. "gemini-1.5-pro": "gemini-pro",
  94. "claude-3.5-sonnet": "claude-sonnet-3.5",
  95. "llama-3.3-70b": "Meta-Llama-3.3-70B-Instruct-Turbo",
  96. "mixtral-7b": "Mistral-(7B)-Instruct-v0.",
  97. "deepseek-chat": "DeepSeek-LLM-Chat-(67B)",
  98. "dbrx-instruct": "DBRX-Instruct",
  99. "qwq-32b": "Qwen-QwQ-32B-Preview",
  100. "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
  101. "deepseek-r1": "DeepSeek-R1",
  102. ### image ###
  103. "flux": "ImageGeneration",
  104. "flux": "ImageGeneration2",
  105. }
  106. @classmethod
  107. async def fetch_validated(
  108. cls,
  109. url: str = "https://www.blackbox.ai",
  110. force_refresh: bool = False
  111. ) -> Optional[str]:
  112. """
  113. Asynchronously retrieves the validated_value from the specified URL.
  114. """
  115. cache_file = Path(get_cookies_dir()) / 'blackbox.json'
  116. if not force_refresh and cache_file.exists():
  117. try:
  118. with open(cache_file, 'r') as f:
  119. data = json.load(f)
  120. if data.get('validated_value'):
  121. return data['validated_value']
  122. except Exception as e:
  123. print(f"Error reading cache: {e}")
  124. js_file_pattern = r'static/chunks/\d{4}-[a-fA-F0-9]+\.js'
  125. uuid_pattern = r'["\']([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})["\']'
  126. def is_valid_context(text: str) -> bool:
  127. """Checks if the context is valid."""
  128. return any(char + '=' in text for char in 'abcdefghijklmnopqrstuvwxyz')
  129. async with ClientSession() as session:
  130. try:
  131. async with session.get(url) as response:
  132. if response.status != 200:
  133. print("Failed to load the page.")
  134. return None
  135. page_content = await response.text()
  136. js_files = re.findall(js_file_pattern, page_content)
  137. for js_file in js_files:
  138. js_url = f"{url}/_next/{js_file}"
  139. async with session.get(js_url) as js_response:
  140. if js_response.status == 200:
  141. js_content = await js_response.text()
  142. for match in re.finditer(uuid_pattern, js_content):
  143. start = max(0, match.start() - 10)
  144. end = min(len(js_content), match.end() + 10)
  145. context = js_content[start:end]
  146. if is_valid_context(context):
  147. validated_value = match.group(1)
  148. # Save to cache
  149. cache_file.parent.mkdir(exist_ok=True)
  150. try:
  151. with open(cache_file, 'w') as f:
  152. json.dump({'validated_value': validated_value}, f)
  153. except Exception as e:
  154. print(f"Error writing cache: {e}")
  155. return validated_value
  156. except Exception as e:
  157. print(f"Error retrieving validated_value: {e}")
  158. return None
  159. @classmethod
  160. def generate_chat_id(cls) -> str:
  161. """Generate a random chat ID"""
  162. chars = string.ascii_letters + string.digits
  163. return ''.join(random.choice(chars) for _ in range(7))
  164. @classmethod
  165. async def create_async_generator(
  166. cls,
  167. model: str,
  168. messages: Messages,
  169. prompt: str = None,
  170. proxy: str = None,
  171. web_search: bool = False,
  172. images: ImagesType = None,
  173. top_p: float = None,
  174. temperature: float = None,
  175. max_tokens: int = None,
  176. conversation: Conversation = None,
  177. return_conversation: bool = False,
  178. **kwargs
  179. ) -> AsyncResult:
  180. model = cls.get_model(model)
  181. headers = {
  182. 'accept': '*/*',
  183. 'accept-language': 'en-US,en;q=0.9',
  184. 'content-type': 'application/json',
  185. 'origin': 'https://www.blackbox.ai',
  186. 'referer': 'https://www.blackbox.ai/',
  187. 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
  188. }
  189. async with ClientSession(headers=headers) as session:
  190. if model == "ImageGeneration2":
  191. prompt = messages[-1]["content"]
  192. data = {
  193. "query": prompt,
  194. "agentMode": True
  195. }
  196. headers['content-type'] = 'text/plain;charset=UTF-8'
  197. async with session.post(
  198. "https://www.blackbox.ai/api/image-generator",
  199. json=data,
  200. proxy=proxy,
  201. headers=headers
  202. ) as response:
  203. await raise_for_status(response)
  204. response_json = await response.json()
  205. if "markdown" in response_json:
  206. image_url_match = re.search(r'!\[.*?\]\((.*?)\)', response_json["markdown"])
  207. if image_url_match:
  208. image_url = image_url_match.group(1)
  209. yield ImageResponse(images=[image_url], alt=prompt)
  210. return
  211. if conversation is None:
  212. conversation = Conversation(model)
  213. conversation.validated_value = await cls.fetch_validated()
  214. conversation.chat_id = cls.generate_chat_id()
  215. conversation.message_history = []
  216. current_messages = [{"id": conversation.chat_id, "content": format_prompt(messages), "role": "user"}]
  217. conversation.message_history.extend(messages)
  218. if images is not None:
  219. current_messages[-1]['data'] = {
  220. "imagesData": [
  221. {
  222. "filePath": f"/{image_name}",
  223. "contents": to_data_uri(image)
  224. }
  225. for image, image_name in images
  226. ],
  227. "fileText": "",
  228. "title": ""
  229. }
  230. data = {
  231. "messages": current_messages,
  232. "agentMode": cls.agentMode.get(model, {}) if model in cls.agentMode else {},
  233. "id": conversation.chat_id,
  234. "previewToken": None,
  235. "userId": None,
  236. "codeModelMode": True,
  237. "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {},
  238. "isMicMode": False,
  239. "userSystemPrompt": None,
  240. "maxTokens": max_tokens,
  241. "playgroundTopP": top_p,
  242. "playgroundTemperature": temperature,
  243. "isChromeExt": False,
  244. "githubToken": "",
  245. "clickedAnswer2": False,
  246. "clickedAnswer3": False,
  247. "clickedForceWebSearch": False,
  248. "visitFromDelta": False,
  249. "mobileClient": False,
  250. "userSelectedModel": model if model in cls.userSelectedModel else None,
  251. "validated": conversation.validated_value,
  252. "imageGenerationMode": False,
  253. "webSearchModePrompt": False,
  254. "deepSearchMode": False,
  255. "domains": None,
  256. "vscodeClient": False,
  257. "codeInterpreterMode": False,
  258. "webSearchMode": web_search
  259. }
  260. async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
  261. await raise_for_status(response)
  262. response_text = await response.text()
  263. parts = response_text.split('$~~~$')
  264. text_to_yield = parts[2] if len(parts) >= 3 else response_text
  265. if not text_to_yield or text_to_yield.isspace():
  266. return
  267. full_response = ""
  268. if model in cls.image_models:
  269. image_url_match = re.search(r'!\[.*?\]\((.*?)\)', text_to_yield)
  270. if image_url_match:
  271. image_url = image_url_match.group(1)
  272. prompt = messages[-1]["content"]
  273. yield ImageResponse(images=[image_url], alt=prompt)
  274. else:
  275. if "Generated by BLACKBOX.AI" in text_to_yield:
  276. conversation.validated_value = await cls.fetch_validated(force_refresh=True)
  277. if conversation.validated_value:
  278. data["validated"] = conversation.validated_value
  279. async with session.post(cls.api_endpoint, json=data, proxy=proxy) as new_response:
  280. await raise_for_status(new_response)
  281. new_response_text = await new_response.text()
  282. new_parts = new_response_text.split('$~~~$')
  283. new_text = new_parts[2] if len(new_parts) >= 3 else new_response_text
  284. if new_text and not new_text.isspace():
  285. yield new_text
  286. full_response = new_text
  287. else:
  288. if text_to_yield and not text_to_yield.isspace():
  289. yield text_to_yield
  290. full_response = text_to_yield
  291. else:
  292. if text_to_yield and not text_to_yield.isspace():
  293. yield text_to_yield
  294. full_response = text_to_yield
  295. if full_response:
  296. if max_tokens and len(full_response) >= max_tokens:
  297. reason = "length"
  298. else:
  299. reason = "stop"
  300. if return_conversation:
  301. conversation.message_history.append({"role": "assistant", "content": full_response})
  302. yield conversation
  303. yield FinishReason(reason)