  1. from __future__ import annotations
  2. import uuid
  3. import json
  4. from aiohttp import ClientSession, BaseConnector
  5. from ..typing import AsyncResult, Messages
  6. from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
  7. from .helper import get_connector
  8. from ..requests import raise_for_status
  9. from ..errors import RateLimitError
  10. models = {
  11. "claude-3-5-sonnet-20241022": {
  12. "id": "claude-3-5-sonnet-20241022",
  13. "name": "claude-3-5-sonnet-20241022",
  14. "model": "claude-3-5-sonnet-20241022",
  15. "provider": "Anthropic",
  16. "maxLength": 0,
  17. "tokenLimit": 0,
  18. "context": 0,
  19. "success_rate": 100,
  20. "tps": 25.366666666666667,
  21. },
  22. "claude-3-5-sonnet-20241022-t": {
  23. "id": "claude-3-5-sonnet-20241022-t",
  24. "name": "claude-3-5-sonnet-20241022-t",
  25. "model": "claude-3-5-sonnet-20241022-t",
  26. "provider": "Anthropic",
  27. "maxLength": 0,
  28. "tokenLimit": 0,
  29. "context": 0,
  30. "success_rate": 100,
  31. "tps": 39.820754716981135,
  32. },
  33. "claude-3-7-sonnet-20250219": {
  34. "id": "claude-3-7-sonnet-20250219",
  35. "name": "claude-3-7-sonnet-20250219",
  36. "model": "claude-3-7-sonnet-20250219",
  37. "provider": "Anthropic",
  38. "maxLength": 0,
  39. "tokenLimit": 0,
  40. "context": 0,
  41. "success_rate": 100,
  42. "tps": 47.02970297029703,
  43. },
  44. "claude-3-7-sonnet-20250219-t": {
  45. "id": "claude-3-7-sonnet-20250219-t",
  46. "name": "claude-3-7-sonnet-20250219-t",
  47. "model": "claude-3-7-sonnet-20250219-t",
  48. "provider": "Anthropic",
  49. "maxLength": 0,
  50. "tokenLimit": 0,
  51. "context": 0,
  52. "success_rate": 100,
  53. "tps": 39.04289693593315,
  54. },
  55. "deepseek-v3": {
  56. "id": "deepseek-v3",
  57. "name": "deepseek-v3",
  58. "model": "deepseek-v3",
  59. "provider": "DeepSeek",
  60. "maxLength": 0,
  61. "tokenLimit": 0,
  62. "context": 0,
  63. "success_rate": 100,
  64. "tps": 40.484657419083646,
  65. },
  66. "gemini-1.0-pro-latest-123": {
  67. "id": "gemini-1.0-pro-latest-123",
  68. "name": "gemini-1.0-pro-latest-123",
  69. "model": "gemini-1.0-pro-latest-123",
  70. "provider": "Google",
  71. "maxLength": 0,
  72. "tokenLimit": 0,
  73. "context": 0,
  74. "success_rate": 100,
  75. "tps": 10,
  76. },
  77. "gemini-2.0-flash": {
  78. "id": "gemini-2.0-flash",
  79. "name": "gemini-2.0-flash",
  80. "model": "gemini-2.0-flash",
  81. "provider": "Google",
  82. "maxLength": 0,
  83. "tokenLimit": 0,
  84. "context": 0,
  85. "success_rate": 100,
  86. "tps": 216.44162436548223,
  87. },
  88. "gemini-2.0-flash-exp": {
  89. "id": "gemini-2.0-flash-exp",
  90. "name": "gemini-2.0-flash-exp",
  91. "model": "gemini-2.0-flash-exp",
  92. "provider": "Google",
  93. "maxLength": 0,
  94. "tokenLimit": 0,
  95. "context": 0,
  96. "success_rate": 0,
  97. "tps": 0,
  98. },
  99. "gemini-2.0-flash-thinking-exp": {
  100. "id": "gemini-2.0-flash-thinking-exp",
  101. "name": "gemini-2.0-flash-thinking-exp",
  102. "model": "gemini-2.0-flash-thinking-exp",
  103. "provider": "Google",
  104. "maxLength": 0,
  105. "tokenLimit": 0,
  106. "context": 0,
  107. "success_rate": 0,
  108. "tps": 0,
  109. },
  110. "gemini-2.5-flash-preview-04-17": {
  111. "id": "gemini-2.5-flash-preview-04-17",
  112. "name": "gemini-2.5-flash-preview-04-17",
  113. "model": "gemini-2.5-flash-preview-04-17",
  114. "provider": "Google",
  115. "maxLength": 0,
  116. "tokenLimit": 0,
  117. "context": 0,
  118. "success_rate": 100,
  119. "tps": 189.84010840108402,
  120. },
  121. "gemini-2.5-pro-official": {
  122. "id": "gemini-2.5-pro-official",
  123. "name": "gemini-2.5-pro-official",
  124. "model": "gemini-2.5-pro-official",
  125. "provider": "Google",
  126. "maxLength": 0,
  127. "tokenLimit": 0,
  128. "context": 0,
  129. "success_rate": 100,
  130. "tps": 91.00613496932516,
  131. },
  132. "gemini-2.5-pro-preview-03-25": {
  133. "id": "gemini-2.5-pro-preview-03-25",
  134. "name": "gemini-2.5-pro-preview-03-25",
  135. "model": "gemini-2.5-pro-preview-03-25",
  136. "provider": "Google",
  137. "maxLength": 0,
  138. "tokenLimit": 0,
  139. "context": 0,
  140. "success_rate": 99.05660377358491,
  141. "tps": 45.050511247443765,
  142. },
  143. "gemini-2.5-pro-preview-05-06": {
  144. "id": "gemini-2.5-pro-preview-05-06",
  145. "name": "gemini-2.5-pro-preview-05-06",
  146. "model": "gemini-2.5-pro-preview-05-06",
  147. "provider": "Google",
  148. "maxLength": 0,
  149. "tokenLimit": 0,
  150. "context": 0,
  151. "success_rate": 100,
  152. "tps": 99.29617834394904,
  153. },
  154. "gpt-4-turbo-2024-04-09": {
  155. "id": "gpt-4-turbo-2024-04-09",
  156. "name": "gpt-4-turbo-2024-04-09",
  157. "model": "gpt-4-turbo-2024-04-09",
  158. "provider": "OpenAI",
  159. "maxLength": 0,
  160. "tokenLimit": 0,
  161. "context": 0,
  162. "success_rate": 100,
  163. "tps": 1,
  164. },
  165. "gpt-4.1": {
  166. "id": "gpt-4.1",
  167. "name": "gpt-4.1",
  168. "model": "gpt-4.1",
  169. "provider": "OpenAI",
  170. "maxLength": 0,
  171. "tokenLimit": 0,
  172. "context": 0,
  173. "success_rate": 42.857142857142854,
  174. "tps": 19.58032786885246,
  175. },
  176. "gpt-4.1-mini": {
  177. "id": "gpt-4.1-mini",
  178. "name": "gpt-4.1-mini",
  179. "model": "gpt-4.1-mini",
  180. "provider": "OpenAI",
  181. "maxLength": 0,
  182. "tokenLimit": 0,
  183. "context": 0,
  184. "success_rate": 68.75,
  185. "tps": 12.677576601671309,
  186. },
  187. "gpt-4.1-mini-2025-04-14": {
  188. "id": "gpt-4.1-mini-2025-04-14",
  189. "name": "gpt-4.1-mini-2025-04-14",
  190. "model": "gpt-4.1-mini-2025-04-14",
  191. "provider": "OpenAI",
  192. "maxLength": 0,
  193. "tokenLimit": 0,
  194. "context": 0,
  195. "success_rate": 94.23076923076923,
  196. "tps": 8.297687861271676,
  197. },
  198. "gpt-4o-2024-11-20": {
  199. "id": "gpt-4o-2024-11-20",
  200. "name": "gpt-4o-2024-11-20",
  201. "model": "gpt-4o-2024-11-20",
  202. "provider": "OpenAI",
  203. "maxLength": 0,
  204. "tokenLimit": 0,
  205. "context": 0,
  206. "success_rate": 100,
  207. "tps": 73.3955223880597,
  208. },
  209. "gpt-4o-mini-2024-07-18": {
  210. "id": "gpt-4o-mini-2024-07-18",
  211. "name": "gpt-4o-mini-2024-07-18",
  212. "model": "gpt-4o-mini-2024-07-18",
  213. "provider": "OpenAI",
  214. "maxLength": 0,
  215. "tokenLimit": 0,
  216. "context": 0,
  217. "success_rate": 100,
  218. "tps": 26.874455100261553,
  219. },
  220. "grok-3": {
  221. "id": "grok-3",
  222. "name": "grok-3",
  223. "model": "grok-3",
  224. "provider": "xAI",
  225. "maxLength": 0,
  226. "tokenLimit": 0,
  227. "context": 0,
  228. "success_rate": 100,
  229. "tps": 51.110652663165794,
  230. },
  231. "grok-3-reason": {
  232. "id": "grok-3-reason",
  233. "name": "grok-3-reason",
  234. "model": "grok-3-reason",
  235. "provider": "xAI",
  236. "maxLength": 0,
  237. "tokenLimit": 0,
  238. "context": 0,
  239. "success_rate": 100,
  240. "tps": 62.81976744186046,
  241. },
  242. "o3-mini-2025-01-31": {
  243. "id": "o3-mini-2025-01-31",
  244. "name": "o3-mini-2025-01-31",
  245. "model": "o3-mini-2025-01-31",
  246. "provider": "Unknown",
  247. "maxLength": 0,
  248. "tokenLimit": 0,
  249. "context": 0,
  250. "success_rate": 100,
  251. "tps": 125.31410256410257,
  252. },
  253. "qwen3-235b-a22b": {
  254. "id": "qwen3-235b-a22b",
  255. "name": "qwen3-235b-a22b",
  256. "model": "qwen3-235b-a22b",
  257. "provider": "Alibaba",
  258. "maxLength": 0,
  259. "tokenLimit": 0,
  260. "context": 0,
  261. "success_rate": 100,
  262. "tps": 25.846153846153847,
  263. },
  264. }
  265. class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
  266. url = "https://liaobots.work"
  267. working = True
  268. supports_message_history = True
  269. supports_system_message = True
  270. default_model = "grok-3"
  271. models = list(models.keys())
  272. model_aliases = {
  273. # Anthropic
  274. "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
  275. "claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t",
  276. "claude-3.7-sonnet": "claude-3-7-sonnet-20250219",
  277. "claude-3.7-sonnet": "claude-3-7-sonnet-20250219-t",
  278. # DeepSeek
  279. #"deepseek-v3": "deepseek-v3",
  280. # Google
  281. "gemini-1.0-pro": "gemini-1.0-pro-latest-123",
  282. "gemini-2.0-flash": "gemini-2.0-flash-exp",
  283. "gemini-2.0-flash-thinking": "gemini-2.0-flash-thinking-exp",
  284. "gemini-2.5-flash": "gemini-2.5-flash-preview-04-17",
  285. "gemini-2.5-pro": "gemini-2.5-pro-official",
  286. "gemini-2.5-pro": "gemini-2.5-pro-preview-03-25",
  287. "gemini-2.5-pro": "gemini-2.5-pro-preview-05-06",
  288. # OpenAI
  289. "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
  290. "gpt-4.1-mini": "gpt-4.1-mini-2025-04-14",
  291. "gpt-4": "gpt-4o-2024-11-20",
  292. "gpt-4o": "gpt-4o-2024-11-20",
  293. "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
  294. # xAI
  295. "grok-3-reason": "grok-3-reason",
  296. "o3-mini": "o3-mini-2025-01-31",
  297. "qwen-3-235b": "qwen3-235b-a22b",
  298. }
  299. _auth_code = None
  300. _cookie_jar = None
  301. @classmethod
  302. def is_supported(cls, model: str) -> bool:
  303. """
  304. Check if the given model is supported.
  305. """
  306. return model in models or model in cls.model_aliases
  307. @classmethod
  308. async def create_async_generator(
  309. cls,
  310. model: str,
  311. messages: Messages,
  312. proxy: str = None,
  313. connector: BaseConnector = None,
  314. **kwargs
  315. ) -> AsyncResult:
  316. model = cls.get_model(model)
  317. headers = {
  318. "accept": "*/*",
  319. "accept-language": "en-US,en;q=0.9",
  320. "content-type": "application/json",
  321. "dnt": "1",
  322. "origin": "https://liaobots.work",
  323. "priority": "u=1, i",
  324. "referer": "https://liaobots.work/en",
  325. "sec-ch-ua": "\"Chromium\";v=\"135\", \"Not-A.Brand\";v=\"8\"",
  326. "sec-ch-ua-mobile": "?0",
  327. "sec-ch-ua-platform": "\"Linux\"",
  328. "sec-fetch-dest": "empty",
  329. "sec-fetch-mode": "cors",
  330. "sec-fetch-site": "same-origin",
  331. "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36"
  332. }
  333. async with ClientSession(
  334. headers=headers,
  335. cookie_jar=cls._cookie_jar,
  336. connector=get_connector(connector, proxy, True)
  337. ) as session:
  338. # First, get a valid auth code
  339. await cls.get_auth_code(session)
  340. # Create conversation ID
  341. conversation_id = str(uuid.uuid4())
  342. # Prepare request data
  343. data = {
  344. "conversationId": conversation_id,
  345. "models": [{
  346. "modelId": model,
  347. "provider": models[model]["provider"]
  348. }],
  349. "search": "false",
  350. "messages": messages,
  351. "key": "",
  352. "prompt": kwargs.get("system_message", "你是 {{model}},一个由 {{provider}} 训练的大型语言模型,请仔细遵循用户的指示。")
  353. }
  354. # Try to make the chat request
  355. try:
  356. # Make the chat request with the current auth code
  357. async with session.post(
  358. f"{cls.url}/api/chat",
  359. json=data,
  360. headers={"x-auth-code": cls._auth_code},
  361. ssl=False
  362. ) as response:
  363. # Check if we got a streaming response
  364. content_type = response.headers.get("Content-Type", "")
  365. if "text/event-stream" in content_type:
  366. async for line in response.content:
  367. if line.startswith(b"data: "):
  368. try:
  369. response_data = json.loads(line[6:])
  370. # Check for error response
  371. if response_data.get("error") is True:
  372. # Raise RateLimitError for payment required or other errors
  373. if "402" in str(response_data.get("res_status", "")):
  374. raise RateLimitError("This model requires payment or credits")
  375. else:
  376. error_msg = response_data.get('message', 'Unknown error')
  377. raise RateLimitError(f"Error: {error_msg}")
  378. # Process normal response
  379. if response_data.get("role") == "assistant" and "content" in response_data:
  380. content = response_data.get("content")
  381. yield content
  382. except json.JSONDecodeError:
  383. continue
  384. else:
  385. # Not a streaming response, might be an error or HTML
  386. response_text = await response.text()
  387. # If we got HTML, we need to bypass CAPTCHA
  388. if response_text.startswith("<!DOCTYPE html>"):
  389. await cls.bypass_captcha(session)
  390. # Get a fresh auth code
  391. await cls.get_auth_code(session)
  392. # Try the request again
  393. async with session.post(
  394. f"{cls.url}/api/chat",
  395. json=data,
  396. headers={"x-auth-code": cls._auth_code},
  397. ssl=False
  398. ) as response2:
  399. # Check if we got a streaming response
  400. content_type = response2.headers.get("Content-Type", "")
  401. if "text/event-stream" in content_type:
  402. async for line in response2.content:
  403. if line.startswith(b"data: "):
  404. try:
  405. response_data = json.loads(line[6:])
  406. # Check for error response
  407. if response_data.get("error") is True:
  408. # Raise RateLimitError for payment required or other errors
  409. if "402" in str(response_data.get("res_status", "")):
  410. raise RateLimitError("This model requires payment or credits")
  411. else:
  412. error_msg = response_data.get('message', 'Unknown error')
  413. raise RateLimitError(f"Error: {error_msg}")
  414. # Process normal response
  415. if response_data.get("role") == "assistant" and "content" in response_data:
  416. content = response_data.get("content")
  417. yield content
  418. except json.JSONDecodeError:
  419. continue
  420. else:
  421. raise RateLimitError("Failed to get streaming response")
  422. else:
  423. raise RateLimitError("Failed to connect to the service")
  424. except Exception as e:
  425. # If it's already a RateLimitError, re-raise it
  426. if isinstance(e, RateLimitError):
  427. raise
  428. # Otherwise, wrap it in a RateLimitError
  429. raise RateLimitError(f"Error processing request: {str(e)}")
  430. @classmethod
  431. async def bypass_captcha(cls, session: ClientSession) -> None:
  432. """
  433. Bypass the CAPTCHA verification by directly making the recaptcha API request.
  434. """
  435. try:
  436. # First, try the direct recaptcha API request
  437. async with session.post(
  438. f"{cls.url}/recaptcha/api/login",
  439. json={"token": "abcdefghijklmnopqrst"},
  440. ssl=False
  441. ) as response:
  442. if response.status == 200:
  443. try:
  444. response_text = await response.text()
  445. # Try to parse as JSON
  446. try:
  447. response_data = json.loads(response_text)
  448. # Check if we got a successful response
  449. if response_data.get("code") == 200:
  450. cls._cookie_jar = session.cookie_jar
  451. except json.JSONDecodeError:
  452. pass
  453. except Exception:
  454. pass
  455. except Exception:
  456. pass
  457. @classmethod
  458. async def get_auth_code(cls, session: ClientSession) -> None:
  459. """
  460. Get a valid auth code by sending a request with an empty authcode.
  461. """
  462. try:
  463. # Send request with empty authcode to get a new one
  464. auth_request_data = {
  465. "authcode": "",
  466. "recommendUrl": "https://liaobots.work/zh"
  467. }
  468. async with session.post(
  469. f"{cls.url}/api/user",
  470. json=auth_request_data,
  471. ssl=False
  472. ) as response:
  473. if response.status == 200:
  474. response_text = await response.text()
  475. try:
  476. response_data = json.loads(response_text)
  477. if "authCode" in response_data:
  478. cls._auth_code = response_data["authCode"]
  479. cls._cookie_jar = session.cookie_jar
  480. return
  481. except json.JSONDecodeError:
  482. # If we got HTML, it might be the CAPTCHA page
  483. if response_text.startswith("<!DOCTYPE html>"):
  484. await cls.bypass_captcha(session)
  485. # Try again after bypassing CAPTCHA
  486. async with session.post(
  487. f"{cls.url}/api/user",
  488. json=auth_request_data,
  489. ssl=False
  490. ) as response2:
  491. if response2.status == 200:
  492. response_text2 = await response2.text()
  493. try:
  494. response_data2 = json.loads(response_text2)
  495. if "authCode" in response_data2:
  496. cls._auth_code = response_data2["authCode"]
  497. cls._cookie_jar = session.cookie_jar
  498. return
  499. except json.JSONDecodeError:
  500. pass
  501. except Exception:
  502. pass
  503. # If we're here, we couldn't get a valid auth code
  504. # Set a default one as a fallback
  505. cls._auth_code = "DvS3A5GTE9f0D" # Fallback to one of the provided auth codes