# Liaobots.py
  1. from __future__ import annotations
  2. import uuid
  3. from aiohttp import ClientSession, BaseConnector
  4. from ..typing import AsyncResult, Messages
  5. from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
  6. from .helper import get_connector
  7. from ..requests import raise_for_status
  8. models = {
  9. "gpt-4o-mini-free": {
  10. "id": "gpt-4o-mini-free",
  11. "name": "GPT-4o-Mini-Free",
  12. "model": "ChatGPT",
  13. "provider": "OpenAI",
  14. "maxLength": 31200,
  15. "tokenLimit": 7800,
  16. "context": "8K",
  17. },
  18. "gpt-4o-2024-11-20": {
  19. "id": "gpt-4o-2024-11-20",
  20. "name": "GPT-4o",
  21. "model": "ChatGPT",
  22. "provider": "OpenAI",
  23. "maxLength": 260000,
  24. "tokenLimit": 126000,
  25. "context": "128K",
  26. },
  27. "gpt-4o-mini-2024-07-18": {
  28. "id": "gpt-4o-mini-2024-07-18",
  29. "name": "GPT-4o-Mini",
  30. "model": "ChatGPT",
  31. "provider": "OpenAI",
  32. "maxLength": 260000,
  33. "tokenLimit": 126000,
  34. "context": "128K",
  35. },
  36. "o1-preview-2024-09-12": {
  37. "id": "o1-preview-2024-09-12",
  38. "name": "o1-preview",
  39. "model": "o1",
  40. "provider": "OpenAI",
  41. "maxLength": 400000,
  42. "tokenLimit": 100000,
  43. "context": "128K",
  44. },
  45. "o1-mini-2024-09-12": {
  46. "id": "o1-mini-2024-09-12",
  47. "name": "o1-mini",
  48. "model": "o1",
  49. "provider": "OpenAI",
  50. "maxLength": 400000,
  51. "tokenLimit": 100000,
  52. "context": "128K",
  53. },
  54. "grok-2": {
  55. "id": "grok-2",
  56. "name": "Grok-2",
  57. "model": "Grok",
  58. "provider": "x.ai",
  59. "maxLength": 400000,
  60. "tokenLimit": 100000,
  61. "context": "100K",
  62. },
  63. "claude-3-opus-20240229": {
  64. "id": "claude-3-opus-20240229",
  65. "name": "Claude-3-Opus",
  66. "model": "Claude",
  67. "provider": "Anthropic",
  68. "maxLength": 800000,
  69. "tokenLimit": 200000,
  70. "context": "200K",
  71. },
  72. "claude-3-5-sonnet-20240620": {
  73. "id": "claude-3-5-sonnet-20240620",
  74. "name": "Claude-3.5-Sonnet",
  75. "model": "Claude",
  76. "provider": "Anthropic",
  77. "maxLength": 800000,
  78. "tokenLimit": 200000,
  79. "context": "200K",
  80. },
  81. "claude-3-5-sonnet-20241022": {
  82. "id": "claude-3-5-sonnet-20241022",
  83. "name": "Claude-3.5-Sonnet-V2",
  84. "model": "Claude",
  85. "provider": "Anthropic",
  86. "maxLength": 800000,
  87. "tokenLimit": 200000,
  88. "context": "200K",
  89. },
  90. "claude-3-sonnet-20240229": {
  91. "id": "claude-3-sonnet-20240229",
  92. "name": "Claude-3-Sonnet",
  93. "model": "Claude",
  94. "provider": "Anthropic",
  95. "maxLength": 800000,
  96. "tokenLimit": 200000,
  97. "context": "200K",
  98. },
  99. "claude-3-opus-20240229-t": {
  100. "id": "claude-3-opus-20240229-t",
  101. "name": "Claude-3-Opus-T",
  102. "model": "Claude",
  103. "provider": "Anthropic",
  104. "maxLength": 800000,
  105. "tokenLimit": 200000,
  106. "context": "200K",
  107. },
  108. "claude-3-5-sonnet-20241022-t": {
  109. "id": "claude-3-5-sonnet-20241022-t",
  110. "name": "Claude-3.5-Sonnet-V2-T",
  111. "model": "Claude",
  112. "provider": "Anthropic",
  113. "maxLength": 800000,
  114. "tokenLimit": 200000,
  115. "context": "200K",
  116. },
  117. "gemini-2.0-flash-exp": {
  118. "id": "gemini-2.0-flash-exp",
  119. "name": "Gemini-2.0-Flash-Exp",
  120. "model": "Gemini",
  121. "provider": "Google",
  122. "maxLength": 4000000,
  123. "tokenLimit": 1000000,
  124. "context": "1024K",
  125. },
  126. "gemini-2.0-flash-thinking-exp": {
  127. "id": "gemini-2.0-flash-thinking-exp",
  128. "name": "Gemini-2.0-Flash-Thinking-Exp",
  129. "model": "Gemini",
  130. "provider": "Google",
  131. "maxLength": 4000000,
  132. "tokenLimit": 1000000,
  133. "context": "1024K",
  134. },
  135. "gemini-1.5-flash-002": {
  136. "id": "gemini-1.5-flash-002",
  137. "name": "Gemini-1.5-Flash-1M",
  138. "model": "Gemini",
  139. "provider": "Google",
  140. "maxLength": 4000000,
  141. "tokenLimit": 1000000,
  142. "context": "1024K",
  143. },
  144. "gemini-1.5-pro-002": {
  145. "id": "gemini-1.5-pro-002",
  146. "name": "Gemini-1.5-Pro-1M",
  147. "model": "Gemini",
  148. "provider": "Google",
  149. "maxLength": 4000000,
  150. "tokenLimit": 1000000,
  151. "context": "1024K",
  152. },
  153. }
  154. class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
  155. url = "https://liaobots.site"
  156. working = True
  157. supports_message_history = True
  158. supports_system_message = True
  159. default_model = "gpt-4o-2024-11-20"
  160. models = list(models.keys())
  161. model_aliases = {
  162. "gpt-4o-mini": "gpt-4o-mini-free",
  163. "gpt-4o": default_model,
  164. "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
  165. "gpt-4": default_model,
  166. "o1-preview": "o1-preview-2024-09-12",
  167. "o1-mini": "o1-mini-2024-09-12",
  168. "claude-3-opus": "claude-3-opus-20240229",
  169. "claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
  170. "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
  171. "claude-3-sonnet": "claude-3-sonnet-20240229",
  172. "claude-3-opus": "claude-3-opus-20240229-t",
  173. "claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t",
  174. "gemini-2.0-flash": "gemini-2.0-flash-exp",
  175. "gemini-2.0-flash-thinking": "gemini-2.0-flash-thinking-exp",
  176. "gemini-1.5-flash": "gemini-1.5-flash-002",
  177. "gemini-1.5-pro": "gemini-1.5-pro-002"
  178. }
  179. _auth_code = ""
  180. _cookie_jar = None
  181. @classmethod
  182. def get_model(cls, model: str) -> str:
  183. """
  184. Retrieve the internal model identifier based on the provided model name or alias.
  185. """
  186. if model in cls.model_aliases:
  187. model = cls.model_aliases[model]
  188. if model not in models:
  189. raise ValueError(f"Model '{model}' is not supported.")
  190. return model
  191. @classmethod
  192. def is_supported(cls, model: str) -> bool:
  193. """
  194. Check if the given model is supported.
  195. """
  196. return model in models or model in cls.model_aliases
  197. @classmethod
  198. async def create_async_generator(
  199. cls,
  200. model: str,
  201. messages: Messages,
  202. proxy: str = None,
  203. connector: BaseConnector = None,
  204. **kwargs
  205. ) -> AsyncResult:
  206. model = cls.get_model(model)
  207. headers = {
  208. "authority": "liaobots.com",
  209. "content-type": "application/json",
  210. "origin": cls.url,
  211. "referer": f"{cls.url}/",
  212. "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
  213. }
  214. async with ClientSession(
  215. headers=headers,
  216. cookie_jar=cls._cookie_jar,
  217. connector=get_connector(connector, proxy, True)
  218. ) as session:
  219. data = {
  220. "conversationId": str(uuid.uuid4()),
  221. "model": models[model],
  222. "messages": messages,
  223. "key": "",
  224. "prompt": kwargs.get("system_message", "You are a helpful assistant."),
  225. }
  226. if not cls._auth_code:
  227. async with session.post(
  228. "https://liaobots.work/recaptcha/api/login",
  229. data={"token": "abcdefghijklmnopqrst"},
  230. verify_ssl=False
  231. ) as response:
  232. await raise_for_status(response)
  233. try:
  234. async with session.post(
  235. "https://liaobots.work/api/user",
  236. json={"authcode": cls._auth_code},
  237. verify_ssl=False
  238. ) as response:
  239. await raise_for_status(response)
  240. cls._auth_code = (await response.json(content_type=None))["authCode"]
  241. if not cls._auth_code:
  242. raise RuntimeError("Empty auth code")
  243. cls._cookie_jar = session.cookie_jar
  244. async with session.post(
  245. "https://liaobots.work/api/chat",
  246. json=data,
  247. headers={"x-auth-code": cls._auth_code},
  248. verify_ssl=False
  249. ) as response:
  250. await raise_for_status(response)
  251. async for chunk in response.content.iter_any():
  252. if b"<html coupert-item=" in chunk:
  253. raise RuntimeError("Invalid session")
  254. if chunk:
  255. yield chunk.decode(errors="ignore")
  256. except:
  257. async with session.post(
  258. "https://liaobots.work/api/user",
  259. json={"authcode": "pTIQr4FTnVRfr"},
  260. verify_ssl=False
  261. ) as response:
  262. await raise_for_status(response)
  263. cls._auth_code = (await response.json(content_type=None))["authCode"]
  264. if not cls._auth_code:
  265. raise RuntimeError("Empty auth code")
  266. cls._cookie_jar = session.cookie_jar
  267. async with session.post(
  268. "https://liaobots.work/api/chat",
  269. json=data,
  270. headers={"x-auth-code": cls._auth_code},
  271. verify_ssl=False
  272. ) as response:
  273. await raise_for_status(response)
  274. async for chunk in response.content.iter_any():
  275. if b"<html coupert-item=" in chunk:
  276. raise RuntimeError("Invalid session")
  277. if chunk:
  278. yield chunk.decode(errors="ignore")
  279. @classmethod
  280. async def initialize_auth_code(cls, session: ClientSession) -> None:
  281. """
  282. Initialize the auth code by making the necessary login requests.
  283. """
  284. async with session.post(
  285. "https://liaobots.work/api/user",
  286. json={"authcode": "pTIQr4FTnVRfr"},
  287. verify_ssl=False
  288. ) as response:
  289. await raise_for_status(response)
  290. cls._auth_code = (await response.json(content_type=None))["authCode"]
  291. if not cls._auth_code:
  292. raise RuntimeError("Empty auth code")
  293. cls._cookie_jar = session.cookie_jar
  294. @classmethod
  295. async def ensure_auth_code(cls, session: ClientSession) -> None:
  296. """
  297. Ensure the auth code is initialized, and if not, perform the initialization.
  298. """
  299. if not cls._auth_code:
  300. await cls.initialize_auth_code(session)