  1. from __future__ import annotations
  2. import uuid
  3. from aiohttp import ClientSession, BaseConnector
  4. from ..typing import AsyncResult, Messages
  5. from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
  6. from .helper import get_connector
  7. from ..requests import raise_for_status
# Catalog of models exposed by the liaobots service, keyed by the
# provider-side model id.  Each value is the exact object the chat API
# expects in the request's "model" field: display name, upstream model
# family and vendor, plus the site's published size limits
# ("maxLength" in characters, "tokenLimit" in tokens — values as
# advertised by the site, not verified here).
models = {
    "gpt-4o-mini-free": {
        "id": "gpt-4o-mini-free",
        "name": "GPT-4o-Mini-Free",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 31200,
        "tokenLimit": 7800,
        "context": "8K",
    },
    "gpt-4o-2024-08-06": {
        "id": "gpt-4o-2024-08-06",
        "name": "GPT-4o",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 260000,
        "tokenLimit": 126000,
        "context": "128K",
    },
    "gpt-4o-mini-2024-07-18": {
        "id": "gpt-4o-mini-2024-07-18",
        "name": "GPT-4o-Mini",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 260000,
        "tokenLimit": 126000,
        "context": "128K",
    },
    "o1-preview-2024-09-12": {
        "id": "o1-preview-2024-09-12",
        "name": "o1-preview",
        "model": "o1",
        "provider": "OpenAI",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "128K",
    },
    "o1-mini-2024-09-12": {
        "id": "o1-mini-2024-09-12",
        "name": "o1-mini",
        "model": "o1",
        "provider": "OpenAI",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "128K",
    },
    "grok-beta": {
        "id": "grok-beta",
        "name": "Grok-Beta",
        "model": "Grok",
        "provider": "x.ai",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "100K",
    },
    "claude-3-opus-20240229": {
        "id": "claude-3-opus-20240229",
        "name": "Claude-3-Opus",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-5-sonnet-20240620": {
        "id": "claude-3-5-sonnet-20240620",
        "name": "Claude-3.5-Sonnet",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-5-sonnet-20241022": {
        "id": "claude-3-5-sonnet-20241022",
        "name": "Claude-3.5-Sonnet-V2",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-sonnet-20240229": {
        "id": "claude-3-sonnet-20240229",
        "name": "Claude-3-Sonnet",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    # The "-t" variants appear to be alternate site-side endpoints for the
    # same underlying models — TODO confirm against the service.
    "claude-3-opus-20240229-t": {
        "id": "claude-3-opus-20240229-t",
        "name": "Claude-3-Opus-T",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-5-sonnet-20241022-t": {
        "id": "claude-3-5-sonnet-20241022-t",
        "name": "Claude-3.5-Sonnet-V2-T",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "gemini-1.5-flash-002": {
        "id": "gemini-1.5-flash-002",
        "name": "Gemini-1.5-Flash-1M",
        "model": "Gemini",
        "provider": "Google",
        "maxLength": 4000000,
        "tokenLimit": 1000000,
        "context": "1024K",
    },
    "gemini-1.5-pro-002": {
        "id": "gemini-1.5-pro-002",
        "name": "Gemini-1.5-Pro-1M",
        "model": "Gemini",
        "provider": "Google",
        "maxLength": 4000000,
        "tokenLimit": 1000000,
        "context": "1024K",
    }
}
  136. class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
  137. url = "https://liaobots.site"
  138. working = True
  139. supports_message_history = True
  140. supports_system_message = True
  141. default_model = "gpt-4o-2024-08-06"
  142. models = list(models.keys())
  143. model_aliases = {
  144. "gpt-4o-mini": "gpt-4o-mini-free",
  145. "gpt-4o": "gpt-4o-2024-08-06",
  146. "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
  147. "gpt-4": "gpt-4o-2024-08-06",
  148. "o1-preview": "o1-preview-2024-09-12",
  149. "o1-mini": "o1-mini-2024-09-12",
  150. "claude-3-opus": "claude-3-opus-20240229",
  151. "claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
  152. "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
  153. "claude-3-sonnet": "claude-3-sonnet-20240229",
  154. "claude-3-opus": "claude-3-opus-20240229-t",
  155. "claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t",
  156. "gemini-flash": "gemini-1.5-flash-002",
  157. "gemini-pro": "gemini-1.5-pro-002"
  158. }
  159. _auth_code = ""
  160. _cookie_jar = None
  161. @classmethod
  162. def get_model(cls, model: str) -> str:
  163. """
  164. Retrieve the internal model identifier based on the provided model name or alias.
  165. """
  166. if model in cls.model_aliases:
  167. model = cls.model_aliases[model]
  168. if model not in models:
  169. raise ValueError(f"Model '{model}' is not supported.")
  170. return model
  171. @classmethod
  172. def is_supported(cls, model: str) -> bool:
  173. """
  174. Check if the given model is supported.
  175. """
  176. return model in models or model in cls.model_aliases
  177. @classmethod
  178. async def create_async_generator(
  179. cls,
  180. model: str,
  181. messages: Messages,
  182. auth: str = None,
  183. proxy: str = None,
  184. connector: BaseConnector = None,
  185. **kwargs
  186. ) -> AsyncResult:
  187. model = cls.get_model(model)
  188. headers = {
  189. "authority": "liaobots.com",
  190. "content-type": "application/json",
  191. "origin": cls.url,
  192. "referer": f"{cls.url}/",
  193. "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
  194. }
  195. async with ClientSession(
  196. headers=headers,
  197. cookie_jar=cls._cookie_jar,
  198. connector=get_connector(connector, proxy, True)
  199. ) as session:
  200. data = {
  201. "conversationId": str(uuid.uuid4()),
  202. "model": models[model],
  203. "messages": messages,
  204. "key": "",
  205. "prompt": kwargs.get("system_message", "You are a helpful assistant."),
  206. }
  207. if not cls._auth_code:
  208. async with session.post(
  209. "https://liaobots.work/recaptcha/api/login",
  210. data={"token": "abcdefghijklmnopqrst"},
  211. verify_ssl=False
  212. ) as response:
  213. await raise_for_status(response)
  214. try:
  215. async with session.post(
  216. "https://liaobots.work/api/user",
  217. json={"authcode": cls._auth_code},
  218. verify_ssl=False
  219. ) as response:
  220. await raise_for_status(response)
  221. cls._auth_code = (await response.json(content_type=None))["authCode"]
  222. if not cls._auth_code:
  223. raise RuntimeError("Empty auth code")
  224. cls._cookie_jar = session.cookie_jar
  225. async with session.post(
  226. "https://liaobots.work/api/chat",
  227. json=data,
  228. headers={"x-auth-code": cls._auth_code},
  229. verify_ssl=False
  230. ) as response:
  231. await raise_for_status(response)
  232. async for chunk in response.content.iter_any():
  233. if b"<html coupert-item=" in chunk:
  234. raise RuntimeError("Invalid session")
  235. if chunk:
  236. yield chunk.decode(errors="ignore")
  237. except:
  238. async with session.post(
  239. "https://liaobots.work/api/user",
  240. json={"authcode": "pTIQr4FTnVRfr"},
  241. verify_ssl=False
  242. ) as response:
  243. await raise_for_status(response)
  244. cls._auth_code = (await response.json(content_type=None))["authCode"]
  245. if not cls._auth_code:
  246. raise RuntimeError("Empty auth code")
  247. cls._cookie_jar = session.cookie_jar
  248. async with session.post(
  249. "https://liaobots.work/api/chat",
  250. json=data,
  251. headers={"x-auth-code": cls._auth_code},
  252. verify_ssl=False
  253. ) as response:
  254. await raise_for_status(response)
  255. async for chunk in response.content.iter_any():
  256. if b"<html coupert-item=" in chunk:
  257. raise RuntimeError("Invalid session")
  258. if chunk:
  259. yield chunk.decode(errors="ignore")
  260. @classmethod
  261. async def initialize_auth_code(cls, session: ClientSession) -> None:
  262. """
  263. Initialize the auth code by making the necessary login requests.
  264. """
  265. async with session.post(
  266. "https://liaobots.work/api/user",
  267. json={"authcode": "pTIQr4FTnVRfr"},
  268. verify_ssl=False
  269. ) as response:
  270. await raise_for_status(response)
  271. cls._auth_code = (await response.json(content_type=None))["authCode"]
  272. if not cls._auth_code:
  273. raise RuntimeError("Empty auth code")
  274. cls._cookie_jar = session.cookie_jar
  275. @classmethod
  276. async def ensure_auth_code(cls, session: ClientSession) -> None:
  277. """
  278. Ensure the auth code is initialized, and if not, perform the initialization.
  279. """
  280. if not cls._auth_code:
  281. await cls.initialize_auth_code(session)