# Liaobots.py
  1. from __future__ import annotations
  2. import uuid
  3. from aiohttp import ClientSession
  4. from ..typing import AsyncResult, Messages
  5. from .base_provider import AsyncGeneratorProvider
  6. models = {
  7. "gpt-4": {
  8. "id": "gpt-4",
  9. "name": "GPT-4",
  10. "maxLength": 24000,
  11. "tokenLimit": 8000,
  12. },
  13. "gpt-4-0613": {
  14. "id": "gpt-4-0613",
  15. "name": "GPT-4",
  16. "maxLength": 32000,
  17. "tokenLimit": 8000,
  18. },
  19. "gpt-3.5-turbo": {
  20. "id": "gpt-3.5-turbo",
  21. "name": "GPT-3.5-Turbo",
  22. "maxLength": 48000,
  23. "tokenLimit": 14000,
  24. "context": "16K",
  25. },
  26. "gpt-3.5-turbo-16k": {
  27. "id": "gpt-3.5-turbo-16k",
  28. "name": "GPT-3.5-16k",
  29. "maxLength": 48000,
  30. "tokenLimit": 16000,
  31. },
  32. "gpt-4-1106-preview": {
  33. "id": "gpt-4-1106-preview",
  34. "name": "GPT-4-Turbo",
  35. "maxLength": 260000,
  36. "tokenLimit": 126000,
  37. "context": "128K",
  38. },
  39. "gpt-4-plus": {
  40. "id": "gpt-4-plus",
  41. "name": "GPT-4-Plus",
  42. "maxLength": 130000,
  43. "tokenLimit": 31000,
  44. "context": "32K",
  45. },
  46. "gemini-pro": {
  47. "id": "gemini-pro",
  48. "name": "Gemini-Pro",
  49. "maxLength": 120000,
  50. "tokenLimit": 30000,
  51. "context": "32K",
  52. },
  53. "claude-2": {
  54. "id": "claude-2",
  55. "name": "Claude-2-200k",
  56. "maxLength": 800000,
  57. "tokenLimit": 200000,
  58. "context": "200K",
  59. },
  60. "claude-instant-1": {
  61. "id": "claude-instant-1",
  62. "name": "Claude-instant-1",
  63. "maxLength": 400000,
  64. "tokenLimit": 100000,
  65. "context": "100K",
  66. }
  67. }
class Liaobots(AsyncGeneratorProvider):
    """Async streaming provider backed by the liaobots.work chat API.

    On first use it performs two login round-trips to obtain an auth code,
    caching both the code and the session cookies at class level so later
    calls can skip the login step.  Chat responses are streamed and yielded
    chunk by chunk as decoded text.
    """
    url = "https://liaobots.site"
    working = True
    supports_message_history = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    # Cached across calls (class-level, shared by all users of this provider)
    # so repeated requests reuse the same authenticated session.
    _auth_code = None
    _cookie_jar = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        auth: str = None,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield response text chunks for ``messages`` using ``model``.

        Args:
            model: Model id from ``models``; unknown ids silently fall back
                to ``"gpt-3.5-turbo"``.
            messages: Conversation history forwarded to the API.
            auth: Optional pre-obtained auth code; when given it replaces
                the cached class-level code.
            proxy: Optional proxy URL used for every request.
            **kwargs: ``system_message`` may override the default prompt.

        Raises:
            RuntimeError: If the service answers with its invalid-session
                HTML page instead of a chat stream.
            aiohttp.ClientResponseError: On non-2xx HTTP responses.
        """
        model = model if model in models else "gpt-3.5-turbo"
        headers = {
            # NOTE(review): authority is "liaobots.com" while all requests
            # target liaobots.work and cls.url is liaobots.site — presumably
            # accepted by the server as-is; confirm before changing.
            "authority": "liaobots.com",
            "content-type": "application/json",
            "origin": cls.url,
            "referer": f"{cls.url}/",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
        }
        async with ClientSession(
            headers=headers,
            cookie_jar=cls._cookie_jar
        ) as session:
            # An explicit auth string takes precedence over the cached code.
            cls._auth_code = auth if isinstance(auth, str) else cls._auth_code
            if not cls._auth_code:
                # Step 1: login with a fixed placeholder captcha token
                # (apparently accepted by the endpoint — verify upstream).
                async with session.post(
                    "https://liaobots.work/recaptcha/api/login",
                    proxy=proxy,
                    data={"token": "abcdefghijklmnopqrst"},
                    verify_ssl=False
                ) as response:
                    response.raise_for_status()
                # Step 2: request a fresh auth code for this session.
                async with session.post(
                    "https://liaobots.work/api/user",
                    proxy=proxy,
                    json={"authcode": ""},
                    verify_ssl=False
                ) as response:
                    response.raise_for_status()
                    # content_type=None: server may not send application/json.
                    cls._auth_code = (await response.json(content_type=None))["authCode"]
                    # Keep the cookies alongside the code so it stays valid.
                    cls._cookie_jar = session.cookie_jar
            data = {
                "conversationId": str(uuid.uuid4()),
                "model": models[model],
                "messages": messages,
                "key": "",
                "prompt": kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully."),
            }
            async with session.post(
                "https://liaobots.work/api/chat",
                proxy=proxy,
                json=data,
                headers={"x-auth-code": cls._auth_code},
                verify_ssl=False
            ) as response:
                response.raise_for_status()
                # Stream whatever the server has flushed so far.
                async for chunk in response.content.iter_any():
                    # A stale/invalid session yields an HTML page, not text.
                    if b"<html coupert-item=" in chunk:
                        raise RuntimeError("Invalid session")
                    if chunk:
                        yield chunk.decode()