# Liaobots.py
  1. from __future__ import annotations
  2. import uuid
  3. from aiohttp import ClientSession, BaseConnector
  4. from ..typing import AsyncResult, Messages
  5. from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
  6. from .helper import get_connector
  7. models = {
  8. "gpt-4": {
  9. "id": "gpt-4",
  10. "name": "GPT-4",
  11. "maxLength": 24000,
  12. "tokenLimit": 8000,
  13. },
  14. "gpt-4-0613": {
  15. "id": "gpt-4-0613",
  16. "name": "GPT-4",
  17. "maxLength": 32000,
  18. "tokenLimit": 8000,
  19. },
  20. "gpt-3.5-turbo": {
  21. "id": "gpt-3.5-turbo",
  22. "name": "GPT-3.5-Turbo",
  23. "maxLength": 48000,
  24. "tokenLimit": 14000,
  25. "context": "16K",
  26. },
  27. "gpt-3.5-turbo-16k": {
  28. "id": "gpt-3.5-turbo-16k",
  29. "name": "GPT-3.5-16k",
  30. "maxLength": 48000,
  31. "tokenLimit": 16000,
  32. },
  33. "gpt-4-1106-preview": {
  34. "id": "gpt-4-1106-preview",
  35. "name": "GPT-4-Turbo",
  36. "maxLength": 260000,
  37. "tokenLimit": 126000,
  38. "context": "128K",
  39. },
  40. "gpt-4-plus": {
  41. "id": "gpt-4-plus",
  42. "name": "GPT-4-Plus",
  43. "maxLength": 130000,
  44. "tokenLimit": 31000,
  45. "context": "32K",
  46. },
  47. "gemini-pro": {
  48. "id": "gemini-pro",
  49. "name": "Gemini-Pro",
  50. "maxLength": 120000,
  51. "tokenLimit": 30000,
  52. "context": "32K",
  53. },
  54. "claude-2": {
  55. "id": "claude-2",
  56. "name": "Claude-2-200k",
  57. "maxLength": 800000,
  58. "tokenLimit": 200000,
  59. "context": "200K",
  60. },
  61. "claude-instant-1": {
  62. "id": "claude-instant-1",
  63. "name": "Claude-instant-1",
  64. "maxLength": 400000,
  65. "tokenLimit": 100000,
  66. "context": "100K",
  67. }
  68. }
  69. class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
  70. url = "https://liaobots.site"
  71. working = True
  72. supports_message_history = True
  73. supports_gpt_35_turbo = True
  74. supports_gpt_4 = True
  75. default_model = "gpt-3.5-turbo"
  76. models = [m for m in models]
  77. model_aliases = {
  78. "claude-v2": "claude-2"
  79. }
  80. _auth_code = None
  81. _cookie_jar = None
  82. @classmethod
  83. async def create_async_generator(
  84. cls,
  85. model: str,
  86. messages: Messages,
  87. auth: str = None,
  88. proxy: str = None,
  89. connector: BaseConnector = None,
  90. **kwargs
  91. ) -> AsyncResult:
  92. headers = {
  93. "authority": "liaobots.com",
  94. "content-type": "application/json",
  95. "origin": cls.url,
  96. "referer": f"{cls.url}/",
  97. "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
  98. }
  99. async with ClientSession(
  100. headers=headers,
  101. cookie_jar=cls._cookie_jar,
  102. connector=get_connector(connector, proxy)
  103. ) as session:
  104. cls._auth_code = auth if isinstance(auth, str) else cls._auth_code
  105. if not cls._auth_code:
  106. async with session.post(
  107. "https://liaobots.work/recaptcha/api/login",
  108. proxy=proxy,
  109. data={"token": "abcdefghijklmnopqrst"},
  110. verify_ssl=False
  111. ) as response:
  112. response.raise_for_status()
  113. async with session.post(
  114. "https://liaobots.work/api/user",
  115. proxy=proxy,
  116. json={"authcode": ""},
  117. verify_ssl=False
  118. ) as response:
  119. response.raise_for_status()
  120. cls._auth_code = (await response.json(content_type=None))["authCode"]
  121. cls._cookie_jar = session.cookie_jar
  122. data = {
  123. "conversationId": str(uuid.uuid4()),
  124. "model": models[cls.get_model(model)],
  125. "messages": messages,
  126. "key": "",
  127. "prompt": kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully."),
  128. }
  129. async with session.post(
  130. "https://liaobots.work/api/chat",
  131. proxy=proxy,
  132. json=data,
  133. headers={"x-auth-code": cls._auth_code},
  134. verify_ssl=False
  135. ) as response:
  136. response.raise_for_status()
  137. async for chunk in response.content.iter_any():
  138. if b"<html coupert-item=" in chunk:
  139. raise RuntimeError("Invalid session")
  140. if chunk:
  141. yield chunk.decode()