from __future__ import annotations

import asyncio
import json
import random
import string

from aiohttp import ClientSession

from ..errors import ResponseStatusError
from ..providers.response import ImageResponse
from ..requests.raise_for_status import raise_for_status
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt, format_image_prompt
  13. class Websim(AsyncGeneratorProvider, ProviderModelMixin):
  14. url = "https://websim.ai"
  15. login_url = None
  16. chat_api_endpoint = "https://websim.ai/api/v1/inference/run_chat_completion"
  17. image_api_endpoint = "https://websim.ai/api/v1/inference/run_image_generation"
  18. working = True
  19. needs_auth = False
  20. use_nodriver = False
  21. supports_stream = False
  22. supports_system_message = True
  23. supports_message_history = True
  24. default_model = 'gemini-1.5-pro'
  25. default_image_model = 'flux'
  26. image_models = [default_image_model]
  27. models = [default_model, 'gemini-1.5-flash'] + image_models
  28. @staticmethod
  29. def generate_project_id(for_image=False):
  30. """
  31. Generate a project ID in the appropriate format
  32. For chat: format like 'ke3_xh5gai3gjkmruomu'
  33. For image: format like 'kx0m131_rzz66qb2xoy7'
  34. """
  35. chars = string.ascii_lowercase + string.digits
  36. if for_image:
  37. first_part = ''.join(random.choices(chars, k=7))
  38. second_part = ''.join(random.choices(chars, k=12))
  39. return f"{first_part}_{second_part}"
  40. else:
  41. prefix = ''.join(random.choices(chars, k=3))
  42. suffix = ''.join(random.choices(chars, k=15))
  43. return f"{prefix}_{suffix}"
  44. @classmethod
  45. async def create_async_generator(
  46. cls,
  47. model: str,
  48. messages: Messages,
  49. prompt: str = None,
  50. proxy: str = None,
  51. aspect_ratio: str = "1:1",
  52. project_id: str = None,
  53. **kwargs
  54. ) -> AsyncResult:
  55. is_image_request = model in cls.image_models
  56. if project_id is None:
  57. project_id = cls.generate_project_id(for_image=is_image_request)
  58. headers = {
  59. 'accept': '*/*',
  60. 'accept-language': 'en-US,en;q=0.9',
  61. 'content-type': 'text/plain;charset=UTF-8',
  62. 'origin': 'https://websim.ai',
  63. 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
  64. 'websim-flags;': ''
  65. }
  66. if is_image_request:
  67. headers['referer'] = 'https://websim.ai/@ISWEARIAMNOTADDICTEDTOPILLOW/ai-image-prompt-generator'
  68. async for result in cls._handle_image_request(
  69. project_id=project_id,
  70. messages=messages,
  71. prompt=prompt,
  72. aspect_ratio=aspect_ratio,
  73. headers=headers,
  74. proxy=proxy,
  75. **kwargs
  76. ):
  77. yield result
  78. else:
  79. headers['referer'] = 'https://websim.ai/@ISWEARIAMNOTADDICTEDTOPILLOW/zelos-ai-assistant'
  80. async for result in cls._handle_chat_request(
  81. project_id=project_id,
  82. messages=messages,
  83. headers=headers,
  84. proxy=proxy,
  85. **kwargs
  86. ):
  87. yield result
  88. @classmethod
  89. async def _handle_image_request(
  90. cls,
  91. project_id: str,
  92. messages: Messages,
  93. prompt: str,
  94. aspect_ratio: str,
  95. headers: dict,
  96. proxy: str = None,
  97. **kwargs
  98. ) -> AsyncResult:
  99. used_prompt = format_image_prompt(messages, prompt)
  100. async with ClientSession(headers=headers) as session:
  101. data = {
  102. "project_id": project_id,
  103. "prompt": used_prompt,
  104. "aspect_ratio": aspect_ratio
  105. }
  106. async with session.post(f"{cls.image_api_endpoint}", json=data, proxy=proxy) as response:
  107. await raise_for_status(response)
  108. response_text = await response.text()
  109. response_json = json.loads(response_text)
  110. image_url = response_json.get("url")
  111. if image_url:
  112. yield ImageResponse(urls=[image_url], alt=used_prompt)
  113. @classmethod
  114. async def _handle_chat_request(
  115. cls,
  116. project_id: str,
  117. messages: Messages,
  118. headers: dict,
  119. proxy: str = None,
  120. **kwargs
  121. ) -> AsyncResult:
  122. max_retries = 3
  123. retry_count = 0
  124. last_error = None
  125. while retry_count < max_retries:
  126. try:
  127. async with ClientSession(headers=headers) as session:
  128. data = {
  129. "project_id": project_id,
  130. "messages": messages
  131. }
  132. async with session.post(f"{cls.chat_api_endpoint}", json=data, proxy=proxy) as response:
  133. if response.status == 429:
  134. response_text = await response.text()
  135. last_error = ResponseStatusError(f"Response {response.status}: {response_text}")
  136. retry_count += 1
  137. if retry_count < max_retries:
  138. wait_time = 2 ** retry_count
  139. await asyncio.sleep(wait_time)
  140. continue
  141. else:
  142. raise last_error
  143. await raise_for_status(response)
  144. response_text = await response.text()
  145. try:
  146. response_json = json.loads(response_text)
  147. content = response_json.get("content", "")
  148. yield content.strip()
  149. break
  150. except json.JSONDecodeError:
  151. yield response_text
  152. break
  153. except ResponseStatusError as e:
  154. if "Rate limit exceeded" in str(e) and retry_count < max_retries:
  155. retry_count += 1
  156. wait_time = 2 ** retry_count
  157. await asyncio.sleep(wait_time)
  158. else:
  159. if retry_count >= max_retries:
  160. raise e
  161. else:
  162. raise
  163. except Exception as e:
  164. raise