# AutonomousAI.py
from __future__ import annotations

import base64
import json

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from ..requests.raise_for_status import raise_for_status
from ..providers.response import FinishReason
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
  9. class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
  10. url = "https://www.autonomous.ai/anon/"
  11. api_endpoints = {
  12. "llama": "https://chatgpt.autonomous.ai/api/v1/ai/chat",
  13. "qwen_coder": "https://chatgpt.autonomous.ai/api/v1/ai/chat",
  14. "hermes": "https://chatgpt.autonomous.ai/api/v1/ai/chat-hermes",
  15. "vision": "https://chatgpt.autonomous.ai/api/v1/ai/chat-vision",
  16. "summary": "https://chatgpt.autonomous.ai/api/v1/ai/summary"
  17. }
  18. working = True
  19. supports_stream = True
  20. supports_system_message = True
  21. supports_message_history = True
  22. default_model = "llama"
  23. models = [default_model, "qwen_coder", "hermes", "vision", "summary"]
  24. model_aliases = {
  25. "llama-3.3-70b": default_model,
  26. "qwen-2.5-coder-32b": "qwen_coder",
  27. "hermes-3": "hermes",
  28. "llama-3.2-90b": "vision",
  29. }
  30. @classmethod
  31. async def create_async_generator(
  32. cls,
  33. model: str,
  34. messages: Messages,
  35. proxy: str = None,
  36. stream: bool = False,
  37. **kwargs
  38. ) -> AsyncResult:
  39. api_endpoint = cls.api_endpoints[model]
  40. headers = {
  41. 'accept': '*/*',
  42. 'accept-language': 'en-US,en;q=0.9',
  43. 'content-type': 'application/json',
  44. 'country-code': 'US',
  45. 'origin': 'https://www.autonomous.ai',
  46. 'referer': 'https://www.autonomous.ai/',
  47. 'time-zone': 'America/New_York',
  48. 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
  49. }
  50. async with ClientSession(headers=headers) as session:
  51. message_json = json.dumps(messages)
  52. encoded_message = base64.b64encode(message_json.encode()).decode(errors="ignore")
  53. data = {
  54. "messages": encoded_message,
  55. "threadId": model,
  56. "stream": stream,
  57. "aiAgent": model
  58. }
  59. async with session.post(api_endpoint, json=data, proxy=proxy) as response:
  60. await raise_for_status(response)
  61. async for chunk in response.content:
  62. if chunk:
  63. chunk_str = chunk.decode()
  64. if chunk_str == "data: [DONE]":
  65. continue
  66. try:
  67. # Remove "data: " prefix and parse JSON
  68. chunk_data = json.loads(chunk_str.replace("data: ", ""))
  69. if "choices" in chunk_data and chunk_data["choices"]:
  70. delta = chunk_data["choices"][0].get("delta", {})
  71. if "content" in delta and delta["content"]:
  72. yield delta["content"]
  73. if "finish_reason" in chunk_data and chunk_data["finish_reason"]:
  74. yield FinishReason(chunk_data["finish_reason"])
  75. except json.JSONDecodeError:
  76. continue