# Aura.py — g4f provider for openchat.team
  1. from __future__ import annotations
  2. from aiohttp import ClientSession
  3. from ...typing import AsyncResult, Messages
  4. from ..base_provider import AsyncGeneratorProvider
  5. from ...requests import get_args_from_browser
  6. from ...webdriver import WebDriver
  7. class Aura(AsyncGeneratorProvider):
  8. url = "https://openchat.team"
  9. working = False
  10. @classmethod
  11. async def create_async_generator(
  12. cls,
  13. model: str,
  14. messages: Messages,
  15. proxy: str = None,
  16. temperature: float = 0.5,
  17. max_tokens: int = 8192,
  18. webdriver: WebDriver = None,
  19. **kwargs
  20. ) -> AsyncResult:
  21. args = get_args_from_browser(cls.url, webdriver, proxy)
  22. async with ClientSession(**args) as session:
  23. new_messages = []
  24. system_message = []
  25. for message in messages:
  26. if message["role"] == "system":
  27. system_message.append(message["content"])
  28. else:
  29. new_messages.append(message)
  30. data = {
  31. "model": {
  32. "id": "openchat_3.6",
  33. "name": "OpenChat 3.6 (latest)",
  34. "maxLength": 24576,
  35. "tokenLimit": max_tokens
  36. },
  37. "messages": new_messages,
  38. "key": "",
  39. "prompt": "\n".join(system_message),
  40. "temperature": temperature
  41. }
  42. async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
  43. response.raise_for_status()
  44. async for chunk in response.content.iter_any():
  45. yield chunk.decode(error="ignore")