# DarkAI.py
  1. from __future__ import annotations
  2. import json
  3. from aiohttp import ClientSession, ClientTimeout, StreamReader
  4. from ..typing import AsyncResult, Messages
  5. from ..requests.raise_for_status import raise_for_status
  6. from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
  7. from .helper import format_prompt
  8. class DarkAI(AsyncGeneratorProvider, ProviderModelMixin):
  9. url = "https://darkai.foundation/chat"
  10. api_endpoint = "https://darkai.foundation/chat"
  11. working = True
  12. supports_stream = True
  13. default_model = 'llama-3-70b'
  14. models = [
  15. 'gpt-4o',
  16. 'gpt-3.5-turbo',
  17. default_model,
  18. ]
  19. model_aliases = {
  20. "llama-3.1-70b": "llama-3-70b",
  21. }
  22. @classmethod
  23. async def create_async_generator(
  24. cls,
  25. model: str,
  26. messages: Messages,
  27. proxy: str = None,
  28. **kwargs
  29. ) -> AsyncResult:
  30. model = cls.get_model(model)
  31. headers = {
  32. "accept": "text/event-stream",
  33. "content-type": "application/json",
  34. "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
  35. }
  36. timeout = ClientTimeout(total=600) # Increase timeout to 10 minutes
  37. async with ClientSession(headers=headers, timeout=timeout) as session:
  38. prompt = format_prompt(messages)
  39. data = {
  40. "query": prompt,
  41. "model": model,
  42. }
  43. async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
  44. await raise_for_status(response)
  45. reader: StreamReader = response.content
  46. buffer = b""
  47. while True:
  48. chunk = await reader.read(1024) # Read in smaller chunks
  49. if not chunk:
  50. break
  51. buffer += chunk
  52. while b"\n" in buffer:
  53. line, buffer = buffer.split(b"\n", 1)
  54. line = line.strip()
  55. if line:
  56. try:
  57. line_str = line.decode()
  58. if line_str.startswith('data: '):
  59. chunk_data = json.loads(line_str[6:])
  60. if chunk_data['event'] == 'text-chunk':
  61. chunk = chunk_data['data']['text']
  62. yield chunk
  63. elif chunk_data['event'] == 'stream-end':
  64. return
  65. except json.JSONDecodeError:
  66. pass
  67. except Exception:
  68. pass