DDG.py

from __future__ import annotations

import asyncio
import json

from aiohttp import ClientSession, ClientTimeout, ClientError

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation
from .helper import format_prompt


class Conversation(BaseConversation):
    vqd: str = None
    message_history: Messages = []

    def __init__(self, model: str):
        self.model = model


class DDG(AsyncGeneratorProvider, ProviderModelMixin):
    label = "DuckDuckGo AI Chat"
    url = "https://duckduckgo.com/aichat"
    api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
    working = True
    needs_auth = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "gpt-4o-mini"
    models = [default_model, "claude-3-haiku-20240307", "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "mistralai/Mixtral-8x7B-Instruct-v0.1"]
    model_aliases = {
        "gpt-4": "gpt-4o-mini",
        "claude-3-haiku": "claude-3-haiku-20240307",
        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    }

    @classmethod
    async def fetch_vqd(cls, session: ClientSession) -> str:
        """
        Fetches the required VQD token for the chat session.

        Args:
            session (ClientSession): The active HTTP session.

        Returns:
            str: The VQD token.

        Raises:
            Exception: If the token cannot be fetched.
        """
        async with session.get("https://duckduckgo.com/duckchat/v1/status", headers={"x-vqd-accept": "1"}) as response:
            if response.status == 200:
                vqd = response.headers.get("x-vqd-4", "")
                if not vqd:
                    raise Exception("Failed to fetch VQD token: Empty token.")
                return vqd
            else:
                raise Exception(f"Failed to fetch VQD token: {response.status} {await response.text()}")

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        conversation: Conversation = None,
        return_conversation: bool = False,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)  # resolve aliases such as "gpt-4" -> "gpt-4o-mini"
        headers = {
            "Content-Type": "application/json",
        }
        async with ClientSession(headers=headers, timeout=ClientTimeout(total=30)) as session:
            # Reuse the conversation's VQD token, or fetch one for a new conversation
            if conversation is None:
                conversation = Conversation(model)
            if conversation.vqd is None:
                conversation.vqd = await cls.fetch_vqd(session)
            headers["x-vqd-4"] = conversation.vqd

            if return_conversation:
                yield conversation

            # Append the newest exchange to the stored message history
            if len(messages) >= 2:
                conversation.message_history.extend([messages[-2], messages[-1]])
            elif len(messages) == 1:
                conversation.message_history.append(messages[-1])

            payload = {
                "model": conversation.model,
                "messages": conversation.message_history,
            }

            try:
                async with session.post(cls.api_endpoint, headers=headers, json=payload, proxy=proxy) as response:
                    # The server rotates the VQD token on every response; keep the latest one
                    conversation.vqd = response.headers.get("x-vqd-4")
                    response.raise_for_status()
                    # Stream server-sent events and yield each message chunk as it arrives
                    async for line in response.content:
                        line = line.decode("utf-8").strip()
                        if line.startswith("data:"):
                            try:
                                message = json.loads(line[5:].strip())
                                if "message" in message:
                                    yield message["message"]
                            except json.JSONDecodeError:
                                continue
            except ClientError as e:
                raise Exception(f"HTTP ClientError occurred: {e}") from e
            except asyncio.TimeoutError:
                raise Exception("Request timed out.")
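

# Minimal usage sketch (not part of the provider itself). It assumes this file
# sits inside a g4f-style Provider package so the relative imports above resolve,
# and it is run as a module (e.g. `python -m ...`) rather than as a script.
# The `main` coroutine and the example prompt are illustrative only.
if __name__ == "__main__":
    async def main():
        messages = [{"role": "user", "content": "Hello, what can you do?"}]
        async for chunk in DDG.create_async_generator(model="gpt-4o-mini", messages=messages):
            # Conversation objects are only yielded when return_conversation=True,
            # so every chunk here is a plain text fragment of the streamed reply.
            print(chunk, end="", flush=True)

    asyncio.run(main())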