# Ai4Chat.py — AI4Chat provider (https://www.ai4chat.co)
  1. from __future__ import annotations
  2. import json
  3. import re
  4. import logging
  5. from aiohttp import ClientSession
  6. from ...typing import AsyncResult, Messages
  7. from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
  8. from ..helper import format_prompt
# Module-level logger, named after this module per the standard logging convention.
logger = logging.getLogger(__name__)
  10. class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin):
  11. label = "AI4Chat"
  12. url = "https://www.ai4chat.co"
  13. api_endpoint = "https://www.ai4chat.co/generate-response"
  14. working = False
  15. supports_stream = True
  16. supports_system_message = True
  17. supports_message_history = True
  18. default_model = 'gpt-4'
  19. models = [default_model]
  20. model_aliases = {}
  21. @classmethod
  22. def get_model(cls, model: str) -> str:
  23. if model in cls.models:
  24. return model
  25. elif model in cls.model_aliases:
  26. return cls.model_aliases[model]
  27. else:
  28. return cls.default_model
  29. @classmethod
  30. async def create_async_generator(
  31. cls,
  32. model: str,
  33. messages: Messages,
  34. proxy: str = None,
  35. **kwargs
  36. ) -> AsyncResult:
  37. model = cls.get_model(model)
  38. headers = {
  39. "accept": "*/*",
  40. "accept-language": "en-US,en;q=0.9",
  41. "cache-control": "no-cache",
  42. "content-type": "application/json",
  43. "origin": "https://www.ai4chat.co",
  44. "pragma": "no-cache",
  45. "priority": "u=1, i",
  46. "referer": "https://www.ai4chat.co/gpt/talkdirtytome",
  47. "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
  48. "sec-ch-ua-mobile": "?0",
  49. "sec-ch-ua-platform": '"Linux"',
  50. "sec-fetch-dest": "empty",
  51. "sec-fetch-mode": "cors",
  52. "sec-fetch-site": "same-origin",
  53. "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
  54. }
  55. async with ClientSession(headers=headers) as session:
  56. data = {
  57. "messages": [
  58. {
  59. "role": "user",
  60. "content": format_prompt(messages)
  61. }
  62. ]
  63. }
  64. try:
  65. async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
  66. response.raise_for_status()
  67. result = await response.text()
  68. json_result = json.loads(result)
  69. message = json_result.get("message", "")
  70. clean_message = re.sub(r'<[^>]+>', '', message)
  71. yield clean_message
  72. except Exception as e:
  73. logger.exception("Error while calling AI 4Chat API: %s", e)
  74. yield f"Error: {e}"