"""Async provider for the aiuncensored.info chat API."""
  1. from __future__ import annotations
  2. from aiohttp import ClientSession
  3. import time
  4. import hmac
  5. import hashlib
  6. import json
  7. import random
  8. from ..typing import AsyncResult, Messages
  9. from ..requests.raise_for_status import raise_for_status
  10. from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
  11. from .helper import format_prompt
  12. from ..providers.response import FinishReason
  13. class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin):
  14. url = "https://www.aiuncensored.info/ai_uncensored"
  15. api_key = "62852b00cb9e44bca86f0ec7e7455dc6"
  16. working = True
  17. supports_stream = True
  18. supports_system_message = True
  19. supports_message_history = True
  20. default_model = "hermes3-70b"
  21. models = [default_model]
  22. model_aliases = {"hermes-3": "hermes3-70b"}
  23. @staticmethod
  24. def calculate_signature(timestamp: str, json_dict: dict) -> str:
  25. message = f"{timestamp}{json.dumps(json_dict)}"
  26. secret_key = b'your-super-secret-key-replace-in-production'
  27. signature = hmac.new(
  28. secret_key,
  29. message.encode('utf-8'),
  30. hashlib.sha256
  31. ).hexdigest()
  32. return signature
  33. @staticmethod
  34. def get_server_url() -> str:
  35. servers = [
  36. "https://llm-server-nov24-ibak.onrender.com",
  37. "https://llm-server-nov24-qv2w.onrender.com",
  38. "https://llm-server-nov24.onrender.com"
  39. ]
  40. return random.choice(servers)
  41. @classmethod
  42. async def create_async_generator(
  43. cls,
  44. model: str,
  45. messages: Messages,
  46. stream: bool = False,
  47. proxy: str = None,
  48. api_key: str = None,
  49. **kwargs
  50. ) -> AsyncResult:
  51. model = cls.get_model(model)
  52. timestamp = str(int(time.time()))
  53. json_dict = {
  54. "messages": [{"role": "user", "content": format_prompt(messages)}],
  55. "model": model,
  56. "stream": stream
  57. }
  58. signature = cls.calculate_signature(timestamp, json_dict)
  59. headers = {
  60. 'accept': '*/*',
  61. 'accept-language': 'en-US,en;q=0.9',
  62. 'content-type': 'application/json',
  63. 'origin': 'https://www.aiuncensored.info',
  64. 'referer': 'https://www.aiuncensored.info/',
  65. 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
  66. 'x-api-key': cls.api_key,
  67. 'x-timestamp': timestamp,
  68. 'x-signature': signature
  69. }
  70. url = f"{cls.get_server_url()}/api/chat"
  71. async with ClientSession(headers=headers) as session:
  72. async with session.post(url, json=json_dict, proxy=proxy) as response:
  73. await raise_for_status(response)
  74. if stream:
  75. full_response = ""
  76. async for line in response.content:
  77. if line:
  78. try:
  79. line_text = line.decode('utf-8')
  80. if line_text.startswith(''):
  81. data = line_text[6:]
  82. if data == '[DONE]':
  83. yield FinishReason("stop")
  84. break
  85. try:
  86. json_data = json.loads(data)
  87. if 'data' in json_data:
  88. yield json_data['data']
  89. full_response += json_data['data']
  90. except json.JSONDecodeError:
  91. continue
  92. except UnicodeDecodeError:
  93. continue
  94. if full_response:
  95. yield FinishReason("length")
  96. else:
  97. response_json = await response.json()
  98. if 'content' in response_json:
  99. yield response_json['content']
  100. yield FinishReason("length")