GPROChat.py

from __future__ import annotations

import hashlib
import time
from aiohttp import ClientSession

from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt


class GPROChat(AsyncGeneratorProvider, ProviderModelMixin):
    label = "GPROChat"
    url = "https://gprochat.com"
    api_endpoint = "https://gprochat.com/api/generate"
    working = False
    supports_stream = True
    supports_message_history = True
    default_model = 'gemini-pro'

    @staticmethod
    def generate_signature(timestamp: int, message: str) -> str:
        # The API expects a SHA-256 hex digest of "<timestamp>:<message>:<secret_key>".
        secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8"
        hash_input = f"{timestamp}:{message}:{secret_key}"
        signature = hashlib.sha256(hash_input.encode('utf-8')).hexdigest()
        return signature

    @classmethod
    def get_model(cls, model: str) -> str:
        # Resolve the requested model name, falling back to the default model.
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        timestamp = int(time.time() * 1000)
        prompt = format_prompt(messages)
        sign = cls.generate_signature(timestamp, prompt)

        headers = {
            "accept": "*/*",
            "origin": cls.url,
            "referer": f"{cls.url}/",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
            "content-type": "text/plain;charset=UTF-8"
        }

        data = {
            "messages": [{"role": "user", "parts": [{"text": prompt}]}],
            "time": timestamp,
            "pass": None,
            "sign": sign
        }

        async with ClientSession(headers=headers) as session:
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                # Stream the response body and yield decoded text chunks as they arrive.
                async for chunk in response.content.iter_any():
                    if chunk:
                        yield chunk.decode()
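

# Example usage (a minimal sketch, assuming this provider is used from within the
# g4f package it ships with; the exact import path may vary between versions, and
# `working = False` above means g4f's automatic provider selection will skip it):
#
#     import asyncio
#     from g4f.Provider import GPROChat
#
#     async def main():
#         messages = [{"role": "user", "content": "Hello, how are you?"}]
#         async for chunk in GPROChat.create_async_generator("gemini-pro", messages):
#             print(chunk, end="", flush=True)
#
#     asyncio.run(main())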