# GeminiProChat.py — async streaming chat provider for geminiprochat.com
  1. from __future__ import annotations
  2. import time
  3. from hashlib import sha256
  4. from aiohttp import ClientSession
  5. from ..typing import AsyncResult, Messages
  6. from .base_provider import AsyncGeneratorProvider
  7. class GeminiProChat(AsyncGeneratorProvider):
  8. url = "https://geminiprochat.com"
  9. working = True
  10. supports_gpt_35_turbo = True
  11. @classmethod
  12. async def create_async_generator(
  13. cls,
  14. model: str,
  15. messages: Messages,
  16. proxy: str = None,
  17. **kwargs
  18. ) -> AsyncResult:
  19. headers = {
  20. "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
  21. "Accept": "*/*",
  22. "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
  23. "Accept-Encoding": "gzip, deflate, br",
  24. "Content-Type": "text/plain;charset=UTF-8",
  25. "Referer": "https://geminiprochat.com/",
  26. "Origin": "https://geminiprochat.com",
  27. "Sec-Fetch-Dest": "empty",
  28. "Sec-Fetch-Mode": "cors",
  29. "Sec-Fetch-Site": "same-origin",
  30. "Connection": "keep-alive",
  31. "TE": "trailers",
  32. }
  33. async with ClientSession(headers=headers) as session:
  34. timestamp = int(time.time() * 1e3)
  35. data = {
  36. "messages":[{
  37. "role": "model" if message["role"] == "assistant" else "user",
  38. "parts": [{"text": message["content"]}]
  39. } for message in messages],
  40. "time": timestamp,
  41. "pass": None,
  42. "sign": generate_signature(timestamp, messages[-1]["content"]),
  43. }
  44. async with session.post(f"{cls.url}/api/generate", json=data, proxy=proxy) as response:
  45. response.raise_for_status()
  46. async for chunk in response.content.iter_any():
  47. yield chunk.decode()
  48. def generate_signature(time: int, text: str):
  49. message = f'{time}:{text}:9C4680FB-A4E1-6BC7-052A-7F68F9F5AD1F';
  50. return sha256(message.encode()).hexdigest()