# PollinationsAI.py
  1. from __future__ import annotations
  2. from urllib.parse import quote
  3. import random
  4. import requests
  5. from aiohttp import ClientSession
  6. from ..typing import AsyncResult, Messages
  7. from ..image import ImageResponse
  8. from ..requests.raise_for_status import raise_for_status
  9. from ..requests.aiohttp import get_connector
  10. from .needs_auth.OpenaiAPI import OpenaiAPI
  11. from .helper import format_prompt
  12. class PollinationsAI(OpenaiAPI):
  13. label = "Pollinations AI"
  14. url = "https://pollinations.ai"
  15. working = True
  16. needs_auth = False
  17. supports_stream = True
  18. default_model = "openai"
  19. additional_models_image = ["midjourney", "dall-e-3"]
  20. additional_models_text = ["sur", "sur-mistral", "claude"]
  21. model_aliases = {
  22. "gpt-4o": "openai",
  23. "mistral-nemo": "mistral",
  24. "llama-3.1-70b": "llama", #
  25. "gpt-4": "searchgpt",
  26. "gpt-4": "claude",
  27. "qwen-2.5-coder-32b": "qwen-coder",
  28. "claude-3.5-sonnet": "sur",
  29. }
  30. headers = {
  31. "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
  32. }
  33. @classmethod
  34. def get_models(cls):
  35. if not hasattr(cls, 'image_models'):
  36. cls.image_models = []
  37. if not cls.image_models:
  38. url = "https://image.pollinations.ai/models"
  39. response = requests.get(url, headers=cls.headers)
  40. raise_for_status(response)
  41. cls.image_models = response.json()
  42. cls.image_models.extend(cls.additional_models_image)
  43. if not hasattr(cls, 'models'):
  44. cls.models = []
  45. if not cls.models:
  46. url = "https://text.pollinations.ai/models"
  47. response = requests.get(url, headers=cls.headers)
  48. raise_for_status(response)
  49. cls.models = [model.get("name") for model in response.json()]
  50. cls.models.extend(cls.image_models)
  51. cls.models.extend(cls.additional_models_text)
  52. return cls.models
  53. @classmethod
  54. async def create_async_generator(
  55. cls,
  56. model: str,
  57. messages: Messages,
  58. prompt: str = None,
  59. api_base: str = "https://text.pollinations.ai/openai",
  60. api_key: str = None,
  61. proxy: str = None,
  62. seed: str = None,
  63. width: int = 1024,
  64. height: int = 1024,
  65. **kwargs
  66. ) -> AsyncResult:
  67. model = cls.get_model(model)
  68. if model in cls.image_models:
  69. async for response in cls._generate_image(model, messages, prompt, seed, width, height):
  70. yield response
  71. elif model in cls.models:
  72. async for response in cls._generate_text(model, messages, api_base, api_key, proxy, **kwargs):
  73. yield response
  74. else:
  75. raise ValueError(f"Unknown model: {model}")
  76. @classmethod
  77. async def _generate_image(cls, model: str, messages: Messages, prompt: str = None, seed: str = None, width: int = 1024, height: int = 1024):
  78. if prompt is None:
  79. prompt = messages[-1]["content"]
  80. if seed is None:
  81. seed = random.randint(0, 100000)
  82. image = f"https://image.pollinations.ai/prompt/{quote(prompt)}?width={width}&height={height}&seed={int(seed)}&nofeed=true&nologo=true&model={quote(model)}"
  83. yield ImageResponse(image, prompt)
  84. @classmethod
  85. async def _generate_text(cls, model: str, messages: Messages, api_base: str, api_key: str = None, proxy: str = None, **kwargs):
  86. if api_key is None:
  87. async with ClientSession(connector=get_connector(proxy=proxy), headers=cls.headers) as session:
  88. prompt = format_prompt(messages)
  89. async with session.get(f"https://text.pollinations.ai/{quote(prompt)}?model={quote(model)}") as response:
  90. await raise_for_status(response)
  91. async for line in response.content.iter_any():
  92. yield line.decode(errors="ignore")
  93. else:
  94. async for chunk in super().create_async_generator(
  95. model, messages, api_base=api_base, proxy=proxy, **kwargs
  96. ):
  97. yield chunk