# G4F.py
from __future__ import annotations
from aiohttp import ClientSession
import time
import random
import asyncio
from ...typing import AsyncResult, Messages
from ...providers.response import ImageResponse, Reasoning, JsonConversation
from ..helper import format_image_prompt, get_random_string
from .DeepseekAI_JanusPro7b import DeepseekAI_JanusPro7b, get_zerogpu_token
from .BlackForestLabs_Flux1Dev import BlackForestLabs_Flux1Dev
from .raise_for_status import raise_for_status
  12. class FluxDev(BlackForestLabs_Flux1Dev):
  13. url = "https://roxky-flux-1-dev.hf.space"
  14. space = "roxky/FLUX.1-dev"
  15. referer = f"{url}/?__theme=light"
  16. class G4F(DeepseekAI_JanusPro7b):
  17. label = "G4F framework"
  18. space = "roxky/Janus-Pro-7B"
  19. url = f"https://huggingface.co/spaces/roxky/g4f-space"
  20. api_url = "https://roxky-janus-pro-7b.hf.space"
  21. url_flux = "https://roxky-g4f-flux.hf.space/run/predict"
  22. referer = f"{api_url}?__theme=light"
  23. default_model = "flux"
  24. model_aliases = {"flux-schnell": default_model}
  25. image_models = [DeepseekAI_JanusPro7b.default_image_model, default_model, "flux-dev", *model_aliases.keys()]
  26. models = [DeepseekAI_JanusPro7b.default_model, *image_models]
  27. @classmethod
  28. async def create_async_generator(
  29. cls,
  30. model: str,
  31. messages: Messages,
  32. proxy: str = None,
  33. prompt: str = None,
  34. aspect_ratio: str = "1:1",
  35. width: int = None,
  36. height: int = None,
  37. seed: int = None,
  38. cookies: dict = None,
  39. api_key: str = None,
  40. zerogpu_uuid: str = "[object Object]",
  41. **kwargs
  42. ) -> AsyncResult:
  43. if model in ("flux", "flux-dev"):
  44. async for chunk in FluxDev.create_async_generator(
  45. model, messages,
  46. proxy=proxy,
  47. prompt=prompt,
  48. aspect_ratio=aspect_ratio,
  49. width=width,
  50. height=height,
  51. seed=seed,
  52. cookies=cookies,
  53. api_key=api_key,
  54. zerogpu_uuid=zerogpu_uuid,
  55. **kwargs
  56. ):
  57. yield chunk
  58. return
  59. if cls.default_model not in model:
  60. async for chunk in super().create_async_generator(
  61. model, messages,
  62. proxy=proxy,
  63. prompt=prompt,
  64. seed=seed,
  65. cookies=cookies,
  66. api_key=api_key,
  67. zerogpu_uuid=zerogpu_uuid,
  68. **kwargs
  69. ):
  70. yield chunk
  71. return
  72. model = cls.get_model(model)
  73. width = max(32, width - (width % 8))
  74. height = max(32, height - (height % 8))
  75. if prompt is None:
  76. prompt = format_image_prompt(messages)
  77. if seed is None:
  78. seed = random.randint(9999, 2**32 - 1)
  79. payload = {
  80. "data": [
  81. prompt,
  82. seed,
  83. width,
  84. height,
  85. True,
  86. 1
  87. ],
  88. "event_data": None,
  89. "fn_index": 3,
  90. "session_hash": get_random_string(),
  91. "trigger_id": 10
  92. }
  93. async with ClientSession() as session:
  94. if api_key is None:
  95. yield Reasoning(status="Acquiring GPU Token")
  96. zerogpu_uuid, api_key = await get_zerogpu_token(cls.space, session, JsonConversation(), cookies)
  97. headers = {
  98. "x-zerogpu-token": api_key,
  99. "x-zerogpu-uuid": zerogpu_uuid,
  100. }
  101. headers = {k: v for k, v in headers.items() if v is not None}
  102. async def generate():
  103. async with session.post(cls.url_flux, json=payload, proxy=proxy, headers=headers) as response:
  104. await raise_for_status(response)
  105. response_data = await response.json()
  106. image_url = response_data["data"][0]['url']
  107. return ImageResponse(image_url, alt=prompt)
  108. background_tasks = set()
  109. started = time.time()
  110. task = asyncio.create_task(generate())
  111. background_tasks.add(task)
  112. task.add_done_callback(background_tasks.discard)
  113. while background_tasks:
  114. yield Reasoning(status=f"Generating {time.time() - started:.2f}s")
  115. await asyncio.sleep(0.2)
  116. yield await task
  117. yield Reasoning(status=f"Finished {time.time() - started:.2f}s")