# G4F.py
from __future__ import annotations

import asyncio
import random
import time

from aiohttp import ClientSession

from ...typing import AsyncResult, Messages
from ...providers.response import ImageResponse, Reasoning, JsonConversation
from ..helper import format_image_prompt, get_random_string
from .Janus_Pro_7B import Janus_Pro_7B, get_zerogpu_token
from .BlackForestLabsFlux1Dev import BlackForestLabsFlux1Dev
from .raise_for_status import raise_for_status
  12. class FluxDev(BlackForestLabsFlux1Dev):
  13. url = "https://roxky-flux-1-dev.hf.space"
  14. space = "roxky/FLUX.1-dev"
  15. referer = f"{url}/?__theme=light"
  16. class G4F(Janus_Pro_7B):
  17. label = "G4F framework"
  18. space = "roxky/Janus-Pro-7B"
  19. url = f"https://huggingface.co/spaces/roxky/g4f-space"
  20. api_url = "https://roxky-janus-pro-7b.hf.space"
  21. url_flux = "https://roxky-g4f-flux.hf.space/run/predict"
  22. referer = f"{api_url}?__theme=light"
  23. default_model = "flux"
  24. model_aliases = {"flux-schnell": default_model}
  25. image_models = [Janus_Pro_7B.default_image_model, default_model, "flux-dev", *model_aliases.keys()]
  26. models = [Janus_Pro_7B.default_model, *image_models]
  27. @classmethod
  28. async def create_async_generator(
  29. cls,
  30. model: str,
  31. messages: Messages,
  32. proxy: str = None,
  33. prompt: str = None,
  34. width: int = 1024,
  35. height: int = 1024,
  36. seed: int = None,
  37. cookies: dict = None,
  38. api_key: str = None,
  39. zerogpu_uuid: str = "[object Object]",
  40. **kwargs
  41. ) -> AsyncResult:
  42. if model in ("flux", "flux-dev"):
  43. async for chunk in FluxDev.create_async_generator(
  44. model, messages,
  45. proxy=proxy,
  46. prompt=prompt,
  47. width=width,
  48. height=height,
  49. seed=seed,
  50. cookies=cookies,
  51. api_key=api_key,
  52. zerogpu_uuid=zerogpu_uuid,
  53. **kwargs
  54. ):
  55. yield chunk
  56. return
  57. if cls.default_model not in model:
  58. async for chunk in super().create_async_generator(
  59. model, messages,
  60. proxy=proxy,
  61. prompt=prompt,
  62. seed=seed,
  63. cookies=cookies,
  64. api_key=api_key,
  65. zerogpu_uuid=zerogpu_uuid,
  66. **kwargs
  67. ):
  68. yield chunk
  69. return
  70. model = cls.get_model(model)
  71. width = max(32, width - (width % 8))
  72. height = max(32, height - (height % 8))
  73. if prompt is None:
  74. prompt = format_image_prompt(messages)
  75. if seed is None:
  76. seed = random.randint(9999, 2**32 - 1)
  77. payload = {
  78. "data": [
  79. prompt,
  80. seed,
  81. width,
  82. height,
  83. True,
  84. 1
  85. ],
  86. "event_data": None,
  87. "fn_index": 3,
  88. "session_hash": get_random_string(),
  89. "trigger_id": 10
  90. }
  91. async with ClientSession() as session:
  92. if api_key is None:
  93. yield Reasoning(status="Acquiring GPU Token")
  94. zerogpu_uuid, api_key = await get_zerogpu_token(cls.space, session, JsonConversation(), cookies)
  95. headers = {
  96. "x-zerogpu-token": api_key,
  97. "x-zerogpu-uuid": zerogpu_uuid,
  98. }
  99. headers = {k: v for k, v in headers.items() if v is not None}
  100. async def generate():
  101. async with session.post(cls.url_flux, json=payload, proxy=proxy, headers=headers) as response:
  102. await raise_for_status(response)
  103. response_data = await response.json()
  104. image_url = response_data["data"][0]['url']
  105. return ImageResponse(image_url, alt=prompt)
  106. background_tasks = set()
  107. started = time.time()
  108. task = asyncio.create_task(generate())
  109. background_tasks.add(task)
  110. task.add_done_callback(background_tasks.discard)
  111. while background_tasks:
  112. yield Reasoning(status=f"Generating {time.time() - started:.2f}s")
  113. await asyncio.sleep(0.2)
  114. yield await task
  115. yield Reasoning(status=f"Finished {time.time() - started:.2f}s")