from __future__ import annotations

import time
import random
import string
import asyncio
import aiohttp
import base64
from typing import Union, AsyncIterator, Iterator, Awaitable, Optional

from ..image.copy_images import copy_media
from ..typing import Messages, ImageType
from ..providers.types import ProviderType, BaseRetryProvider
from ..providers.response import *
from ..errors import NoMediaResponseError
from ..providers.retry_provider import IterListProvider
from ..providers.asyncio import to_sync_generator
from ..Provider.needs_auth import BingCreateImages, OpenaiAccount
from ..tools.run_tools import async_iter_run_tools, iter_run_tools
from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse, UsageModel, ToolCallModel
from .models import ClientModels
from .types import IterResponse, ImageProvider, Client as BaseClient
from .service import get_model_and_provider, convert_to_provider
from .helper import find_stop, filter_json, filter_none, safe_aclose
from .. import debug

ChatCompletionResponseType = Iterator[Union[ChatCompletion, ChatCompletionChunk, BaseConversation]]
AsyncChatCompletionResponseType = AsyncIterator[Union[ChatCompletion, ChatCompletionChunk, BaseConversation]]

try:
    anext  # builtin since Python 3.10
except NameError:
    # Fallback for Python < 3.10
    async def anext(aiter):
        try:
            return await aiter.__anext__()
        except StopAsyncIteration:
            raise StopIteration

def add_chunk(content, chunk):
    """Append a chunk to the accumulated content.

    A media or audio response replaces empty content as-is; everything
    else is concatenated as text.
    """
    if content == "" and isinstance(chunk, (MediaResponse, AudioResponse)):
        content = chunk
    else:
        content = str(content) + str(chunk)
    return content

# Synchronous iter_response function
def iter_response(
    response: Iterator[Union[str, ResponseType]],
    stream: bool,
    response_format: Optional[dict] = None,
    max_tokens: Optional[int] = None,
    stop: Optional[list[str]] = None
) -> ChatCompletionResponseType:
    content = ""
    finish_reason = None
    tool_calls = None
    usage = None
    provider: Optional[ProviderInfo] = None
    conversation: Optional[JsonConversation] = None
    completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
    idx = 0
    if hasattr(response, '__aiter__'):
        response = to_sync_generator(response)
    for chunk in response:
        if isinstance(chunk, FinishReason):
            finish_reason = chunk.reason
            break
        elif isinstance(chunk, JsonConversation):
            conversation = chunk
            continue
        elif isinstance(chunk, ToolCalls):
            tool_calls = chunk.get_list()
            continue
        elif isinstance(chunk, Usage):
            usage = chunk
            continue
        elif isinstance(chunk, ProviderInfo):
            provider = chunk
            continue
        elif isinstance(chunk, BaseConversation):
            yield chunk
            continue
        elif isinstance(chunk, HiddenResponse):
            continue
        elif isinstance(chunk, Exception):
            continue
        content = add_chunk(content, chunk)
        if not content:
            continue
        idx += 1
        if max_tokens is not None and idx >= max_tokens:
            finish_reason = "length"
        first, content, chunk = find_stop(stop, content, chunk if stream else None)
        if first != -1:
            finish_reason = "stop"
        if stream:
            chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
            if provider is not None:
                chunk.provider = provider.name
                chunk.model = provider.model
            yield chunk
        if finish_reason is not None:
            break
    if usage is None:
        usage = UsageModel.model_construct(completion_tokens=idx, total_tokens=idx)
    else:
        usage = UsageModel.model_construct(**usage.get_dict())
    finish_reason = "stop" if finish_reason is None else finish_reason
    if stream:
        chat_completion = ChatCompletionChunk.model_construct(
            None, finish_reason, completion_id, int(time.time()), usage=usage
        )
    else:
        if response_format is not None and "type" in response_format:
            if response_format["type"] == "json_object":
                content = filter_json(content)
        chat_completion = ChatCompletion.model_construct(
            content, finish_reason, completion_id, int(time.time()), usage=usage,
            **filter_none(tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]) if tool_calls is not None else {},
            conversation=None if conversation is None else conversation.get_dict()
        )
    if provider is not None:
        chat_completion.provider = provider.name
        chat_completion.model = provider.model
    yield chat_completion

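# Example (illustrative sketch, not executed here): iter_response turns a
# raw provider chunk stream into OpenAI-style objects. The literal chunks
# below are hypothetical stand-ins for real provider output.
#
#     chunks = iter(["Hello", ", ", "world"])
#     completion = next(iter_response(chunks, stream=False))
#     print(completion.choices[0].message.content)  # "Hello, world"
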
# Synchronous iter_append_model_and_provider function
def iter_append_model_and_provider(response: ChatCompletionResponseType, last_model: str, last_provider: ProviderType) -> ChatCompletionResponseType:
    if isinstance(last_provider, BaseRetryProvider):
        yield from response
        return
    for chunk in response:
        if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
            if chunk.provider is None and last_provider is not None:
                chunk.model = getattr(last_provider, "last_model", last_model)
                chunk.provider = last_provider.__name__
        yield chunk

async def async_iter_response(
    response: AsyncIterator[Union[str, ResponseType]],
    stream: bool,
    response_format: Optional[dict] = None,
    max_tokens: Optional[int] = None,
    stop: Optional[list[str]] = None
) -> AsyncChatCompletionResponseType:
    content = ""
    finish_reason = None
    completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
    idx = 0
    tool_calls = None
    usage = None
    provider: Optional[ProviderInfo] = None
    conversation: Optional[JsonConversation] = None
    try:
        async for chunk in response:
            if isinstance(chunk, FinishReason):
                finish_reason = chunk.reason
                break
            elif isinstance(chunk, JsonConversation):
                conversation = chunk
                continue
            elif isinstance(chunk, ToolCalls):
                tool_calls = chunk.get_list()
                continue
            elif isinstance(chunk, Usage):
                usage = chunk
                continue
            elif isinstance(chunk, ProviderInfo):
                provider = chunk
                continue
            elif isinstance(chunk, HiddenResponse):
                continue
            elif isinstance(chunk, Exception):
                continue
            content = add_chunk(content, chunk)
            if not content:
                continue
            idx += 1
            if max_tokens is not None and idx >= max_tokens:
                finish_reason = "length"
            first, content, chunk = find_stop(stop, content, chunk if stream else None)
            if first != -1:
                finish_reason = "stop"
            if stream:
                chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
                if provider is not None:
                    chunk.provider = provider.name
                    chunk.model = provider.model
                yield chunk
            if finish_reason is not None:
                break
        finish_reason = "stop" if finish_reason is None else finish_reason
        if usage is None:
            usage = UsageModel.model_construct(completion_tokens=idx, total_tokens=idx)
        else:
            usage = UsageModel.model_construct(**usage.get_dict())
        if stream:
            chat_completion = ChatCompletionChunk.model_construct(
                None, finish_reason, completion_id, int(time.time()), usage=usage
            )
        else:
            if response_format is not None and "type" in response_format:
                if response_format["type"] == "json_object":
                    content = filter_json(content)
            chat_completion = ChatCompletion.model_construct(
                content, finish_reason, completion_id, int(time.time()), usage=usage,
                **filter_none(
                    tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]
                ) if tool_calls is not None else {},
                conversation=None if conversation is None else conversation.get_dict()
            )
        if provider is not None:
            chat_completion.provider = provider.name
            chat_completion.model = provider.model
        yield chat_completion
    finally:
        await safe_aclose(response)

async def async_iter_append_model_and_provider(
    response: AsyncChatCompletionResponseType,
    last_model: str,
    last_provider: ProviderType
) -> AsyncChatCompletionResponseType:
    try:
        if isinstance(last_provider, BaseRetryProvider):
            async for chunk in response:
                yield chunk
            return
        async for chunk in response:
            if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
                if chunk.provider is None and last_provider is not None:
                    chunk.model = getattr(last_provider, "last_model", last_model)
                    chunk.provider = last_provider.__name__
            yield chunk
    finally:
        await safe_aclose(response)

class Client(BaseClient):
    def __init__(
        self,
        provider: Optional[ProviderType] = None,
        image_provider: Optional[ImageProvider] = None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.chat: Chat = Chat(self, provider)
        if image_provider is None:
            image_provider = provider
        self.models: ClientModels = ClientModels(self, provider, image_provider)
        self.images: Images = Images(self, image_provider)
        self.media: Images = self.images

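# Example usage (a minimal sketch; the model name below is illustrative
# and actual availability depends on the configured providers):
#
#     client = Client()
#     completion = client.chat.completions.create(
#         messages=[{"role": "user", "content": "Hello"}],
#         model="gpt-4o-mini",
#     )
#     print(completion.choices[0].message.content)
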
class Completions:
    def __init__(self, client: Client, provider: Optional[ProviderType] = None):
        self.client: Client = client
        self.provider: ProviderType = provider

    def create(
        self,
        messages: Messages,
        model: str = "",
        provider: Optional[ProviderType] = None,
        stream: Optional[bool] = False,
        proxy: Optional[str] = None,
        image: Optional[ImageType] = None,
        image_name: Optional[str] = None,
        response_format: Optional[dict] = None,
        max_tokens: Optional[int] = None,
        stop: Optional[Union[list[str], str]] = None,
        api_key: Optional[str] = None,
        ignore_working: Optional[bool] = False,
        ignore_stream: Optional[bool] = False,
        **kwargs
    ) -> ChatCompletion:
        if isinstance(messages, str):
            messages = [{"role": "user", "content": messages}]
        if image is not None:
            kwargs["media"] = [(image, image_name)]
        elif "images" in kwargs:
            kwargs["media"] = kwargs.pop("images")
        model, provider = get_model_and_provider(
            model,
            self.provider if provider is None else provider,
            stream,
            ignore_working,
            ignore_stream,
            has_images="media" in kwargs
        )
        stop = [stop] if isinstance(stop, str) else stop
        if ignore_stream:
            kwargs["ignore_stream"] = True
        response = iter_run_tools(
            provider.get_create_function(),
            model=model,
            messages=messages,
            stream=stream,
            **filter_none(
                proxy=self.client.proxy if proxy is None else proxy,
                max_tokens=max_tokens,
                stop=stop,
                api_key=self.client.api_key if api_key is None else api_key
            ),
            **kwargs
        )
        response = iter_response(response, stream, response_format, max_tokens, stop)
        response = iter_append_model_and_provider(response, model, provider)
        if stream:
            return response
        else:
            return next(response)

    def stream(
        self,
        messages: Messages,
        model: str = "",
        **kwargs
    ) -> IterResponse:
        return self.create(messages, model, stream=True, **kwargs)

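# Example (sketch): with stream=True, create() returns an iterator of
# ChatCompletionChunk objects; the incremental text arrives on the
# OpenAI-style delta field.
#
#     for chunk in client.chat.completions.stream(
#         [{"role": "user", "content": "Tell me a story"}]
#     ):
#         if chunk.choices[0].delta.content:
#             print(chunk.choices[0].delta.content, end="")
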
class Chat:
    completions: Completions

    def __init__(self, client: Client, provider: Optional[ProviderType] = None):
        self.completions = Completions(client, provider)

class Images:
    def __init__(self, client: Client, provider: Optional[ProviderType] = None):
        self.client: Client = client
        self.provider: Optional[ProviderType] = provider

    def generate(
        self,
        prompt: str,
        model: Optional[str] = None,
        provider: Optional[ProviderType] = None,
        response_format: Optional[str] = None,
        proxy: Optional[str] = None,
        **kwargs
    ) -> ImagesResponse:
        """
        Synchronous generate method that runs the async_generate method in an event loop.
        """
        return asyncio.run(self.async_generate(prompt, model, provider, response_format, proxy, **kwargs))

    async def get_provider_handler(self, model: Optional[str], provider: Optional[ImageProvider], default: ImageProvider) -> ImageProvider:
        if provider is None:
            provider_handler = self.provider
            if provider_handler is None:
                provider_handler = self.client.models.get(model, default)
        elif isinstance(provider, str):
            provider_handler = convert_to_provider(provider)
        else:
            provider_handler = provider
        if provider_handler is None:
            return default
        return provider_handler

    async def async_generate(
        self,
        prompt: str,
        model: Optional[str] = None,
        provider: Optional[ProviderType] = None,
        response_format: Optional[str] = None,
        proxy: Optional[str] = None,
        api_key: Optional[str] = None,
        **kwargs
    ) -> ImagesResponse:
        provider_handler = await self.get_provider_handler(model, provider, BingCreateImages)
        provider_name = provider_handler.__name__ if hasattr(provider_handler, "__name__") else type(provider_handler).__name__
        if proxy is None:
            proxy = self.client.proxy
        if api_key is None:
            api_key = self.client.api_key
        error = None
        response = None
        if isinstance(provider_handler, IterListProvider):
            for provider in provider_handler.providers:
                try:
                    response = await self._generate_image_response(provider, provider.__name__, model, prompt, proxy=proxy, **kwargs)
                    if response is not None:
                        provider_name = provider.__name__
                        break
                except Exception as e:
                    error = e
                    debug.error(f"{provider.__name__} {type(e).__name__}: {e}")
        else:
            response = await self._generate_image_response(provider_handler, provider_name, model, prompt, proxy=proxy, api_key=api_key, **kwargs)
        if isinstance(response, MediaResponse):
            return await self._process_image_response(
                response,
                model,
                provider_name,
                response_format,
                proxy
            )
        if response is None:
            if error is not None:
                raise error
            raise NoMediaResponseError(f"No image response from {provider_name}")
        raise NoMediaResponseError(f"Unexpected response type: {type(response)}")

    async def _generate_image_response(
        self,
        provider_handler,
        provider_name,
        model: str,
        prompt: str,
        prompt_prefix: str = "Generate an image: ",
        **kwargs
    ) -> Optional[MediaResponse]:
        messages = [{"role": "user", "content": f"{prompt_prefix}{prompt}"}]
        items: list[MediaResponse] = []
        if hasattr(provider_handler, "create_async_generator"):
            async for item in provider_handler.create_async_generator(
                model,
                messages,
                stream=True,
                prompt=prompt,
                **kwargs
            ):
                if isinstance(item, MediaResponse):
                    items.append(item)
        elif hasattr(provider_handler, "create_completion"):
            for item in provider_handler.create_completion(
                model,
                messages,
                True,
                prompt=prompt,
                **kwargs
            ):
                if isinstance(item, MediaResponse):
                    items.append(item)
        else:
            raise ValueError(f"Provider {provider_name} does not support image generation")
        urls = []
        for item in items:
            if isinstance(item.urls, str):
                urls.append(item.urls)
            elif isinstance(item.urls, list):
                urls.extend(item.urls)
        if not urls:
            return None
        return MediaResponse(urls, items[0].alt, items[0].options)

    def create_variation(
        self,
        image: ImageType,
        model: Optional[str] = None,
        provider: Optional[ProviderType] = None,
        response_format: Optional[str] = None,
        **kwargs
    ) -> ImagesResponse:
        return asyncio.run(self.async_create_variation(
            image, model, provider, response_format, **kwargs
        ))

    async def async_create_variation(
        self,
        image: ImageType,
        model: Optional[str] = None,
        provider: Optional[ProviderType] = None,
        response_format: Optional[str] = None,
        proxy: Optional[str] = None,
        **kwargs
    ) -> ImagesResponse:
        provider_handler = await self.get_provider_handler(model, provider, OpenaiAccount)
        provider_name = provider_handler.__name__ if hasattr(provider_handler, "__name__") else type(provider_handler).__name__
        if proxy is None:
            proxy = self.client.proxy
        prompt = "create a variation of this image"
        if image is not None:
            kwargs["media"] = [(image, None)]
        error = None
        response = None
        if isinstance(provider_handler, IterListProvider):
            for provider in provider_handler.providers:
                try:
                    response = await self._generate_image_response(provider, provider.__name__, model, prompt, **kwargs)
                    if response is not None:
                        provider_name = provider.__name__
                        break
                except Exception as e:
                    error = e
                    debug.error(f"{provider.__name__} {type(e).__name__}: {e}")
        else:
            response = await self._generate_image_response(provider_handler, provider_name, model, prompt, **kwargs)
        if isinstance(response, MediaResponse):
            return await self._process_image_response(response, model, provider_name, response_format, proxy)
        if response is None:
            if error is not None:
                raise error
            raise NoMediaResponseError(f"No image response from {provider_name}")
        raise NoMediaResponseError(f"Unexpected response type: {type(response)}")

    async def _process_image_response(
        self,
        response: MediaResponse,
        model: str,
        provider: str,
        response_format: Optional[str] = None,
        proxy: Optional[str] = None
    ) -> ImagesResponse:
        if response_format == "url":
            # Return original URLs without saving locally
            images = [Image.model_construct(url=image, revised_prompt=response.alt) for image in response.get_list()]
        elif response_format == "b64_json":
            # Convert URLs directly to base64 without saving
            async def get_b64_from_url(url: str) -> Image:
                async with aiohttp.ClientSession(cookies=response.get("cookies")) as session:
                    async with session.get(url, proxy=proxy) as resp:
                        if resp.status == 200:
                            image_data = await resp.read()
                            b64_data = base64.b64encode(image_data).decode()
                            return Image.model_construct(b64_json=b64_data, revised_prompt=response.alt)
            images = await asyncio.gather(*[get_b64_from_url(image) for image in response.get_list()])
        else:
            # Save locally for None (default) case
            images = await copy_media(response.get_list(), response.get("cookies"), response.get("headers"), proxy, response.alt)
            images = [Image.model_construct(url=image, revised_prompt=response.alt) for image in images]
        return ImagesResponse.model_construct(
            created=int(time.time()),
            data=images,
            model=model,
            provider=provider
        )

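# Example (sketch): the three response_format modes handled above.
# "url" passes provider URLs through untouched, "b64_json" inlines the
# downloaded bytes, and the default (None) saves media locally first.
#
#     images = client.images.generate("a red apple", response_format="url")
#     print(images.data[0].url)
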
class AsyncClient(BaseClient):
    def __init__(
        self,
        provider: Optional[ProviderType] = None,
        image_provider: Optional[ImageProvider] = None,
        **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.chat: AsyncChat = AsyncChat(self, provider)
        if image_provider is None:
            image_provider = provider
        self.models: ClientModels = ClientModels(self, provider, image_provider)
        self.images: AsyncImages = AsyncImages(self, image_provider)
        self.media: AsyncImages = self.images

class AsyncChat:
    completions: AsyncCompletions

    def __init__(self, client: AsyncClient, provider: Optional[ProviderType] = None):
        self.completions = AsyncCompletions(client, provider)

class AsyncCompletions:
    def __init__(self, client: AsyncClient, provider: Optional[ProviderType] = None):
        self.client: AsyncClient = client
        self.provider: ProviderType = provider

    def create(
        self,
        messages: Messages,
        model: str = "",
        provider: Optional[ProviderType] = None,
        stream: Optional[bool] = False,
        proxy: Optional[str] = None,
        image: Optional[ImageType] = None,
        image_name: Optional[str] = None,
        response_format: Optional[dict] = None,
        max_tokens: Optional[int] = None,
        stop: Optional[Union[list[str], str]] = None,
        api_key: Optional[str] = None,
        ignore_working: Optional[bool] = False,
        ignore_stream: Optional[bool] = False,
        **kwargs
    ) -> Awaitable[ChatCompletion]:
        if isinstance(messages, str):
            messages = [{"role": "user", "content": messages}]
        if image is not None:
            kwargs["media"] = [(image, image_name)]
        elif "images" in kwargs:
            kwargs["media"] = kwargs.pop("images")
        model, provider = get_model_and_provider(
            model,
            self.provider if provider is None else provider,
            stream,
            ignore_working,
            ignore_stream,
            has_images="media" in kwargs,
        )
        stop = [stop] if isinstance(stop, str) else stop
        if ignore_stream:
            kwargs["ignore_stream"] = True
        response = async_iter_run_tools(
            provider,
            model=model,
            messages=messages,
            stream=stream,
            **filter_none(
                proxy=self.client.proxy if proxy is None else proxy,
                max_tokens=max_tokens,
                stop=stop,
                api_key=self.client.api_key if api_key is None else api_key
            ),
            **kwargs
        )
        response = async_iter_response(response, stream, response_format, max_tokens, stop)
        response = async_iter_append_model_and_provider(response, model, provider)
        if stream:
            return response
        else:
            return anext(response)

    def stream(
        self,
        messages: Messages,
        model: str = "",
        **kwargs
    ) -> AsyncIterator[ChatCompletionChunk]:
        return self.create(messages, model, stream=True, **kwargs)

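# Example (sketch, assuming the caller drives it with asyncio.run):
#
#     async def main():
#         client = AsyncClient()
#         async for chunk in client.chat.completions.stream(
#             [{"role": "user", "content": "Hi"}]
#         ):
#             if chunk.choices[0].delta.content:
#                 print(chunk.choices[0].delta.content, end="")
#
#     asyncio.run(main())
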
class AsyncImages(Images):
    def __init__(self, client: AsyncClient, provider: Optional[ProviderType] = None):
        self.client: AsyncClient = client
        self.provider: Optional[ProviderType] = provider

    async def generate(
        self,
        prompt: str,
        model: Optional[str] = None,
        provider: Optional[ProviderType] = None,
        response_format: Optional[str] = None,
        **kwargs
    ) -> ImagesResponse:
        return await self.async_generate(prompt, model, provider, response_format, **kwargs)

    async def create_variation(
        self,
        image: ImageType,
        model: Optional[str] = None,
        provider: Optional[ProviderType] = None,
        response_format: Optional[str] = None,
        **kwargs
    ) -> ImagesResponse:
        return await self.async_create_variation(
            image, model, provider, response_format, **kwargs
        )
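
# Example (sketch): the async image API mirrors the synchronous Images
# interface; response_format="b64_json" is one of the modes handled in
# Images._process_image_response above.
#
#     async def main():
#         client = AsyncClient()
#         response = await client.images.generate(
#             "a watercolor landscape", response_format="b64_json"
#         )
#         print(response.data[0].b64_json[:32])
#
#     asyncio.run(main())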