# retry_provider.py
  1. from __future__ import annotations
  2. import random
  3. from ..typing import Type, List, CreateResult, Messages, AsyncResult
  4. from .types import BaseProvider, BaseRetryProvider, ProviderType
  5. from .response import ImageResponse, ProviderInfo
  6. from .. import debug
  7. from ..errors import RetryProviderError, RetryNoProviderError
  8. class IterListProvider(BaseRetryProvider):
  9. def __init__(
  10. self,
  11. providers: List[Type[BaseProvider]],
  12. shuffle: bool = True
  13. ) -> None:
  14. """
  15. Initialize the BaseRetryProvider.
  16. Args:
  17. providers (List[Type[BaseProvider]]): List of providers to use.
  18. shuffle (bool): Whether to shuffle the providers list.
  19. single_provider_retry (bool): Whether to retry a single provider if it fails.
  20. max_retries (int): Maximum number of retries for a single provider.
  21. """
  22. self.providers = providers
  23. self.shuffle = shuffle
  24. self.working = True
  25. self.last_provider: Type[BaseProvider] = None
  26. def create_completion(
  27. self,
  28. model: str,
  29. messages: Messages,
  30. stream: bool = False,
  31. ignore_stream: bool = False,
  32. ignored: list[str] = [],
  33. **kwargs,
  34. ) -> CreateResult:
  35. """
  36. Create a completion using available providers, with an option to stream the response.
  37. Args:
  38. model (str): The model to be used for completion.
  39. messages (Messages): The messages to be used for generating completion.
  40. stream (bool, optional): Flag to indicate if the response should be streamed. Defaults to False.
  41. Yields:
  42. CreateResult: Tokens or results from the completion.
  43. Raises:
  44. Exception: Any exception encountered during the completion process.
  45. """
  46. exceptions = {}
  47. started: bool = False
  48. for provider in self.get_providers(stream and not ignore_stream, ignored):
  49. self.last_provider = provider
  50. debug.log(f"Using {provider.__name__} provider")
  51. yield ProviderInfo(**provider.get_dict(), model=model if model else getattr(provider, "default_model"))
  52. try:
  53. response = provider.get_create_function()(model, messages, stream=stream, **kwargs)
  54. for chunk in response:
  55. if chunk:
  56. yield chunk
  57. if isinstance(chunk, str) or isinstance(chunk, ImageResponse):
  58. started = True
  59. if started:
  60. return
  61. except Exception as e:
  62. exceptions[provider.__name__] = e
  63. debug.log(f"{provider.__name__}: {e.__class__.__name__}: {e}")
  64. if started:
  65. raise e
  66. yield e
  67. raise_exceptions(exceptions)
  68. async def create_async_generator(
  69. self,
  70. model: str,
  71. messages: Messages,
  72. stream: bool = True,
  73. ignore_stream: bool = False,
  74. ignored: list[str] = [],
  75. **kwargs
  76. ) -> AsyncResult:
  77. exceptions = {}
  78. started: bool = False
  79. for provider in self.get_providers(stream and not ignore_stream, ignored):
  80. self.last_provider = provider
  81. debug.log(f"Using {provider.__name__} provider")
  82. yield ProviderInfo(**provider.get_dict())
  83. try:
  84. response = provider.get_async_create_function()(model, messages, stream=stream, **kwargs)
  85. if hasattr(response, "__aiter__"):
  86. async for chunk in response:
  87. if chunk:
  88. yield chunk
  89. if isinstance(chunk, str) or isinstance(chunk, ImageResponse):
  90. started = True
  91. elif response:
  92. response = await response
  93. if response:
  94. yield response
  95. started = True
  96. if started:
  97. return
  98. except Exception as e:
  99. exceptions[provider.__name__] = e
  100. debug.log(f"{provider.__name__}: {e.__class__.__name__}: {e}")
  101. if started:
  102. raise e
  103. yield e
  104. raise_exceptions(exceptions)
  105. def get_create_function(self) -> callable:
  106. return self.create_completion
  107. def get_async_create_function(self) -> callable:
  108. return self.create_async_generator
  109. def get_providers(self, stream: bool, ignored: list[str]) -> list[ProviderType]:
  110. providers = [p for p in self.providers if (p.supports_stream or not stream) and p.__name__ not in ignored]
  111. if self.shuffle:
  112. random.shuffle(providers)
  113. return providers
  114. class RetryProvider(IterListProvider):
  115. def __init__(
  116. self,
  117. providers: List[Type[BaseProvider]],
  118. shuffle: bool = True,
  119. single_provider_retry: bool = False,
  120. max_retries: int = 3,
  121. ) -> None:
  122. """
  123. Initialize the BaseRetryProvider.
  124. Args:
  125. providers (List[Type[BaseProvider]]): List of providers to use.
  126. shuffle (bool): Whether to shuffle the providers list.
  127. single_provider_retry (bool): Whether to retry a single provider if it fails.
  128. max_retries (int): Maximum number of retries for a single provider.
  129. """
  130. super().__init__(providers, shuffle)
  131. self.single_provider_retry = single_provider_retry
  132. self.max_retries = max_retries
  133. def create_completion(
  134. self,
  135. model: str,
  136. messages: Messages,
  137. stream: bool = False,
  138. **kwargs,
  139. ) -> CreateResult:
  140. """
  141. Create a completion using available providers, with an option to stream the response.
  142. Args:
  143. model (str): The model to be used for completion.
  144. messages (Messages): The messages to be used for generating completion.
  145. stream (bool, optional): Flag to indicate if the response should be streamed. Defaults to False.
  146. Yields:
  147. CreateResult: Tokens or results from the completion.
  148. Raises:
  149. Exception: Any exception encountered during the completion process.
  150. """
  151. if self.single_provider_retry:
  152. exceptions = {}
  153. started: bool = False
  154. provider = self.providers[0]
  155. self.last_provider = provider
  156. for attempt in range(self.max_retries):
  157. try:
  158. if debug.logging:
  159. print(f"Using {provider.__name__} provider (attempt {attempt + 1})")
  160. response = provider.get_create_function()(model, messages, stream=stream, **kwargs)
  161. for chunk in response:
  162. if isinstance(chunk, str) or isinstance(chunk, ImageResponse):
  163. yield chunk
  164. started = True
  165. if started:
  166. return
  167. except Exception as e:
  168. exceptions[provider.__name__] = e
  169. if debug.logging:
  170. print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
  171. if started:
  172. raise e
  173. raise_exceptions(exceptions)
  174. else:
  175. yield from super().create_completion(model, messages, stream, **kwargs)
  176. async def create_async_generator(
  177. self,
  178. model: str,
  179. messages: Messages,
  180. stream: bool = True,
  181. **kwargs
  182. ) -> AsyncResult:
  183. exceptions = {}
  184. started = False
  185. if self.single_provider_retry:
  186. provider = self.providers[0]
  187. self.last_provider = provider
  188. for attempt in range(self.max_retries):
  189. try:
  190. debug.log(f"Using {provider.__name__} provider (attempt {attempt + 1})")
  191. response = provider.get_async_create_function()(model, messages, stream=stream, **kwargs)
  192. if hasattr(response, "__aiter__"):
  193. async for chunk in response:
  194. if isinstance(chunk, str) or isinstance(chunk, ImageResponse):
  195. yield chunk
  196. started = True
  197. else:
  198. response = await response
  199. if response:
  200. yield response
  201. started = True
  202. if started:
  203. return
  204. except Exception as e:
  205. exceptions[provider.__name__] = e
  206. if debug.logging:
  207. print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
  208. raise_exceptions(exceptions)
  209. else:
  210. async for chunk in super().create_async_generator(model, messages, stream, **kwargs):
  211. yield chunk
  212. def raise_exceptions(exceptions: dict) -> None:
  213. """
  214. Raise a combined exception if any occurred during retries.
  215. Raises:
  216. RetryProviderError: If any provider encountered an exception.
  217. RetryNoProviderError: If no provider is found.
  218. """
  219. if exceptions:
  220. raise RetryProviderError("RetryProvider failed:\n" + "\n".join([
  221. f"{p}: {type(exception).__name__}: {exception}" for p, exception in exceptions.items()
  222. ]))
  223. raise RetryNoProviderError("No provider found")