DeepInfraChat.py

from __future__ import annotations

from ..typing import AsyncResult, Messages, ImagesType
from .template import OpenaiTemplate
from ..image import to_data_uri

class DeepInfraChat(OpenaiTemplate):
    url = "https://deepinfra.com/chat"
    api_base = "https://api.deepinfra.com/v1/openai"
    working = True

    default_model = 'deepseek-ai/DeepSeek-V3'
    default_vision_model = 'meta-llama/Llama-3.2-90B-Vision-Instruct'
    vision_models = [default_vision_model, 'openbmb/MiniCPM-Llama3-V-2_5']
    models = [
        'meta-llama/Meta-Llama-3.1-8B-Instruct',
        'meta-llama/Llama-3.3-70B-Instruct-Turbo',
        'meta-llama/Llama-3.3-70B-Instruct',
        default_model,
        'mistralai/Mistral-Small-24B-Instruct-2501',
        'deepseek-ai/DeepSeek-R1',
        'deepseek-ai/DeepSeek-R1-Turbo',
        'deepseek-ai/DeepSeek-R1-Distill-Llama-70B',
        'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',
        'microsoft/phi-4',
        'microsoft/WizardLM-2-8x22B',
        'Qwen/Qwen2.5-72B-Instruct',
        '01-ai/Yi-34B-Chat',
        'Qwen/Qwen2-72B-Instruct',
        'cognitivecomputations/dolphin-2.6-mixtral-8x7b',
        'cognitivecomputations/dolphin-2.9.1-llama-3-70b',
        'databricks/dbrx-instruct',
        'deepinfra/airoboros-70b',
        'lizpreciatior/lzlv_70b_fp16_hf',
        'microsoft/WizardLM-2-7B',
        'mistralai/Mixtral-8x22B-Instruct-v0.1',
    ] + vision_models

    model_aliases = {
        # One alias per model id; the -Turbo variants remain addressable
        # through their full ids in `models`.
        "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
        "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct",
        "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct",
        "deepseek-v3": default_model,
        "mistral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
        "deepseek-r1": "deepseek-ai/DeepSeek-R1",
        "deepseek-r1-distill-llama": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
        "deepseek-r1-distill-qwen": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "phi-4": "microsoft/phi-4",
        "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
        "yi-34b": "01-ai/Yi-34B-Chat",
        "qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
        "dolphin-2.6": "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
        "dolphin-2.9": "cognitivecomputations/dolphin-2.9.1-llama-3-70b",
        "dbrx-instruct": "databricks/dbrx-instruct",
        "airoboros-70b": "deepinfra/airoboros-70b",
        "lzlv-70b": "lizpreciatior/lzlv_70b_fp16_hf",
        "wizardlm-2-7b": "microsoft/WizardLM-2-7B",
        "mixtral-8x22b": "mistralai/Mixtral-8x22B-Instruct-v0.1",
        "minicpm-2.5": "openbmb/MiniCPM-Llama3-V-2_5",
    }
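
    # Aliases are resolved by the inherited template (an assumption about the
    # surrounding g4f provider contract, where ProviderModelMixin.get_model
    # consults model_aliases), so callers may pass either form, e.g.:
    #
    #     DeepInfraChat.get_model("deepseek-v3")   # -> 'deepseek-ai/DeepSeek-V3'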

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        top_p: float = 0.9,
        temperature: float = 0.7,
        max_tokens: int = None,
        headers: dict = {},
        images: ImagesType = None,
        **kwargs
    ) -> AsyncResult:
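        """Stream chat completions from DeepInfra's OpenAI-compatible endpoint.

        Browser-like headers are merged over any caller-supplied ones, and any
        `images` are inlined into the last message as data URIs before the
        request is delegated to the OpenaiTemplate base implementation.
        """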
        # Mimic the deepinfra.com web client; caller-supplied headers take
        # precedence over these defaults.
        headers = {
            'Accept-Language': 'en-US,en;q=0.9',
            'Origin': 'https://deepinfra.com',
            'Referer': 'https://deepinfra.com/',
            'X-Deepinfra-Source': 'web-page',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
            **headers
        }

        if images is not None:
            # Fall back to the default vision model when the requested model
            # is missing or not vision-capable.
            if not model or model not in cls.models:
                model = cls.default_vision_model
            if messages:
                # Rebuild the last message in the OpenAI multimodal layout:
                # image parts first, then the original text.
                last_message = messages[-1].copy()
                last_message["content"] = [
                    *[{
                        "type": "image_url",
                        "image_url": {"url": to_data_uri(image)}
                    } for image, _ in images],
                    {
                        "type": "text",
                        "text": last_message["content"]
                    }
                ]
                messages[-1] = last_message
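                # Illustrative result (values are placeholders):
                #     {"role": "user", "content": [
                #         {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
                #         {"type": "text", "text": "<original prompt>"},
                #     ]}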

        async for chunk in super().create_async_generator(
            model,
            messages,
            headers=headers,
            stream=stream,
            top_p=top_p,
            temperature=temperature,
            max_tokens=max_tokens,
            **kwargs
        ):
            yield chunk
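
# Usage sketch (illustrative; the relative imports above mean this module runs
# as part of the g4f package, not as a standalone script):
#
#     import asyncio
#
#     async def demo() -> None:
#         messages = [{"role": "user", "content": "Say hello."}]
#         async for chunk in DeepInfraChat.create_async_generator(
#             DeepInfraChat.default_model,
#             messages,
#         ):
#             print(chunk, end="", flush=True)
#
#     asyncio.run(demo())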