DeepInfraChat.py

from __future__ import annotations

import json

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin


class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://deepinfra.com/chat"
    api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
    models = [
        'meta-llama/Meta-Llama-3.1-8B-Instruct',
        default_model,
        'Qwen/QwQ-32B-Preview',
        'microsoft/WizardLM-2-8x22B',
        'Qwen/Qwen2.5-72B-Instruct',
        'Qwen/Qwen2.5-Coder-32B-Instruct',
        'nvidia/Llama-3.1-Nemotron-70B-Instruct',
    ]
    model_aliases = {
        "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        "qwq-32b": "Qwen/QwQ-32B-Preview",
        "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
        "qwen-2.5-72b": "Qwen/Qwen2.5-72B-Instruct",
        "qwen-2.5-coder-32b": "Qwen/Qwen2.5-Coder-32B-Instruct",
        "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct",
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            'Accept-Language': 'en-US,en;q=0.9',
            'Content-Type': 'application/json',
            'Origin': 'https://deepinfra.com',
            'Referer': 'https://deepinfra.com/',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
            'X-Deepinfra-Source': 'web-page',
            'accept': 'text/event-stream',
        }
        data = {
            'model': model,
            'messages': messages,
            'stream': True
        }
        async with ClientSession(headers=headers) as session:
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                # The endpoint streams OpenAI-style server-sent events: each
                # payload line looks like "data: {...}" and the stream ends
                # with "data: [DONE]".
                async for line in response.content:
                    if line:
                        decoded_line = line.decode('utf-8').strip()
                        if decoded_line.startswith('data:'):
                            json_part = decoded_line[5:].strip()
                            if json_part == '[DONE]':
                                break
                            try:
                                # Named `chunk` so the request payload `data`
                                # above is not shadowed inside the loop.
                                chunk = json.loads(json_part)
                                choices = chunk.get('choices', [])
                                if choices:
                                    delta = choices[0].get('delta', {})
                                    content = delta.get('content', '')
                                    if content:
                                        yield content
                            except json.JSONDecodeError:
                                # Malformed chunks are reported and skipped;
                                # the loop continues with the next event.
                                print(f"JSON decode error: {json_part}")
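

# Usage sketch (not part of the provider): a minimal, hypothetical driver.
# It assumes this module lives inside its package so the relative imports
# above resolve, and that the DeepInfra endpoint accepts unauthenticated
# streaming requests; neither is guaranteed by this file alone.
#
#     import asyncio
#
#     async def main() -> None:
#         messages = [{"role": "user", "content": "Say hello in one sentence."}]
#         async for token in DeepInfraChat.create_async_generator(
#             model=DeepInfraChat.default_model,
#             messages=messages,
#         ):
#             print(token, end="", flush=True)
#
#     asyncio.run(main())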