# OIVSCode.py
  1. from __future__ import annotations
  2. import json
  3. from aiohttp import ClientSession
  4. from ..image import to_data_uri
  5. from ..typing import AsyncResult, Messages, ImagesType
  6. from ..requests.raise_for_status import raise_for_status
  7. from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
  8. from .helper import format_prompt
  9. from ..providers.response import FinishReason
  10. class OIVSCode(AsyncGeneratorProvider, ProviderModelMixin):
  11. label = "OI VSCode Server"
  12. url = "https://oi-vscode-server.onrender.com"
  13. api_endpoint = "https://oi-vscode-server.onrender.com/v1/chat/completions"
  14. working = True
  15. supports_stream = True
  16. supports_system_message = True
  17. supports_message_history = True
  18. default_model = "gpt-4o-mini-2024-07-18"
  19. default_vision_model = default_model
  20. vision_models = [default_model, "gpt-4o-mini"]
  21. models = vision_models
  22. model_aliases = {"gpt-4o-mini": "gpt-4o-mini-2024-07-18"}
  23. @classmethod
  24. async def create_async_generator(
  25. cls,
  26. model: str,
  27. messages: Messages,
  28. stream: bool = False,
  29. images: ImagesType = None,
  30. proxy: str = None,
  31. **kwargs
  32. ) -> AsyncResult:
  33. headers = {
  34. "accept": "*/*",
  35. "accept-language": "en-US,en;q=0.9",
  36. "content-type": "application/json",
  37. "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
  38. }
  39. async with ClientSession(headers=headers) as session:
  40. if images is not None:
  41. messages[-1]['content'] = [
  42. {
  43. "type": "text",
  44. "text": messages[-1]['content']
  45. },
  46. *[
  47. {
  48. "type": "image_url",
  49. "image_url": {
  50. "url": to_data_uri(image)
  51. }
  52. }
  53. for image, _ in images
  54. ]
  55. ]
  56. data = {
  57. "model": model,
  58. "stream": stream,
  59. "messages": messages
  60. }
  61. async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
  62. await raise_for_status(response)
  63. full_response = ""
  64. if stream:
  65. async for line in response.content:
  66. if line:
  67. line = line.decode()
  68. if line.startswith("data: "):
  69. if line.strip() == "data: [DONE]":
  70. break
  71. try:
  72. data = json.loads(line[6:])
  73. if content := data["choices"][0]["delta"].get("content"):
  74. yield content
  75. full_response += content
  76. except:
  77. continue
  78. reason = "length" if len(full_response) > 0 else "stop"
  79. yield FinishReason(reason)
  80. else:
  81. response_data = await response.json()
  82. full_response = response_data["choices"][0]["message"]["content"]
  83. yield full_response
  84. reason = "length" if len(full_response) > 0 else "stop"
  85. yield FinishReason(reason)