# ChatGLM.py
  1. from __future__ import annotations
  2. import uuid
  3. import json
  4. from aiohttp import ClientSession
  5. from ..typing import AsyncResult, Messages
  6. from ..requests.raise_for_status import raise_for_status
  7. from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
  8. from ..providers.response import FinishReason
  9. class ChatGLM(AsyncGeneratorProvider, ProviderModelMixin):
  10. url = "https://chatglm.cn"
  11. api_endpoint = "https://chatglm.cn/chatglm/mainchat-api/guest/stream"
  12. working = True
  13. supports_stream = True
  14. supports_system_message = False
  15. supports_message_history = False
  16. default_model = "glm-4"
  17. models = [default_model]
  18. @classmethod
  19. async def create_async_generator(
  20. cls,
  21. model: str,
  22. messages: Messages,
  23. proxy: str = None,
  24. **kwargs
  25. ) -> AsyncResult:
  26. device_id = str(uuid.uuid4()).replace('-', '')
  27. headers = {
  28. 'Accept-Language': 'en-US,en;q=0.9',
  29. 'App-Name': 'chatglm',
  30. 'Authorization': 'undefined',
  31. 'Content-Type': 'application/json',
  32. 'Origin': 'https://chatglm.cn',
  33. 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
  34. 'X-App-Platform': 'pc',
  35. 'X-App-Version': '0.0.1',
  36. 'X-Device-Id': device_id,
  37. 'Accept': 'text/event-stream'
  38. }
  39. async with ClientSession(headers=headers) as session:
  40. data = {
  41. "assistant_id": "65940acff94777010aa6b796",
  42. "conversation_id": "",
  43. "meta_data": {
  44. "if_plus_model": False,
  45. "is_test": False,
  46. "input_question_type": "xxxx",
  47. "channel": "",
  48. "draft_id": "",
  49. "quote_log_id": "",
  50. "platform": "pc"
  51. },
  52. "messages": [
  53. {
  54. "role": message["role"],
  55. "content": [
  56. {
  57. "type": "text",
  58. "text": message["content"]
  59. }
  60. ]
  61. }
  62. for message in messages
  63. ]
  64. }
  65. yield_text = 0
  66. async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
  67. await raise_for_status(response)
  68. async for chunk in response.content:
  69. if chunk:
  70. decoded_chunk = chunk.decode('utf-8')
  71. if decoded_chunk.startswith('data: '):
  72. try:
  73. json_data = json.loads(decoded_chunk[6:])
  74. parts = json_data.get('parts', [])
  75. if parts:
  76. content = parts[0].get('content', [])
  77. if content:
  78. text_content = content[0].get('text', '')
  79. text = text_content[yield_text:]
  80. if text:
  81. yield text
  82. yield_text += len(text)
  83. # Yield FinishReason when status is 'finish'
  84. if json_data.get('status') == 'finish':
  85. yield FinishReason("stop")
  86. except json.JSONDecodeError:
  87. pass