GlhfChat.py

from __future__ import annotations

from .OpenaiAPI import OpenaiAPI


class GlhfChat(OpenaiAPI):
    label = "GlhfChat"
    url = "https://glhf.chat"
    login_url = "https://glhf.chat/users/settings/api"
    api_base = "https://glhf.chat/api/openai/v1"
    working = True

    # Map short model names to the provider's fully qualified "hf:" model IDs.
    model_aliases = {
        'Qwen2.5-Coder-32B-Instruct': 'hf:Qwen/Qwen2.5-Coder-32B-Instruct',
        'Llama-3.1-405B-Instruct': 'hf:meta-llama/Llama-3.1-405B-Instruct',
        'Llama-3.1-70B-Instruct': 'hf:meta-llama/Llama-3.1-70B-Instruct',
        'Llama-3.1-8B-Instruct': 'hf:meta-llama/Llama-3.1-8B-Instruct',
        'Llama-3.2-3B-Instruct': 'hf:meta-llama/Llama-3.2-3B-Instruct',
        'Llama-3.2-11B-Vision-Instruct': 'hf:meta-llama/Llama-3.2-11B-Vision-Instruct',
        'Llama-3.2-90B-Vision-Instruct': 'hf:meta-llama/Llama-3.2-90B-Vision-Instruct',
        'Qwen2.5-72B-Instruct': 'hf:Qwen/Qwen2.5-72B-Instruct',
        'Llama-3.3-70B-Instruct': 'hf:meta-llama/Llama-3.3-70B-Instruct',
        'gemma-2-9b-it': 'hf:google/gemma-2-9b-it',
        'gemma-2-27b-it': 'hf:google/gemma-2-27b-it',
        'Mistral-7B-Instruct-v0.3': 'hf:mistralai/Mistral-7B-Instruct-v0.3',
        'Mixtral-8x7B-Instruct-v0.1': 'hf:mistralai/Mixtral-8x7B-Instruct-v0.1',
        'Mixtral-8x22B-Instruct-v0.1': 'hf:mistralai/Mixtral-8x22B-Instruct-v0.1',
        'Nous-Hermes-2-Mixtral-8x7B-DPO': 'hf:NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
        'Qwen2.5-7B-Instruct': 'hf:Qwen/Qwen2.5-7B-Instruct',
        'SOLAR-10.7B-Instruct-v1.0': 'hf:upstage/SOLAR-10.7B-Instruct-v1.0',
        'Llama-3.1-Nemotron-70B-Instruct-HF': 'hf:nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
    }
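
# ---------------------------------------------------------------------------
# Usage sketch (not part of the provider class): how this provider might be
# called through g4f's OpenAI-style client. The import path, the Client
# keyword arguments, and the "glhf_..." key format are assumptions and may
# differ between g4f versions; an API key obtained from the login_url above
# is required either way.
#
#   from g4f.client import Client
#   from g4f.Provider import GlhfChat
#
#   client = Client(provider=GlhfChat, api_key="glhf_...")  # hypothetical key
#   response = client.chat.completions.create(
#       model="Llama-3.3-70B-Instruct",  # resolved via model_aliases above
#       messages=[{"role": "user", "content": "Hello!"}],
#   )
#   print(response.choices[0].message.content)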