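"""Vercel provider for the sdk.vercel.ai playground.

Streams completions from ``https://sdk.vercel.ai/api/generate``. Every
request must carry a ``custom-encoding`` header holding an anti-bot token
derived from a JavaScript challenge served at ``/openai.jpeg`` (see
``get_anti_bot_token`` below).
"""
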
from __future__ import annotations

import json, base64, requests, execjs, random, uuid

from ..typing import Messages, TypedDict, CreateResult, Any
from .base_provider import AbstractProvider
from ..debug import logging


class Vercel(AbstractProvider):
    url                      = 'https://sdk.vercel.ai'
    working                  = False
    supports_message_history = True
    supports_gpt_35_turbo    = True
    supports_stream          = True

    @staticmethod
    def create_completion(
        model: str,
        messages: Messages,
        stream: bool,
        proxy: str = None,
        **kwargs
    ) -> CreateResult:
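        """Yield text chunks streamed from the Vercel playground endpoint.

        Defaults to ``gpt-3.5-turbo`` when no model is given, and retries
        the request up to ``max_retries`` times (default 20) whenever the
        endpoint answers with an HTTP error status.
        """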
        if not model:
            model = "gpt-3.5-turbo"
        elif model not in model_info:
            raise ValueError(f"Vercel does not support {model}")
        headers = {
            'authority'         : 'sdk.vercel.ai',
            'accept'            : '*/*',
            'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control'     : 'no-cache',
            'content-type'      : 'application/json',
            'custom-encoding'   : get_anti_bot_token(),
            'origin'            : 'https://sdk.vercel.ai',
            'pragma'            : 'no-cache',
            'referer'           : 'https://sdk.vercel.ai/',
            'sec-ch-ua'         : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
            'sec-ch-ua-mobile'  : '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest'    : 'empty',
            'sec-fetch-mode'    : 'cors',
            'sec-fetch-site'    : 'same-origin',
            'user-agent'        : f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
        }
        # Pop the retry control out of kwargs first, so it is not splatted
        # into the request payload below.
        max_retries = kwargs.pop('max_retries', 20)

        json_data = {
            'model'       : model_info[model]['id'],
            'messages'    : messages,
            'playgroundId': str(uuid.uuid4()),
            'chatIndex'   : 0,
            **model_info[model]['default_params'],
            **kwargs
        }
        for _ in range(max_retries):
            response = requests.post('https://sdk.vercel.ai/api/generate',
                headers=headers, json=json_data, stream=True, proxies={"https": proxy})
            try:
                response.raise_for_status()
            except Exception:
                continue
            for token in response.iter_content(chunk_size=None):
                yield token.decode()
            break
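

# Usage sketch: ``create_completion`` is a generator of raw text chunks
# (assuming the usual role/content message dicts behind the ``Messages``
# alias):
#
#     for token in Vercel.create_completion(
#         model='gpt-3.5-turbo',
#         messages=[{'role': 'user', 'content': 'Hello'}],
#         stream=True,
#     ):
#         print(token, end='', flush=True)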
def get_anti_bot_token() -> str:
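    """Solve the JavaScript anti-bot challenge served at ``/openai.jpeg``.

    Returns the value the API expects in the ``custom-encoding`` header.
    """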
    headers = {
        'authority'         : 'sdk.vercel.ai',
        'accept'            : '*/*',
        'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'cache-control'     : 'no-cache',
        'pragma'            : 'no-cache',
        'referer'           : 'https://sdk.vercel.ai/',
        'sec-ch-ua'         : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
        'sec-ch-ua-mobile'  : '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest'    : 'empty',
        'sec-fetch-mode'    : 'cors',
        'sec-fetch-site'    : 'same-origin',
        'user-agent'        : f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
    }
    response = requests.get('https://sdk.vercel.ai/openai.jpeg',
        headers=headers).text

    raw_data = json.loads(base64.b64decode(response,
        validate=True))

    js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
        return (%s)(%s)''' % (raw_data['c'], raw_data['a'])

    raw_token = json.dumps({'r': execjs.compile(js_script).call(''), 't': raw_data['t']},
        separators = (",", ":"))

    return base64.b64encode(raw_token.encode('utf-16le')).decode()
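

# Challenge sketch: /openai.jpeg returns base64-encoded JSON of the form
# {"c": <js function source>, "a": <its argument>, "t": <server token>}.
# The reply placed in `custom-encoding` is base64(utf-16le(JSON{r: c(a), t})).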


class ModelInfo(TypedDict):
    id: str
    default_params: dict[str, Any]


model_info: dict[str, ModelInfo] = {
    # 'claude-instant-v1': {
    #     'id': 'anthropic:claude-instant-v1',
    #     'default_params': {
    #         'temperature': 1,
    #         'maximumLength': 1024,
    #         'topP': 1,
    #         'topK': 1,
    #         'presencePenalty': 1,
    #         'frequencyPenalty': 1,
    #         'stopSequences': ['\n\nHuman:'],
    #     },
    # },
    # 'claude-v1': {
    #     'id': 'anthropic:claude-v1',
    #     'default_params': {
    #         'temperature': 1,
    #         'maximumLength': 1024,
    #         'topP': 1,
    #         'topK': 1,
    #         'presencePenalty': 1,
    #         'frequencyPenalty': 1,
    #         'stopSequences': ['\n\nHuman:'],
    #     },
    # },
    # 'claude-v2': {
    #     'id': 'anthropic:claude-v2',
    #     'default_params': {
    #         'temperature': 1,
    #         'maximumLength': 1024,
    #         'topP': 1,
    #         'topK': 1,
    #         'presencePenalty': 1,
    #         'frequencyPenalty': 1,
    #         'stopSequences': ['\n\nHuman:'],
    #     },
    # },
    'replicate/llama70b-v2-chat': {
        'id': 'replicate:replicate/llama-2-70b-chat',
        'default_params': {
            'temperature': 0.75,
            'maximumLength': 3000,
            'topP': 1,
            'repetitionPenalty': 1,
        },
    },
    'a16z-infra/llama7b-v2-chat': {
        'id': 'replicate:a16z-infra/llama7b-v2-chat',
        'default_params': {
            'temperature': 0.75,
            'maximumLength': 3000,
            'topP': 1,
            'repetitionPenalty': 1,
        },
    },
    'a16z-infra/llama13b-v2-chat': {
        'id': 'replicate:a16z-infra/llama13b-v2-chat',
        'default_params': {
            'temperature': 0.75,
            'maximumLength': 3000,
            'topP': 1,
            'repetitionPenalty': 1,
        },
    },
    'replicate/llama-2-70b-chat': {
        'id': 'replicate:replicate/llama-2-70b-chat',
        'default_params': {
            'temperature': 0.75,
            'maximumLength': 3000,
            'topP': 1,
            'repetitionPenalty': 1,
        },
    },
    'bigscience/bloom': {
        'id': 'huggingface:bigscience/bloom',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
        },
    },
    'google/flan-t5-xxl': {
        'id': 'huggingface:google/flan-t5-xxl',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
        },
    },
    'EleutherAI/gpt-neox-20b': {
        'id': 'huggingface:EleutherAI/gpt-neox-20b',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
            'stopSequences': [],
        },
    },
    'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {
        'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
        'default_params': {
            'maximumLength': 1024,
            'typicalP': 0.2,
            'repetitionPenalty': 1,
        },
    },
    'OpenAssistant/oasst-sft-1-pythia-12b': {
        'id': 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
        'default_params': {
            'maximumLength': 1024,
            'typicalP': 0.2,
            'repetitionPenalty': 1,
        },
    },
    'bigcode/santacoder': {
        'id': 'huggingface:bigcode/santacoder',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
        },
    },
    'command-light-nightly': {
        'id': 'cohere:command-light-nightly',
        'default_params': {
            'temperature': 0.9,
            'maximumLength': 1024,
            'topP': 1,
            'topK': 0,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'command-nightly': {
        'id': 'cohere:command-nightly',
        'default_params': {
            'temperature': 0.9,
            'maximumLength': 1024,
            'topP': 1,
            'topK': 0,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    # 'gpt-4': {
    #     'id': 'openai:gpt-4',
    #     'default_params': {
    #         'temperature': 0.7,
    #         'maximumLength': 8192,
    #         'topP': 1,
    #         'presencePenalty': 0,
    #         'frequencyPenalty': 0,
    #         'stopSequences': [],
    #     },
    # },
    # 'gpt-4-0613': {
    #     'id': 'openai:gpt-4-0613',
    #     'default_params': {
    #         'temperature': 0.7,
    #         'maximumLength': 8192,
    #         'topP': 1,
    #         'presencePenalty': 0,
    #         'frequencyPenalty': 0,
    #         'stopSequences': [],
    #     },
    # },
    'code-davinci-002': {
        'id': 'openai:code-davinci-002',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'gpt-3.5-turbo': {
        'id': 'openai:gpt-3.5-turbo',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 4096,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': [],
        },
    },
    'gpt-3.5-turbo-16k': {
        'id': 'openai:gpt-3.5-turbo-16k',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 16280,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': [],
        },
    },
    'gpt-3.5-turbo-16k-0613': {
        'id': 'openai:gpt-3.5-turbo-16k-0613',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 16280,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': [],
        },
    },
    'text-ada-001': {
        'id': 'openai:text-ada-001',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-babbage-001': {
        'id': 'openai:text-babbage-001',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-curie-001': {
        'id': 'openai:text-curie-001',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-davinci-002': {
        'id': 'openai:text-davinci-002',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-davinci-003': {
        'id': 'openai:text-davinci-003',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 4097,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
}
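

# Example: for 'gpt-3.5-turbo' the defaults above expand the payload to
#
#     {'model': 'openai:gpt-3.5-turbo', 'temperature': 0.7,
#      'maximumLength': 4096, 'topP': 1, 'topK': 1, 'presencePenalty': 1,
#      'frequencyPenalty': 1, 'stopSequences': []}
#
# which create_completion merges with messages, playgroundId and chatIndex.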