from __future__ import annotations

import json, base64, requests, random, uuid

try:
    import execjs
    has_requirements = True
except ImportError:
    has_requirements = False

from ...typing import Messages, TypedDict, CreateResult, Any
from ..base_provider import AbstractProvider
from ...errors import MissingRequirementsError


class Vercel(AbstractProvider):
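    """Provider for the Vercel AI SDK playground (https://sdk.vercel.ai).

    Streams completions from the playground's chat endpoint after solving
    its JavaScript anti-bot challenge; currently flagged as not working
    (working = False).
    """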
    url = 'https://sdk.vercel.ai'
    working = False
    supports_message_history = True
    supports_gpt_35_turbo = True
    supports_stream = True

    @staticmethod
    def create_completion(
        model: str,
        messages: Messages,
        stream: bool,
        proxy: str = None,
        **kwargs
    ) -> CreateResult:
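        """Yield response tokens for the given messages.

        Remaining keyword arguments are merged into the request payload
        after the model's default parameters, so caller values override
        the defaults.
        """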
        if not has_requirements:
            raise MissingRequirementsError('Install "PyExecJS" package')

        if not model:
            model = "gpt-3.5-turbo"
        elif model not in model_info:
            raise ValueError(f"Vercel does not support {model}")

        # Browser-like headers; 'custom-encoding' carries the solved anti-bot
        # token and the User-Agent build numbers are randomized per request.
        headers = {
            'authority': 'sdk.vercel.ai',
            'accept': '*/*',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control': 'no-cache',
            'content-type': 'application/json',
            'custom-encoding': get_anti_bot_token(),
            'origin': 'https://sdk.vercel.ai',
            'pragma': 'no-cache',
            'referer': 'https://sdk.vercel.ai/',
            'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
        }
        # Pop control arguments first so they are not forwarded in the
        # payload; the remaining kwargs override the model defaults.
        max_retries = kwargs.pop('max_retries', 20)

        json_data = {
            'model'       : model_info[model]['id'],
            'messages'    : messages,
            'playgroundId': str(uuid.uuid4()),
            'chatIndex'   : 0,
            **model_info[model]['default_params'],
            **kwargs
        }

        for _ in range(max_retries):
            response = requests.post('https://chat.vercel.ai/api/chat',
                headers=headers, json=json_data, stream=True, proxies={"https": proxy})
            try:
                response.raise_for_status()
            except requests.HTTPError:
                # Server rejected the request; try again up to max_retries times.
                continue
            for token in response.iter_content(chunk_size=None):
                yield token.decode()
            break
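
# Usage sketch (illustrative, not executed here; assumes this module is
# imported through the surrounding g4f package so the relative imports
# resolve):
#
#     for chunk in Vercel.create_completion(
#         model='gpt-3.5-turbo',
#         messages=[{'role': 'user', 'content': 'Hello'}],
#         stream=True,
#     ):
#         print(chunk, end='')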


def get_anti_bot_token() -> str:
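    """Solve the playground's anti-bot challenge and return the token.

    Fetches the base64-encoded JSON challenge from /openai.jpeg, executes
    the embedded JavaScript with PyExecJS, and re-encodes the result as
    expected by the 'custom-encoding' request header.
    """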
    headers = {
        'authority': 'sdk.vercel.ai',
        'accept': '*/*',
        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'cache-control': 'no-cache',
        'pragma': 'no-cache',
        'referer': 'https://sdk.vercel.ai/',
        'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
    }

    response = requests.get('https://sdk.vercel.ai/openai.jpeg',
        headers=headers).text

    raw_data = json.loads(base64.b64decode(response, validate=True))

    js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
    return (%s)(%s)''' % (raw_data['c'], raw_data['a'])

    raw_token = json.dumps({'r': execjs.compile(js_script).call(''), 't': raw_data['t']},
        separators=(",", ":"))

    return base64.b64encode(raw_token.encode('utf-16le')).decode()


class ModelInfo(TypedDict):
    id: str
    default_params: dict[str, Any]
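
# Registry of models exposed by the playground: each entry maps a public
# model name to its upstream id and the default request parameters merged
# into json_data above. Commented-out entries are disabled but retained
# for reference.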
model_info: dict[str, ModelInfo] = {
    # 'claude-instant-v1': {
    #     'id': 'anthropic:claude-instant-v1',
    #     'default_params': {
    #         'temperature': 1,
    #         'maximumLength': 1024,
    #         'topP': 1,
    #         'topK': 1,
    #         'presencePenalty': 1,
    #         'frequencyPenalty': 1,
    #         'stopSequences': ['\n\nHuman:'],
    #     },
    # },
    # 'claude-v1': {
    #     'id': 'anthropic:claude-v1',
    #     'default_params': {
    #         'temperature': 1,
    #         'maximumLength': 1024,
    #         'topP': 1,
    #         'topK': 1,
    #         'presencePenalty': 1,
    #         'frequencyPenalty': 1,
    #         'stopSequences': ['\n\nHuman:'],
    #     },
    # },
    # 'claude-v2': {
    #     'id': 'anthropic:claude-v2',
    #     'default_params': {
    #         'temperature': 1,
    #         'maximumLength': 1024,
    #         'topP': 1,
    #         'topK': 1,
    #         'presencePenalty': 1,
    #         'frequencyPenalty': 1,
    #         'stopSequences': ['\n\nHuman:'],
    #     },
    # },
    'replicate/llama70b-v2-chat': {
        'id': 'replicate:replicate/llama-2-70b-chat',
        'default_params': {
            'temperature': 0.75,
            'maximumLength': 3000,
            'topP': 1,
            'repetitionPenalty': 1,
        },
    },
    'a16z-infra/llama7b-v2-chat': {
        'id': 'replicate:a16z-infra/llama7b-v2-chat',
        'default_params': {
            'temperature': 0.75,
            'maximumLength': 3000,
            'topP': 1,
            'repetitionPenalty': 1,
        },
    },
    'a16z-infra/llama13b-v2-chat': {
        'id': 'replicate:a16z-infra/llama13b-v2-chat',
        'default_params': {
            'temperature': 0.75,
            'maximumLength': 3000,
            'topP': 1,
            'repetitionPenalty': 1,
        },
    },
    'replicate/llama-2-70b-chat': {
        'id': 'replicate:replicate/llama-2-70b-chat',
        'default_params': {
            'temperature': 0.75,
            'maximumLength': 3000,
            'topP': 1,
            'repetitionPenalty': 1,
        },
    },
    'bigscience/bloom': {
        'id': 'huggingface:bigscience/bloom',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
        },
    },
    'google/flan-t5-xxl': {
        'id': 'huggingface:google/flan-t5-xxl',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
        },
    },
    'EleutherAI/gpt-neox-20b': {
        'id': 'huggingface:EleutherAI/gpt-neox-20b',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
            'stopSequences': [],
        },
    },
    'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {
        'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
        'default_params': {
            'maximumLength': 1024,
            'typicalP': 0.2,
            'repetitionPenalty': 1,
        },
    },
    'OpenAssistant/oasst-sft-1-pythia-12b': {
        'id': 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
        'default_params': {
            'maximumLength': 1024,
            'typicalP': 0.2,
            'repetitionPenalty': 1,
        },
    },
    'bigcode/santacoder': {
        'id': 'huggingface:bigcode/santacoder',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
        },
    },
    'command-light-nightly': {
        'id': 'cohere:command-light-nightly',
        'default_params': {
            'temperature': 0.9,
            'maximumLength': 1024,
            'topP': 1,
            'topK': 0,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'command-nightly': {
        'id': 'cohere:command-nightly',
        'default_params': {
            'temperature': 0.9,
            'maximumLength': 1024,
            'topP': 1,
            'topK': 0,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    # 'gpt-4': {
    #     'id': 'openai:gpt-4',
    #     'default_params': {
    #         'temperature': 0.7,
    #         'maximumLength': 8192,
    #         'topP': 1,
    #         'presencePenalty': 0,
    #         'frequencyPenalty': 0,
    #         'stopSequences': [],
    #     },
    # },
    # 'gpt-4-0613': {
    #     'id': 'openai:gpt-4-0613',
    #     'default_params': {
    #         'temperature': 0.7,
    #         'maximumLength': 8192,
    #         'topP': 1,
    #         'presencePenalty': 0,
    #         'frequencyPenalty': 0,
    #         'stopSequences': [],
    #     },
    # },
    'code-davinci-002': {
        'id': 'openai:code-davinci-002',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'gpt-3.5-turbo': {
        'id': 'openai:gpt-3.5-turbo',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 4096,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': [],
        },
    },
    'gpt-3.5-turbo-16k': {
        'id': 'openai:gpt-3.5-turbo-16k',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 16280,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': [],
        },
    },
    'gpt-3.5-turbo-16k-0613': {
        'id': 'openai:gpt-3.5-turbo-16k-0613',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 16280,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': [],
        },
    },
    'text-ada-001': {
        'id': 'openai:text-ada-001',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-babbage-001': {
        'id': 'openai:text-babbage-001',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-curie-001': {
        'id': 'openai:text-curie-001',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-davinci-002': {
        'id': 'openai:text-davinci-002',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-davinci-003': {
        'id': 'openai:text-davinci-003',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 4097,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
}
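
# Sketch of how a registry entry is consumed when building the request
# payload (mirrors create_completion above; the temperature value is a
# hypothetical caller override, not part of the registry):
#
#     info = model_info['gpt-3.5-turbo']
#     payload = {'model': info['id'], **info['default_params'], 'temperature': 0.2}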