#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : update.py
# Author: DaShenHan&道长-----先苦后甜,任凭晚风拂柳颜------
# Date : 2022/9/6
import re
from time import time as getTime
import sys
import requests
import os
import zipfile
import shutil  # https://blog.csdn.net/weixin_33130113/article/details/112336581
from utils.log import logger
from utils.download_progress import file_downloads
from utils.web import get_interval
from utils.htmlParser import jsoup
import ujson

headers = {
    'Referer': 'https://gitcode.net/',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',
}


def getHotSuggest1(url='http://4g.v.sogou.com/hotsugg', size=0):
    # size is accepted for interface parity with getHotSuggest2 but is unused here
    jsp = jsoup(url)
    pdfh = jsp.pdfh
    pdfa = jsp.pdfa
    pd = jsp.pd
    try:
        r = requests.get(url, headers=headers, timeout=2)
        html = r.text
        data = pdfa(html, 'ul.hot-list&&li')
        suggs = [{'title': pdfh(dt, 'a&&Text'), 'url': pd(dt, 'a&&href')} for dt in data]
        return suggs
    except Exception:
        return []
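

# Selector note (an assumption based on the hiker-style syntax the project's
# jsoup helper exposes): 'ul.hot-list&&li' selects each <li> under
# <ul class="hot-list">; pdfh(dt, 'a&&Text') extracts the link text, and
# pd(dt, 'a&&href') resolves the href against the page url.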


def getHotSuggest2(url='https://pbaccess.video.qq.com/trpc.videosearch.hot_rank.HotRankServantHttp/HotRankHttp',
                   size=0):
    size = int(size) if size else 50
    pdata = ujson.dumps({"pageNum": 0, "pageSize": size})
    try:
        r = requests.post(url, headers={
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',
            'content-type': 'application/json'}, data=pdata, timeout=2)
        html = r.json()
        data = html['data']['navItemList'][0]['hotRankResult']['rankItemList']
        suggs = [{'title': dt['title'], 'url': dt['url']} for dt in data]
        return suggs
    except Exception:
        return []


def getHotSuggest(s_from, size):
    if s_from == 'sougou':
        return getHotSuggest1(size=size)
    else:
        return getHotSuggest2(size=size)
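

# A minimal usage sketch (assumption: callers pass the source name and size
# through from a query string; 'sougou' is this project's spelling):
#   getHotSuggest('sougou', 10)   # Sogou hot list -> [{'title': ..., 'url': ...}, ...]
#   getHotSuggest('tencent', 10)  # anything else falls through to the Tencent rank API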


def getLocalVer():
    base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
    version_path = os.path.join(base_path, 'js/version.txt')
    if not os.path.exists(version_path):
        with open(version_path, mode='w+', encoding='utf-8') as f:
            version = '1.0.0'
            f.write(version)
    else:
        with open(version_path, encoding='utf-8') as f:
            version = f.read()
    return version


def getOnlineVer(update_proxy='https://ghproxy.liuzhicong.com/'):
    ver = '1.0.1'
    msg = ''
    update_proxy = (update_proxy or '').strip()
    logger.info(f'update_proxy:{update_proxy}')
    try:
        # r = requests.get('https://gitcode.net/qq_32394351/dr_py/-/raw/master/js/version.txt', timeout=(2, 2))
        # r = requests.get('https://code.gitlink.org.cn/api/v1/repos/hjdhnx/dr_py/raw/master/js/version.txt', timeout=(2, 2))
        url = f'{update_proxy}https://raw.githubusercontent.com/hjdhnx/dr_py/main/js/version.txt'
        logger.info(f'Checking the online version number: {url}')
        r = requests.get(url, headers=headers, timeout=(2, 2), verify=False)
        ver = r.text
    except Exception as e:
        msg = f'{e}'
        logger.info(msg)
    return ver, msg


def checkUpdate():
    local_ver = getLocalVer()
    online_ver, msg = getOnlineVer()
    return local_ver != online_ver
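

# A minimal sketch of the intended update flow (assumption: called from a web
# controller elsewhere in the project; download_new_version is defined below):
#   if checkUpdate():
#       logger.info(download_new_version())  # download, unpack and copy over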


def del_file(filepath):
    """
    Delete every file or folder under the given directory.
    :param filepath: directory path
    :return:
    """
    del_list = os.listdir(filepath)
    for f in del_list:
        file_path = os.path.join(filepath, f)
        if os.path.isfile(file_path):
            os.remove(file_path)
        else:
            try:
                shutil.rmtree(file_path)
            except Exception as e:
                logger.info(f'Error while deleting {file_path}: {e}')


def copytree(src, dst, ignore=None):
    # recursive copy fallback for Python < 3.8, where shutil.copytree has no dirs_exist_ok
    if ignore is None:
        ignore = []
    dirs = os.listdir(src)  # everything in the directory, files and subfolders alike
    logger.info(f'{dirs}')
    for dir in dirs:  # walk each file or folder
        from_dir = os.path.join(src, dir)  # source path to copy from
        to_dir = os.path.join(dst, dir)  # destination path to copy to
        if os.path.isdir(from_dir):  # folder: create the target if missing, then recurse
            if not os.path.exists(to_dir):
                os.mkdir(to_dir)
            copytree(from_dir, to_dir, ignore)
        elif os.path.isfile(from_dir):  # plain file: copy it unless excluded
            if ignore:
                regxp = '|'.join(ignore).replace('\\', '/')  # build the exclusion regex
                to_dir_str = str(to_dir).replace('\\', '/')
                if not re.search(rf'{regxp}', to_dir_str, re.M):
                    shutil.copy(from_dir, to_dir)
            else:
                shutil.copy(from_dir, to_dir)


def force_copy_files(from_path, to_path, exclude_files=None):
    if exclude_files is None:
        exclude_files = []
    logger.info(f'Copying files {from_path} => {to_path}')
    if not os.path.exists(to_path):
        os.makedirs(to_path, exist_ok=True)
    try:
        if sys.version_info < (3, 8):
            copytree(from_path, to_path, exclude_files)
        else:
            if len(exclude_files) > 0:
                shutil.copytree(from_path, to_path, dirs_exist_ok=True, ignore=shutil.ignore_patterns(*exclude_files))
            else:
                shutil.copytree(from_path, to_path, dirs_exist_ok=True)
    except Exception as e:
        logger.info(f'Error copying files {from_path} => {to_path}: {e}')
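

# Caveat (editorial note, based on shutil's documented behaviour):
# shutil.ignore_patterns() matches glob patterns against bare file names, so
# path-style entries such as 'txt/pycms0.json' never match on Python >= 3.8;
# only the pre-3.8 copytree() fallback applies them via its regex. A plain
# basename pattern works on both code paths, e.g.:
#   force_copy_files('tmp/dr_py-main/txt', 'txt', exclude_files=['pycms*.json'])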


def copy_to_update():
    base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
    tmp_path = os.path.join(base_path, 'tmp')
    dr_path = os.path.join(tmp_path, 'dr_py-main')
    if not os.path.exists(dr_path):
        logger.info(f'Upgrade failed: directory {dr_path} not found')
        return False
    js_path = os.path.join(base_path, 'js')
    files = os.listdir(js_path)
    jsd_list = list(filter(lambda x: str(x).endswith('.jsd'), files))
    try:
        for jsd in jsd_list:
            os.remove(os.path.join(js_path, jsd))  # os.listdir yields bare names, so join with js_path
        logger.info(f'Cleaned up {len(jsd_list)} .jsd files during the upgrade')
    except Exception as e:
        logger.info(f'Error while cleaning up .jsd files during the upgrade: {e}')
    # never overwrite super or base
    paths = ['js', 'models', 'controllers', 'libs', 'static', 'templates', 'utils', 'txt', 'jiexi', 'py', 'whl', 'doc']
    exclude_files = ['txt/pycms0.json', 'txt/pycms1.json', 'txt/pycms2.json', 'base/rules.db']
    for path in paths:
        force_copy_files(os.path.join(dr_path, path), os.path.join(base_path, path), exclude_files)
    try:
        shutil.copy(os.path.join(dr_path, 'app.py'), os.path.join(base_path, 'app.py'))
        shutil.copy(os.path.join(dr_path, 'requirements.txt'), os.path.join(base_path, 'requirements.txt'))
    except Exception as e:
        logger.info(f'Error updating app.py: {e}')
    logger.info('Upgrade routine finished; all files copied and overwritten')
    return True


def download_new_version(update_proxy='https://ghproxy.liuzhicong.com/', force_up=False):
    update_proxy = (update_proxy or '').strip()
    logger.info(f'update_proxy:{update_proxy}')
    t1 = getTime()
    base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
    tmp_path = os.path.join(base_path, 'tmp')
    os.makedirs(tmp_path, exist_ok=True)
    # url = 'https://gitcode.net/qq_32394351/dr_py/-/archive/master/dr_py-master.zip'
    # url = 'https://code.gitlink.org.cn/api/v1/repos/hjdhnx/dr_py/archive/master.zip'
    url = f'{update_proxy}https://github.com/hjdhnx/dr_py/archive/refs/heads/main.zip'
    # tmp_files = os.listdir(tmp_path)
    # for tp in tmp_files:
    #     print(f'Removing cached file: {tp}')
    #     os.remove(os.path.join(tmp_path, tp))
    msg = ''
    try:
        logger.info(f'Starting download: {url}')
        download_path = os.path.join(tmp_path, 'dr_py.zip')
        # 2023/11/18: switched to a download with a progress bar
        download_ok = file_downloads([{'url': url, 'name': 'dr_py.zip'}], tmp_path)
        if not download_ok and not force_up:
            return 'Progress-bar download of the upgrade archive failed and forced download is not enabled. See the backend log for details'
        elif not download_ok and force_up:
            r = requests.get(url, headers=headers, timeout=(20, 20), verify=False)
            rb = r.content
            # clear the directory before saving the file
            del_file(tmp_path)
            with open(download_path, mode='wb+') as f:
                f.write(rb)
        logger.info(f'Unpacking: {download_path}')
        f = zipfile.ZipFile(download_path, 'r')  # archive location
        for file in f.namelist():
            f.extract(file, tmp_path)  # extraction target
        f.close()
        logger.info('Unpacked; starting the upgrade')
        ret = copy_to_update()
        logger.info(f'Upgrade finished, result: {ret}')
        msg = 'Upgrade succeeded'
    except Exception as e:
        msg = f'Upgrade failed: {e}'
    logger.info(f'Total system upgrade time: {get_interval(t1)} ms')
    return msg
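

# Usage sketch (assumption: update_proxy is any mirror that accepts the raw
# GitHub URL appended to it; pass an empty string to hit GitHub directly):
#   download_new_version(update_proxy='', force_up=True)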


def download_lives(live_url: str):
    t1 = getTime()
    base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
    live_path = os.path.join(base_path, 'base/直播.txt')  # the project's on-disk live-list filename
    logger.info(f'Trying to sync remote content from {live_url} to {live_path}')
    try:
        r = requests.get(live_url, headers=headers, timeout=3)
        auto_encoding = r.apparent_encoding
        if auto_encoding.lower() in ['utf-8', 'gbk', 'gb2312', 'gb18030']:
            r.encoding = auto_encoding
        html = r.text
        # crude sanity check: a live-source list should mention cctv or .m3u8 and be reasonably long
        if re.search(r'cctv|\.m3u8', html, re.M | re.I) and len(html) > 1000:
            logger.info(f'Live source synced successfully in {get_interval(t1)} ms')
            with open(live_path, mode='w+', encoding='utf-8') as f:
                f.write(html)
            return True
        else:
            logger.info(f'Live source sync failed: the remote file does not look like a live source. Took {get_interval(t1)} ms')
            return False
    except Exception as e:
        logger.info(f'Live source sync failed after {get_interval(t1)} ms\n{e}')
        return False
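

# A minimal manual smoke test, assuming the script is run from the project so
# that the utils.* imports resolve; the live URL is a hypothetical placeholder.
if __name__ == '__main__':
    print(getHotSuggest('sougou', 5))
    print(getLocalVer())
    # download_lives('http://example.com/lives.txt')  # hypothetical URL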