# everything_else.py
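# Extraction routines for YouTube's polymer JSON responses: channel pages,
# search results, playlists, and comment continuations.
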
from .common import (get, multi_get, deep_get, multi_deep_get,
    liberal_update, conservative_update, remove_redirect, normalize_url,
    extract_str, extract_formatted_text, extract_int, extract_approx_int,
    extract_date, check_missing_keys, extract_item_info, extract_items,
    extract_response)
from youtube import proto

import re
import urllib
from math import ceil
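

# Extracts info from the polymer JSON of a channel page. `tab` is the channel
# tab the response belongs to: 'videos', 'playlists', 'search', or 'about'.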
def extract_channel_info(polymer_json, tab):
    response, err = extract_response(polymer_json)
    if err:
        return {'error': err}

    metadata = deep_get(response, 'metadata', 'channelMetadataRenderer',
        default={})
    if not metadata:
        metadata = deep_get(response, 'microformat', 'microformatDataRenderer',
            default={})

    # channel doesn't exist or was terminated
    # example terminated channel: https://www.youtube.com/channel/UCnKJeK_r90jDdIuzHXC0Org
    if not metadata:
        if response.get('alerts'):
            error_string = ' '.join(
                extract_str(deep_get(alert, 'alertRenderer', 'text'), default='')
                for alert in response['alerts']
            )
            if not error_string:
                error_string = 'Failed to extract error'
            return {'error': error_string}
        elif deep_get(response, 'responseContext', 'errors'):
            for error in response['responseContext']['errors'].get('error', []):
                if error.get('code') == 'INVALID_VALUE' and error.get('location') == 'browse_id':
                    return {'error': 'This channel does not exist'}
        return {'error': 'Failure getting metadata'}
    info = {'error': None}
    info['current_tab'] = tab
    info['approx_subscriber_count'] = extract_approx_int(deep_get(response,
        'header', 'c4TabbedHeaderRenderer', 'subscriberCountText'))

    # stuff from microformat (info given by youtube for every page on channel)
    info['short_description'] = metadata.get('description')
    if info['short_description'] and len(info['short_description']) > 730:
        info['short_description'] = info['short_description'][0:730] + '...'
    info['channel_name'] = metadata.get('title')
    info['avatar'] = normalize_url(multi_deep_get(metadata,
        ['avatar', 'thumbnails', 0, 'url'],
        ['thumbnail', 'thumbnails', 0, 'url'],
    ))
    channel_url = multi_get(metadata, 'urlCanonical', 'channelUrl')
    if channel_url:
        channel_id = get(channel_url.rstrip('/').split('/'), -1)
        info['channel_id'] = channel_id
    else:
        info['channel_id'] = metadata.get('externalId')
    if info['channel_id']:
        # use info['channel_id'] here, not the local channel_id, which is
        # unbound when the id came from externalId in the branch above
        info['channel_url'] = 'https://www.youtube.com/channel/' + info['channel_id']
    else:
        info['channel_url'] = None

    # get items
    info['items'] = []
    info['ctoken'] = None

    # empty channel
    if 'contents' not in response and 'continuationContents' not in response:
        return info

    if tab in ('videos', 'playlists', 'search'):
        items, ctoken = extract_items(response)
        additional_info = {
            'author': info['channel_name'],
            'author_id': info['channel_id'],
            'author_url': info['channel_url'],
        }
        info['items'] = [extract_item_info(renderer, additional_info) for renderer in items]
        info['ctoken'] = ctoken
        if tab in ('search', 'playlists'):
            info['is_last_page'] = (ctoken is None)
    elif tab == 'about':
        items, _ = extract_items(response, item_types={'channelAboutFullMetadataRenderer'})
        if not items:
            info['error'] = 'Could not find channelAboutFullMetadataRenderer'
            return info
        channel_metadata = items[0]['channelAboutFullMetadataRenderer']

        info['links'] = []
        for link_json in channel_metadata.get('primaryLinks', ()):
            # default='' guards against a missing urlEndpoint, which would
            # otherwise make the startswith calls fail on None
            url = remove_redirect(deep_get(link_json,
                'navigationEndpoint', 'urlEndpoint', 'url', default=''))
            if not (url.startswith('http://') or url.startswith('https://')):
                url = 'http://' + url
            text = extract_str(link_json.get('title'))
            info['links'].append( (text, url) )

        info['date_joined'] = extract_date(channel_metadata.get('joinedDateText'))
        info['view_count'] = extract_int(channel_metadata.get('viewCountText'))
        info['description'] = extract_str(channel_metadata.get('description'), default='')
    else:
        raise NotImplementedError('Unknown or unsupported channel tab: ' + tab)

    return info
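

# Extracts the result items from a search response, along with the estimated
# result/page counts and any "did you mean" / "showing results for" correction.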
def extract_search_info(polymer_json):
    response, err = extract_response(polymer_json)
    if err:
        return {'error': err}
    info = {'error': None}
    info['estimated_results'] = int(response['estimatedResults'])
    info['estimated_pages'] = ceil(info['estimated_results']/20)

    results, _ = extract_items(response)

    info['items'] = []
    info['corrections'] = {'type': None}
    for renderer in results:
        type = list(renderer.keys())[0]
        if type == 'shelfRenderer':
            continue
        if type == 'didYouMeanRenderer':
            renderer = renderer[type]

            info['corrections'] = {
                'type': 'did_you_mean',
                'corrected_query': renderer['correctedQueryEndpoint']['searchEndpoint']['query'],
                'corrected_query_text': renderer['correctedQuery']['runs'],
            }
            continue
        if type == 'showingResultsForRenderer':
            renderer = renderer[type]

            info['corrections'] = {
                'type': 'showing_results_for',
                'corrected_query_text': renderer['correctedQuery']['runs'],
                'original_query_text': renderer['originalQuery']['simpleText'],
            }
            continue

        i_info = extract_item_info(renderer)
        if i_info.get('type') != 'unsupported':
            info['items'].append(i_info)

    return info
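

# Extracts title, author, counts, and thumbnail info from the
# playlistHeaderRenderer of a playlist page response.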
def extract_playlist_metadata(polymer_json):
    response, err = extract_response(polymer_json)
    if err:
        return {'error': err}

    metadata = {'error': None}
    header = deep_get(response, 'header', 'playlistHeaderRenderer', default={})
    metadata['title'] = extract_str(header.get('title'))

    metadata['first_video_id'] = deep_get(header, 'playEndpoint', 'watchEndpoint', 'videoId')
    # video ids are 11 characters drawn from [A-Za-z0-9_-]
    first_id = re.search(r'([A-Za-z0-9_\-]{11})', deep_get(header,
        'thumbnail', 'thumbnails', 0, 'url', default=''))
    if first_id:
        conservative_update(metadata, 'first_video_id', first_id.group(1))
    if metadata['first_video_id'] is None:
        metadata['thumbnail'] = None
    else:
        metadata['thumbnail'] = 'https://i.ytimg.com/vi/' + metadata['first_video_id'] + '/mqdefault.jpg'

    metadata['video_count'] = extract_int(header.get('numVideosText'))
    metadata['description'] = extract_str(header.get('descriptionText'), default='')
    metadata['author'] = extract_str(header.get('ownerText'))
    metadata['author_id'] = multi_deep_get(header,
        ['ownerText', 'runs', 0, 'navigationEndpoint', 'browseEndpoint', 'browseId'],
        ['ownerEndpoint', 'browseEndpoint', 'browseId'])
    if metadata['author_id']:
        metadata['author_url'] = 'https://www.youtube.com/channel/' + metadata['author_id']
    else:
        metadata['author_url'] = None
    metadata['view_count'] = extract_int(header.get('viewCountText'))
    metadata['like_count'] = extract_int(header.get('likesCountWithoutLikeText'))
    for stat in header.get('stats', ()):
        text = extract_str(stat, default='')  # default avoids `in None` errors
        if 'videos' in text:
            conservative_update(metadata, 'video_count', extract_int(text))
        elif 'views' in text:
            conservative_update(metadata, 'view_count', extract_int(text))
        elif 'updated' in text:
            metadata['time_published'] = extract_date(text)

    return metadata
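

# Extracts the video items of a playlist page; on the first page (no
# continuation) the playlist metadata is included as well.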
def extract_playlist_info(polymer_json):
    response, err = extract_response(polymer_json)
    if err:
        return {'error': err}
    info = {'error': None}
    first_page = 'continuationContents' not in response
    video_list, _ = extract_items(response)

    info['items'] = [extract_item_info(renderer) for renderer in video_list]

    if first_page:
        info['metadata'] = extract_playlist_metadata(polymer_json)

    return info
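

# A comments continuation token (ctoken) is a base64-encoded protobuf message.
# As parsed below: field 2 contains a message whose field 2 is the video id,
# and field 6 holds the offset (its field 5), a replies marker (its field 3),
# and the sort mode (field 6 of its field 4).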
def _ctoken_metadata(ctoken):
    result = dict()
    params = proto.parse(proto.b64_to_bytes(ctoken))
    result['video_id'] = proto.parse(params[2])[2].decode('ascii')

    offset_information = proto.parse(params[6])
    result['offset'] = offset_information.get(5, 0)

    result['is_replies'] = False
    if (3 in offset_information) and (2 in proto.parse(offset_information[3])):
        result['is_replies'] = True
        result['sort'] = None
    else:
        try:
            result['sort'] = proto.parse(offset_information[4])[6]
        except KeyError:
            result['sort'] = 0

    return result
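

# Extracts a page of comments. The video id, offset, sort mode, and whether
# these are replies can only be recovered from the request's ctoken, so pass
# it in when available.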
def extract_comments_info(polymer_json, ctoken=None):
    response, err = extract_response(polymer_json)
    if err:
        return {'error': err}
    info = {'error': None}

    if ctoken:
        metadata = _ctoken_metadata(ctoken)
    else:
        metadata = {}
    info['video_id'] = metadata.get('video_id')
    info['offset'] = metadata.get('offset')
    info['is_replies'] = metadata.get('is_replies')
    info['sort'] = metadata.get('sort')
    info['video_title'] = None

    comments, ctoken = extract_items(response,
        item_types={'commentThreadRenderer', 'commentRenderer'})
    info['comments'] = []
    info['ctoken'] = ctoken
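
    # each item is either a top-level thread ('commentThreadRenderer'), which
    # wraps a comment together with its replies info, or a bare
    # 'commentRenderer', which is itself a reply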
    for comment in comments:
        comment_info = {}

        if 'commentThreadRenderer' in comment:  # top level comments
            conservative_update(info, 'is_replies', False)
            comment_thread = comment['commentThreadRenderer']
            info['video_title'] = extract_str(comment_thread.get('commentTargetTitle'))
            if 'replies' not in comment_thread:
                comment_info['reply_count'] = 0
                comment_info['reply_ctoken'] = None
            else:
                comment_info['reply_count'] = extract_int(deep_get(comment_thread,
                    'replies', 'commentRepliesRenderer', 'moreText'
                ), default=1)  # With 1 reply, the text reads "View reply"
                comment_info['reply_ctoken'] = multi_deep_get(
                    comment_thread,
                    ['replies', 'commentRepliesRenderer', 'contents', 0,
                     'continuationItemRenderer', 'button', 'buttonRenderer',
                     'command', 'continuationCommand', 'token'],
                    ['replies', 'commentRepliesRenderer', 'continuations', 0,
                     'nextContinuationData', 'continuation']
                )
            comment_renderer = deep_get(comment_thread, 'comment', 'commentRenderer', default={})
        elif 'commentRenderer' in comment:  # replies
            # replyCount, below, is not present for replies even if the reply
            # has further replies to it
            comment_info['reply_count'] = 0
            comment_info['reply_ctoken'] = None
            conservative_update(info, 'is_replies', True)
            comment_renderer = comment['commentRenderer']
        else:
            comment_renderer = {}

        # These 3 are sometimes absent, likely because the channel was deleted
        comment_info['author'] = extract_str(comment_renderer.get('authorText'))
        comment_info['author_url'] = normalize_url(deep_get(comment_renderer,
            'authorEndpoint', 'commandMetadata', 'webCommandMetadata', 'url'))
        comment_info['author_id'] = deep_get(comment_renderer,
            'authorEndpoint', 'browseEndpoint', 'browseId')

        comment_info['author_avatar'] = normalize_url(deep_get(
            comment_renderer, 'authorThumbnail', 'thumbnails', 0, 'url'))
        comment_info['id'] = comment_renderer.get('commentId')
        comment_info['text'] = extract_formatted_text(comment_renderer.get('contentText'))
        comment_info['time_published'] = extract_str(comment_renderer.get('publishedTimeText'))
        comment_info['like_count'] = comment_renderer.get('likeCount')
        comment_info['approx_like_count'] = extract_approx_int(
            comment_renderer.get('voteCount'))
        liberal_update(comment_info, 'reply_count', comment_renderer.get('replyCount'))

        info['comments'].append(comment_info)

    return info