# nhk.py — NHK World / NHK for School extractors
import re

# Project-relative imports: base extractor class and shared utility helpers.
from .common import InfoExtractor
from ..utils import (
    parse_duration,
    traverse_obj,
    unescapeHTML,
    unified_timestamp,
    urljoin
)
class NhkBaseIE(InfoExtractor):
    """Shared plumbing for the NHK World video/audio on-demand extractors."""
    # %s slots, in order: ('v'|'r'), ('clip'|'esd'), ('episode'|'program'),
    # content id, language code, ('/all'|'').
    _API_URL_TEMPLATE = 'https://nwapi.nhk.jp/nhkworld/%sod%slist/v7b/%s/%s/%s/all%s.json'
    _BASE_URL_REGEX = r'https?://www3\.nhk\.or\.jp/nhkworld/(?P<lang>[a-z]{2})/ondemand'
    _TYPE_REGEX = r'/(?P<type>video|audio)/'

    def _call_api(self, m_id, lang, is_video, is_episode, is_clip):
        # 'v' selects the video list, 'r' the radio (audio) list. The apikey is
        # the fixed key used by the public NHK World web player.
        # Returns the 'episodes' array from the JSON payload, or [] when empty.
        return self._download_json(
            self._API_URL_TEMPLATE % (
                'v' if is_video else 'r',
                'clip' if is_clip else 'esd',
                'episode' if is_episode else 'program',
                m_id, lang, '/all' if is_video else ''),
            m_id, query={'apikey': 'EJfK8jdS57GqlupFgAfAAwr573q01y6k'})['data']['episodes'] or []

    def _extract_episode_info(self, url, episode=None):
        """Build an info dict for one episode.

        When `episode` is None the metadata is fetched from the API;
        otherwise the given episode dict (as returned by _call_api) is used.
        """
        fetch_episode = episode is None
        lang, m_type, episode_id = NhkVodIE._match_valid_url(url).groups()
        # 7-char IDs are stored by the API as 'pppp-eee' (program + episode).
        if len(episode_id) == 7:
            episode_id = episode_id[:4] + '-' + episode_id[4:]

        is_video = m_type == 'video'
        if fetch_episode:
            # Program prefix '9999' marks clips rather than regular episodes.
            episode = self._call_api(
                episode_id, lang, is_video, True, episode_id[:4] == '9999')[0]
        title = episode.get('sub_title_clean') or episode['sub_title']

        def get_clean_field(key):
            # Prefer the sanitized '<key>_clean' variant when present.
            return episode.get(key + '_clean') or episode.get(key)

        series = get_clean_field('title')

        thumbnails = []
        for s, w, h in [('', 640, 360), ('_l', 1280, 720)]:
            img_path = episode.get('image' + s)
            if not img_path:
                continue
            thumbnails.append({
                'id': '%dp' % h,
                'height': h,
                'width': w,
                'url': 'https://www3.nhk.or.jp' + img_path,
            })

        info = {
            'id': episode_id + '-' + lang,
            'title': '%s - %s' % (series, title) if series and title else title,
            'description': get_clean_field('description'),
            'thumbnails': thumbnails,
            'series': series,
            'episode': title,
        }
        if is_video:
            # Video playback is hosted on Piksel; hand off via url_transparent
            # so the metadata collected above is kept.
            vod_id = episode['vod_id']
            info.update({
                '_type': 'url_transparent',
                'ie_key': 'Piksel',
                'url': 'https://player.piksel.com/v/refid/nhkworld/prefid/' + vod_id,
                'id': vod_id,
            })
        else:
            if fetch_episode:
                audio_path = episode['audio']['audio']
                info['formats'] = self._extract_m3u8_formats(
                    'https://nhkworld-vh.akamaihd.net/i%s/master.m3u8' % audio_path,
                    episode_id, 'm4a', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False)
                for f in info['formats']:
                    f['language'] = lang
            else:
                # Called from the program (playlist) extractor with cached
                # metadata: defer the format extraction to NhkVodIE.
                info.update({
                    '_type': 'url_transparent',
                    'ie_key': NhkVodIE.ie_key(),
                    'url': url,
                })
        return info
class NhkVodIE(NhkBaseIE):
    """Extractor for a single NHK World on-demand video or audio episode."""
    # The 7-character IDs can contain alphabetic characters too: assume [a-z]
    # rather than just [a-f] (see e.g. /video/9999a34/ below, #29670).
    _VALID_URL = r'%s%s(?P<id>[0-9a-z]{7}|[^/]+?-\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
    # Content available only for a limited period of time. Visit
    # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.
    _TESTS = [{
        # video clip
        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999011/',
        'md5': '7a90abcfe610ec22a6bfe15bd46b30ca',
        'info_dict': {
            'id': 'a95j5iza',
            'ext': 'mp4',
            'title': "Dining with the Chef - Chef Saito's Family recipe: MENCHI-KATSU",
            'description': 'md5:5aee4a9f9d81c26281862382103b0ea5',
            'timestamp': 1565965194,
            'upload_date': '20190816',
        },
    }, {
        # audio clip
        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/r_inventions-20201104-1/',
        'info_dict': {
            'id': 'r_inventions-20201104-1-en',
            'ext': 'm4a',
            'title': "Japan's Top Inventions - Miniature Video Cameras",
            'description': 'md5:07ea722bdbbb4936fdd360b6a480c25b',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2015173/',
        'only_matching': True,
    }, {
        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/plugin-20190404-1/',
        'only_matching': True,
    }, {
        'url': 'https://www3.nhk.or.jp/nhkworld/fr/ondemand/audio/plugin-20190404-1/',
        'only_matching': True,
    }, {
        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/j_art-20150903-1/',
        'only_matching': True,
    }, {
        # video, alphabetic character in ID #29670
        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/',
        'only_matching': True,
        'info_dict': {
            'id': 'qfjay6cg',
            'ext': 'mp4',
            'title': 'DESIGN TALKS plus - Fishermen’s Finery',
            'description': 'md5:8a8f958aaafb0d7cb59d38de53f1e448',
            'thumbnail': r're:^https?:/(/[a-z0-9.-]+)+\.jpg\?w=1920&h=1080$',
            'upload_date': '20210615',
            'timestamp': 1623722008,
        }
    }]

    def _real_extract(self, url):
        # All real work is shared with the program extractor in NhkBaseIE.
        return self._extract_episode_info(url)
  136. class NhkVodProgramIE(NhkBaseIE):
  137. _VALID_URL = r'%s/program%s(?P<id>[0-9a-z]+)(?:.+?\btype=(?P<episode_type>clip|(?:radio|tv)Episode))?' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
  138. _TESTS = [{
  139. # video program episodes
  140. 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway',
  141. 'info_dict': {
  142. 'id': 'japanrailway',
  143. 'title': 'Japan Railway Journal',
  144. },
  145. 'playlist_mincount': 1,
  146. }, {
  147. # video program clips
  148. 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway/?type=clip',
  149. 'info_dict': {
  150. 'id': 'japanrailway',
  151. 'title': 'Japan Railway Journal',
  152. },
  153. 'playlist_mincount': 5,
  154. }, {
  155. 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/10yearshayaomiyazaki/',
  156. 'only_matching': True,
  157. }, {
  158. # audio program
  159. 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/audio/listener/',
  160. 'only_matching': True,
  161. }]
  162. def _real_extract(self, url):
  163. lang, m_type, program_id, episode_type = self._match_valid_url(url).groups()
  164. episodes = self._call_api(
  165. program_id, lang, m_type == 'video', False, episode_type == 'clip')
  166. entries = []
  167. for episode in episodes:
  168. episode_path = episode.get('url')
  169. if not episode_path:
  170. continue
  171. entries.append(self._extract_episode_info(
  172. urljoin(url, episode_path), episode))
  173. program_title = None
  174. if entries:
  175. program_title = entries[0].get('series')
  176. return self.playlist_result(entries, program_id, program_title)
  177. class NhkForSchoolBangumiIE(InfoExtractor):
  178. _VALID_URL = r'https?://www2\.nhk\.or\.jp/school/movie/(?P<type>bangumi|clip)\.cgi\?das_id=(?P<id>[a-zA-Z0-9_-]+)'
  179. _TESTS = [{
  180. 'url': 'https://www2.nhk.or.jp/school/movie/bangumi.cgi?das_id=D0005150191_00000',
  181. 'info_dict': {
  182. 'id': 'D0005150191_00003',
  183. 'title': 'にている かな',
  184. 'duration': 599.999,
  185. 'timestamp': 1396414800,
  186. 'upload_date': '20140402',
  187. 'ext': 'mp4',
  188. 'chapters': 'count:12'
  189. },
  190. 'params': {
  191. # m3u8 download
  192. 'skip_download': True,
  193. },
  194. }]
  195. def _real_extract(self, url):
  196. program_type, video_id = self._match_valid_url(url).groups()
  197. webpage = self._download_webpage(
  198. f'https://www2.nhk.or.jp/school/movie/{program_type}.cgi?das_id={video_id}', video_id)
  199. # searches all variables
  200. base_values = {g.group(1): g.group(2) for g in re.finditer(r'var\s+([a-zA-Z_]+)\s*=\s*"([^"]+?)";', webpage)}
  201. # and programObj values too
  202. program_values = {g.group(1): g.group(3) for g in re.finditer(r'(?:program|clip)Obj\.([a-zA-Z_]+)\s*=\s*(["\'])([^"]+?)\2;', webpage)}
  203. # extract all chapters
  204. chapter_durations = [parse_duration(g.group(1)) for g in re.finditer(r'chapterTime\.push\(\'([0-9:]+?)\'\);', webpage)]
  205. chapter_titles = [' '.join([g.group(1) or '', unescapeHTML(g.group(2))]).strip() for g in re.finditer(r'<div class="cpTitle"><span>(scene\s*\d+)?</span>([^<]+?)</div>', webpage)]
  206. # this is how player_core.js is actually doing (!)
  207. version = base_values.get('r_version') or program_values.get('version')
  208. if version:
  209. video_id = f'{video_id.split("_")[0]}_{version}'
  210. formats = self._extract_m3u8_formats(
  211. f'https://nhks-vh.akamaihd.net/i/das/{video_id[0:8]}/{video_id}_V_000.f4v/master.m3u8',
  212. video_id, ext='mp4', m3u8_id='hls')
  213. duration = parse_duration(base_values.get('r_duration'))
  214. chapters = None
  215. if chapter_durations and chapter_titles and len(chapter_durations) == len(chapter_titles):
  216. start_time = chapter_durations
  217. end_time = chapter_durations[1:] + [duration]
  218. chapters = [{
  219. 'start_time': s,
  220. 'end_time': e,
  221. 'title': t,
  222. } for s, e, t in zip(start_time, end_time, chapter_titles)]
  223. return {
  224. 'id': video_id,
  225. 'title': program_values.get('name'),
  226. 'duration': parse_duration(base_values.get('r_duration')),
  227. 'timestamp': unified_timestamp(base_values['r_upload']),
  228. 'formats': formats,
  229. 'chapters': chapters,
  230. }
  231. class NhkForSchoolSubjectIE(InfoExtractor):
  232. IE_DESC = 'Portal page for each school subjects, like Japanese (kokugo, 国語) or math (sansuu/suugaku or 算数・数学)'
  233. KNOWN_SUBJECTS = (
  234. 'rika', 'syakai', 'kokugo',
  235. 'sansuu', 'seikatsu', 'doutoku',
  236. 'ongaku', 'taiiku', 'zukou',
  237. 'gijutsu', 'katei', 'sougou',
  238. 'eigo', 'tokkatsu',
  239. 'tokushi', 'sonota',
  240. )
  241. _VALID_URL = r'https?://www\.nhk\.or\.jp/school/(?P<id>%s)/?(?:[\?#].*)?$' % '|'.join(re.escape(s) for s in KNOWN_SUBJECTS)
  242. _TESTS = [{
  243. 'url': 'https://www.nhk.or.jp/school/sougou/',
  244. 'info_dict': {
  245. 'id': 'sougou',
  246. 'title': '総合的な学習の時間',
  247. },
  248. 'playlist_mincount': 16,
  249. }, {
  250. 'url': 'https://www.nhk.or.jp/school/rika/',
  251. 'info_dict': {
  252. 'id': 'rika',
  253. 'title': '理科',
  254. },
  255. 'playlist_mincount': 15,
  256. }]
  257. def _real_extract(self, url):
  258. subject_id = self._match_id(url)
  259. webpage = self._download_webpage(url, subject_id)
  260. return self.playlist_from_matches(
  261. re.finditer(rf'href="((?:https?://www\.nhk\.or\.jp)?/school/{re.escape(subject_id)}/[^/]+/)"', webpage),
  262. subject_id,
  263. self._html_search_regex(r'(?s)<span\s+class="subjectName">\s*<img\s*[^<]+>\s*([^<]+?)</span>', webpage, 'title', fatal=False),
  264. lambda g: urljoin(url, g.group(1)))
  265. class NhkForSchoolProgramListIE(InfoExtractor):
  266. _VALID_URL = r'https?://www\.nhk\.or\.jp/school/(?P<id>(?:%s)/[a-zA-Z0-9_-]+)' % (
  267. '|'.join(re.escape(s) for s in NhkForSchoolSubjectIE.KNOWN_SUBJECTS)
  268. )
  269. _TESTS = [{
  270. 'url': 'https://www.nhk.or.jp/school/sougou/q/',
  271. 'info_dict': {
  272. 'id': 'sougou/q',
  273. 'title': 'Q~こどものための哲学',
  274. },
  275. 'playlist_mincount': 20,
  276. }]
  277. def _real_extract(self, url):
  278. program_id = self._match_id(url)
  279. webpage = self._download_webpage(f'https://www.nhk.or.jp/school/{program_id}/', program_id)
  280. title = (self._generic_title('', webpage)
  281. or self._html_search_regex(r'<h3>([^<]+?)とは?\s*</h3>', webpage, 'title', fatal=False))
  282. title = re.sub(r'\s*\|\s*NHK\s+for\s+School\s*$', '', title) if title else None
  283. description = self._html_search_regex(
  284. r'(?s)<div\s+class="programDetail\s*">\s*<p>[^<]+</p>',
  285. webpage, 'description', fatal=False, group=0)
  286. bangumi_list = self._download_json(
  287. f'https://www.nhk.or.jp/school/{program_id}/meta/program.json', program_id)
  288. # they're always bangumi
  289. bangumis = [
  290. self.url_result(f'https://www2.nhk.or.jp/school/movie/bangumi.cgi?das_id={x}')
  291. for x in traverse_obj(bangumi_list, ('part', ..., 'part-video-dasid')) or []]
  292. return self.playlist_result(bangumis, program_id, title, description)