import itertools
import json
import math
import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urllib_parse_unquote,
    compat_urlparse
)
from ..utils import (
    extract_attributes,
    ExtractorError,
    InAdvancePagedList,
    int_or_none,
    js_to_json,
    parse_iso8601,
    strip_or_none,
    unified_timestamp,
    unescapeHTML,
    url_or_none,
)


class PolskieRadioBaseExtractor(InfoExtractor):
    def _extract_webpage_player_entries(self, webpage, playlist_id, base_data):
        media_urls = set()

        # Each embedded player carries its metadata as a JSON blob in a data-media attribute.
        for data_media in re.findall(r'<[^>]+data-media="?({[^>]+})"?', webpage):
            media = self._parse_json(data_media, playlist_id, transform_source=unescapeHTML, fatal=False)
            if not media or not media.get('file') or not media.get('desc'):
                continue
            media_url = self._proto_relative_url(media['file'])
            if media_url in media_urls:
                continue
            media_urls.add(media_url)
            entry = base_data.copy()
            entry.update({
                'id': compat_str(media['id']),
                'url': media_url,
                'duration': int_or_none(media.get('length')),
                'vcodec': 'none' if media.get('provider') == 'audio' else None,
            })
            entry_title = compat_urllib_parse_unquote(media['desc'])
            if entry_title:
                entry['title'] = entry_title
            yield entry


class PolskieRadioIE(PolskieRadioBaseExtractor):
    _VALID_URL = r'https?://(?:www\.)?polskieradio(?:24)?\.pl/\d+/\d+/Artykul/(?P<id>[0-9]+)'
    _TESTS = [{
        # Old-style single broadcast.
        'url': 'http://www.polskieradio.pl/7/5102/Artykul/1587943,Prof-Andrzej-Nowak-o-historii-nie-da-sie-myslec-beznamietnie',
        'info_dict': {
            'id': '1587943',
            'title': 'Prof. Andrzej Nowak: o historii nie da się myśleć beznamiętnie',
            'description': 'md5:12f954edbf3120c5e7075e17bf9fc5c5',
        },
        'playlist': [{
            'md5': '2984ee6ce9046d91fc233bc1a864a09a',
            'info_dict': {
                'id': '1540576',
                'ext': 'mp3',
                'title': 'md5:d4623290d4ac983bf924061c75c23a0d',
                'timestamp': 1456594200,
                'upload_date': '20160227',
                'duration': 2364,
                'thumbnail': r're:^https?://static\.prsa\.pl/images/.*\.jpg$'
            },
        }],
    }, {
        # New-style single broadcast.
        'url': 'https://www.polskieradio.pl/8/2382/Artykul/2534482,Zagarysci-Poezja-jak-spoiwo',
        'info_dict': {
            'id': '2534482',
            'title': 'Żagaryści. Poezja jak spoiwo',
            'description': 'md5:f18d95d5dcba747a09b635e21a4c0695',
        },
        'playlist': [{
            'md5': 'd07559829f61d5a93a75755987ded760',
            'info_dict': {
                'id': '2516679',
                'ext': 'mp3',
                'title': 'md5:c6e1234e0b747ad883cb91b7ad06b98c',
                'timestamp': 1592654400,
                'upload_date': '20200620',
                'duration': 1430,
                'thumbnail': r're:^https?://static\.prsa\.pl/images/.*\.jpg$'
            },
        }],
    }, {
        # PR4 audition - other frontend
        'url': 'https://www.polskieradio.pl/10/6071/Artykul/2610977,Poglos-29-pazdziernika-godz-2301',
        'info_dict': {
            'id': '2610977',
            'ext': 'mp3',
            'title': 'Pogłos 29 października godz. 23:01',
        },
    }, {
        'url': 'http://polskieradio.pl/9/305/Artykul/1632955,Bardzo-popularne-slowo-remis',
        'only_matching': True,
    }, {
        'url': 'http://www.polskieradio.pl/7/5102/Artykul/1587943',
        'only_matching': True,
    }, {
        # with mp4 video
        'url': 'http://www.polskieradio.pl/9/299/Artykul/1634903,Brexit-Leszek-Miller-swiat-sie-nie-zawali-Europa-bedzie-trwac-dalej',
        'only_matching': True,
    }, {
        'url': 'https://polskieradio24.pl/130/4503/Artykul/2621876,Narusza-nasza-suwerennosc-Publicysci-o-uzaleznieniu-funduszy-UE-od-praworzadnosci',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        content = self._search_regex(
            r'(?s)<div[^>]+class="\s*this-article\s*"[^>]*>(.+?)<div[^>]+class="tags"[^>]*>',
            webpage, 'content', default=None)

        timestamp = unified_timestamp(self._html_search_regex(
            r'(?s)<span[^>]+id="datetime2"[^>]*>(.+?)</span>',
            webpage, 'timestamp', default=None))

        thumbnail_url = self._og_search_thumbnail(webpage, default=None)

        title = self._og_search_title(webpage).strip()

        description = strip_or_none(self._og_search_description(webpage, default=None))
        description = description.replace('\xa0', ' ') if description is not None else None

        if not content:
            # PR4-style audition pages have no article body with data-media players;
            # the single recording URL is embedded in the player script instead.
            return {
                'id': playlist_id,
                'url': self._proto_relative_url(
                    self._search_regex(
                        r"source:\s*'(//static\.prsa\.pl/[^']+)'",
                        webpage, 'audition record url')),
                'title': title,
                'description': description,
                'timestamp': timestamp,
                'thumbnail': thumbnail_url,
            }

        entries = self._extract_webpage_player_entries(content, playlist_id, {
            'title': title,
            'timestamp': timestamp,
            'thumbnail': thumbnail_url,
        })

        return self.playlist_result(entries, playlist_id, title, description)


class PolskieRadioCategoryIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?polskieradio\.pl/\d+(?:,[^/]+)?/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.polskieradio.pl/7/5102,HISTORIA-ZYWA',
        'info_dict': {
            'id': '5102',
            'title': 'HISTORIA ŻYWA',
        },
        'playlist_mincount': 38,
    }, {
        'url': 'http://www.polskieradio.pl/7/4807',
        'info_dict': {
            'id': '4807',
            'title': 'Vademecum 1050. rocznicy Chrztu Polski'
        },
        'playlist_mincount': 5
    }, {
        'url': 'http://www.polskieradio.pl/7/129,Sygnaly-dnia?ref=source',
        'only_matching': True
    }, {
        'url': 'http://www.polskieradio.pl/37,RedakcjaKatolicka/4143,Kierunek-Krakow',
        'info_dict': {
            'id': '4143',
            'title': 'Kierunek Kraków',
        },
        'playlist_mincount': 61
    }, {
        'url': 'http://www.polskieradio.pl/10,czworka/214,muzyka',
        'info_dict': {
            'id': '214',
            'title': 'Muzyka',
        },
        'playlist_mincount': 61
    }, {
        'url': 'http://www.polskieradio.pl/7,Jedynka/5102,HISTORIA-ZYWA',
        'only_matching': True,
    }, {
        'url': 'http://www.polskieradio.pl/8,Dwojka/196,Publicystyka',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        return False if PolskieRadioIE.suitable(url) else super(PolskieRadioCategoryIE, cls).suitable(url)

    def _entries(self, url, page, category_id):
        content = page
        for page_num in itertools.count(2):
            for a_entry, entry_id in re.findall(
                    r'(?s)<article[^>]+>.*?(<a[^>]+href=["\']/\d+/\d+/Artykul/(\d+)[^>]+>).*?</article>',
                    content):
                entry = extract_attributes(a_entry)
                href = entry.get('href')
                if not href:
                    continue
                yield self.url_result(
                    compat_urlparse.urljoin(url, href), PolskieRadioIE.ie_key(),
                    entry_id, entry.get('title'))
            mobj = re.search(
                r'<div[^>]+class=["\']next["\'][^>]*>\s*<a[^>]+href=(["\'])(?P<url>(?:(?!\1).)+)\1',
                content)
            if not mobj:
                break
            next_url = compat_urlparse.urljoin(url, mobj.group('url'))
            content = self._download_webpage(
                next_url, category_id, 'Downloading page %s' % page_num)

    def _real_extract(self, url):
        category_id = self._match_id(url)
        webpage = self._download_webpage(url, category_id)
        title = self._html_search_regex(
            r'<title>([^<]+) - [^<]+ - [^<]+</title>',
            webpage, 'title', fatal=False)
        return self.playlist_result(
            self._entries(url, webpage, category_id),
            category_id, title)


class PolskieRadioPlayerIE(InfoExtractor):
    IE_NAME = 'polskieradio:player'
    _VALID_URL = r'https?://player\.polskieradio\.pl/anteny/(?P<id>[^/]+)'

    _BASE_URL = 'https://player.polskieradio.pl'
    _PLAYER_URL = 'https://player.polskieradio.pl/main.bundle.js'
    _STATIONS_API_URL = 'https://apipr.polskieradio.pl/api/stacje'

    _TESTS = [{
        'url': 'https://player.polskieradio.pl/anteny/trojka',
        'info_dict': {
            'id': '3',
            'ext': 'm4a',
            'title': 'Trójka',
        },
        'params': {
            'format': 'bestaudio',
            'skip_download': 'endless stream',
        },
    }]

    def _get_channel_list(self, channel_url='no_channel'):
        player_code = self._download_webpage(
            self._PLAYER_URL, channel_url,
            note='Downloading js player')
        channel_list = js_to_json(self._search_regex(
            r';var r="anteny",a=(\[.+?\])},', player_code, 'channel list'))
        return self._parse_json(channel_list, channel_url)

    def _real_extract(self, url):
        channel_url = self._match_id(url)
        channel_list = self._get_channel_list(channel_url)

        channel = next((c for c in channel_list if c.get('url') == channel_url), None)
        if not channel:
            raise ExtractorError('Channel not found')

        station_list = self._download_json(self._STATIONS_API_URL, channel_url,
                                           note='Downloading stream url list',
                                           headers={
                                               'Accept': 'application/json',
                                               'Referer': url,
                                               'Origin': self._BASE_URL,
                                           })
        station = next((s for s in station_list
                        if s.get('Name') == (channel.get('streamName') or channel.get('name'))), None)
        if not station:
            raise ExtractorError('Station not found even though we extracted channel')

        formats = []
        for stream_url in station['Streams']:
            stream_url = self._proto_relative_url(stream_url)
            if stream_url.endswith('/playlist.m3u8'):
                formats.extend(self._extract_m3u8_formats(stream_url, channel_url, live=True))
            elif stream_url.endswith('/manifest.f4m'):
                # HDS manifests need the f4m parser, not the DASH/MPD one.
                formats.extend(self._extract_f4m_formats(stream_url, channel_url))
            elif stream_url.endswith('/Manifest'):
                formats.extend(self._extract_ism_formats(stream_url, channel_url))
            else:
                formats.append({
                    'url': stream_url,
                })

        return {
            'id': compat_str(channel['id']),
            'formats': formats,
            'title': channel.get('name') or channel.get('streamName'),
            'display_id': channel_url,
            'thumbnail': f'{self._BASE_URL}/images/{channel_url}-color-logo.png',
            'is_live': True,
        }


class PolskieRadioPodcastBaseExtractor(InfoExtractor):
    _API_BASE = 'https://apipodcasts.polskieradio.pl/api'

    def _parse_episode(self, data):
        return {
            'id': data['guid'],
            'formats': [{
                'url': data['url'],
                'filesize': int_or_none(data.get('fileSize')),
            }],
            'title': data['title'],
            'description': data.get('description'),
            'duration': int_or_none(data.get('length')),
            'timestamp': parse_iso8601(data.get('publishDate')),
            'thumbnail': url_or_none(data.get('image')),
            'series': data.get('podcastTitle'),
            'episode': data['title'],
        }


class PolskieRadioPodcastListIE(PolskieRadioPodcastBaseExtractor):
    IE_NAME = 'polskieradio:podcast:list'
    _VALID_URL = r'https?://podcasty\.polskieradio\.pl/podcast/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://podcasty.polskieradio.pl/podcast/8/',
        'info_dict': {
            'id': '8',
            'title': 'Śniadanie w Trójce',
            'description': 'md5:57abcc27bc4c6a6b25baa3061975b9ef',
            'uploader': 'Beata Michniewicz',
        },
        'playlist_mincount': 714,
    }]
    _PAGE_SIZE = 10

    def _call_api(self, podcast_id, page):
        return self._download_json(
            f'{self._API_BASE}/Podcasts/{podcast_id}/?pageSize={self._PAGE_SIZE}&page={page}',
            podcast_id, f'Downloading page {page}')

    def _real_extract(self, url):
        podcast_id = self._match_id(url)
        data = self._call_api(podcast_id, 1)

        def get_page(page_num):
            # page_num is 0-based; the API is 1-based and page 1 has already been fetched.
            page_data = self._call_api(podcast_id, page_num + 1) if page_num else data
            yield from (self._parse_episode(ep) for ep in page_data['items'])

        return {
            '_type': 'playlist',
            'entries': InAdvancePagedList(
                get_page, math.ceil(data['itemCount'] / self._PAGE_SIZE), self._PAGE_SIZE),
            'id': str(data['id']),
            'title': data['title'],
            'description': data.get('description'),
            'uploader': data.get('announcer'),
        }


class PolskieRadioPodcastIE(PolskieRadioPodcastBaseExtractor):
    IE_NAME = 'polskieradio:podcast'
    _VALID_URL = r'https?://podcasty\.polskieradio\.pl/track/(?P<id>[a-f\d]{8}(?:-[a-f\d]{4}){4}[a-f\d]{8})'
    _TESTS = [{
        'url': 'https://podcasty.polskieradio.pl/track/6eafe403-cb8f-4756-b896-4455c3713c32',
        'info_dict': {
            'id': '6eafe403-cb8f-4756-b896-4455c3713c32',
            'ext': 'mp3',
            'title': 'Theresa May rezygnuje. Co dalej z brexitem?',
            'description': 'md5:e41c409a29d022b70ef0faa61dbded60',
        },
    }]

    def _real_extract(self, url):
        podcast_id = self._match_id(url)
        data = self._download_json(
            f'{self._API_BASE}/audio',
            podcast_id, 'Downloading podcast metadata',
            data=json.dumps({
                'guids': [podcast_id],
            }).encode('utf-8'),
            headers={
                'Content-Type': 'application/json',
            })
        return self._parse_episode(data[0])


class PolskieRadioRadioKierowcowIE(PolskieRadioBaseExtractor):
    _VALID_URL = r'https?://(?:www\.)?radiokierowcow\.pl/artykul/(?P<id>[0-9]+)'
    IE_NAME = 'polskieradio:kierowcow'

    _TESTS = [{
        'url': 'https://radiokierowcow.pl/artykul/2694529',
        'info_dict': {
            'id': '2694529',
            'title': 'Zielona fala reliktem przeszłości?',
            'description': 'md5:343950a8717c9818fdfd4bd2b8ca9ff2',
        },
        'playlist_count': 3,
    }]

    def _real_extract(self, url):
        media_id = self._match_id(url)
        webpage = self._download_webpage(url, media_id)
        nextjs_build = self._search_nextjs_data(webpage, media_id)['buildId']
        article = self._download_json(
            f'https://radiokierowcow.pl/_next/data/{nextjs_build}/artykul/{media_id}.json?articleId={media_id}',
            media_id)
        data = article['pageProps']['data']
        title = data['title']
        entries = self._extract_webpage_player_entries(data['content'], media_id, {
            'title': title,
        })

        return {
            '_type': 'playlist',
            'id': media_id,
            'entries': entries,
            'title': title,
            'description': data.get('lead'),
        }