iwara.py

import itertools
import re
import urllib.parse

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    mimetype2ext,
    remove_end,
    strip_or_none,
    unified_strdate,
    url_or_none,
    urljoin,
)


class IwaraBaseIE(InfoExtractor):
    _BASE_REGEX = r'(?P<base_url>https?://(?:www\.|ecchi\.)?iwara\.tv)'

    def _extract_playlist(self, base_url, webpage):
        # Each entry in a listing is a title link; resolve relative hrefs
        # against the site base URL
        for path in re.findall(r'class="title">\s*<a[^<]+href="([^"]+)', webpage):
            yield self.url_result(urljoin(base_url, path))
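

# IwaraIE handles single-video pages. Metadata comes from the site's JSON
# API; if the API returns nothing, the page only embeds an off-site player
# (e.g. Google Drive or YouTube) and extraction is delegated via its iframe.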
class IwaraIE(IwaraBaseIE):
    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/videos/(?P<id>[a-zA-Z0-9]+)'
    _TESTS = [{
        'url': 'http://iwara.tv/videos/amVwUl1EHpAD9RD',
        # md5 is unstable
        'info_dict': {
            'id': 'amVwUl1EHpAD9RD',
            'ext': 'mp4',
            'title': '【MMD R-18】ガールフレンド carry_me_off',
            'age_limit': 18,
            'thumbnail': 'https://i.iwara.tv/sites/default/files/videos/thumbnails/7951/thumbnail-7951_0001.png',
            'uploader': 'Reimu丨Action',
            'upload_date': '20150828',
            'description': 'md5:1d4905ce48c66c9299c617f08e106e0f',
        },
    }, {
        'url': 'http://ecchi.iwara.tv/videos/Vb4yf2yZspkzkBO',
        'md5': '7e5f1f359cd51a027ba4a7b7710a50f0',
        'info_dict': {
            'id': '0B1LvuHnL-sRFNXB1WHNqbGw4SXc',
            'ext': 'mp4',
            'title': '[3D Hentai] Kyonyu × Genkai × Emaki Shinobi Girls.mp4',
            'age_limit': 18,
        },
        'add_ie': ['GoogleDrive'],
    }, {
        'url': 'http://www.iwara.tv/videos/nawkaumd6ilezzgq',
        # md5 is unstable
        'info_dict': {
            'id': '6liAP9s2Ojc',
            'ext': 'mp4',
            'age_limit': 18,
            'title': '[MMD] Do It Again Ver.2 [1080p 60FPS] (Motion,Camera,Wav+DL)',
            'description': 'md5:590c12c0df1443d833fbebe05da8c47a',
            'upload_date': '20160910',
            'uploader': 'aMMDsork',
            'uploader_id': 'UCVOFyOSCyFkXTYYHITtqB7A',
        },
        'add_ie': ['Youtube'],
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage, urlh = self._download_webpage_handle(url, video_id)
        hostname = urllib.parse.urlparse(urlh.geturl()).hostname
        # ecchi is 'sexy' in Japanese
        age_limit = 18 if hostname.split('.')[0] == 'ecchi' else 0

        video_data = self._download_json('http://www.iwara.tv/api/video/%s' % video_id, video_id)

        if not video_data:
            # Empty API response: the page embeds an external player instead
            iframe_url = self._html_search_regex(
                r'<iframe[^>]+src=([\'"])(?P<url>[^\'"]+)\1',
                webpage, 'iframe URL', group='url')
            return {
                '_type': 'url_transparent',
                'url': iframe_url,
                'age_limit': age_limit,
            }

        title = remove_end(self._html_extract_title(webpage), ' | Iwara')

        thumbnail = self._html_search_regex(
            r'poster=[\'"]([^\'"]+)', webpage, 'thumbnail', default=None)

        uploader = self._html_search_regex(
            r'class="username">([^<]+)', webpage, 'uploader', fatal=False)

        # 作成日 is Japanese for 'creation date'
        upload_date = unified_strdate(self._html_search_regex(
            r'作成日:([^\s]+)', webpage, 'upload_date', fatal=False))

        description = strip_or_none(self._search_regex(
            r'<p>(.+?(?=</div))', webpage, 'description', fatal=False,
            flags=re.DOTALL))

        formats = []
        for a_format in video_data:
            format_uri = url_or_none(a_format.get('uri'))
            if not format_uri:
                continue
            format_id = a_format.get('resolution')
            height = int_or_none(self._search_regex(
                r'(\d+)p', format_id, 'height', default=None))
            formats.append({
                'url': self._proto_relative_url(format_uri, 'https:'),
                'format_id': format_id,
                'ext': mimetype2ext(a_format.get('mime')) or 'mp4',
                'height': height,
                # The API reports no width; assume a 16:9 aspect ratio
                'width': int_or_none(height / 9.0 * 16.0 if height else None),
                'quality': 1 if format_id == 'Source' else 0,
            })

        return {
            'id': video_id,
            'title': title,
            'age_limit': age_limit,
            'formats': formats,
            'thumbnail': self._proto_relative_url(thumbnail, 'https:'),
            'uploader': uploader,
            'upload_date': upload_date,
            'description': description,
        }
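

# Playlist pages: title and uploader are scraped from the page header; the
# entries themselves come from IwaraBaseIE._extract_playlist.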
class IwaraPlaylistIE(IwaraBaseIE):
    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/playlist/(?P<id>[^/?#&]+)'
    IE_NAME = 'iwara:playlist'
    _TESTS = [{
        'url': 'https://ecchi.iwara.tv/playlist/best-enf',
        'info_dict': {
            'title': 'Best enf',
            'uploader': 'Jared98112',
            'id': 'best-enf',
        },
        'playlist_mincount': 1097,
    }, {
        # urlencoded
        'url': 'https://ecchi.iwara.tv/playlist/%E3%83%97%E3%83%AC%E3%82%A4%E3%83%AA%E3%82%B9%E3%83%88-2',
        'info_dict': {
            'id': 'プレイリスト-2',
            'title': 'プレイリスト',
            'uploader': 'mainyu',
        },
        'playlist_mincount': 91,
    }]

    def _real_extract(self, url):
        playlist_id, base_url = self._match_valid_url(url).group('id', 'base_url')
        playlist_id = urllib.parse.unquote(playlist_id)
        webpage = self._download_webpage(url, playlist_id)

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': self._html_search_regex(r'class="title"[^>]*>([^<]+)', webpage, 'title', fatal=False),
            'uploader': self._html_search_regex(r'<h2>([^<]+)', webpage, 'uploader', fatal=False),
            'entries': self._extract_playlist(base_url, webpage),
        }
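

# User pages: if the profile links to a paginated "all videos" listing, walk
# it page by page; otherwise fall back to the videos shown on the profile
# itself. Pagination stops once a page no longer links to the next one.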
class IwaraUserIE(IwaraBaseIE):
    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/users/(?P<id>[^/?#&]+)'
    IE_NAME = 'iwara:user'
    _TESTS = [{
        'note': 'the "all videos" listing is a single page (fewer than 40 videos)',
        'url': 'https://ecchi.iwara.tv/users/infinityyukarip',
        'info_dict': {
            'title': 'Uploaded videos from Infinity_YukariP',
            'id': 'infinityyukarip',
            'uploader': 'Infinity_YukariP',
            'uploader_id': 'infinityyukarip',
        },
        'playlist_mincount': 39,
    }, {
        'note': 'no "all videos" page at all (probably fewer than 10 videos)',
        'url': 'https://ecchi.iwara.tv/users/mmd-quintet',
        'info_dict': {
            'title': 'Uploaded videos from mmd quintet',
            'id': 'mmd-quintet',
            'uploader': 'mmd quintet',
            'uploader_id': 'mmd-quintet',
        },
        'playlist_mincount': 6,
    }, {
        'note': 'paginated "all videos" listing (more than 40 videos)',
        'url': 'https://ecchi.iwara.tv/users/theblackbirdcalls',
        'info_dict': {
            'title': 'Uploaded videos from TheBlackbirdCalls',
            'id': 'theblackbirdcalls',
            'uploader': 'TheBlackbirdCalls',
            'uploader_id': 'theblackbirdcalls',
        },
        'playlist_mincount': 420,
    }, {
        'note': 'non-ASCII characters in the URL',
        'url': 'https://ecchi.iwara.tv/users/ぶた丼',
        'info_dict': {
            'title': 'Uploaded videos from ぶた丼',
            'id': 'ぶた丼',
            'uploader': 'ぶた丼',
            'uploader_id': 'ぶた丼',
        },
        'playlist_mincount': 170,
    }]

    def _entries(self, playlist_id, base_url):
        webpage = self._download_webpage(
            f'{base_url}/users/{playlist_id}', playlist_id)
        videos_url = self._search_regex(
            r'<a href="(/users/[^/]+/videos)(?:\?[^"]+)?">', webpage,
            'all videos url', default=None)
        if not videos_url:
            # No dedicated listing page; use the videos on the profile itself
            yield from self._extract_playlist(base_url, webpage)
            return

        videos_url = urljoin(base_url, videos_url)

        for n in itertools.count(1):
            # The site's pages are zero-indexed; the first page takes no query
            page = self._download_webpage(
                videos_url, playlist_id, note=f'Downloading playlist page {n}',
                query={'page': str(n - 1)} if n > 1 else {})
            yield from self._extract_playlist(base_url, page)

            # No link to the next page means this was the last one
            if f'page={n}' not in page:
                break

    def _real_extract(self, url):
        playlist_id, base_url = self._match_valid_url(url).group('id', 'base_url')
        playlist_id = urllib.parse.unquote(playlist_id)

        return self.playlist_result(
            self._entries(playlist_id, base_url), playlist_id)
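

# Usage sketch (illustrative, not part of this module): these classes are
# registered through yt-dlp's extractor list and are normally exercised via
# the public YoutubeDL API rather than instantiated directly, e.g.:
#
#     import yt_dlp
#
#     with yt_dlp.YoutubeDL() as ydl:
#         info = ydl.extract_info(
#             'https://ecchi.iwara.tv/videos/amVwUl1EHpAD9RD', download=False)
#         print(info['title'], info['age_limit'])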