import itertools
import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    clean_html,
    determine_ext,
    dict_get,
    extract_attributes,
    ExtractorError,
    float_or_none,
    int_or_none,
    parse_duration,
    str_or_none,
    try_get,
    unified_strdate,
    url_or_none,
    urljoin,
)
  20. class XHamsterIE(InfoExtractor):
  21. _DOMAINS = r'(?:xhamster\.(?:com|one|desi)|xhms\.pro|xhamster\d+\.com|xhday\.com)'
  22. _VALID_URL = r'''(?x)
  23. https?://
  24. (?:.+?\.)?%s/
  25. (?:
  26. movies/(?P<id>[\dA-Za-z]+)/(?P<display_id>[^/]*)\.html|
  27. videos/(?P<display_id_2>[^/]*)-(?P<id_2>[\dA-Za-z]+)
  28. )
  29. ''' % _DOMAINS
  30. _TESTS = [{
  31. 'url': 'https://xhamster.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
  32. 'md5': '34e1ab926db5dc2750fed9e1f34304bb',
  33. 'info_dict': {
  34. 'id': '1509445',
  35. 'display_id': 'femaleagent-shy-beauty-takes-the-bait',
  36. 'ext': 'mp4',
  37. 'title': 'FemaleAgent Shy beauty takes the bait',
  38. 'timestamp': 1350194821,
  39. 'upload_date': '20121014',
  40. 'uploader': 'Ruseful2011',
  41. 'uploader_id': 'ruseful2011',
  42. 'duration': 893,
  43. 'age_limit': 18,
  44. },
  45. }, {
  46. 'url': 'https://xhamster.com/videos/britney-spears-sexy-booty-2221348?hd=',
  47. 'info_dict': {
  48. 'id': '2221348',
  49. 'display_id': 'britney-spears-sexy-booty',
  50. 'ext': 'mp4',
  51. 'title': 'Britney Spears Sexy Booty',
  52. 'timestamp': 1379123460,
  53. 'upload_date': '20130914',
  54. 'uploader': 'jojo747400',
  55. 'duration': 200,
  56. 'age_limit': 18,
  57. },
  58. 'params': {
  59. 'skip_download': True,
  60. },
  61. }, {
  62. # empty seo, unavailable via new URL schema
  63. 'url': 'http://xhamster.com/movies/5667973/.html',
  64. 'info_dict': {
  65. 'id': '5667973',
  66. 'ext': 'mp4',
  67. 'title': '....',
  68. 'timestamp': 1454948101,
  69. 'upload_date': '20160208',
  70. 'uploader': 'parejafree',
  71. 'uploader_id': 'parejafree',
  72. 'duration': 72,
  73. 'age_limit': 18,
  74. },
  75. 'params': {
  76. 'skip_download': True,
  77. },
  78. }, {
  79. # mobile site
  80. 'url': 'https://m.xhamster.com/videos/cute-teen-jacqueline-solo-masturbation-8559111',
  81. 'only_matching': True,
  82. }, {
  83. 'url': 'https://xhamster.com/movies/2272726/amber_slayed_by_the_knight.html',
  84. 'only_matching': True,
  85. }, {
  86. # This video is visible for marcoalfa123456's friends only
  87. 'url': 'https://it.xhamster.com/movies/7263980/la_mia_vicina.html',
  88. 'only_matching': True,
  89. }, {
  90. # new URL schema
  91. 'url': 'https://pt.xhamster.com/videos/euro-pedal-pumping-7937821',
  92. 'only_matching': True,
  93. }, {
  94. 'url': 'https://xhamster.one/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
  95. 'only_matching': True,
  96. }, {
  97. 'url': 'https://xhamster.desi/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
  98. 'only_matching': True,
  99. }, {
  100. 'url': 'https://xhamster2.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
  101. 'only_matching': True,
  102. }, {
  103. 'url': 'https://xhamster11.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
  104. 'only_matching': True,
  105. }, {
  106. 'url': 'https://xhamster26.com/videos/femaleagent-shy-beauty-takes-the-bait-1509445',
  107. 'only_matching': True,
  108. }, {
  109. 'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
  110. 'only_matching': True,
  111. }, {
  112. 'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
  113. 'only_matching': True,
  114. }, {
  115. 'url': 'http://de.xhamster.com/videos/skinny-girl-fucks-herself-hard-in-the-forest-xhnBJZx',
  116. 'only_matching': True,
  117. }, {
  118. 'url': 'https://xhday.com/videos/strapless-threesome-xhh7yVf',
  119. 'only_matching': True,
  120. }]
  121. def _real_extract(self, url):
  122. mobj = self._match_valid_url(url)
  123. video_id = mobj.group('id') or mobj.group('id_2')
  124. display_id = mobj.group('display_id') or mobj.group('display_id_2')
  125. desktop_url = re.sub(r'^(https?://(?:.+?\.)?)m\.', r'\1', url)
  126. webpage, urlh = self._download_webpage_handle(desktop_url, video_id)
  127. error = self._html_search_regex(
  128. r'<div[^>]+id=["\']videoClosed["\'][^>]*>(.+?)</div>',
  129. webpage, 'error', default=None)
  130. if error:
  131. raise ExtractorError(error, expected=True)
  132. age_limit = self._rta_search(webpage)
  133. def get_height(s):
  134. return int_or_none(self._search_regex(
  135. r'^(\d+)[pP]', s, 'height', default=None))
  136. initials = self._parse_json(
  137. self._search_regex(
  138. (r'window\.initials\s*=\s*({.+?})\s*;\s*</script>',
  139. r'window\.initials\s*=\s*({.+?})\s*;'), webpage, 'initials',
  140. default='{}'),
  141. video_id, fatal=False)
  142. if initials:
  143. video = initials['videoModel']
  144. title = video['title']
  145. formats = []
  146. format_urls = set()
  147. format_sizes = {}
  148. sources = try_get(video, lambda x: x['sources'], dict) or {}
  149. for format_id, formats_dict in sources.items():
  150. if not isinstance(formats_dict, dict):
  151. continue
  152. download_sources = try_get(sources, lambda x: x['download'], dict) or {}
  153. for quality, format_dict in download_sources.items():
  154. if not isinstance(format_dict, dict):
  155. continue
  156. format_sizes[quality] = float_or_none(format_dict.get('size'))
  157. for quality, format_item in formats_dict.items():
  158. if format_id == 'download':
  159. # Download link takes some time to be generated,
  160. # skipping for now
  161. continue
  162. format_url = format_item
  163. format_url = url_or_none(format_url)
  164. if not format_url or format_url in format_urls:
  165. continue
  166. format_urls.add(format_url)
  167. formats.append({
  168. 'format_id': '%s-%s' % (format_id, quality),
  169. 'url': format_url,
  170. 'ext': determine_ext(format_url, 'mp4'),
  171. 'height': get_height(quality),
  172. 'filesize': format_sizes.get(quality),
  173. 'http_headers': {
  174. 'Referer': urlh.geturl(),
  175. },
  176. })
  177. xplayer_sources = try_get(
  178. initials, lambda x: x['xplayerSettings']['sources'], dict)
  179. if xplayer_sources:
  180. hls_sources = xplayer_sources.get('hls')
  181. if isinstance(hls_sources, dict):
  182. for hls_format_key in ('url', 'fallback'):
  183. hls_url = hls_sources.get(hls_format_key)
  184. if not hls_url:
  185. continue
  186. hls_url = urljoin(url, hls_url)
  187. if not hls_url or hls_url in format_urls:
  188. continue
  189. format_urls.add(hls_url)
  190. formats.extend(self._extract_m3u8_formats(
  191. hls_url, video_id, 'mp4', entry_protocol='m3u8_native',
  192. m3u8_id='hls', fatal=False))
  193. standard_sources = xplayer_sources.get('standard')
  194. if isinstance(standard_sources, dict):
  195. for format_id, formats_list in standard_sources.items():
  196. if not isinstance(formats_list, list):
  197. continue
  198. for standard_format in formats_list:
  199. if not isinstance(standard_format, dict):
  200. continue
  201. for standard_format_key in ('url', 'fallback'):
  202. standard_url = standard_format.get(standard_format_key)
  203. if not standard_url:
  204. continue
  205. standard_url = urljoin(url, standard_url)
  206. if not standard_url or standard_url in format_urls:
  207. continue
  208. format_urls.add(standard_url)
  209. ext = determine_ext(standard_url, 'mp4')
  210. if ext == 'm3u8':
  211. formats.extend(self._extract_m3u8_formats(
  212. standard_url, video_id, 'mp4', entry_protocol='m3u8_native',
  213. m3u8_id='hls', fatal=False))
  214. continue
  215. quality = (str_or_none(standard_format.get('quality'))
  216. or str_or_none(standard_format.get('label'))
  217. or '')
  218. formats.append({
  219. 'format_id': '%s-%s' % (format_id, quality),
  220. 'url': standard_url,
  221. 'ext': ext,
  222. 'height': get_height(quality),
  223. 'filesize': format_sizes.get(quality),
  224. 'http_headers': {
  225. 'Referer': standard_url,
  226. },
  227. })
  228. categories_list = video.get('categories')
  229. if isinstance(categories_list, list):
  230. categories = []
  231. for c in categories_list:
  232. if not isinstance(c, dict):
  233. continue
  234. c_name = c.get('name')
  235. if isinstance(c_name, compat_str):
  236. categories.append(c_name)
  237. else:
  238. categories = None
  239. uploader_url = url_or_none(try_get(video, lambda x: x['author']['pageURL']))
  240. return {
  241. 'id': video_id,
  242. 'display_id': display_id,
  243. 'title': title,
  244. 'description': video.get('description'),
  245. 'timestamp': int_or_none(video.get('created')),
  246. 'uploader': try_get(
  247. video, lambda x: x['author']['name'], compat_str),
  248. 'uploader_url': uploader_url,
  249. 'uploader_id': uploader_url.split('/')[-1] if uploader_url else None,
  250. 'thumbnail': video.get('thumbURL'),
  251. 'duration': int_or_none(video.get('duration')),
  252. 'view_count': int_or_none(video.get('views')),
  253. 'like_count': int_or_none(try_get(
  254. video, lambda x: x['rating']['likes'], int)),
  255. 'dislike_count': int_or_none(try_get(
  256. video, lambda x: x['rating']['dislikes'], int)),
  257. 'comment_count': int_or_none(video.get('views')),
  258. 'age_limit': age_limit if age_limit is not None else 18,
  259. 'categories': categories,
  260. 'formats': formats,
  261. }
  262. # Old layout fallback
  263. title = self._html_search_regex(
  264. [r'<h1[^>]*>([^<]+)</h1>',
  265. r'<meta[^>]+itemprop=".*?caption.*?"[^>]+content="(.+?)"',
  266. r'<title[^>]*>(.+?)(?:,\s*[^,]*?\s*Porn\s*[^,]*?:\s*xHamster[^<]*| - xHamster\.com)</title>'],
  267. webpage, 'title')
  268. formats = []
  269. format_urls = set()
  270. sources = self._parse_json(
  271. self._search_regex(
  272. r'sources\s*:\s*({.+?})\s*,?\s*\n', webpage, 'sources',
  273. default='{}'),
  274. video_id, fatal=False)
  275. for format_id, format_url in sources.items():
  276. format_url = url_or_none(format_url)
  277. if not format_url:
  278. continue
  279. if format_url in format_urls:
  280. continue
  281. format_urls.add(format_url)
  282. formats.append({
  283. 'format_id': format_id,
  284. 'url': format_url,
  285. 'height': get_height(format_id),
  286. })
  287. video_url = self._search_regex(
  288. [r'''file\s*:\s*(?P<q>["'])(?P<mp4>.+?)(?P=q)''',
  289. r'''<a\s+href=(?P<q>["'])(?P<mp4>.+?)(?P=q)\s+class=["']mp4Thumb''',
  290. r'''<video[^>]+file=(?P<q>["'])(?P<mp4>.+?)(?P=q)[^>]*>'''],
  291. webpage, 'video url', group='mp4', default=None)
  292. if video_url and video_url not in format_urls:
  293. formats.append({
  294. 'url': video_url,
  295. })
  296. # Only a few videos have an description
  297. mobj = re.search(r'<span>Description: </span>([^<]+)', webpage)
  298. description = mobj.group(1) if mobj else None
  299. upload_date = unified_strdate(self._search_regex(
  300. r'hint=["\'](\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}',
  301. webpage, 'upload date', fatal=False))
  302. uploader = self._html_search_regex(
  303. r'<span[^>]+itemprop=["\']author[^>]+><a[^>]+><span[^>]+>([^<]+)',
  304. webpage, 'uploader', default='anonymous')
  305. thumbnail = self._search_regex(
  306. [r'''["']thumbUrl["']\s*:\s*(?P<q>["'])(?P<thumbnail>.+?)(?P=q)''',
  307. r'''<video[^>]+"poster"=(?P<q>["'])(?P<thumbnail>.+?)(?P=q)[^>]*>'''],
  308. webpage, 'thumbnail', fatal=False, group='thumbnail')
  309. duration = parse_duration(self._search_regex(
  310. [r'<[^<]+\bitemprop=["\']duration["\'][^<]+\bcontent=["\'](.+?)["\']',
  311. r'Runtime:\s*</span>\s*([\d:]+)'], webpage,
  312. 'duration', fatal=False))
  313. view_count = int_or_none(self._search_regex(
  314. r'content=["\']User(?:View|Play)s:(\d+)',
  315. webpage, 'view count', fatal=False))
  316. mobj = re.search(r'hint=[\'"](?P<likecount>\d+) Likes / (?P<dislikecount>\d+) Dislikes', webpage)
  317. (like_count, dislike_count) = (mobj.group('likecount'), mobj.group('dislikecount')) if mobj else (None, None)
  318. mobj = re.search(r'</label>Comments \((?P<commentcount>\d+)\)</div>', webpage)
  319. comment_count = mobj.group('commentcount') if mobj else 0
  320. categories_html = self._search_regex(
  321. r'(?s)<table.+?(<span>Categories:.+?)</table>', webpage,
  322. 'categories', default=None)
  323. categories = [clean_html(category) for category in re.findall(
  324. r'<a[^>]+>(.+?)</a>', categories_html)] if categories_html else None
  325. return {
  326. 'id': video_id,
  327. 'display_id': display_id,
  328. 'title': title,
  329. 'description': description,
  330. 'upload_date': upload_date,
  331. 'uploader': uploader,
  332. 'uploader_id': uploader.lower() if uploader else None,
  333. 'thumbnail': thumbnail,
  334. 'duration': duration,
  335. 'view_count': view_count,
  336. 'like_count': int_or_none(like_count),
  337. 'dislike_count': int_or_none(dislike_count),
  338. 'comment_count': int_or_none(comment_count),
  339. 'age_limit': age_limit,
  340. 'categories': categories,
  341. 'formats': formats,
  342. }
  343. class XHamsterEmbedIE(InfoExtractor):
  344. _VALID_URL = r'https?://(?:.+?\.)?%s/xembed\.php\?video=(?P<id>\d+)' % XHamsterIE._DOMAINS
  345. _EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?xhamster\.com/xembed\.php\?video=\d+)\1']
  346. _TEST = {
  347. 'url': 'http://xhamster.com/xembed.php?video=3328539',
  348. 'info_dict': {
  349. 'id': '3328539',
  350. 'ext': 'mp4',
  351. 'title': 'Pen Masturbation',
  352. 'timestamp': 1406581861,
  353. 'upload_date': '20140728',
  354. 'uploader': 'ManyakisArt',
  355. 'duration': 5,
  356. 'age_limit': 18,
  357. }
  358. }
  359. def _real_extract(self, url):
  360. video_id = self._match_id(url)
  361. webpage = self._download_webpage(url, video_id)
  362. video_url = self._search_regex(
  363. r'href="(https?://xhamster\.com/(?:movies/{0}/[^"]*\.html|videos/[^/]*-{0})[^"]*)"'.format(video_id),
  364. webpage, 'xhamster url', default=None)
  365. if not video_url:
  366. vars = self._parse_json(
  367. self._search_regex(r'vars\s*:\s*({.+?})\s*,\s*\n', webpage, 'vars'),
  368. video_id)
  369. video_url = dict_get(vars, ('downloadLink', 'homepageLink', 'commentsLink', 'shareUrl'))
  370. return self.url_result(video_url, 'XHamster')
  371. class XHamsterUserIE(InfoExtractor):
  372. _VALID_URL = r'https?://(?:.+?\.)?%s/users/(?P<id>[^/?#&]+)' % XHamsterIE._DOMAINS
  373. _TESTS = [{
  374. # Paginated user profile
  375. 'url': 'https://xhamster.com/users/netvideogirls/videos',
  376. 'info_dict': {
  377. 'id': 'netvideogirls',
  378. },
  379. 'playlist_mincount': 267,
  380. }, {
  381. # Non-paginated user profile
  382. 'url': 'https://xhamster.com/users/firatkaan/videos',
  383. 'info_dict': {
  384. 'id': 'firatkaan',
  385. },
  386. 'playlist_mincount': 1,
  387. }, {
  388. 'url': 'https://xhday.com/users/mobhunter',
  389. 'only_matching': True,
  390. }]
  391. def _entries(self, user_id):
  392. next_page_url = 'https://xhamster.com/users/%s/videos/1' % user_id
  393. for pagenum in itertools.count(1):
  394. page = self._download_webpage(
  395. next_page_url, user_id, 'Downloading page %s' % pagenum)
  396. for video_tag in re.findall(
  397. r'(<a[^>]+class=["\'].*?\bvideo-thumb__image-container[^>]+>)',
  398. page):
  399. video = extract_attributes(video_tag)
  400. video_url = url_or_none(video.get('href'))
  401. if not video_url or not XHamsterIE.suitable(video_url):
  402. continue
  403. video_id = XHamsterIE._match_id(video_url)
  404. yield self.url_result(
  405. video_url, ie=XHamsterIE.ie_key(), video_id=video_id)
  406. mobj = re.search(r'<a[^>]+data-page=["\']next[^>]+>', page)
  407. if not mobj:
  408. break
  409. next_page = extract_attributes(mobj.group(0))
  410. next_page_url = url_or_none(next_page.get('href'))
  411. if not next_page_url:
  412. break
  413. def _real_extract(self, url):
  414. user_id = self._match_id(url)
  415. return self.playlist_result(self._entries(user_id), user_id)