# motherless.py

import datetime
import re

from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
    ExtractorError,
    InAdvancePagedList,
    orderedSet,
    str_to_int,
    unified_strdate,
)
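
# Two extractors live in this module: MotherlessIE handles individual video
# pages, while MotherlessGroupIE expands group pages (/g/<name> and
# /gv/<name>) into playlists of individual videos.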


class MotherlessIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?motherless\.com/(?:g/[a-z0-9_]+/)?(?P<id>[A-Z0-9]+)'
    _TESTS = [{
        'url': 'http://motherless.com/AC3FFE1',
        'md5': '310f62e325a9fafe64f68c0bccb6e75f',
        'info_dict': {
            'id': 'AC3FFE1',
            'ext': 'mp4',
            'title': 'Fucked in the ass while playing PS3',
            'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
            'upload_date': '20100913',
            'uploader_id': 'famouslyfuckedup',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        }
    }, {
        'url': 'http://motherless.com/532291B',
        'md5': 'bc59a6b47d1f958e61fbd38a4d31b131',
        'info_dict': {
            'id': '532291B',
            'ext': 'mp4',
            'title': 'Amazing girl playing the omegle game, PERFECT!',
            'categories': ['Amateur', 'webcam', 'omegle', 'pink', 'young', 'masturbate', 'teen',
                           'game', 'hairy'],
            'upload_date': '20140622',
            'uploader_id': 'Sulivana7x',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        },
        'skip': '404',
    }, {
        'url': 'http://motherless.com/g/cosplay/633979F',
        'md5': '0b2a43f447a49c3e649c93ad1fafa4a0',
        'info_dict': {
            'id': '633979F',
            'ext': 'mp4',
            'title': 'Turtlette',
            'categories': ['superheroine heroine superher'],
            'upload_date': '20140827',
            'uploader_id': 'shade0230',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        }
    }, {
        # no keywords
        'url': 'http://motherless.com/8B4BBC1',
        'only_matching': True,
    }, {
        # see https://motherless.com/videos/recent for recent videos with
        # uploaded date in "ago" format
        'url': 'https://motherless.com/3C3E2CF',
        'info_dict': {
            'id': '3C3E2CF',
            'ext': 'mp4',
            'title': 'a/ Hot Teens',
            'categories': list,
            'upload_date': '20210104',
            'uploader_id': 'anonymous',
            'thumbnail': r're:https?://.*\.jpg',
            'age_limit': 18,
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        if any(p in webpage for p in (
                '<title>404 - MOTHERLESS.COM<',
                ">The page you're looking for cannot be found.<")):
            raise ExtractorError('Video %s does not exist' % video_id, expected=True)

        if '>The content you are trying to view is for friends only.' in webpage:
            raise ExtractorError('Video %s is for friends only' % video_id, expected=True)

        title = self._html_search_regex(
            (r'(?s)<div[^>]+\bclass=["\']media-meta-title[^>]+>(.+?)</div>',
             r'id="view-upload-title">\s+([^<]+)<'), webpage, 'title')
        video_url = (self._html_search_regex(
            (r'setup\(\{\s*["\']file["\']\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
             r'fileurl\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1'),
            webpage, 'video URL', default=None, group='url')
            or 'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
        age_limit = self._rta_search(webpage)
        view_count = str_to_int(self._html_search_regex(
            (r'>([\d,.]+)\s+Views<', r'<strong>Views</strong>\s+([^<]+)<'),
            webpage, 'view count', fatal=False))
        like_count = str_to_int(self._html_search_regex(
            (r'>([\d,.]+)\s+Favorites<',
             r'<strong>Favorited</strong>\s+([^<]+)<'),
            webpage, 'like count', fatal=False))

        upload_date = unified_strdate(self._search_regex(
            r'class=["\']count[^>]+>(\d+\s+[a-zA-Z]{3}\s+\d{4})<', webpage,
            'upload date', default=None))
        if not upload_date:
            uploaded_ago = self._search_regex(
                r'>\s*(\d+[hd])\s+[aA]go\b', webpage, 'uploaded ago',
                default=None)
            if uploaded_ago:
                delta = int(uploaded_ago[:-1])
                _AGO_UNITS = {
                    'h': 'hours',
                    'd': 'days',
                }
                kwargs = {_AGO_UNITS.get(uploaded_ago[-1]): delta}
                upload_date = (datetime.datetime.utcnow() - datetime.timedelta(**kwargs)).strftime('%Y%m%d')
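        # Illustrative example of the relative-date fallback above (assumed
        # values, not from the site): a "12h" marker scraped at 06:00 UTC on
        # 2021-01-05 gives upload_date '20210104'; a "3d" marker on the same
        # date gives '20210102'. Only 'h' and 'd' suffixes are matched, so
        # anything else leaves upload_date as None.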

        comment_count = len(re.findall(r'''class\s*=\s*['"]media-comment-contents\b''', webpage))
        uploader_id = self._html_search_regex(
            (r'''<span\b[^>]+\bclass\s*=\s*["']username\b[^>]*>([^<]+)</span>''',
             r'''(?s)['"](?:media-meta-member|thumb-member-username)\b[^>]+>\s*<a\b[^>]+\bhref\s*=\s*['"]/m/([^"']+)'''),
            webpage, 'uploader_id', fatal=False)

        categories = self._html_search_meta('keywords', webpage, default=None)
        if categories:
            categories = [cat.strip() for cat in categories.split(',')]

        return {
            'id': video_id,
            'title': title,
            'upload_date': upload_date,
            'uploader_id': uploader_id,
            'thumbnail': self._og_search_thumbnail(webpage),
            'categories': categories,
            'view_count': view_count,
            'like_count': like_count,
            'comment_count': comment_count,
            'age_limit': age_limit,
            'url': video_url,
        }
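

# Minimal usage sketch (illustrative only; assumes this file sits in a
# youtube-dl checkout importable as `youtube_dl`):
#
#     from youtube_dl import YoutubeDL
#     with YoutubeDL({'skip_download': True}) as ydl:
#         info = ydl.extract_info('http://motherless.com/AC3FFE1')
#         print(info.get('title'), info.get('upload_date'))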


class MotherlessGroupIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?motherless\.com/gv?/(?P<id>[a-z0-9_]+)'
    _TESTS = [{
        'url': 'http://motherless.com/g/movie_scenes',
        'info_dict': {
            'id': 'movie_scenes',
            'title': 'Movie Scenes',
            'description': 'Hot and sexy scenes from "regular" movies... '
                           'Beautiful actresses fully nude... A looot of '
                           'skin! :)Enjoy!',
        },
        'playlist_mincount': 662,
    }, {
        'url': 'http://motherless.com/gv/sex_must_be_funny',
        'info_dict': {
            'id': 'sex_must_be_funny',
            'title': 'Sex must be funny',
            'description': 'Sex can be funny. Wide smiles,laugh, games, fun of '
                           'any kind!'
        },
        'playlist_mincount': 0,
        'expected_warnings': [
            'This group has no videos.',
        ]
    }, {
        'url': 'https://motherless.com/g/beautiful_cock',
        'info_dict': {
            'id': 'beautiful_cock',
            'title': 'Beautiful Cock',
            'description': 'Group for lovely cocks yours, mine, a friends anything human',
        },
        'playlist_mincount': 2500,
    }]
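
    # A video URL inside a group (e.g. /g/cosplay/633979F) matches both this
    # extractor's _VALID_URL and MotherlessIE's, so defer to MotherlessIE
    # whenever it claims the URL.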
    @classmethod
    def suitable(cls, url):
        return (False if MotherlessIE.suitable(url)
                else super(MotherlessGroupIE, cls).suitable(url))

    def _extract_entries(self, webpage, base):
        entries = []
        for mobj in re.finditer(
                r'href="(?P<href>/[^"]+)"[^>]*>(?:\s*<img[^>]+alt="[^-]+-\s(?P<title>[^"]+)")?',
                webpage):
            video_url = compat_urlparse.urljoin(base, mobj.group('href'))
            if not MotherlessIE.suitable(video_url):
                continue
            video_id = MotherlessIE._match_id(video_url)
            title = mobj.group('title')
            entries.append(self.url_result(
                video_url, ie=MotherlessIE.ie_key(), video_id=video_id,
                video_title=title))
        # Alternative fallback: no suitable <a href> entries were found, so
        # collect bare video codes from data-codename attributes instead.
        if not entries:
            entries = [
                self.url_result(
                    compat_urlparse.urljoin(base, '/' + entry_id),
                    ie=MotherlessIE.ie_key(), video_id=entry_id)
                for entry_id in orderedSet(re.findall(
                    r'data-codename=["\']([A-Z0-9]+)', webpage))]
        return entries

    def _real_extract(self, url):
        group_id = self._match_id(url)
        page_url = compat_urlparse.urljoin(url, '/gv/%s' % group_id)
        webpage = self._download_webpage(page_url, group_id)
        title = self._search_regex(
            r'<title>([\w\s]+\w)\s+-', webpage, 'title', fatal=False)
        description = self._html_search_meta(
            'description', webpage, fatal=False)
        page_count = str_to_int(self._search_regex(
            r'(\d+)\s*</(?:a|span)>\s*<(?:a|span)[^>]+(?:>\s*NEXT|\brel\s*=\s*["\']?next)\b',
            webpage, 'page_count', default=0))
        if not page_count:
            message = self._search_regex(
                r'''class\s*=\s*['"]error-page\b[^>]*>\s*<p[^>]*>\s*(?P<error_msg>[^<]+)(?<=\S)\s*''',
                webpage, 'error_msg', default=None) or 'This group has no videos.'
            self.report_warning(message, group_id)
            page_count = 1
        PAGE_SIZE = 80

        def _get_page(idx):
            # Reuse the already-downloaded first page; bind later pages to a
            # separate name so the assignment does not shadow the enclosing
            # `webpage` (which would raise UnboundLocalError for idx == 0).
            page = webpage
            if idx > 0:
                page = self._download_webpage(
                    page_url, group_id, query={'page': idx + 1},
                    note='Downloading page %d/%d' % (idx + 1, page_count)
                )
            for entry in self._extract_entries(page, url):
                yield entry

        playlist = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
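
        # InAdvancePagedList knows the page count up front and calls
        # _get_page(idx) lazily for idx in range(page_count), assuming up to
        # PAGE_SIZE entries per page, so extra pages are only downloaded when
        # the playlist is actually consumed.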

        return {
            '_type': 'playlist',
            'id': group_id,
            'title': title,
            'description': description,
            'entries': playlist,
        }