rottentomatoes.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""RottenTomatoes (movies)"""

from urllib.parse import quote_plus

from lxml import html

from searx.utils import eval_xpath, eval_xpath_list, extract_text

# about
about = {
    "website": 'https://www.rottentomatoes.com/',
    "wikidata_id": 'Q105584',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

categories = ['movies']

base_url = "https://www.rottentomatoes.com"

# each search hit is rendered as a <search-page-media-row> custom element
results_xpath = "//search-page-media-row"
url_xpath = "./a[1]/@href"
title_xpath = "./a/img/@alt"
thumbnail_xpath = "./a/img/@src"
# concat() prepends a label; when the attribute is empty, only the label
# (ending in a space) remains, which response() uses to drop the field
release_year_xpath = "concat('From ', string(./@releaseyear))"
score_xpath = "concat('Score: ', string(./@tomatometerscore))"
cast_xpath = "concat('Starring ', string(./@cast))"


def request(query, params):
    params["url"] = f"{base_url}/search?search={quote_plus(query)}"
    return params


def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    for result in eval_xpath_list(dom, results_xpath):
        content = []
        for xpath in (release_year_xpath, score_xpath, cast_xpath):
            info = extract_text(eval_xpath(result, xpath))
            # a trailing space means the attribute was empty (only the
            # concat() label came back), i.e. no data was found
            if info and info[-1] != " ":
                content.append(info)

        results.append(
            {
                'url': extract_text(eval_xpath(result, url_xpath)),
                'title': extract_text(eval_xpath(result, title_xpath)),
                'content': ', '.join(content),
                'thumbnail': extract_text(eval_xpath(result, thumbnail_xpath)),
            }
        )

    return results
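

# --- Illustrative smoke test, not part of the upstream engine ---
# A minimal sketch of how SearXNG drives this module: it calls request() to
# build the outgoing search URL, fetches it, and hands the HTTP response to
# response() for parsing. Running this standalone assumes a SearXNG dev
# environment (so `searx.utils` is importable) and the `requests` package;
# both are assumptions, and the query string below is only an example.
if __name__ == "__main__":
    import requests  # assumption: installed in the dev environment

    params = request("dune", {})
    resp = requests.get(
        params["url"],
        headers={"User-Agent": "Mozilla/5.0"},
        timeout=10,
    )
    for item in response(resp):
        print(item["title"], "-", item["url"])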