# goodreads.py
  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. """Goodreads (books)
  3. """
  4. from urllib.parse import urlencode
  5. from lxml import html
  6. from searx.utils import extract_text, eval_xpath, eval_xpath_list
  7. about = {
  8. 'website': 'https://www.goodreads.com',
  9. 'wikidata_id': 'Q2359213',
  10. 'official_api_documentation': None,
  11. 'use_official_api': False,
  12. 'require_api_key': False,
  13. 'results': 'HTML',
  14. }
  15. categories = []
  16. paging = True
  17. base_url = "https://www.goodreads.com"
  18. results_xpath = "//table//tr"
  19. thumbnail_xpath = ".//img[contains(@class, 'bookCover')]/@src"
  20. url_xpath = ".//a[contains(@class, 'bookTitle')]/@href"
  21. title_xpath = ".//a[contains(@class, 'bookTitle')]"
  22. author_xpath = ".//a[contains(@class, 'authorName')]"
  23. info_text_xpath = ".//span[contains(@class, 'uitext')]"
  24. def request(query, params):
  25. args = {
  26. 'q': query,
  27. 'page': params['pageno'],
  28. }
  29. params['url'] = f"{base_url}/search?{urlencode(args)}"
  30. return params
  31. def response(resp):
  32. results = []
  33. dom = html.fromstring(resp.text)
  34. for result in eval_xpath_list(dom, results_xpath):
  35. results.append(
  36. {
  37. 'url': base_url + extract_text(eval_xpath(result, url_xpath)),
  38. 'title': extract_text(eval_xpath(result, title_xpath)),
  39. 'thumbnail': extract_text(eval_xpath(result, thumbnail_xpath)),
  40. 'content': extract_text(eval_xpath(result, info_text_xpath)),
  41. 'metadata': extract_text(eval_xpath(result, author_xpath)),
  42. }
  43. )
  44. return results