lib_rs.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""lib.rs (packages)"""

from urllib.parse import quote_plus

from lxml import html

from searx.utils import eval_xpath, eval_xpath_list, extract_text

about = {
    'website': 'https://lib.rs',
    'wikidata_id': 'Q113486010',
    'use_official_api': False,
    'require_api_key': False,
    'results': "HTML",
}

categories = ["it", "packages"]

base_url = 'https://lib.rs'

# XPath selectors into the lib.rs search result page
results_xpath = '/html/body/main/div/ol/li/a'
url_xpath = './@href'
title_xpath = './div[@class="h"]/h4'
content_xpath = './div[@class="h"]/p'
version_xpath = './div[@class="meta"]/span[contains(@class, "version")]'
download_count_xpath = './div[@class="meta"]/span[@class="downloads"]'
tags_xpath = './div[@class="meta"]/span[contains(@class, "k")]/text()'
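
# The selectors above assume lib.rs result markup roughly shaped like this
# (a sketch inferred from the XPaths, not a verbatim copy of the live page):
#
#   <ol><li><a href="/crates/serde">
#     <div class="h"><h4>serde</h4><p>Short description…</p></div>
#     <div class="meta">
#       <span class="version">1.0.0</span>
#       <span class="downloads">1.2M</span>
#       <span class="k">serialization</span>
#     </div>
#   </a></li></ol>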


def request(query, params):
    # Build the outgoing search URL; SearXNG performs the HTTP request itself.
    params['url'] = f"{base_url}/search?q={quote_plus(query)}"
    return params
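
# Example (hypothetical query): request("async runtime", {}) sets
# params['url'] to "https://lib.rs/search?q=async+runtime".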


def response(resp):
    # Parse the HTML result list and map each entry onto the packages.html template.
    results = []

    doc = html.fromstring(resp.text)

    for result in eval_xpath_list(doc, results_xpath):
        package_name = extract_text(eval_xpath(result, title_xpath))
        results.append(
            {
                'template': 'packages.html',
                'title': package_name,
                'url': base_url + extract_text(eval_xpath(result, url_xpath)),  # type: ignore
                'content': extract_text(eval_xpath(result, content_xpath)),
                'package_name': package_name,
                'version': extract_text(eval_xpath(result, version_xpath)),
                'popularity': extract_text(eval_xpath(result, download_count_xpath)),
                'tags': eval_xpath_list(result, tags_xpath),
            }
        )

    return results
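

# A minimal sketch of how the two hooks fit together. Standalone use is an
# assumption for illustration only: in production, SearXNG drives request()
# and response() through its own network layer. This sketch assumes the
# `requests` library and the searx package are both importable.
if __name__ == "__main__":
    import requests

    params = request("serde", {})
    resp = requests.get(params['url'], timeout=10)
    for item in response(resp):
        print(item['package_name'], item['version'], item['url'])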