ahmia.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 Ahmia (Onions)
"""

from urllib.parse import urlencode, urlparse, parse_qs

from lxml.html import fromstring

from searx.engines.xpath import extract_url, extract_text, eval_xpath_list, eval_xpath

# about
about = {
    "website": 'http://juhanurmihxlp77nkq76byazcldy2hlmovfu2epvl5ankdibsot4csyd.onion',
    "wikidata_id": 'Q18693938',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine config
categories = ['onions']
paging = True
page_size = 10

# search url
search_url = 'http://juhanurmihxlp77nkq76byazcldy2hlmovfu2epvl5ankdibsot4csyd.onion/search/?{query}'
time_range_support = True
time_range_dict = {'day': 1, 'week': 7, 'month': 30}

# xpaths
results_xpath = '//li[@class="result"]'
url_xpath = './h4/a/@href'
title_xpath = './h4/a[1]'
content_xpath = './/p[1]'
correction_xpath = '//*[@id="didYouMean"]//a'
number_of_results_xpath = '//*[@id="totalResults"]'
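
# The selectors above assume result markup roughly like the following
# (reconstructed from the xpaths for illustration, not copied from ahmia):
#
#   <li class="result">
#     <h4><a href="/search/redirect?redirect_url=http://example.onion/">Title</a></h4>
#     <p>Result snippet ...</p>
#   </li>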


def request(query, params):
    params['url'] = search_url.format(query=urlencode({'q': query}))
    # ahmia expects the time range as a number of days in the 'd' parameter
    if params['time_range'] in time_range_dict:
        params['url'] += '&' + urlencode({'d': time_range_dict[params['time_range']]})
    return params
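
# Example: a hypothetical query 'hidden wiki' with time_range='week' produces
# search_url filled with query='q=hidden+wiki', then '&d=7' appended.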


def response(resp):
    results = []
    dom = fromstring(resp.text)

    # ahmia returns the full result list on one page; keep only the slice
    # that corresponds to the requested page number
    first_result_index = page_size * (resp.search_params.get('pageno', 1) - 1)
    all_results = eval_xpath_list(dom, results_xpath)
    trimmed_results = all_results[first_result_index : first_result_index + page_size]

    # get results
    for result in trimmed_results:
        # ahmia links through a redirect page; the actual target is carried
        # in the 'redirect_url' query parameter
        raw_url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
        cleaned_url = parse_qs(urlparse(raw_url).query).get('redirect_url', [''])[0]

        title = extract_text(eval_xpath(result, title_xpath))
        content = extract_text(eval_xpath(result, content_xpath))

        results.append({'url': cleaned_url, 'title': title, 'content': content, 'is_onion': True})

    # get spelling corrections
    for correction in eval_xpath_list(dom, correction_xpath):
        results.append({'correction': extract_text(correction)})

    # get number of results
    number_of_results = eval_xpath(dom, number_of_results_xpath)
    if number_of_results:
        try:
            results.append({'number_of_results': int(extract_text(number_of_results))})
        except:  # pylint: disable=bare-except
            pass

    return results
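

# A minimal smoke test, assuming searx is installed so the xpath helpers
# import; the HTML snippet and the SimpleNamespace stub are hypothetical
# stand-ins for a real response from ahmia.
if __name__ == '__main__':
    from types import SimpleNamespace

    SAMPLE_HTML = """
    <ul>
      <li class="result">
        <h4><a href="/search/redirect?redirect_url=http://example.onion/">Example onion</a></h4>
        <p>An example result snippet.</p>
      </li>
    </ul>
    """
    fake_resp = SimpleNamespace(text=SAMPLE_HTML, search_params={'pageno': 1})
    for item in response(fake_resp):
        print(item)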