ahmia.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Ahmia (Onions)
"""

from urllib.parse import urlencode, urlparse, parse_qs
from lxml.html import fromstring

from searx.engines.xpath import extract_url, extract_text, eval_xpath_list, eval_xpath

# about
about = {
    "website": 'http://msydqstlz2kzerdg.onion',
    "wikidata_id": 'Q18693938',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine config
categories = ['onions']
paging = True
page_size = 10

# search url
search_url = 'http://msydqstlz2kzerdg.onion/search/?{query}'
time_range_support = True
time_range_dict = {'day': 1,
                   'week': 7,
                   'month': 30}
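# NOTE: these values are the number of days to look back; request() below
# sends them to Ahmia as the 'd' query parameter.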

# xpaths
results_xpath = '//li[@class="result"]'
url_xpath = './h4/a/@href'
title_xpath = './h4/a[1]'
content_xpath = './/p[1]'
correction_xpath = '//*[@id="didYouMean"]//a'
number_of_results_xpath = '//*[@id="totalResults"]'
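# xpaths starting with './' are evaluated relative to a single result element
# in response(); the absolute '//' expressions run against the whole page.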


def request(query, params):
    params['url'] = search_url.format(query=urlencode({'q': query}))

    if params['time_range'] in time_range_dict:
        params['url'] += '&' + urlencode({'d': time_range_dict[params['time_range']]})

    return params
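
# Illustrative example: query='example' with time_range='week' yields
# params['url'] == 'http://msydqstlz2kzerdg.onion/search/?q=example&d=7'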


def response(resp):
    results = []
    dom = fromstring(resp.text)

    # Ahmia serves its full result list on one page, so paging is done
    # client-side: slice out only the page requested via 'pageno'
    first_result_index = page_size * (resp.search_params.get('pageno', 1) - 1)
    all_results = eval_xpath_list(dom, results_xpath)
    trimmed_results = all_results[first_result_index:first_result_index + page_size]
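    # e.g. pageno=2 with page_size=10 keeps all_results[10:20]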

    # get results
    for result in trimmed_results:
        # Ahmia wraps each link in its own redirect endpoint; the actual
        # target URL is carried in the 'redirect_url' query parameter
        raw_url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
        cleaned_url = parse_qs(urlparse(raw_url).query).get('redirect_url', [''])[0]

        title = extract_text(eval_xpath(result, title_xpath))
        content = extract_text(eval_xpath(result, content_xpath))

        results.append({'url': cleaned_url,
                        'title': title,
                        'content': content,
                        'is_onion': True})
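    # (illustrative) each appended entry has the shape:
    #   {'url': 'http://example.onion/', 'title': '...', 'content': '...', 'is_onion': True}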

    # get spelling corrections
    for correction in eval_xpath_list(dom, correction_xpath):
        results.append({'correction': extract_text(correction)})

    # get number of results
    number_of_results = eval_xpath(dom, number_of_results_xpath)
    if number_of_results:
        try:
            results.append({'number_of_results': int(extract_text(number_of_results))})
        except ValueError:
            # the counter text was not a plain integer; skip it
            pass

    return results