# SPDX-License-Identifier: AGPL-3.0-or-later
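'''The XPath engine is a *generic* engine: concrete engines are configured
from it entirely in ``settings.yml``.  A sketch of such a configuration
(the engine name, URL and XPath expressions below are illustrative only):

.. code:: yaml

  - name : bitbucket
    engine : xpath
    paging : True
    search_url : https://bitbucket.org/repo/all/{pageno}?name={query}
    url_xpath : //article[@class="repo-summary"]//a[@class="repo-link"]/@href
    title_xpath : //article[@class="repo-summary"]//a[@class="repo-link"]
    content_xpath : //article[@class="repo-summary"]/p
'''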

from urllib.parse import urlencode

from lxml import html

from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list

search_url = None
'''Search URL of the engine; ``{query}``, ``{pageno}``, ``{lang}``,
``{time_range}`` and ``{safe_search}`` are replaced by :py:obj:`request`.'''

lang_all = 'en'
'''Replacement for ``{lang}`` when the user selects *all* languages.'''

# XPath expressions selecting URL, content, title and an optional thumbnail
# of a result; relative to `results_xpath` if that is set, else to the DOM.
url_xpath = None
content_xpath = None
title_xpath = None
thumbnail_xpath = False

suggestion_xpath = ''
results_xpath = ''

# an alternative *cached* link per result, prefixed with `cached_url`
cached_xpath = ''
cached_url = ''

soft_max_redirects = 0

cookies = {}
headers = {}
'''Some engines return different results depending on cookies or headers.
Possible use case: set a safe-search cookie or header to *moderate*.'''
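
# A sketch of how an engine instance might override these in ``settings.yml``
# (the header name and value are hypothetical, not a known engine's API):
#
#   headers:
#     X-Safesearch: moderate
#   cookies:
#     safesearch: moderate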

paging = False
'''Engine supports paging [True or False].'''

page_size = 1
'''Number of results on each page.'''

first_page_num = 1
'''Number of the first page (usually 0 or 1).'''

time_range_support = False
'''Engine supports search time range.'''

time_range_url = '&hours={time_range_val}'
'''Time range URL parameter in the :py:obj:`search_url`.  If no time range is
requested by the user, the URL parameter is an empty string.  The
``{time_range_val}`` replacement is taken from the :py:obj:`time_range_map`.

.. code:: yaml

    time_range_url : '&days={time_range_val}'
'''

time_range_map = {
    'day': 24,
    'week': 24 * 7,
    'month': 24 * 30,
    'year': 24 * 365,
}
'''Maps time range value from user to ``{time_range_val}`` in
:py:obj:`time_range_url`.

.. code:: yaml

    time_range_map:
      day: 1
      week: 7
      month: 30
      year: 365
'''
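
# Worked example: if the user selects *week*, ``time_range_map`` yields
# ``24 * 7 = 168`` and the default ``time_range_url`` formats to '&hours=168'.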

safe_search_support = False
'''Engine supports safe-search.'''

safe_search_map = {
    0: '&filter=none',
    1: '&filter=moderate',
    2: '&filter=strict',
}
'''Maps safe-search value to ``{safe_search}`` in :py:obj:`search_url`.

.. code:: yaml

    safesearch: true
    safe_search_map:
      0: '&filter=none'
      1: '&filter=moderate'
      2: '&filter=strict'
'''


def request(query, params):
    '''Build the request parameters (URL, cookies, headers) for the engine.'''
    lang = lang_all
    if params['language'] != 'all':
        lang = params['language'][:2]

    time_range = ''
    if params.get('time_range'):
        time_range_val = time_range_map.get(params.get('time_range'))
        time_range = time_range_url.format(time_range_val=time_range_val)

    safe_search = ''
    if params['safesearch']:
        safe_search = safe_search_map[params['safesearch']]

    fargs = {
        # percent-encode the query, stripping the leading 'q=' added by urlencode
        'query': urlencode({'q': query})[2:],
        'lang': lang,
        'pageno': (params['pageno'] - 1) * page_size + first_page_num,
        'time_range': time_range,
        'safe_search': safe_search,
    }

    params['cookies'].update(cookies)
    params['headers'].update(headers)
    # replacements missing from search_url are simply ignored by str.format
    params['url'] = search_url.format(**fargs)
    params['soft_max_redirects'] = soft_max_redirects

    return params
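
# Worked example (hypothetical ``search_url``): with
#   search_url = 'https://example.org/?q={query}&p={pageno}{time_range}{safe_search}'
# a query of "free software" on page 2 (page_size=1, first_page_num=1), with no
# time range and no safe-search selected, formats to
#   https://example.org/?q=free+software&p=2
# since ``time_range`` and ``safe_search`` default to empty strings.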


def response(resp):
    '''Scrape results from the response (HTML page).'''
    results = []
    dom = html.fromstring(resp.text)
    is_onion = 'onions' in categories  # pylint: disable=undefined-variable

    if results_xpath:
        for result in eval_xpath_list(dom, results_xpath):
            url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
            title = extract_text(eval_xpath_list(result, title_xpath, min_len=1))
            content = extract_text(eval_xpath_list(result, content_xpath))
            tmp_result = {'url': url, 'title': title, 'content': content}

            # add thumbnail if available
            if thumbnail_xpath:
                thumbnail_xpath_result = eval_xpath_list(result, thumbnail_xpath)
                if len(thumbnail_xpath_result) > 0:
                    tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)

            # add alternative cached url if available
            if cached_xpath:
                tmp_result['cached_url'] = cached_url + extract_text(
                    eval_xpath_list(result, cached_xpath, min_len=1)
                )

            if is_onion:
                tmp_result['is_onion'] = True

            results.append(tmp_result)

    elif cached_xpath:
        for url, title, content, cached in zip(
            (extract_url(x, search_url) for x in eval_xpath_list(dom, url_xpath)),
            map(extract_text, eval_xpath_list(dom, title_xpath)),
            map(extract_text, eval_xpath_list(dom, content_xpath)),
            map(extract_text, eval_xpath_list(dom, cached_xpath)),
        ):
            results.append({
                'url': url,
                'title': title,
                'content': content,
                'cached_url': cached_url + cached,
                'is_onion': is_onion,
            })

    else:
        for url, title, content in zip(
            (extract_url(x, search_url) for x in eval_xpath_list(dom, url_xpath)),
            map(extract_text, eval_xpath_list(dom, title_xpath)),
            map(extract_text, eval_xpath_list(dom, content_xpath)),
        ):
            results.append({'url': url, 'title': title, 'content': content, 'is_onion': is_onion})

    if not suggestion_xpath:
        return results

    for suggestion in eval_xpath(dom, suggestion_xpath):
        results.append({'suggestion': extract_text(suggestion)})

    return results
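
# Worked example (hypothetical markup): with
#   results_xpath = '//div[@class="result"]'
#   url_xpath     = './/a/@href'
#   title_xpath   = './/a'
#   content_xpath = './/p'
# a page containing
#   <div class="result"><a href="https://example.org/x">X</a><p>about X</p></div>
# yields one result:
#   {'url': 'https://example.org/x', 'title': 'X', 'content': 'about X'}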