# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Startpage (Web)
"""

import re
from time import time
from urllib.parse import urlencode
from unicodedata import normalize, combining
from datetime import datetime, timedelta

from dateutil import parser
from lxml import html
from babel import Locale
from babel.localedata import locale_identifiers

from searx import logger
from searx.poolrequests import get
from searx.utils import extract_text, eval_xpath, match_language
from searx.exceptions import (
    SearxEngineResponseException,
    SearxEngineCaptchaException,
)

logger = logger.getChild('startpage')

# about
about = {
    "website": 'https://startpage.com',
    "wikidata_id": 'Q2333295',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general']
# there is a mechanism to block "bot" searches (probably the parameter qid);
# it requires storing qid's between multiple search calls
paging = True
supported_languages_url = 'https://www.startpage.com/do/settings'

# search-url
base_url = 'https://startpage.com/'
search_url = base_url + 'sp/search?'

# specific xpath variables
# ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
# not ads: div[@class="result"] are the direct children of div[@id="results"]
results_xpath = '//div[@class="w-gl__result__main"]'
link_xpath = './/a[@class="w-gl__result-title result-link"]'
content_xpath = './/p[@class="w-gl__description"]'

# timestamp of the last fetch of 'sc' code
sc_code_ts = 0
sc_code = ''


def raise_captcha(resp):
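    """Raise a SearxEngineCaptchaException (suspending the engine) when
    Startpage redirected the request to its CAPTCHA page."""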
    if str(resp.url).startswith('https://www.startpage.com/sp/captcha'):
        # suspend the engine for 7 days
        raise SearxEngineCaptchaException(suspended_time=7 * 24 * 3600)


def get_sc_code(headers):
    """Get an up-to-date `sc` argument from Startpage's home page.

    Startpage puts a `sc` argument on every link.  Without this argument
    Startpage considers the request comes from a bot.  We do not know what is
    encoded in the value of the `sc` argument, but it seems to be a kind of a
    *time-stamp*.  This *time-stamp* is valid for a few hours.

    This function scrapes a new *time-stamp* from Startpage's home page at
    most every 3000 seconds (50 minutes).
    """
    global sc_code_ts, sc_code  # pylint: disable=global-statement

    if time() > (sc_code_ts + 3000):
        logger.debug("query new sc time-stamp ...")

        resp = get(base_url, headers=headers)
        raise_captcha(resp)
        dom = html.fromstring(resp.text)

        try:
            sc_code = eval_xpath(dom, '//input[@name="sc"]')[0].get('value')
        except IndexError as exc:
            # suspend startpage API --> https://github.com/searxng/searxng/pull/695
            raise SearxEngineResponseException(
                suspended_time=7 * 24 * 3600, message="PR-695: query new sc time-stamp failed!"
            ) from exc

        sc_code_ts = time()
        logger.debug("new value is: %s", sc_code)

    return sc_code


# do search-request
def request(query, params):
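    """Assemble a Startpage request: build the query arguments, fetch a fresh
    `sc` code and store the final URL in params['url']."""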
    # pylint: disable=line-too-long
    # The format string from Startpage's FFox add-on [1]::
    #
    #     https://www.startpage.com/do/dsearch?query={searchTerms}&cat=web&pl=ext-ff&language=__MSG_extensionUrlLanguage__&extVersion=1.3.0
    #
    # [1] https://addons.mozilla.org/en-US/firefox/addon/startpage-private-search/

    args = {
        'query': query,
        'page': params['pageno'],
        'cat': 'web',
        # 'pl': 'ext-ff',
        # 'extVersion': '1.3.0',
        # 'abp': "-1",
        'sc': get_sc_code(params['headers']),
    }

    # set language if specified
    if params['language'] != 'all':
        lang_code = match_language(params['language'], supported_languages, fallback=None)
        if lang_code:
            language_name = supported_languages[lang_code]['alias']
            args['language'] = language_name
            args['lui'] = language_name

    params['url'] = search_url + urlencode(args)
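    # At this point params['url'] looks like, for example (the sc value is
    # illustrative; language & lui are appended when a language is selected):
    #
    #   https://startpage.com/sp/search?query=time+flies&page=1&cat=web&sc=0cb99dcd...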
    return params


# get response from search-request
def response(resp):
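    """Parse the result page: for each result extract url, title, content and,
    if the snippet starts with a date prefix, a publishedDate."""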
    results = []

    dom = html.fromstring(resp.text)

    # parse results
    for result in eval_xpath(dom, results_xpath):
        links = eval_xpath(result, link_xpath)
        if not links:
            continue
        link = links[0]
        url = link.attrib.get('href')

        # block google-ad URLs
        if re.match(r"^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url):
            continue

        # block startpage search URLs
        if re.match(r"^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
            continue

        title = extract_text(link)

        if eval_xpath(result, content_xpath):
            content = extract_text(eval_xpath(result, content_xpath))
        else:
            content = ''

        published_date = None

        # check if search result starts with something like: "2 Sep 2014 ... "
        if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
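            # e.g. for content == "2 Sep 2014 ... rest of snippet":
            # date_pos points just past the "... " separator, so date_string
            # becomes "2 Sep 2014" and content is trimmed to "rest of snippet"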
            date_pos = content.find('...') + 4
            date_string = content[0:date_pos - 5]

            # fix content string
            content = content[date_pos:]

            try:
                published_date = parser.parse(date_string, dayfirst=True)
            except ValueError:
                pass

        # check if search result starts with something like: "5 days ago ... "
        elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
            date_pos = content.find('...') + 4
            date_string = content[0:date_pos - 5]

            # calculate datetime
            published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group()))

            # fix content string
            content = content[date_pos:]

        if published_date:
            # append result
            results.append({'url': url,
                            'title': title,
                            'content': content,
                            'publishedDate': published_date})
        else:
            # append result
            results.append({'url': url,
                            'title': title,
                            'content': content})

    # return results
    return results


# get supported languages from their site
def _fetch_supported_languages(resp):
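    """Scrape the language options from Startpage's settings page and map them
    to searx language codes with the help of babel."""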
    # Startpage's language selector is a mess: each option has a displayed
    # name and a value, either of which may represent the language name in the
    # native script, the language name in English, an English transliteration
    # of the native name, the English name of the writing script used by the
    # language, or occasionally something else entirely.

    # These cases are so special they need to be hardcoded; a couple of them
    # are misspellings.
    language_names = {
        'english_uk': 'en-GB',
        'fantizhengwen': ['zh-TW', 'zh-HK'],
        'hangul': 'ko',
        'malayam': 'ml',
        'norsk': 'nb',
        'sinhalese': 'si',
        'sudanese': 'su'
    }

    # get the English name of every language known by babel
    language_names.update(
        {
            # fmt: off
            name.lower(): lang_code
            # pylint: disable=protected-access
            for lang_code, name in Locale('en')._data['languages'].items()
            # fmt: on
        }
    )

    # get the native name of every language known by babel
    for lang_code in filter(lambda lang_code: lang_code.find('_') == -1, locale_identifiers()):
        native_name = Locale(lang_code).get_language_name().lower()
        # add native name exactly as it is
        language_names[native_name] = lang_code

        # add "normalized" language name (i.e. français becomes francais and español becomes espanol)
        unaccented_name = ''.join(filter(lambda c: not combining(c), normalize('NFKD', native_name)))
        if len(unaccented_name) == len(unaccented_name.encode()):
            # add only if the result is pure ASCII (otherwise the "normalization"
            # didn't work; non-ASCII chars encode to more than one UTF-8 byte)
            language_names[unaccented_name] = lang_code

    dom = html.fromstring(resp.text)
    sp_lang_names = []
    for option in dom.xpath('//form[@id="settings-form"]//select[@name="language"]/option'):
        sp_lang_names.append((option.get('value'), extract_text(option).lower()))

    supported_languages = {}
    for sp_option_value, sp_option_text in sp_lang_names:
        lang_code = language_names.get(sp_option_value) or language_names.get(sp_option_text)
        if isinstance(lang_code, str):
            supported_languages[lang_code] = {'alias': sp_option_value}
        elif isinstance(lang_code, list):
            for _lc in lang_code:
                supported_languages[_lc] = {'alias': sp_option_value}
        else:
            print('Unknown language option in Startpage: {} ({})'.format(sp_option_value, sp_option_text))

    return supported_languages
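

# A minimal sketch of how the searx engine framework drives this module; the
# param values below are illustrative, not part of the engine:
#
#   params = {'pageno': 1, 'language': 'all', 'headers': {}}
#   params = request('time flies', params)  # fills params['url']
#
# The framework then fetches params['url'] and hands the HTTP response to
# response(resp), which returns the list of result dicts.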