# SPDX-License-Identifier: AGPL-3.0-or-later
"""Presearch supports the search types listed in :py:obj:`search_type` (general,
images, videos, news).

Configured ``presearch`` engines:

.. code:: yaml

  - name: presearch
    engine: presearch
    search_type: search
    categories: [general, web]

  - name: presearch images
    ...
    search_type: images
    categories: [images, web]

  - name: presearch videos
    ...
    search_type: videos
    categories: [general, web]

  - name: presearch news
    ...
    search_type: news
    categories: [news, web]

.. hint::

   By default Presearch's video category is intentionally placed into::

       categories: [general, web]


Search type ``video``
=====================

The results in the video category are most often links to pages that contain a
video; for instance, many links from Presearch's video category link content
from facebook (aka Meta) or Twitter (aka X).  Since these are not real links to
video streams, SearXNG can't use the video template for them, and if SearXNG
can't use this template, the user doesn't want to see these hits in the videos
category.


Languages & Regions
===================

In Presearch there are languages for the UI and regions for narrowing down the
search.  If we set "auto" for the region in the WEB-UI of Presearch and the
cookie ``use_local_search_results=false``, then the defaults for both (the
language and the region) are taken from the ``Accept-Language`` header.

Since the region is already "auto" by default, we only need to set the
``use_local_search_results`` cookie and send the ``Accept-Language`` header.  We
have to set these values in both requests we send to Presearch: in the first
request, to get the request-ID from Presearch, and in the final request, to get
the result list (see ``send_accept_language_header``).

The time format returned by Presearch varies depending on the language set.
Multiple different formats could be supported by using the ``dateutil`` parser,
but it doesn't support formats such as "N time ago", "vor N time" (German) or
"Hace N time" (Spanish).  Because of this, the dates are simply joined together
with the rest of the metadata.


Implementations
===============

"""
from urllib.parse import urlencode, urlparse

from searx import locales
from searx.network import get
from searx.utils import gen_useragent, html_to_text

about = {
    "website": "https://presearch.io",
    "wikidata_id": "Q7240905",
    "official_api_documentation": "https://docs.presearch.io/nodes/api",
    "use_official_api": False,
    "require_api_key": False,
    "results": "JSON",
}
paging = True
safesearch = True
time_range_support = True
send_accept_language_header = True
categories = ["general", "web"]  # general, images, videos, news

search_type = "search"
"""must be any of ``search``, ``images``, ``videos``, ``news``"""

base_url = "https://presearch.com"
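
# SearXNG safe-search levels (0: off, 1: moderate, 2: strict) mapped onto
# Presearch's boolean ``use_safe_search`` cookie value.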
safesearch_map = {0: 'false', 1: 'true', 2: 'true'}


def init(_):
    if search_type not in ['search', 'images', 'videos', 'news']:
        raise ValueError(f'presearch search_type: {search_type}')


def _get_request_id(query, params):
    args = {
        "q": query,
        "page": params["pageno"],
    }

    if params["time_range"]:
        args["time"] = params["time_range"]

    url = f"{base_url}/{search_type}?{urlencode(args)}"
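
    # ``use_local_search_results=false`` disables Presearch's local,
    # IP-geolocated results, so language and region fall back to the
    # ``Accept-Language`` header (see module docstring).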
    headers = {
        'User-Agent': gen_useragent(),
        'Cookie': (
            f"b=1;"
            f" presearch_session=;"
            f" use_local_search_results=false;"
            f" use_safe_search={safesearch_map[params['safesearch']]}"
        ),
    }
    if params['searxng_locale'] != 'all':
        l = locales.get_locale(params['searxng_locale'])

        # Presearch narrows down the search by region.  In SearXNG, when the
        # user does not set a region (e.g. 'en-CA' / Canada), we cannot hand
        # over a region.
        #
        # We could possibly use searx.locales.get_official_locales to determine
        # in which regions this language is an official one, but then we still
        # wouldn't know which region should be given more weight.  Presearch
        # performs an IP-based geolocation of the user; we don't want that in
        # SearXNG ;-)
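
        # For the SearXNG locale 'en-CA', for example, the header sent is:
        #   Accept-Language: en-CA,en;q=0.9,*;q=0.5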
        if l.territory:
            headers['Accept-Language'] = f"{l.language}-{l.territory},{l.language};" "q=0.9,*;" "q=0.5"

    resp_text = get(url, headers=headers).text  # type: ignore
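
    # The request-ID is embedded in an inline script on the HTML result page,
    # in a line of the form ``window.searchId = "<id>";``.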
    for line in resp_text.split("\n"):
        if "window.searchId = " in line:
            return line.split("= ")[1][:-1].replace('"', "")

    return None


def request(query, params):
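    # Presearch is queried in two steps: the first request (in
    # ``_get_request_id``) fetches the HTML result page to obtain a
    # request-ID; this second request then fetches the result list as JSON.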
    request_id = _get_request_id(query, params)
    params["headers"]["Accept"] = "application/json"
    params["url"] = f"{base_url}/results?id={request_id}"

    return params


def _strip_leading_strings(text):
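    # Infobox values scraped from Presearch often end with the source name
    # (e.g. "... Wikipedia"); drop such trailing attributions.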
    for x in ['wikipedia', 'google']:
        if text.lower().endswith(x):
            text = text[: -len(x)]
    return text.strip()


def _fix_title(title, url):
    """
    Titles from Presearch contain the result's domain glued to the actual
    title without any spacing, plus HTML markup; this function removes both,
    transforming "translate.google.co.in<em>Google</em> Translate" into
    "Google Translate".
    """
    parsed_url = urlparse(url)
    domain = parsed_url.netloc
    title = html_to_text(title)
    # Fixes issue where domain would show up in the title:
    # translate.google.co.inGoogle Translate -> Google Translate
    if (
        title.startswith(domain)
        and len(title) > len(domain)
        and not title.startswith(domain + "/")
        and not title.startswith(domain + " ")
    ):
        title = title.removeprefix(domain)
    return title


def parse_search_query(json_results):
    results = []
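
    # Items in the 'topStoriesCompact' special section are news-style teasers
    # with a thumbnail and a source attribution.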
    for item in json_results.get('specialSections', {}).get('topStoriesCompact', {}).get('data', []):
        result = {
            'url': item['link'],
            'title': _fix_title(item['title'], item['link']),
            'thumbnail': item['image'],
            'content': '',
            'metadata': item.get('source'),
        }
        results.append(result)

    for item in json_results.get('standardResults', []):
        result = {
            'url': item['link'],
            'title': _fix_title(item['title'], item['link']),
            'content': html_to_text(item['description']),
        }
        results.append(result)

    info = json_results.get('infoSection', {}).get('data')
    if info:
        attributes = []
        for item in info.get('about', []):
            text = html_to_text(item)
            if ':' in text:
                # split text into key / value
                label, value = text.split(':', 1)
            else:
                # In other languages (tested with zh-TW) a colon is represented
                # by a different symbol --> then we split at the first space.
                label, value = text.split(' ', 1)
                label = label[:-1]

            value = _strip_leading_strings(value)
            attributes.append({'label': label, 'value': value})

        content = []
        for item in [info.get('subtitle'), info.get('description')]:
            if not item:
                continue
            item = _strip_leading_strings(html_to_text(item))
            if item:
                content.append(item)

        results.append(
            {
                'infobox': info['title'],
                'id': info['title'],
                'img_src': info.get('image'),
                'content': ' | '.join(content),
                'attributes': attributes,
            }
        )
    return results


def response(resp):
    results = []
    json_resp = resp.json()

    if search_type == 'search':
        results = parse_search_query(json_resp.get('results'))

    elif search_type == 'images':
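        # Plain image hits, rendered with SearXNG's 'images.html' result
        # template.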
        for item in json_resp.get('images', []):
            results.append(
                {
                    'template': 'images.html',
                    'title': html_to_text(item['title']),
                    'url': item.get('link'),
                    'img_src': item.get('image'),
                    'thumbnail_src': item.get('thumbnail'),
                }
            )

    elif search_type == 'videos':
        # The results in the video category are most often links to pages that
        # contain a video and not to a video stream --> SearXNG can't use the
        # video template.
        for item in json_resp.get('videos', []):
            results.append(
                {
                    'title': html_to_text(item['title']),
                    'url': item.get('link'),
                    'content': item.get('description', ''),
                    'thumbnail': item.get('image'),
                    'length': item.get('duration'),
                }
            )

    elif search_type == 'news':
        for item in json_resp.get('news', []):
            source = item.get('source')
            # Bug on their end, time sometimes returns "</a>"
            time = html_to_text(item.get('time')).strip()
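            # Relative dates like "N time ago" can't be parsed reliably across
            # languages (see module docstring), so the time string is appended
            # to the displayed metadata instead of being parsed into a date.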
            metadata = [source]
            if time != "":
                metadata.append(time)

            results.append(
                {
                    'title': html_to_text(item['title']),
                    'url': item.get('link'),
                    'content': html_to_text(item.get('description', '')),
                    'metadata': ' / '.join(metadata),
                    'thumbnail': item.get('image'),
                }
            )
    return results