fetch_engine_descriptions.py

#!/usr/bin/env python
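"""Fetch engine descriptions from Wikidata, Wikipedia and the engines'
own websites, and dump them as JSON on stdout.

Assumed invocation (the script only needs the searx package importable,
which the sys.path tweak below takes care of):

    python fetch_engine_descriptions.py > engine_descriptions.json
"""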

import sys
import json
from urllib.parse import quote, urlparse
from os.path import realpath, dirname

import cld3
from lxml.html import fromstring

# add the repository root to sys.path so the searx package is importable
sys.path.append(realpath(dirname(realpath(__file__)) + '/../'))

from searx.engines.wikidata import send_wikidata_query
from searx.utils import extract_text
import searx
import searx.search
import searx.poolrequests

SPARQL_WIKIPEDIA_ARTICLE = """
SELECT DISTINCT ?item ?name
WHERE {
  VALUES ?item { %IDS% }
  ?article schema:about ?item ;
           schema:inLanguage ?lang ;
           schema:name ?name ;
           schema:isPartOf [ wikibase:wikiGroup "wikipedia" ] .
  FILTER(?lang in (%LANGUAGES_SPARQL%)) .
  FILTER (!CONTAINS(?name, ':')) .
}
"""

SPARQL_DESCRIPTION = """
SELECT DISTINCT ?item ?itemDescription
WHERE {
  VALUES ?item { %IDS% }
  ?item schema:description ?itemDescription .
  FILTER (lang(?itemDescription) in (%LANGUAGES_SPARQL%))
}
ORDER BY lang(?itemDescription)
"""

LANGUAGES = searx.settings['locales'].keys()
LANGUAGES_SPARQL = ', '.join(set(map(lambda l: repr(l.split('_')[0]), LANGUAGES)))
IDS = None

descriptions = {}
wd_to_engine_name = {}


def normalize_description(description):
    # replace ASCII control characters (0x00-0x1F) with spaces, then
    # collapse all remaining whitespace runs into single spaces
    for c in [chr(c) for c in range(0, 32)]:
        description = description.replace(c, ' ')
    description = ' '.join(description.strip().split())
    return description
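
# illustrative behaviour:
#   normalize_description(' A\x00meta \n search\tengine ') == 'A meta search engine'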


def update_description(engine_name, lang, description, source, replace=True):
    # with replace=False an already recorded description for that language wins
    if replace or lang not in descriptions[engine_name]:
        descriptions[engine_name][lang] = [normalize_description(description), source]


def get_wikipedia_summary(language, pageid):
    search_url = 'https://{language}.wikipedia.org/api/rest_v1/page/summary/{title}'
    url = search_url.format(title=quote(pageid), language=language)
    try:
        response = searx.poolrequests.get(url)
        response.raise_for_status()
        api_result = json.loads(response.text)
        return api_result.get('extract')
    except Exception:
        # network, HTTP or JSON errors: no summary for this article
        return None
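
# illustrative call (hypothetical article title):
#   get_wikipedia_summary('en', 'DuckDuckGo')
#   -> first paragraph of the article as plain text, or None on failure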


def detect_language(text):
    r = cld3.get_language(str(text))  # pylint: disable=E1101
    if r is not None and r.probability >= 0.98 and r.is_reliable:
        return r.language
    return None
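
# cld3 predictions carry .language, .probability and .is_reliable; the
# strict 0.98 threshold means short or mixed-language strings yield None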


def get_website_description(url, lang1, lang2=None):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'DNT': '1',
        'Upgrade-Insecure-Requests': '1',
        'Sec-GPC': '1',
        'Cache-Control': 'max-age=0',
    }
    if lang1 is not None:
        lang_list = [lang1]
        if lang2 is not None:
            lang_list.append(lang2)
        headers['Accept-Language'] = f'{",".join(lang_list)};q=0.8'
    try:
        response = searx.poolrequests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
    except Exception:
        return (None, None)
    try:
        html = fromstring(response.text)
    except ValueError:
        # decoding issue: fall back to parsing the raw bytes
        html = fromstring(response.content)

    description = extract_text(html.xpath('/html/head/meta[@name="description"]/@content'))
    if not description:
        description = extract_text(html.xpath('/html/head/meta[@property="og:description"]/@content'))
    if not description:
        description = extract_text(html.xpath('/html/head/title'))
    lang = extract_text(html.xpath('/html/@lang'))
    if lang is None and lang1 is not None:
        lang = lang1
    lang = detect_language(description) or lang or 'en'
    lang = lang.split('_')[0]
    lang = lang.split('-')[0]
    return (lang, description)
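
# returns a (language, description) pair, or (None, None) when the site
# could not be fetched; e.g. (illustrative)
#   get_website_description('https://example.org', None) -> ('en', 'Example Domain')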


def initialize():
    global descriptions, wd_to_engine_name, IDS
    searx.search.initialize()
    for engine_name, engine in searx.engines.engines.items():
        descriptions[engine_name] = {}
        wikidata_id = getattr(engine, "about", {}).get('wikidata_id')
        if wikidata_id is not None:
            wd_to_engine_name.setdefault(wikidata_id, set()).add(engine_name)

    IDS = ' '.join(list(map(lambda wd_id: 'wd:' + wd_id, wd_to_engine_name.keys())))
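
# IDS ends up as the space-separated VALUES list for the SPARQL queries,
# e.g. (illustrative) 'wd:Q9366 wd:Q12767 wd:Q486868'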


def fetch_wikidata_descriptions():
    result = send_wikidata_query(SPARQL_DESCRIPTION
                                 .replace('%IDS%', IDS)
                                 .replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL))
    if result is not None:
        for binding in result['results']['bindings']:
            wikidata_id = binding['item']['value'].replace('http://www.wikidata.org/entity/', '')
            lang = binding['itemDescription']['xml:lang']
            description = binding['itemDescription']['value']
            if ' ' in description:  # skip single-word descriptions (like "website")
                for engine_name in wd_to_engine_name[wikidata_id]:
                    update_description(engine_name, lang, description, 'wikidata')


def fetch_wikipedia_descriptions():
    result = send_wikidata_query(SPARQL_WIKIPEDIA_ARTICLE
                                 .replace('%IDS%', IDS)
                                 .replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL))
    if result is not None:
        for binding in result['results']['bindings']:
            wikidata_id = binding['item']['value'].replace('http://www.wikidata.org/entity/', '')
            lang = binding['name']['xml:lang']
            pageid = binding['name']['value']
            description = get_wikipedia_summary(lang, pageid)
            if description is not None and ' ' in description:
                for engine_name in wd_to_engine_name[wikidata_id]:
                    update_description(engine_name, lang, description, 'wikipedia')


def normalize_url(url):
    url = url.replace('{language}', 'en')
    url = urlparse(url)._replace(path='/', params='', query='', fragment='').geturl()
    url = url.replace('https://api.', 'https://')
    return url
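
# illustrative behaviour (hypothetical engine URL):
#   normalize_url('https://api.example.com/search?q={query}')
#   -> 'https://example.com/'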


def fetch_website_description(engine_name, website):
    default_lang, default_description = get_website_description(website, None, None)
    if default_lang is None or default_description is None:
        return
    if default_lang not in descriptions[engine_name]:
        descriptions[engine_name][default_lang] = [normalize_description(default_description), website]
    for request_lang in ('en-US', 'es-US', 'fr-FR', 'zh', 'ja', 'ru', 'ar', 'ko'):
        if request_lang.split('-')[0] not in descriptions[engine_name]:
            lang, desc = get_website_description(website, request_lang, request_lang.split('-')[0])
            if desc is not None and desc != default_description:
                update_description(engine_name, lang, desc, website, replace=False)
            else:
                break
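
# the probe loop breaks at the first language that yields nothing new,
# assuming the site serves no further localized descriptions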


def fetch_website_descriptions():
    for engine_name, engine in searx.engines.engines.items():
        website = getattr(engine, "about", {}).get('website')
        if website is None and hasattr(engine, "search_url"):
            website = normalize_url(getattr(engine, "search_url"))
        if website is None and hasattr(engine, "base_url"):
            website = normalize_url(getattr(engine, "base_url"))
        if website is not None:
            fetch_website_description(engine_name, website)


def main():
    initialize()
    fetch_wikidata_descriptions()
    fetch_wikipedia_descriptions()
    fetch_website_descriptions()
    sys.stdout.write(json.dumps(descriptions, indent=1, separators=(',', ':'), ensure_ascii=False))


if __name__ == "__main__":
    main()