utils.py

# -*- coding: utf-8 -*-
import sys
import re
import importlib
from numbers import Number
from os.path import splitext, join
from random import choice
from html.parser import HTMLParser
from urllib.parse import urljoin, urlparse, urlunparse

from lxml import html
from lxml.etree import ElementBase, XPath, XPathError, XPathSyntaxError, _ElementStringResult, _ElementUnicodeResult
from babel.core import get_global

from searx import settings
from searx.data import USER_AGENTS
from searx.version import VERSION_STRING
from searx.languages import language_codes
from searx.exceptions import SearxXPathSyntaxException, SearxEngineXPathException
from searx import logger

logger = logger.getChild('utils')

blocked_tags = ('script',
                'style')

ecma_unescape4_re = re.compile(r'%u([0-9a-fA-F]{4})', re.UNICODE)
ecma_unescape2_re = re.compile(r'%([0-9a-fA-F]{2})', re.UNICODE)

xpath_cache = dict()
lang_to_lc_cache = dict()


class NotSetClass:
    pass


NOTSET = NotSetClass()


def searx_useragent():
    """Return the searx User Agent"""
    return 'searx/{searx_version} {suffix}'.format(
        searx_version=VERSION_STRING,
        suffix=settings['outgoing'].get('useragent_suffix', ''))


def gen_useragent(os=None):
    """Return a random browser User Agent

    See searx/data/useragents.json
    """
    return str(USER_AGENTS['ua'].format(os=os or choice(USER_AGENTS['os']), version=choice(USER_AGENTS['versions'])))


class HTMLTextExtractorException(Exception):
    pass


class HTMLTextExtractor(HTMLParser):  # pylint: disable=W0223  # (see https://bugs.python.org/issue31844)

    def __init__(self):
        HTMLParser.__init__(self)
        self.result = []
        self.tags = []

    def handle_starttag(self, tag, attrs):
        self.tags.append(tag)

    def handle_endtag(self, tag):
        if not self.tags:
            return

        if tag != self.tags[-1]:
            raise HTMLTextExtractorException()

        self.tags.pop()

    def is_valid_tag(self):
        return not self.tags or self.tags[-1] not in blocked_tags

    def handle_data(self, data):
        if not self.is_valid_tag():
            return
        self.result.append(data)

    def handle_charref(self, name):
        if not self.is_valid_tag():
            return
        if name[0] in ('x', 'X'):
            codepoint = int(name[1:], 16)
        else:
            codepoint = int(name)
        self.result.append(chr(codepoint))

    def handle_entityref(self, name):
        if not self.is_valid_tag():
            return
        # codepoint = htmlentitydefs.name2codepoint[name]
        # self.result.append(chr(codepoint))
        self.result.append(name)

    def get_text(self):
        return ''.join(self.result).strip()


def html_to_text(html_str):
    """Extract text from an HTML string

    Args:
        * html_str (str): string HTML

    Returns:
        * str: extracted text

    Examples:
        >>> html_to_text('Example <span id="42">#2</span>')
        'Example #2'

        >>> html_to_text('<style>.span { color: red; }</style><span>Example</span>')
        'Example'
    """
    html_str = html_str.replace('\n', ' ')
    html_str = ' '.join(html_str.split())
    s = HTMLTextExtractor()
    try:
        s.feed(html_str)
    except HTMLTextExtractorException:
        logger.debug("HTMLTextExtractor: invalid HTML\n%s", html_str)
    return s.get_text()


def extract_text(xpath_results, allow_none=False):
    """Extract text from an lxml result

    * if xpath_results is a list, extract the text from each result and concat the list
    * if xpath_results is an xml element, extract all the text nodes from it
      ( text_content() method from lxml )
    * if xpath_results is a string element, then it's already done
    """
    if isinstance(xpath_results, list):
        # it's a list of results: concat everything using a recursive call
        result = ''
        for e in xpath_results:
            result = result + extract_text(e)
        return result.strip()
    elif isinstance(xpath_results, ElementBase):
        # it's an element
        text = html.tostring(
            xpath_results, encoding='unicode', method='text', with_tail=False
        )
        text = text.strip().replace('\n', ' ')
        return ' '.join(text.split())
    elif isinstance(xpath_results, (_ElementStringResult, _ElementUnicodeResult, str, Number, bool)):
        return str(xpath_results)
    elif xpath_results is None and allow_none:
        return None
    elif xpath_results is None and not allow_none:
        raise ValueError('extract_text(None, allow_none=False)')
    else:
        raise ValueError('unsupported type')
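
# Illustrative example (doctest-style; the HTML snippet is an assumed input,
# not taken from this module):
#
#   >>> from lxml import html
#   >>> extract_text(html.fromstring('<span>Hello <b>world</b></span>'))
#   'Hello world'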


def normalize_url(url, base_url):
    """Normalize URL: add protocol, join URL with base_url, add trailing slash if there is no path

    Args:
        * url (str): Relative URL
        * base_url (str): Base URL, it must be an absolute URL.

    Example:
        >>> normalize_url('https://example.com', 'http://example.com/')
        'https://example.com/'
        >>> normalize_url('//example.com', 'http://example.com/')
        'http://example.com/'
        >>> normalize_url('//example.com', 'https://example.com/')
        'https://example.com/'
        >>> normalize_url('/path?a=1', 'https://example.com')
        'https://example.com/path?a=1'
        >>> normalize_url('', 'https://example.com')
        'https://example.com/'
        >>> normalize_url('/test', '/path')
        raise ValueError

    Raises:
        * ValueError: when the resulting URL has no netloc (cannot be parsed)

    Returns:
        * str: normalized URL
    """
    if url.startswith('//'):
        # add http or https to this kind of url (//example.com/)
        parsed_search_url = urlparse(base_url)
        url = '{0}:{1}'.format(parsed_search_url.scheme or 'http', url)
    elif url.startswith('/'):
        # fix relative url to the search engine
        url = urljoin(base_url, url)

    # fix relative urls that fall through the cracks
    if '://' not in url:
        url = urljoin(base_url, url)

    parsed_url = urlparse(url)

    # add a / at the end of the url if there is no path
    if not parsed_url.netloc:
        raise ValueError('Cannot parse url')
    if not parsed_url.path:
        url += '/'

    return url


def add_scheme_to_url(url, scheme="https"):
    """Add scheme to URL: if the scheme is missing from the URL, then add it."""
    parsed = urlparse(url)
    if parsed.scheme == '':
        parsed_with_scheme = parsed._replace(scheme=scheme)
        return urlunparse(parsed_with_scheme)
    return url
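
# Illustrative example (doctest-style; the protocol-relative URL is an assumed input):
#
#   >>> add_scheme_to_url('//example.com/path')
#   'https://example.com/path'
#   >>> add_scheme_to_url('http://example.com/')
#   'http://example.com/'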


def extract_url(xpath_results, base_url):
    """Extract and normalize URL from lxml Element

    Args:
        * xpath_results (Union[List[html.HtmlElement], html.HtmlElement]): lxml Element(s)
        * base_url (str): Base URL

    Example:
        >>> def f(s, search_url):
        >>>     return searx.utils.extract_url(html.fromstring(s), search_url)
        >>> f('<span id="42">https://example.com</span>', 'http://example.com/')
        'https://example.com/'
        >>> f('https://example.com', 'http://example.com/')
        'https://example.com/'
        >>> f('//example.com', 'http://example.com/')
        'http://example.com/'
        >>> f('//example.com', 'https://example.com/')
        'https://example.com/'
        >>> f('/path?a=1', 'https://example.com')
        'https://example.com/path?a=1'
        >>> f('', 'https://example.com')
        raise lxml.etree.ParserError
        >>> searx.utils.extract_url([], 'https://example.com')
        raise ValueError

    Raises:
        * ValueError
        * lxml.etree.ParserError

    Returns:
        * str: normalized URL
    """
    if xpath_results == []:
        raise ValueError('Empty url resultset')

    url = extract_text(xpath_results)
    return normalize_url(url, base_url)


def dict_subset(d, properties):
    """Extract a subset of a dict

    Examples:
        >>> dict_subset({'A': 'a', 'B': 'b', 'C': 'c'}, ['A', 'C'])
        {'A': 'a', 'C': 'c'}
        >>> dict_subset({'A': 'a', 'B': 'b', 'C': 'c'}, ['A', 'D'])
        {'A': 'a'}
    """
    return {k: d[k] for k in properties if k in d}


def get_torrent_size(filesize, filesize_multiplier):
    """
    Args:
        * filesize (str): size
        * filesize_multiplier (str): TB, GB, .... TiB, GiB...

    Returns:
        * int: number of bytes

    Example:
        >>> get_torrent_size('5', 'GB')
        5368709120
        >>> get_torrent_size('3.14', 'MiB')
        3140000
    """
    # note: as implemented below, the *B suffixes use 1024-based multipliers
    # while the *iB suffixes use 1000-based multipliers
    try:
        filesize = float(filesize)

        if filesize_multiplier == 'TB':
            filesize = int(filesize * 1024 * 1024 * 1024 * 1024)
        elif filesize_multiplier == 'GB':
            filesize = int(filesize * 1024 * 1024 * 1024)
        elif filesize_multiplier == 'MB':
            filesize = int(filesize * 1024 * 1024)
        elif filesize_multiplier == 'KB':
            filesize = int(filesize * 1024)
        elif filesize_multiplier == 'TiB':
            filesize = int(filesize * 1000 * 1000 * 1000 * 1000)
        elif filesize_multiplier == 'GiB':
            filesize = int(filesize * 1000 * 1000 * 1000)
        elif filesize_multiplier == 'MiB':
            filesize = int(filesize * 1000 * 1000)
        elif filesize_multiplier == 'KiB':
            filesize = int(filesize * 1000)
    except ValueError:
        filesize = None

    return filesize


def convert_str_to_int(number_str):
    """Convert number_str to int or 0 if number_str is not a number."""
    if number_str.isdigit():
        return int(number_str)
    else:
        return 0


def int_or_zero(num):
    """Convert num to int or 0. num can be either a str or a list.
    If num is a list, the first element is converted to int (or return 0 if the list is empty).
    If num is a str, see convert_str_to_int
    """
    if isinstance(num, list):
        if len(num) < 1:
            return 0
        num = num[0]
    return convert_str_to_int(num)
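
# Illustrative examples (doctest-style; the inputs are assumed values):
#
#   >>> int_or_zero(['42', '7'])
#   42
#   >>> int_or_zero([])
#   0
#   >>> int_or_zero('not a number')
#   0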


def is_valid_lang(lang):
    """Return language code and name if lang describes a language.

    Examples:
        >>> is_valid_lang('zz')
        False
        >>> is_valid_lang('uk')
        (True, 'uk', 'ukrainian')
        >>> is_valid_lang(b'uk')
        (True, 'uk', 'ukrainian')
        >>> is_valid_lang('en')
        (True, 'en', 'english')
        >>> searx.utils.is_valid_lang('Español')
        (True, 'es', 'spanish')
        >>> searx.utils.is_valid_lang('Spanish')
        (True, 'es', 'spanish')
    """
    if isinstance(lang, bytes):
        lang = lang.decode()
    is_abbr = (len(lang) == 2)
    lang = lang.lower()
    if is_abbr:
        for l in language_codes:
            if l[0][:2] == lang:
                return (True, l[0][:2], l[3].lower())
        return False
    else:
        for l in language_codes:
            if l[1].lower() == lang or l[3].lower() == lang:
                return (True, l[0][:2], l[3].lower())
        return False


def _get_lang_to_lc_dict(lang_list):
    key = str(lang_list)
    value = lang_to_lc_cache.get(key, None)
    if value is None:
        value = dict()
        for lc in lang_list:
            value.setdefault(lc.split('-')[0], lc)
        lang_to_lc_cache[key] = value
    return value
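
# Illustrative example (doctest-style; the language list is an assumed value):
#
#   >>> _get_lang_to_lc_dict(['en-US', 'en-GB', 'fr-FR'])
#   {'en': 'en-US', 'fr': 'fr-FR'}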


def _match_language(lang_code, lang_list=[], custom_aliases={}):  # pylint: disable=W0102
    """auxiliary function to match lang_code in lang_list"""
    # replace language code with a custom alias if necessary
    if lang_code in custom_aliases:
        lang_code = custom_aliases[lang_code]

    if lang_code in lang_list:
        return lang_code

    # try to get the most likely country for this language
    subtags = get_global('likely_subtags').get(lang_code)
    if subtags:
        subtag_parts = subtags.split('_')
        new_code = subtag_parts[0] + '-' + subtag_parts[-1]
        if new_code in custom_aliases:
            new_code = custom_aliases[new_code]
        if new_code in lang_list:
            return new_code

    # try to get any supported country for this language
    return _get_lang_to_lc_dict(lang_list).get(lang_code, None)


def match_language(locale_code, lang_list=[], custom_aliases={}, fallback='en-US'):  # pylint: disable=W0102
    """get the language code from lang_list that best matches locale_code"""
    # try to get language from given locale_code
    language = _match_language(locale_code, lang_list, custom_aliases)
    if language:
        return language

    locale_parts = locale_code.split('-')
    lang_code = locale_parts[0]

    # try to get language using an equivalent country code
    if len(locale_parts) > 1:
        country_alias = get_global('territory_aliases').get(locale_parts[-1])
        if country_alias:
            language = _match_language(lang_code + '-' + country_alias[0], lang_list, custom_aliases)
            if language:
                return language

    # try to get language using an equivalent language code
    alias = get_global('language_aliases').get(lang_code)
    if alias:
        language = _match_language(alias, lang_list, custom_aliases)
        if language:
            return language

    if lang_code != locale_code:
        # try to get language from the language code alone, without the country
        language = _match_language(lang_code, lang_list, custom_aliases)

    return language or fallback
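
# Illustrative examples (doctest-style; the locale codes and language list are
# assumed values, and the exact matches depend on babel's CLDR data):
#
#   >>> match_language('es-MX', ['es-ES', 'en-US'])
#   'es-ES'
#   >>> match_language('xx-XX', ['es-ES', 'en-US'])
#   'en-US'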


def load_module(filename, module_dir):
    modname = splitext(filename)[0]
    if modname in sys.modules:
        del sys.modules[modname]
    filepath = join(module_dir, filename)
    # see https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
    spec = importlib.util.spec_from_file_location(modname, filepath)
    module = importlib.util.module_from_spec(spec)
    sys.modules[modname] = module
    spec.loader.exec_module(module)
    return module
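
# Illustrative usage sketch (the file name and directory below are hypothetical):
#
#   >>> engine = load_module('example_engine.py', '/usr/local/searx/searx/engines')
#   >>> engine.__name__
#   'example_engine'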


def to_string(obj):
    """Convert obj to its string representation."""
    if isinstance(obj, str):
        return obj
    if isinstance(obj, Number):
        return str(obj)
    if hasattr(obj, '__str__'):
        return obj.__str__()
    if hasattr(obj, '__repr__'):
        return obj.__repr__()


def ecma_unescape(s):
    """Python implementation of the unescape javascript function

    https://www.ecma-international.org/ecma-262/6.0/#sec-unescape-string
    https://developer.mozilla.org/fr/docs/Web/JavaScript/Reference/Objets_globaux/unescape

    Examples:
        >>> ecma_unescape('%u5409')
        '吉'
        >>> ecma_unescape('%20')
        ' '
        >>> ecma_unescape('%F3')
        'ó'
    """
    # "%u5409" becomes "吉"
    s = ecma_unescape4_re.sub(lambda e: chr(int(e.group(1), 16)), s)
    # "%20" becomes " ", "%F3" becomes "ó"
    s = ecma_unescape2_re.sub(lambda e: chr(int(e.group(1), 16)), s)
    return s


def get_string_replaces_function(replaces):
    rep = {re.escape(k): v for k, v in replaces.items()}
    pattern = re.compile("|".join(rep.keys()))

    def f(text):
        return pattern.sub(lambda m: rep[re.escape(m.group(0))], text)

    return f
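
# Illustrative example (doctest-style; the replacement table and the
# escape_html name are assumed values):
#
#   >>> escape_html = get_string_replaces_function({'<': '&lt;', '>': '&gt;'})
#   >>> escape_html('<b>')
#   '&lt;b&gt;'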


def get_engine_from_settings(name):
    """Return engine configuration from settings.yml of a given engine name"""
    if 'engines' not in settings:
        return {}

    for engine in settings['engines']:
        if 'name' not in engine:
            continue
        if name == engine['name']:
            return engine

    return {}
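
# Illustrative usage sketch (the returned dict depends on the local settings.yml;
# an unknown engine name always yields an empty dict):
#
#   >>> get_engine_from_settings('engine-that-does-not-exist')
#   {}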


def get_xpath(xpath_spec):
    """Return cached compiled XPath

    There is no thread lock.
    Worst case scenario, xpath_str is compiled more than one time.

    Args:
        * xpath_spec (str|lxml.etree.XPath): XPath as a str or lxml.etree.XPath

    Returns:
        * lxml.etree.XPath: compiled XPath

    Raises:
        * TypeError: Raise when xpath_spec is neither a str nor a lxml.etree.XPath
        * SearxXPathSyntaxException: Raise when there is a syntax error in the XPath
    """
    if isinstance(xpath_spec, str):
        result = xpath_cache.get(xpath_spec, None)
        if result is None:
            try:
                result = XPath(xpath_spec)
            except XPathSyntaxError as e:
                raise SearxXPathSyntaxException(xpath_spec, str(e.msg)) from e
            xpath_cache[xpath_spec] = result
        return result

    if isinstance(xpath_spec, XPath):
        return xpath_spec

    raise TypeError('xpath_spec must be either a str or a lxml.etree.XPath')


def eval_xpath(element, xpath_spec):
    """Equivalent of element.xpath(xpath_str) but compile xpath_str once for all calls.

    See https://lxml.de/xpathxslt.html#xpath-return-values

    Args:
        * element (ElementBase): lxml element to apply the XPath on
        * xpath_spec (str|lxml.etree.XPath): XPath as a str or lxml.etree.XPath

    Returns:
        * result (bool, float, list, str): Results.

    Raises:
        * TypeError: Raise when xpath_spec is neither a str nor a lxml.etree.XPath
        * SearxXPathSyntaxException: Raise when there is a syntax error in the XPath
        * SearxEngineXPathException: Raise when the XPath can't be evaluated.
    """
    xpath = get_xpath(xpath_spec)
    try:
        return xpath(element)
    except XPathError as e:
        arg = ' '.join([str(i) for i in e.args])
        raise SearxEngineXPathException(xpath_spec, arg) from e
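
# Illustrative example (doctest-style; the HTML snippet is an assumed input):
#
#   >>> from lxml import html
#   >>> doc = html.fromstring('<div><a href="https://example.com/">link</a></div>')
#   >>> eval_xpath(doc, '//a/@href')
#   ['https://example.com/']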


def eval_xpath_list(element, xpath_spec, min_len=None):
    """Same as eval_xpath, but check that the result is a list

    Args:
        * element (ElementBase): lxml element to apply the XPath on
        * xpath_spec (str|lxml.etree.XPath): XPath as a str or lxml.etree.XPath
        * min_len (int, optional): minimum length of the result list. Defaults to None.

    Raises:
        * TypeError: Raise when xpath_spec is neither a str nor a lxml.etree.XPath
        * SearxXPathSyntaxException: Raise when there is a syntax error in the XPath
        * SearxEngineXPathException: Raise if the result is not a list

    Returns:
        * result (list): Results.
    """
    result = eval_xpath(element, xpath_spec)
    if not isinstance(result, list):
        raise SearxEngineXPathException(xpath_spec, 'the result is not a list')
    if min_len is not None and min_len > len(result):
        raise SearxEngineXPathException(xpath_spec, 'len(xpath_str) < ' + str(min_len))
    return result
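
# Illustrative example (doctest-style; the HTML snippet is an assumed input):
#
#   >>> from lxml import html
#   >>> doc = html.fromstring('<ul><li>a</li><li>b</li></ul>')
#   >>> eval_xpath_list(doc, '//li/text()', min_len=2)
#   ['a', 'b']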


def eval_xpath_getindex(elements, xpath_spec, index, default=NOTSET):
    """Call eval_xpath_list then get one element using the index parameter.
    If the index does not exist, raise an exception if default is not set,
    otherwise return the default value (which can be None).

    Args:
        * elements (ElementBase): lxml element to apply the XPath on.
        * xpath_spec (str|lxml.etree.XPath): XPath as a str or lxml.etree.XPath.
        * index (int): index to get
        * default (Object, optional): Defaults if index doesn't exist.

    Raises:
        * TypeError: Raise when xpath_spec is neither a str nor a lxml.etree.XPath
        * SearxXPathSyntaxException: Raise when there is a syntax error in the XPath
        * SearxEngineXPathException: if the index is not found. Also see eval_xpath.

    Returns:
        * result (bool, float, list, str): Results.
    """
    result = eval_xpath_list(elements, xpath_spec)
    if index >= -len(result) and index < len(result):
        return result[index]
    if default == NOTSET:
        # raise a SearxEngineXPathException instead of an IndexError
        # to record xpath_spec
        raise SearxEngineXPathException(xpath_spec, 'index ' + str(index) + ' not found')
    return default
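
# Illustrative example (doctest-style; the HTML snippet is an assumed input):
#
#   >>> from lxml import html
#   >>> doc = html.fromstring('<div><a href="https://example.com/">link</a></div>')
#   >>> eval_xpath_getindex(doc, '//a/@href', 0)
#   'https://example.com/'
#   >>> eval_xpath_getindex(doc, '//span/text()', 0, default=None) is None
#   True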