duden.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""Duden"""

import re
from urllib.parse import quote, urljoin

from lxml import html

from searx.utils import extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
from searx.network import raise_for_httperror

# about
about = {
    "website": 'https://www.duden.de',
    "wikidata_id": 'Q73624591',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
    "language": 'de',
}

categories = ['dictionaries']
paging = True

# search-url
base_url = 'https://www.duden.de/'
search_url = base_url + 'suchen/dudenonline/{query}?search_api_fulltext=&page={offset}'
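# for illustration, page 2 of a search for "Blume" formats to:
#   search_url.format(query=quote('Blume'), offset=1)
#   -> 'https://www.duden.de/suchen/dudenonline/Blume?search_api_fulltext=&page=1'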


def request(query, params):
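    """Build the request URL for ``query``: page 1 uses the plain search path,
    later pages the paged ``search_url``."""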
    offset = params['pageno'] - 1
    if offset == 0:
        search_url_fmt = base_url + 'suchen/dudenonline/{query}'
        params['url'] = search_url_fmt.format(query=quote(query))
    else:
        params['url'] = search_url.format(offset=offset, query=quote(query))

    # after the last page of results, spelling corrections are returned after a HTTP redirect
    # whatever the page number is
    params['soft_max_redirects'] = 1
    params['raise_for_httperror'] = False
    return params


def response(resp):
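    """Extract the total hit count and the individual dictionary entries from
    the result page."""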
    results = []

    if resp.status_code == 404:
        return results

    raise_for_httperror(resp)

    dom = html.fromstring(resp.text)
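
    # the total hit count is read from the active pager link; everything but
    # digits (e.g. thousands separators) is stripped before the int() conversion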
    number_of_results_element = eval_xpath_getindex(
        dom, '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()', 0, default=None
    )
    if number_of_results_element is not None:
        number_of_results_string = re.sub('[^0-9]', '', number_of_results_element)
        results.append({'number_of_results': int(number_of_results_string)})
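
    # each hit is a <section> element; sections whose class contains "essay"
    # are skipped by the XPath below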
    for result in eval_xpath_list(dom, '//section[not(contains(@class, "essay"))]'):
        url = eval_xpath_getindex(result, './/h2/a', 0).get('href')
        url = urljoin(base_url, url)
        title = eval_xpath(result, 'string(.//h2/a)').strip()
        content = extract_text(eval_xpath(result, './/p'))
        # append result
        results.append({'url': url, 'title': title, 'content': content})

    return results
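
# A minimal sketch of how these two callbacks fit together (assumed flow, for
# illustration only; the actual scheduling lives in SearXNG's engine framework):
#
#     params = request('Blume', {'pageno': 2})
#     resp = ...  # HTTP GET on params['url'], honoring the redirect/error flags
#     results = response(resp)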