
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Doku Wiki
"""
from urllib.parse import urlencode
from lxml.html import fromstring
from searx.utils import extract_text, eval_xpath

# about
about = {
    "website": 'https://www.dokuwiki.org/',
    "wikidata_id": 'Q851864',
    "official_api_documentation": 'https://www.dokuwiki.org/devel:xmlrpc',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general']  # 'images', 'music', 'videos', 'files'
paging = False
number_of_results = 5
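
# A minimal settings.yml entry for this engine might look like the sketch
# below; the shortcut and base_url values are illustrative, not requirements:
#
#   - name: doku
#     engine: doku
#     shortcut: dk
#     base_url: 'http://localhost:8090'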

# search-url
# Doku is OpenSearch compatible
base_url = 'http://localhost:8090'
search_url = (
    # fmt: off
    '/?do=search'
    '&{query}'
    # fmt: on
)
# '&startRecord={offset}'
# '&maximumRecords={limit}'
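
# NOTE: paging is disabled above (paging = False), so the paging parameters
# kept commented out here are for reference only and are never sent.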

# do search-request
def request(query, params):
    params['url'] = base_url + search_url.format(query=urlencode({'id': query}))
    return params
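
# Illustration (not part of the engine): for the query "wiki", urlencode()
# yields 'id=wiki', so with the default base_url the request URL becomes
#   http://localhost:8090/?do=search&id=wiki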

# get response from search-request
def response(resp):
    results = []

    doc = fromstring(resp.text)

    # parse results
    # Quickhits
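    # The quick-hit list is expected to look roughly like
    #   <div class="search_quickresult"><ul>
    #     <li><a class="wikilink1" href="..." title="...">...</a></li>
    #   </ul></div>
    # (an assumption read off the XPath expressions below, not a verified
    # template for every DokuWiki skin)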
    for r in eval_xpath(doc, '//div[@class="search_quickresult"]/ul/li'):
        try:
            res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
        except:  # pylint: disable=bare-except
            continue

        if not res_url:
            continue

        title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))

        # append result
        results.append({'title': title, 'content': "", 'url': base_url + res_url})

    # Search results
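    # Fulltext hits arrive as alternating <dt>/<dd> children of
    # <dl class="search_results">: each <dt> carries the wikilink1 anchor,
    # the following <dd> carries the text snippet, and a result is appended
    # once its <dd> has been seen.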
    for r in eval_xpath(doc, '//dl[@class="search_results"]/*'):
        try:
            if r.tag == "dt":
                res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
                title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))
            elif r.tag == "dd":
                content = extract_text(eval_xpath(r, '.'))

                # append result
                results.append({'title': title, 'content': content, 'url': base_url + res_url})
        except:  # pylint: disable=bare-except
            continue

        if not res_url:
            continue

    # return results
    return results
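
# A standalone smoke test, assuming a reachable DokuWiki at base_url and the
# `requests` package (in normal operation SearXNG drives request()/response()
# itself):
#
#   import requests
#
#   params = request('wiki', {})
#   resp = requests.get(params['url'])
#   for result in response(resp):
#       print(result['url'], '-', result['title'])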