doku.py (2.2 KB)
  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. """
  3. Doku Wiki
  4. """
  5. from urllib.parse import urlencode
  6. from lxml.html import fromstring
  7. from searx.utils import extract_text, eval_xpath
  8. # about
  9. about = {
  10. "website": 'https://www.dokuwiki.org/',
  11. "wikidata_id": 'Q851864',
  12. "official_api_documentation": 'https://www.dokuwiki.org/devel:xmlrpc',
  13. "use_official_api": False,
  14. "require_api_key": False,
  15. "results": 'HTML',
  16. }
  17. # engine dependent config
  18. categories = ['general'] # TODO , 'images', 'music', 'videos', 'files'
  19. paging = False
  20. number_of_results = 5
  21. # search-url
  22. # Doku is OpenSearch compatible
  23. base_url = 'http://localhost:8090'
  24. search_url = '/?do=search'\
  25. '&{query}'
  26. # TODO '&startRecord={offset}'\
  27. # TODO '&maximumRecords={limit}'\
  28. # do search-request
  29. def request(query, params):
  30. params['url'] = base_url +\
  31. search_url.format(query=urlencode({'id': query}))
  32. return params
  33. # get response from search-request
  34. def response(resp):
  35. results = []
  36. doc = fromstring(resp.text)
  37. # parse results
  38. # Quickhits
  39. for r in eval_xpath(doc, '//div[@class="search_quickresult"]/ul/li'):
  40. try:
  41. res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
  42. except:
  43. continue
  44. if not res_url:
  45. continue
  46. title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))
  47. # append result
  48. results.append({'title': title,
  49. 'content': "",
  50. 'url': base_url + res_url})
  51. # Search results
  52. for r in eval_xpath(doc, '//dl[@class="search_results"]/*'):
  53. try:
  54. if r.tag == "dt":
  55. res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
  56. title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))
  57. elif r.tag == "dd":
  58. content = extract_text(eval_xpath(r, '.'))
  59. # append result
  60. results.append({'title': title,
  61. 'content': content,
  62. 'url': base_url + res_url})
  63. except:
  64. continue
  65. if not res_url:
  66. continue
  67. # return results
  68. return results