# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Recoll (local search engine)
"""

from datetime import date, timedelta
from json import loads
from urllib.parse import urlencode, quote

# about
about = {
    "website": None,
    "wikidata_id": 'Q15735774',
    "official_api_documentation": 'https://www.lesbonscomptes.com/recoll/',
    "use_official_api": True,
    "require_api_key": False,
    "results": 'JSON',
}

# engine dependent config
paging = True
time_range_support = True

# parameters from settings.yml
base_url = None
search_dir = ''
mount_prefix = None
dl_prefix = None
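
# Example settings.yml entry (illustrative values -- adjust to your setup).
# mount_prefix is the local mount point that appears in the file:// URLs
# returned by Recoll; dl_prefix replaces it so results point to a
# downloadable HTTP location instead:
#
#   - name: recoll library
#     engine: recoll
#     base_url: 'https://recoll.example.org/'
#     mount_prefix: /export
#     dl_prefix: 'https://download.example.org'
#     search_dir: ''
#     shortcut: rcl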

# embedded
embedded_url = '<{ttype} controls height="166px" ' +\
    'src="{url}" type="{mtype}"></{ttype}>'
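# The template above expands to an HTML5 <audio> or <video> element so that
# matching results can be previewed directly in the result list.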


# helper functions
def get_time_range(time_range):
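    """Map a time_range name ('day', 'week', 'month' or 'year') to the ISO
    date that many days before today; unknown or empty ranges yield '' so
    the 'after' filter stays inactive."""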
    sw = {
        'day': 1,
        'week': 7,
        'month': 30,
        'year': 365
    }

    offset = sw.get(time_range, 0)
    if not offset:
        return ''

    return (date.today() - timedelta(days=offset)).isoformat()


# do search-request
def request(query, params):
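    """Build the Recoll WebUI query URL; the result has roughly the form
    <base_url>json?query=...&page=...&after=...&dir=...&highlight=0."""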
    search_after = get_time_range(params['time_range'])
    search_url = base_url + 'json?{query}&highlight=0'
    params['url'] = search_url.format(query=urlencode({
        'query': query,
        'page': params['pageno'],
        'after': search_after,
        'dir': search_dir}))

    return params


# get response from search-request
def response(resp):
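    """Parse the Recoll WebUI JSON reply into result items for the
    files.html template (see the illustrative payload sketch below)."""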
    results = []

    response_json = loads(resp.text)

    if not response_json:
        return []

    for result in response_json.get('results', []):
        title = result['label']
        url = result['url'].replace('file://' + mount_prefix, dl_prefix)
        content = '{}'.format(result['snippet'])

        # append result
        item = {'url': url,
                'title': title,
                'content': content,
                'template': 'files.html'}

        if result['size']:
            item['size'] = int(result['size'])

        for parameter in ['filename', 'abstract', 'author', 'mtype', 'time']:
            if result[parameter]:
                item[parameter] = result[parameter]

        # facilitate preview support for known mime types
        if 'mtype' in result and '/' in result['mtype']:
            (mtype, subtype) = result['mtype'].split('/')
            item['mtype'] = mtype
            item['subtype'] = subtype

            if mtype in ['audio', 'video']:
                item['embedded'] = embedded_url.format(
                    ttype=mtype,
                    url=quote(url.encode('utf8'), '/:'),
                    mtype=result['mtype'])

            if mtype in ['image'] and subtype in ['bmp', 'gif', 'jpeg', 'png']:
                item['img_src'] = url

        results.append(item)

    if 'nres' in response_json:
        results.append({'number_of_results': response_json['nres']})

    return results
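

# Illustrative (not authoritative) shape of a Recoll WebUI reply consumed by
# response() above, inferred from the fields it reads:
#
#   {
#     "nres": 42,
#     "results": [
#       {"label": "report.pdf",
#        "url": "file:///export/docs/report.pdf",
#        "snippet": "...",
#        "size": "12345",
#        "filename": "report.pdf",
#        "abstract": "...",
#        "author": "...",
#        "mtype": "application/pdf",
#        "time": "..."}
#     ]
#   }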