base.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""BASE (Scholar publications)
"""

from datetime import datetime
import re
from urllib.parse import urlencode

from lxml import etree

from searx.utils import searx_useragent

# about
about = {
    "website": 'https://base-search.net',
    "wikidata_id": 'Q448335',
    "official_api_documentation": 'https://api.base-search.net/',
    "use_official_api": True,
    "require_api_key": False,
    "results": 'XML',
}

categories = ['science']

base_url = (
    'https://api.base-search.net/cgi-bin/BaseHttpSearchInterface.fcgi'
    + '?func=PerformSearch&{query}&boost=oa&hits={hits}&offset={offset}'
)

# engine dependent config
paging = True
number_of_results = 10

# shortcuts for advanced search
shortcut_dict = {
    # user-friendly keywords
    'format:': 'dcformat:',
    'author:': 'dccreator:',
    'collection:': 'dccollection:',
    'hdate:': 'dchdate:',
    'contributor:': 'dccontributor:',
    'coverage:': 'dccoverage:',
    'date:': 'dcdate:',
    'abstract:': 'dcdescription:',
    'urls:': 'dcidentifier:',
    'language:': 'dclanguage:',
    'publisher:': 'dcpublisher:',
    'relation:': 'dcrelation:',
    'rights:': 'dcrights:',
    'source:': 'dcsource:',
    'subject:': 'dcsubject:',
    'title:': 'dctitle:',
    'type:': 'dcdctype:',
}
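
# Example: request() below rewrites a user query such as
#     author:einstein title:relativity
# into the API's advanced-search form
#     dccreator:einstein dctitle:relativity
# before it is urlencoded into the request URL.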


def request(query, params):
    # replace shortcuts with API advanced search keywords
    for key, val in shortcut_dict.items():
        query = re.sub(key, val, query)

    # basic search
    offset = (params['pageno'] - 1) * number_of_results

    string_args = {
        'query': urlencode({'query': query}),
        'offset': offset,
        'hits': number_of_results,
    }

    params['url'] = base_url.format(**string_args)

    params['headers']['User-Agent'] = searx_useragent()
    return params
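
# For page 1 of the (rewritten) query "dctitle:relativity", the generated URL
# looks roughly like this (sketch, wrapped for readability; the exact encoding
# is whatever urlencode produces):
#   https://api.base-search.net/cgi-bin/BaseHttpSearchInterface.fcgi
#       ?func=PerformSearch&query=dctitle%3Arelativity&boost=oa&hits=10&offset=0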


def response(resp):
    results = []

    search_results = etree.XML(resp.content)

    for entry in search_results.xpath('./result/doc'):
        content = "No description available"

        url = ""
        title = ""
        date = datetime.now()  # needed in case no dcdate is available for an item

        for item in entry:
            if item.attrib["name"] == "dcdate":
                date = item.text

            elif item.attrib["name"] == "dctitle":
                title = item.text

            elif item.attrib["name"] == "dclink":
                url = item.text

            elif item.attrib["name"] == "dcdescription":
                content = item.text[:300]
                if len(item.text) > 300:
                    content += "..."

        # dates returned by the BASE API come in several formats
        publishedDate = None
        for date_format in ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d', '%Y-%m', '%Y']:
            try:
                publishedDate = datetime.strptime(date, date_format)
                break
            except:  # pylint: disable=bare-except
                pass
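        # e.g. "2012-06-27T11:42:07Z", "2012-06-27", "2012-06" and "2012" all parse;
        # if none of the formats matches (or no dcdate element was present), the
        # result is returned without a publishedDate below.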

        if publishedDate is not None:
            res_dict = {'url': url, 'title': title, 'publishedDate': publishedDate, 'content': content}
        else:
            res_dict = {'url': url, 'title': title, 'content': content}

        results.append(res_dict)

    return results
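

# --- Manual smoke test (illustrative sketch, not part of the upstream engine) ---
# Assumes the searx package is importable; the mock `demo_params` dict only
# provides the keys ('pageno', 'headers') that request() actually reads.
if __name__ == '__main__':
    demo_params = {'pageno': 1, 'headers': {}}
    demo_params = request('author:einstein', demo_params)
    print(demo_params['url'])
    print(demo_params['headers']['User-Agent'])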