gigablast.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 Gigablast (Web)
"""
# pylint: disable=missing-function-docstring, invalid-name

import re
from json import loads, JSONDecodeError
from urllib.parse import urlencode

from searx.exceptions import SearxEngineResponseException
from searx.poolrequests import get
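
# searx.poolrequests.get is used by init() below to fetch Gigablast's public
# search page once, so that the required extra URL parameter can be scraped
# from its JavaScript.
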
# about
about = {
    "website": 'https://www.gigablast.com',
    "wikidata_id": 'Q3105449',
    "official_api_documentation": 'https://gigablast.com/api.html',
    "use_official_api": True,
    "require_api_key": False,
    "results": 'JSON',
}

# engine dependent config
categories = ['general']
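# 'collections', 'search_type' and 'fast' are forwarded to the API by
# request() below; the defaults query the 'main' collection with fast mode
# disabled.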
collections = 'main'
search_type = ''
fast = 0
# gigablast's pagination is totally damaged, don't use it
paging = False
safesearch = True

# search-url
base_url = 'https://gigablast.com'

# ugly hack: gigablast requires a random extra parameter which can be
# extracted from the source code of the gigablast HTTP client
extra_param = ''
extra_param_path = '/search?c=main&qlangcountry=en-us&q=south&s=10'

_wait_for_results_msg = 'Loading results takes too long. Please enable fast option in gigablast engine.'
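

# Extract the random extra parameter (e.g. "rand=...&nsab=...") from the
# JavaScript of the public search page; request() appends it verbatim to
# every search URL.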
def parse_extra_param(text):
    # example:
    #
    #   var uxrl='/search?c=main&qlangcountry=en-us&q=south&s=10&rand=1590740241635&n';
    #   uxrl=uxrl+'sab=730863287';
    #
    # extra_param --> "rand=1590740241635&nsab=730863287"
    global extra_param  # pylint: disable=global-statement
    re_var = None
    for line in text.splitlines():
        if re_var is None and extra_param_path in line:
            var = line.split("=")[0].split()[1]  # e.g. var --> 'uxrl'
            re_var = re.compile(var + "\\s*=\\s*" + var + "\\s*\\+\\s*'" + "(.*)" + "'(.*)")
            extra_param = line.split("'")[1][len(extra_param_path):]
            continue
        if re_var is not None and re_var.search(line):
            extra_param += re_var.search(line).group(1)
            break
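

# init() is called by searx when the engine is set up: it downloads the search
# page once and lets parse_extra_param() fill in the extra parameter before
# the first query is sent.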
def init(engine_settings=None):  # pylint: disable=unused-argument
    parse_extra_param(get(base_url + extra_param_path).text)


# do search-request
def request(query, params):  # pylint: disable=unused-argument
    # see API http://www.gigablast.com/api.html#/search
    # Take into account that the API has some quirks ..
    query_args = {
        'c': collections,
        'format': 'json',
        'q': query,
        'dr': 1,
        'showgoodimages': 0,
        'fast': fast,
    }
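
    # optional arguments, only sent when a specific search type, language or
    # safesearch level is selected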
    if search_type != '':
        query_args['searchtype'] = search_type

    if params['language'] and params['language'] != 'all':
        query_args['qlangcountry'] = params['language']
        query_args['qlang'] = params['language'].split('-')[0]

    if params['safesearch'] >= 1:
        query_args['ff'] = 1

    search_url = '/search?' + urlencode(query_args)
    params['url'] = base_url + search_url + extra_param

    return params
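
# request() builds a URL roughly like the following (language/safesearch
# arguments omitted, token values vary between sessions):
#
#   https://gigablast.com/search?c=main&format=json&q=searx&dr=1&showgoodimages=0&fast=0&rand=1590740241635&nsab=730863287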


# get response from search-request
def response(resp):
    results = []
    try:
        response_json = loads(resp.text)
    except JSONDecodeError as e:
        # not JSON: gigablast returned its "Waiting for results" page instead
        if 'Waiting for results' in resp.text:
            raise SearxEngineResponseException(message=_wait_for_results_msg)  # pylint: disable=raise-missing-from
        raise e
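
    # Each result is expected to provide 'title', 'url' and 'sum' (the
    # snippet); results whose fields are too short to be meaningful are
    # dropped below.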
    for result in response_json['results']:
        # see "Example JSON Output (&format=json)"
        # at http://www.gigablast.com/api.html#/search

        # sort out meaningless result
        title = result.get('title')
        if len(title) < 2:
            continue

        url = result.get('url')
        if len(url) < 9:
            continue

        content = result.get('sum')
        if len(content) < 5:
            continue

        # extend fields
        subtitle = result.get('title')
        if len(subtitle) > 3 and subtitle != title:
            title += " - " + subtitle

        results.append(dict(
            url=url,
            title=title,
            content=content,
        ))

    return results