ask.py
  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. """Ask.com"""
from urllib.parse import urlencode

import dateutil
import dateutil.parser  # `import dateutil` alone does not reliably expose the parser submodule
from lxml import html

from searx import utils
  7. # Metadata
  8. about = {
  9. "website": "https://www.ask.com/",
  10. "wikidata_id": 'Q847564',
  11. "official_api_documentation": None,
  12. "use_official_api": False,
  13. "require_api_key": False,
  14. "results": "HTML",
  15. }
  16. # Engine Configuration
  17. categories = ['general']
  18. paging = True
  19. max_page = 5
  20. # Base URL
  21. base_url = "https://www.ask.com/web"
  22. def request(query, params):
  23. query_params = {
  24. "q": query,
  25. "page": params["pageno"],
  26. }
  27. params["url"] = f"{base_url}?{urlencode(query_params)}"
  28. return params
  29. def response(resp):
  30. start_tag = 'window.MESON.initialState = {'
  31. end_tag = '}};'
  32. dom = html.fromstring(resp.text)
  33. script = utils.eval_xpath_getindex(dom, '//script', 0, default=None).text
  34. pos = script.index(start_tag) + len(start_tag) - 1
  35. script = script[pos:]
  36. pos = script.index(end_tag) + len(end_tag) - 1
  37. script = script[:pos]
  38. json_resp = utils.js_variable_to_python(script)
  39. results = []
  40. for item in json_resp['search']['webResults']['results']:
  41. pubdate_original = item.get('pubdate_original')
  42. if pubdate_original:
  43. pubdate_original = dateutil.parser.parse(pubdate_original)
  44. metadata = [item.get(field) for field in ['category_l1', 'catsy'] if item.get(field)]
  45. results.append(
  46. {
  47. "url": item['url'].split('&ueid')[0],
  48. "title": item['title'],
  49. "content": item['abstract'],
  50. "publishedDate": pubdate_original,
  51. # "thumbnail": item.get('image_url') or None, # these are not thumbs / to large
  52. "metadata": ' | '.join(metadata),
  53. }
  54. )
  55. return results