emojipedia.py 1.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869
  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. """
  3. Emojipedia
  4. """
  5. from urllib.parse import urlencode
  6. from lxml import html
  7. from searx import logger
  8. from searx.utils import (
  9. eval_xpath_list,
  10. eval_xpath_getindex,
  11. extract_text,
  12. )
# Child logger scoped to this engine (rebinds the logger imported from searx).
logger = logger.getChild('Emojipedia engine')
  14. about = {
  15. "website": 'https://emojipedia.org',
  16. "wikidata_id": None,
  17. "official_api_documentation": None,
  18. "use_official_api": False,
  19. "require_api_key": False,
  20. "results": 'HTML',
  21. }
  22. categories = []
  23. paging = False
  24. time_range_support = False
  25. base_url = 'https://emojipedia.org'
  26. search_url = base_url + '/search/?{query}'
  27. def request(query, params):
  28. params['url'] = search_url.format(
  29. query=urlencode({'q': query}),
  30. )
  31. return params
  32. def response(resp):
  33. results = []
  34. dom = html.fromstring(resp.text)
  35. for result in eval_xpath_list(dom, "//ol[@class='search-results']/li"):
  36. extracted_desc = extract_text(eval_xpath_getindex(result, './/p', 0))
  37. if 'No results found.' in extracted_desc:
  38. break
  39. link = eval_xpath_getindex(result, './/h2/a', 0)
  40. url = base_url + link.attrib.get('href')
  41. title = extract_text(link)
  42. content = extracted_desc
  43. res = {
  44. 'url': url,
  45. 'title': title,
  46. 'content': content
  47. }
  48. results.append(res)
  49. return results