# seznam.py
  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. """Seznam
  3. """
  4. from urllib.parse import urlencode
  5. from lxml import html
  6. from searx.network import get
  7. from searx.exceptions import SearxEngineAccessDeniedException
  8. from searx.utils import (
  9. extract_text,
  10. eval_xpath_list,
  11. eval_xpath_getindex,
  12. )
# about — engine metadata consumed by the searx/SearXNG engine framework
# (shown in the preferences UI and the engine stats page).
about = {
    "website": "https://www.seznam.cz/",
    "wikidata_id": "Q3490485",
    "official_api_documentation": "https://api.sklik.cz/",
    "use_official_api": False,  # results are scraped from the HTML pages
    "require_api_key": False,
    "results": "HTML",
    # NOTE(review): ISO 639-1 for Czech is "cs", not "cz" — confirm whether
    # the framework expects a country-style tag here before changing.
    "language": "cz",
}

# Result categories this engine contributes to.
categories = ['general', 'web']

# All requests (front page and search) are made against this origin.
base_url = 'https://search.seznam.cz/'
  25. def request(query, params):
  26. response_index = get(base_url, headers=params['headers'], raise_for_httperror=True)
  27. dom = html.fromstring(response_index.text)
  28. url_params = {
  29. 'q': query,
  30. 'oq': query,
  31. }
  32. for e in eval_xpath_list(dom, '//input[@type="hidden"]'):
  33. name = e.get('name')
  34. value = e.get('value')
  35. url_params[name] = value
  36. params['url'] = base_url + '?' + urlencode(url_params)
  37. params['cookies'] = response_index.cookies
  38. return params
  39. def response(resp):
  40. if resp.url.path.startswith('/verify'):
  41. raise SearxEngineAccessDeniedException()
  42. results = []
  43. dom = html.fromstring(resp.content.decode())
  44. for result_element in eval_xpath_list(
  45. dom, '//div[@id="searchpage-root"]//div[@class="Layout--left"]/div[@class="f2c528"]'
  46. ):
  47. result_data = eval_xpath_getindex(
  48. result_element, './/div[@class="c8774a" or @class="e69e8d a11657"]', 0, default=None
  49. )
  50. if result_data is None:
  51. continue
  52. title_element = eval_xpath_getindex(result_element, './/h3/a', 0)
  53. results.append(
  54. {
  55. 'url': title_element.get('href'),
  56. 'title': extract_text(title_element),
  57. 'content': extract_text(result_data),
  58. }
  59. )
  60. return results