  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. # pylint: disable=invalid-name
  3. """360Search search engine for searxng"""
  4. from urllib.parse import urlencode
  5. from lxml import html
  6. from searx.utils import extract_text
# Metadata describing this engine, shown on searxng's engine/about pages.
about = {
    "website": "https://www.so.com/",
    "wikidata_id": "Q10846064",
    "use_official_api": False,  # results are scraped, no API involved
    "require_api_key": False,
    "results": "HTML",
    "language": "zh",
}
  16. # Engine Configuration
  17. categories = ["general"]
  18. paging = True
  19. time_range_support = True
  20. time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}
  21. # Base URL
  22. base_url = "https://www.so.com"
  23. def request(query, params):
  24. query_params = {
  25. "pn": params["pageno"],
  26. "q": query,
  27. }
  28. if time_range_dict.get(params['time_range']):
  29. query_params["adv_t"] = time_range_dict.get(params['time_range'])
  30. params["url"] = f"{base_url}/s?{urlencode(query_params)}"
  31. return params
  32. def response(resp):
  33. dom = html.fromstring(resp.text)
  34. results = []
  35. for item in dom.xpath('//li[contains(@class, "res-list")]'):
  36. title = extract_text(item.xpath('.//h3[contains(@class, "res-title")]/a'))
  37. url = extract_text(item.xpath('.//h3[contains(@class, "res-title")]/a/@data-mdurl'))
  38. if not url:
  39. url = extract_text(item.xpath('.//h3[contains(@class, "res-title")]/a/@href'))
  40. content = extract_text(item.xpath('.//p[@class="res-desc"]'))
  41. if not content:
  42. content = extract_text(item.xpath('.//span[@class="res-list-summary"]'))
  43. if title and url:
  44. results.append(
  45. {
  46. "title": title,
  47. "url": url,
  48. "content": content,
  49. }
  50. )
  51. return results