  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. """ChinaSo: A search engine from ChinaSo."""
  3. from urllib.parse import urlencode
  4. from datetime import datetime
  5. from searx.exceptions import SearxEngineAPIException
  6. from searx.utils import html_to_text
  7. about = {
  8. "website": "https://www.chinaso.com/",
  9. "wikidata_id": "Q10846064",
  10. "use_official_api": False,
  11. "require_api_key": False,
  12. "results": "JSON",
  13. "language": "zh",
  14. }
  15. paging = True
  16. time_range_support = True
  17. results_per_page = 10
  18. categories = []
  19. chinaso_category = 'news'
  20. """ChinaSo supports news, videos, images search.
  21. - ``news``: search for news
  22. - ``videos``: search for videos
  23. - ``images``: search for images
  24. """
  25. time_range_dict = {'day': '24h', 'week': '1w', 'month': '1m', 'year': '1y'}
  26. base_url = "https://www.chinaso.com"
  27. def init(_):
  28. if chinaso_category not in ('news', 'videos', 'images'):
  29. raise SearxEngineAPIException(f"Unsupported category: {chinaso_category}")
  30. def request(query, params):
  31. query_params = {"q": query}
  32. if time_range_dict.get(params['time_range']):
  33. query_params["stime"] = time_range_dict[params['time_range']]
  34. query_params["etime"] = 'now'
  35. category_config = {
  36. 'news': {'endpoint': '/v5/general/v1/web/search', 'params': {'pn': params["pageno"], 'ps': results_per_page}},
  37. 'images': {
  38. 'endpoint': '/v5/general/v1/search/image',
  39. 'params': {'start_index': (params["pageno"] - 1) * results_per_page, 'rn': results_per_page},
  40. },
  41. 'videos': {
  42. 'endpoint': '/v5/general/v1/search/video',
  43. 'params': {'start_index': (params["pageno"] - 1) * results_per_page, 'rn': results_per_page},
  44. },
  45. }
  46. query_params.update(category_config[chinaso_category]['params'])
  47. params["url"] = f"{base_url}{category_config[chinaso_category]['endpoint']}?{urlencode(query_params)}"
  48. return params
  49. def response(resp):
  50. try:
  51. data = resp.json()
  52. except Exception as e:
  53. raise SearxEngineAPIException(f"Invalid response: {e}") from e
  54. parsers = {'news': parse_news, 'images': parse_images, 'videos': parse_videos}
  55. return parsers[chinaso_category](data)
  56. def parse_news(data):
  57. results = []
  58. if not data.get("data", {}).get("data"):
  59. raise SearxEngineAPIException("Invalid response")
  60. for entry in data["data"]["data"]:
  61. published_date = None
  62. if entry.get("timestamp"):
  63. try:
  64. published_date = datetime.fromtimestamp(int(entry["timestamp"]))
  65. except (ValueError, TypeError):
  66. pass
  67. results.append(
  68. {
  69. 'title': html_to_text(entry["title"]),
  70. 'url': entry["url"],
  71. 'content': html_to_text(entry["snippet"]),
  72. 'publishedDate': published_date,
  73. }
  74. )
  75. return results
  76. def parse_images(data):
  77. results = []
  78. if not data.get("data", {}).get("arrRes"):
  79. raise SearxEngineAPIException("Invalid response")
  80. for entry in data["data"]["arrRes"]:
  81. results.append(
  82. {
  83. 'url': entry["web_url"],
  84. 'title': html_to_text(entry["title"]),
  85. 'content': html_to_text(entry["ImageInfo"]),
  86. 'template': 'images.html',
  87. 'img_src': entry["url"].replace("http://", "https://"),
  88. 'thumbnail_src': entry["largeimage"].replace("http://", "https://"),
  89. }
  90. )
  91. return results
  92. def parse_videos(data):
  93. results = []
  94. if not data.get("data", {}).get("arrRes"):
  95. raise SearxEngineAPIException("Invalid response")
  96. for entry in data["data"]["arrRes"]:
  97. published_date = None
  98. if entry.get("VideoPubDate"):
  99. try:
  100. published_date = datetime.fromtimestamp(int(entry["VideoPubDate"]))
  101. except (ValueError, TypeError):
  102. pass
  103. results.append(
  104. {
  105. 'url': entry["url"],
  106. 'title': html_to_text(entry["raw_title"]),
  107. 'template': 'videos.html',
  108. 'publishedDate': published_date,
  109. 'thumbnail': entry["image_src"].replace("http://", "https://"),
  110. }
  111. )
  112. return results