"""
Google (Images)

@website     https://www.google.com
@provide-api yes (https://developers.google.com/custom-search/)

@using-api   no
@results     HTML chunks with JSON inside
@stable      no
@parse       url, title, img_src
"""
from datetime import date, timedelta
from json import loads

from lxml import html

from searx.url_utils import urlencode
  14. # engine dependent config
  15. categories = ['images']
  16. paging = True
  17. safesearch = True
  18. time_range_support = True
  19. number_of_results = 100
  20. search_url = 'https://www.google.com/search'\
  21. '?{query}'\
  22. '&asearch=ichunk'\
  23. '&async=_id:rg_s,_pms:s'\
  24. '&tbm=isch'\
  25. '&yv=2'\
  26. '&{search_options}'
  27. time_range_attr = "qdr:{range}"
  28. time_range_custom_attr = "cdr:1,cd_min:{start},cd_max{end}"
  29. time_range_dict = {'day': 'd',
  30. 'week': 'w',
  31. 'month': 'm'}
  32. # do search-request
  33. def request(query, params):
  34. search_options = {
  35. 'ijn': params['pageno'] - 1,
  36. 'start': (params['pageno'] - 1) * number_of_results
  37. }
  38. if params['time_range'] in time_range_dict:
  39. search_options['tbs'] = time_range_attr.format(range=time_range_dict[params['time_range']])
  40. elif params['time_range'] == 'year':
  41. now = date.today()
  42. then = now - timedelta(days=365)
  43. start = then.strftime('%m/%d/%Y')
  44. end = now.strftime('%m/%d/%Y')
  45. search_options['tbs'] = time_range_custom_attr.format(start=start, end=end)
  46. if safesearch and params['safesearch']:
  47. search_options['safe'] = 'on'
  48. params['url'] = search_url.format(query=urlencode({'q': query}),
  49. search_options=urlencode(search_options))
  50. return params
  51. # get response from search-request
  52. def response(resp):
  53. results = []
  54. g_result = loads(resp.text)
  55. dom = html.fromstring(g_result[1][1])
  56. # parse results
  57. for result in dom.xpath('//div[@data-ved]'):
  58. try:
  59. metadata = loads(''.join(result.xpath('./div[contains(@class, "rg_meta")]/text()')))
  60. except:
  61. continue
  62. thumbnail_src = metadata['tu']
  63. # http to https
  64. thumbnail_src = thumbnail_src.replace("http://", "https://")
  65. # append result
  66. results.append({'url': metadata['ru'],
  67. 'title': metadata['pt'],
  68. 'content': metadata['s'],
  69. 'thumbnail_src': thumbnail_src,
  70. 'img_src': metadata['ou'],
  71. 'template': 'images.html'})
  72. # return results
  73. return results