google_images.py

  1. """
  2. Google (Images)
  3. @website https://www.google.com
  4. @provide-api yes (https://developers.google.com/custom-search/)
  5. @using-api no
  6. @results HTML chunks with JSON inside
  7. @stable no
  8. @parse url, title, img_src
  9. """
  10. from urllib import urlencode
  11. from urlparse import parse_qs
  12. from json import loads
  13. from lxml import html
  14. # engine dependent config
  15. categories = ['images']
  16. paging = True
  17. safesearch = True
  18. search_url = 'https://www.google.com/search'\
  19. '?{query}'\
  20. '&tbm=isch'\
  21. '&ijn=1'\
  22. '&start={offset}'
  23. # do search-request
  24. def request(query, params):
  25. offset = (params['pageno'] - 1) * 100
  26. params['url'] = search_url.format(query=urlencode({'q': query}),
  27. offset=offset,
  28. safesearch=safesearch)
  29. if safesearch and params['safesearch']:
  30. params['url'] += '&' + urlencode({'safe': 'active'})
  31. return params
  32. # get response from search-request
  33. def response(resp):
  34. results = []
  35. dom = html.fromstring(resp.text)
  36. # parse results
  37. for result in dom.xpath('//div[@data-ved]'):
  38. data_url = result.xpath('./a/@href')[0]
  39. data_query = {k: v[0] for k, v in parse_qs(data_url.split('?', 1)[1]).iteritems()}
  40. metadata = loads(result.xpath('./div[@class="rg_meta"]/text()')[0])
  41. thumbnail_src = metadata['tu']
  42. # http to https
  43. thumbnail_src = thumbnail_src.replace("http://", "https://")
  44. # append result
  45. results.append({'url': data_query['imgrefurl'],
  46. 'title': metadata['pt'],
  47. 'content': metadata['s'],
  48. 'thumbnail_src': metadata['tu'],
  49. 'img_src': data_query['imgurl'],
  50. 'template': 'images.html'})
  51. # return results
  52. return results
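

# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the engine itself).
# It shows how a searx-style caller might exercise request() and response().
# The contents of the `params` dict and the `DummyResponse` class below are
# assumptions made for this example; searx normally supplies these objects.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # build the outgoing request parameters for page 1 with safe search on
    params = request('landscape photography', {'pageno': 1, 'safesearch': 1})
    print(params['url'])

    # parsing would then run on the fetched page, e.g. one saved to disk:
    #
    #     class DummyResponse(object):
    #         def __init__(self, text):
    #             self.text = text
    #
    #     with open('google_images_result.html') as f:
    #         for item in response(DummyResponse(f.read())):
    #             print(item['title'], item['img_src'])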