123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899 |
- """
- Bing (Images)
- @website https://www.bing.com/images
- @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
- max. 5000 query/month
- @using-api no (because of query limit)
- @results HTML (using search portal)
- @stable no (HTML can change)
- @parse url, title, img_src
- @todo currently up to 35 images are received per page,
- because Bing does not honour count=10;
- the response is therefore limited to 10 images when parsing
- """
- from urllib import urlencode
- from lxml import html
- from json import loads
- import re
- # engine dependent config
- categories = ['images']
- paging = True
- safesearch = True
- # search-url
- base_url = 'https://www.bing.com/'
- search_string = 'images/search?{query}&count=10&first={offset}'
- thumb_url = "https://www.bing.com/th?id={ihk}"
- # safesearch definitions
- safesearch_types = {2: 'STRICT',
- 1: 'DEMOTE',
- 0: 'OFF'}
- _quote_keys_regex = re.compile('({|,)([a-z][a-z0-9]*):(")', re.I | re.U)
- # do search-request
- def request(query, params):
- offset = (params['pageno'] - 1) * 10 + 1
- # required for cookie
- if params['language'] == 'all':
- language = 'en-US'
- else:
- language = params['language'].replace('_', '-')
- search_path = search_string.format(
- query=urlencode({'q': query}),
- offset=offset)
- params['cookies']['SRCHHPGUSR'] = \
- 'NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0] +\
- '&ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
- params['url'] = base_url + search_path
- return params
- # get response from search-request
- def response(resp):
- results = []
- dom = html.fromstring(resp.text)
- # parse results
- for result in dom.xpath('//div[@class="dg_u"]'):
- link = result.xpath('./a')[0]
- # parse json-data (it is required to add a space, to make it parsable)
- json_data = loads(_quote_keys_regex.sub(r'\1"\2": \3', link.attrib.get('m')))
- title = link.attrib.get('t1')
- ihk = link.attrib.get('ihk')
- # url = 'http://' + link.attrib.get('t3')
- url = json_data.get('surl')
- img_src = json_data.get('imgurl')
- # append result
- results.append({'template': 'images.html',
- 'url': url,
- 'title': title,
- 'content': '',
- 'thumbnail_src': thumb_url.format(ihk=ihk),
- 'img_src': img_src})
- # TODO stop parsing if 10 images are found
- if len(results) >= 10:
- break
- # return results
- return results
|