bing.py 2.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102
"""
 Bing (Web)

 @website     https://www.bing.com
 @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
              max. 5000 query/month

 @using-api   no (because of query limit)
 @results     HTML (using search portal)
 @stable      no (HTML can change)
 @parse       url, title, content

 @todo        publishedDate
"""
  12. from lxml import html
  13. from searx.engines.xpath import extract_text
  14. from searx.url_utils import urlencode
# engine dependent config
categories = ['general']  # result category this engine contributes to
paging = True             # engine supports the 'pageno' request parameter
language_support = True   # engine honours the 'language' request parameter
# settings page whose markup lists the languages Bing supports
# (scraped by _fetch_supported_languages below)
supported_languages_url = 'https://www.bing.com/account/general'

# search-url
base_url = 'https://www.bing.com/'
# {query}: urlencoded search terms; {offset}: 1-based index of the first result
search_string = 'search?{query}&first={offset}'
  23. # do search-request
  24. def request(query, params):
  25. offset = (params['pageno'] - 1) * 10 + 1
  26. if params['language'] != 'all':
  27. lang = params['language'].split('-')[0].upper()
  28. else:
  29. lang = 'EN'
  30. query = u'language:{} {}'.format(lang, query.decode('utf-8')).encode('utf-8')
  31. search_path = search_string.format(
  32. query=urlencode({'q': query}),
  33. offset=offset)
  34. params['url'] = base_url + search_path
  35. return params
  36. # get response from search-request
  37. def response(resp):
  38. results = []
  39. dom = html.fromstring(resp.text)
  40. try:
  41. results.append({'number_of_results': int(dom.xpath('//span[@class="sb_count"]/text()')[0]
  42. .split()[0].replace(',', ''))})
  43. except:
  44. pass
  45. # parse results
  46. for result in dom.xpath('//div[@class="sa_cc"]'):
  47. link = result.xpath('.//h3/a')[0]
  48. url = link.attrib.get('href')
  49. title = extract_text(link)
  50. content = extract_text(result.xpath('.//p'))
  51. # append result
  52. results.append({'url': url,
  53. 'title': title,
  54. 'content': content})
  55. # parse results again if nothing is found yet
  56. for result in dom.xpath('//li[@class="b_algo"]'):
  57. link = result.xpath('.//h2/a')[0]
  58. url = link.attrib.get('href')
  59. title = extract_text(link)
  60. content = extract_text(result.xpath('.//p'))
  61. # append result
  62. results.append({'url': url,
  63. 'title': title,
  64. 'content': content})
  65. # return results
  66. return results
  67. # get supported languages from their site
  68. def _fetch_supported_languages(resp):
  69. supported_languages = []
  70. dom = html.fromstring(resp.text)
  71. options = dom.xpath('//div[@id="limit-languages"]//input')
  72. for option in options:
  73. code = option.xpath('./@id')[0].replace('_', '-')
  74. if code == 'nb':
  75. code = 'no'
  76. supported_languages.append(code)
  77. return supported_languages