# -*- coding: utf-8 -*-
"""
 Wikidata

 @website     https://wikidata.org
 @provide-api yes (https://wikidata.org/w/api.php)

 @using-api   partially (most things require scraping)
 @results     JSON, HTML
 @stable      no (html can change)
 @parse       url, infobox
"""

from searx import logger
from searx.poolrequests import get
from searx.engines.xpath import extract_text
from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url
from searx.url_utils import urlencode

from json import loads
from lxml.html import fromstring

logger = logger.getChild('wikidata')
result_count = 1

# urls
wikidata_host = 'https://www.wikidata.org'
url_search = wikidata_host \
    + '/wiki/Special:ItemDisambiguation?{query}'

wikidata_api = wikidata_host + '/w/api.php'
url_detail = wikidata_api\
    + '?action=parse&format=json&{query}'\
    + '&redirects=1&prop=text%7Cdisplaytitle%7Clanglinks%7Crevid'\
    + '&disableeditsection=1&disabletidy=1&preview=1&sectionpreview=1&disabletoc=1&utf8=1&formatversion=2'

url_map = 'https://www.openstreetmap.org/'\
    + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
url_image = 'https://commons.wikimedia.org/wiki/Special:FilePath/{filename}?width=500&height=400'

# xpaths
wikidata_ids_xpath = '//div/ul[@class="wikibase-disambiguation"]/li/a/@title'
title_xpath = '//*[contains(@class,"wikibase-title-label")]'
description_xpath = '//div[contains(@class,"wikibase-entitytermsview-heading-description")]'
property_xpath = '//div[@id="{propertyid}"]'
label_xpath = './/div[contains(@class,"wikibase-statementgroupview-property-label")]/a'
url_xpath = './/a[contains(@class,"external free") or contains(@class, "wb-external-id")]'
wikilink_xpath = './/ul[contains(@class,"wikibase-sitelinklistview-listview")]'\
    + '/li[contains(@data-wb-siteid,"{wikiid}")]//a/@href'
property_row_xpath = './/div[contains(@class,"wikibase-statementview")]'
preferred_rank_xpath = './/span[contains(@class,"wikibase-rankselector-preferred")]'
value_xpath = './/div[contains(@class,"wikibase-statementview-mainsnak")]'\
    + '/*/div[contains(@class,"wikibase-snakview-value")]'
language_fallback_xpath = '//sup[contains(@class,"wb-language-fallback-indicator")]'
calendar_name_xpath = './/sup[contains(@class,"wb-calendar-name")]'
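
# Illustrative only (not used by the engine): with the constants above, the
# search URL built for the query "Douglas Adams" in English looks like
#   https://www.wikidata.org/wiki/Special:ItemDisambiguation?label=Douglas+Adams&language=en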


def request(query, params):
    language = params['language'].split('-')[0]
    if language == 'all':
        language = 'en'

    params['url'] = url_search.format(
        query=urlencode({'label': query, 'language': language}))
    return params
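
# Minimal usage sketch (assumption: called outside the normal searx engine
# machinery, which fills in many more params keys):
#
#   params = {'language': 'de-DE'}
#   request('Berlin', params)
#   # params['url'] now points at Special:ItemDisambiguation with
#   # label=Berlin and language=de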


def response(resp):
    results = []
    html = fromstring(resp.text)
    wikidata_ids = html.xpath(wikidata_ids_xpath)

    language = resp.search_params['language'].split('-')[0]
    if language == 'all':
        language = 'en'

    # TODO: make requests asynchronous to avoid timeout when result_count > 1
    for wikidata_id in wikidata_ids[:result_count]:
        url = url_detail.format(query=urlencode({'page': wikidata_id, 'uselang': language}))
        htmlresponse = get(url)
        jsonresponse = loads(htmlresponse.text)
        results += getDetail(jsonresponse, wikidata_id, language, resp.search_params['language'])

    return results


def getDetail(jsonresponse, wikidata_id, language, locale):
    results = []
    urls = []
    attributes = []

    title = jsonresponse.get('parse', {}).get('displaytitle', {})
    result = jsonresponse.get('parse', {}).get('text', {})

    if not title or not result:
        return results

    title = fromstring(title)
    for elem in title.xpath(language_fallback_xpath):
        elem.getparent().remove(elem)
    title = extract_text(title.xpath(title_xpath))

    result = fromstring(result)
    for elem in result.xpath(language_fallback_xpath):
        elem.getparent().remove(elem)
    description = extract_text(result.xpath(description_xpath))

    # URLS
    # official website
    add_url(urls, result, 'P856', results=results)

    # wikipedia
    wikipedia_link_count = 0
    wikipedia_link = get_wikilink(result, language + 'wiki')
    if wikipedia_link:
        wikipedia_link_count += 1
        urls.append({'title': 'Wikipedia (' + language + ')',
                     'url': wikipedia_link})

    if language != 'en':
        wikipedia_en_link = get_wikilink(result, 'enwiki')
        if wikipedia_en_link:
            wikipedia_link_count += 1
            urls.append({'title': 'Wikipedia (en)',
                         'url': wikipedia_en_link})

    # TODO: get_wiki_firstlanguage
    # if wikipedia_link_count == 0:

    # more wikis
    add_url(urls, result, default_label='Wikivoyage (' + language + ')', link_type=language + 'wikivoyage')
    add_url(urls, result, default_label='Wikiquote (' + language + ')', link_type=language + 'wikiquote')
    add_url(urls, result, default_label='Wikimedia Commons', link_type='commonswiki')

    add_url(urls, result, 'P625', 'OpenStreetMap', link_type='geo')

    # musicbrainz
    add_url(urls, result, 'P434', 'MusicBrainz', 'http://musicbrainz.org/artist/')
    add_url(urls, result, 'P435', 'MusicBrainz', 'http://musicbrainz.org/work/')
    add_url(urls, result, 'P436', 'MusicBrainz', 'http://musicbrainz.org/release-group/')
    add_url(urls, result, 'P966', 'MusicBrainz', 'http://musicbrainz.org/label/')

    # IMDb
    add_url(urls, result, 'P345', 'IMDb', 'https://www.imdb.com/', link_type='imdb')
    # source code repository
    add_url(urls, result, 'P1324')
    # blog
    add_url(urls, result, 'P1581')
    # social media links
    add_url(urls, result, 'P2397', 'YouTube', 'https://www.youtube.com/channel/')
    add_url(urls, result, 'P1651', 'YouTube', 'https://www.youtube.com/watch?v=')
    add_url(urls, result, 'P2002', 'Twitter', 'https://twitter.com/')
    add_url(urls, result, 'P2013', 'Facebook', 'https://facebook.com/')
    add_url(urls, result, 'P2003', 'Instagram', 'https://instagram.com/')

    urls.append({'title': 'Wikidata',
                 'url': 'https://www.wikidata.org/wiki/'
                 + wikidata_id + '?uselang=' + language})

    # INFOBOX ATTRIBUTES (ROWS)

    # DATES
    # inception date
    add_attribute(attributes, result, 'P571', date=True)
    # dissolution date
    add_attribute(attributes, result, 'P576', date=True)
    # start date
    add_attribute(attributes, result, 'P580', date=True)
    # end date
    add_attribute(attributes, result, 'P582', date=True)
    # date of birth
    add_attribute(attributes, result, 'P569', date=True)
    # date of death
    add_attribute(attributes, result, 'P570', date=True)
    # date of spacecraft launch
    add_attribute(attributes, result, 'P619', date=True)
    # date of spacecraft landing
    add_attribute(attributes, result, 'P620', date=True)

    # nationality
    add_attribute(attributes, result, 'P27')
    # country of origin
    add_attribute(attributes, result, 'P495')
    # country
    add_attribute(attributes, result, 'P17')
    # headquarters
    add_attribute(attributes, result, 'Q180')

    # PLACES
    # capital
    add_attribute(attributes, result, 'P36', trim=True)
    # head of state
    add_attribute(attributes, result, 'P35', trim=True)
    # head of government
    add_attribute(attributes, result, 'P6', trim=True)
    # type of government
    add_attribute(attributes, result, 'P122')
    # official language
    add_attribute(attributes, result, 'P37')
    # population
    add_attribute(attributes, result, 'P1082', trim=True)
    # area
    add_attribute(attributes, result, 'P2046')
    # currency
    add_attribute(attributes, result, 'P38', trim=True)
    # height (building)
    add_attribute(attributes, result, 'P2048')

    # MEDIA
    # platform (videogames)
    add_attribute(attributes, result, 'P400')
    # author
    add_attribute(attributes, result, 'P50')
    # creator
    add_attribute(attributes, result, 'P170')
    # director
    add_attribute(attributes, result, 'P57')
    # performer
    add_attribute(attributes, result, 'P175')
    # developer
    add_attribute(attributes, result, 'P178')
    # producer
    add_attribute(attributes, result, 'P162')
    # manufacturer
    add_attribute(attributes, result, 'P176')
    # screenwriter
    add_attribute(attributes, result, 'P58')
    # production company
    add_attribute(attributes, result, 'P272')
    # record label
    add_attribute(attributes, result, 'P264')
    # publisher
    add_attribute(attributes, result, 'P123')
    # original network
    add_attribute(attributes, result, 'P449')
    # distributor
    add_attribute(attributes, result, 'P750')
    # composer
    add_attribute(attributes, result, 'P86')
    # publication date
    add_attribute(attributes, result, 'P577', date=True)
    # genre
    add_attribute(attributes, result, 'P136')
    # original language
    add_attribute(attributes, result, 'P364')
    # isbn
    add_attribute(attributes, result, 'Q33057')
    # software license
    add_attribute(attributes, result, 'P275')
    # programming language
    add_attribute(attributes, result, 'P277')
    # version
    add_attribute(attributes, result, 'P348', trim=True)
    # narrative location
    add_attribute(attributes, result, 'P840')

    # LANGUAGES
    # number of speakers
    add_attribute(attributes, result, 'P1098')
    # writing system
    add_attribute(attributes, result, 'P282')
    # regulatory body
    add_attribute(attributes, result, 'P1018')
    # language code
    add_attribute(attributes, result, 'P218')

    # OTHER
    # ceo
    add_attribute(attributes, result, 'P169', trim=True)
    # founder
    add_attribute(attributes, result, 'P112')
    # legal form (company/organization)
    add_attribute(attributes, result, 'P1454')
    # operator
    add_attribute(attributes, result, 'P137')
    # crew members
    add_attribute(attributes, result, 'P1029')
    # taxon
    add_attribute(attributes, result, 'P225')
    # chemical formula
    add_attribute(attributes, result, 'P274')
    # winner (sports/contests)
    add_attribute(attributes, result, 'P1346')
    # number of deaths
    add_attribute(attributes, result, 'P1120')
    # currency code
    add_attribute(attributes, result, 'P498')

    image = add_image(result)

    if len(attributes) == 0 and len(urls) == 2 and len(description) == 0:
        results.append({
            'url': urls[0]['url'],
            'title': title,
            'content': description
        })
    else:
        results.append({
            'infobox': title,
            'id': wikipedia_link,
            'content': description,
            'img_src': image,
            'attributes': attributes,
            'urls': urls
        })

    return results
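
# Sketch of the two result shapes built above (keys as consumed by searx):
#   {'url': ..., 'title': ..., 'content': ...}                      # plain link result
#   {'infobox': ..., 'id': ..., 'content': ..., 'img_src': ...,
#    'attributes': [...], 'urls': [...]}                            # infobox result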


# only returns first match
def add_image(result):
    # P15: route map, P242: locator map, P154: logo, P18: image, P242: map, P41: flag, P2716: collage, P2910: icon
    property_ids = ['P15', 'P242', 'P154', 'P18', 'P242', 'P41', 'P2716', 'P2910']

    for property_id in property_ids:
        image = result.xpath(property_xpath.replace('{propertyid}', property_id))
        if image:
            image_name = image[0].xpath(value_xpath)
            image_src = url_image.replace('{filename}', extract_text(image_name[0]))
            return image_src
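
# Illustrative example (hypothetical file name): if the first matching
# property holds the Commons file "Example.jpg", the function returns
#   https://commons.wikimedia.org/wiki/Special:FilePath/Example.jpg?width=500&height=400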


# when trim is set, only preferred-rank rows are kept, falling back to the first row
def add_attribute(attributes, result, property_id, default_label=None, date=False, trim=False):
    attribute = result.xpath(property_xpath.replace('{propertyid}', property_id))
    if attribute:

        if default_label:
            label = default_label
        else:
            label = extract_text(attribute[0].xpath(label_xpath))
            label = label[0].upper() + label[1:]

        if date:
            trim = True
            # remove calendar name
            calendar_name = attribute[0].xpath(calendar_name_xpath)
            for calendar in calendar_name:
                calendar.getparent().remove(calendar)

        concat_values = ""
        values = []
        first_value = None
        for row in attribute[0].xpath(property_row_xpath):
            if not first_value or not trim or row.xpath(preferred_rank_xpath):

                value = row.xpath(value_xpath)
                if not value:
                    continue
                value = extract_text(value)

                # save first value in case no ranked row is found
                if trim and not first_value:
                    first_value = value
                else:
                    # to avoid duplicate values
                    if value not in values:
                        concat_values += value + ", "
                        values.append(value)

        if trim and not values:
            attributes.append({'label': label,
                               'value': first_value})
        else:
            attributes.append({'label': label,
                               'value': concat_values[:-2]})
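
# Sketch of the resulting entry (hypothetical values): a preferred-rank
# population statement (P1082) would append something like
#   {'label': 'Population', 'value': '8,908,081'}
# to the attributes list.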


# requires property_id unless it's a wiki link (defined in link_type)
def add_url(urls, result, property_id=None, default_label=None, url_prefix=None, results=None, link_type=None):
    links = []

    # wiki links don't have property in wikidata page
    if link_type and 'wiki' in link_type:
        links.append(get_wikilink(result, link_type))
    else:
        dom_element = result.xpath(property_xpath.replace('{propertyid}', property_id))
        if dom_element:
            dom_element = dom_element[0]
            if not default_label:
                label = extract_text(dom_element.xpath(label_xpath))
                label = label[0].upper() + label[1:]

            if link_type == 'geo':
                links.append(get_geolink(dom_element))

            elif link_type == 'imdb':
                links.append(get_imdblink(dom_element, url_prefix))

            else:
                url_results = dom_element.xpath(url_xpath)
                for link in url_results:
                    if link is not None:
                        if url_prefix:
                            link = url_prefix + extract_text(link)
                        else:
                            link = extract_text(link)
                        links.append(link)

    # append urls
    for url in links:
        if url is not None:
            urls.append({'title': default_label or label,
                         'url': url})
            if results is not None:
                results.append({'title': default_label or label,
                                'url': url})


def get_imdblink(result, url_prefix):
    imdb_id = result.xpath(value_xpath)
    if imdb_id:
        imdb_id = extract_text(imdb_id)
        id_prefix = imdb_id[:2]
        if id_prefix == 'tt':
            url = url_prefix + 'title/' + imdb_id
        elif id_prefix == 'nm':
            url = url_prefix + 'name/' + imdb_id
        elif id_prefix == 'ch':
            url = url_prefix + 'character/' + imdb_id
        elif id_prefix == 'co':
            url = url_prefix + 'company/' + imdb_id
        elif id_prefix == 'ev':
            url = url_prefix + 'event/' + imdb_id
        else:
            url = None
        return url
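
# Example (hypothetical ID): with url_prefix 'https://www.imdb.com/' and an
# IMDb value of 'tt0108052' on the page, the function returns
#   'https://www.imdb.com/title/tt0108052'
# Unrecognised prefixes yield None, which add_url() then skips.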


def get_geolink(result):
    coordinates = result.xpath(value_xpath)
    if not coordinates:
        return None
    coordinates = extract_text(coordinates[0])
    latitude, longitude = coordinates.split(',')

    # convert to decimal
    lat = int(latitude[:latitude.find(u'°')])
    if latitude.find('\'') >= 0:
        lat += int(latitude[latitude.find(u'°') + 1:latitude.find('\'')] or 0) / 60.0
    if latitude.find('"') >= 0:
        lat += float(latitude[latitude.find('\'') + 1:latitude.find('"')] or 0) / 3600.0
    if latitude.find('S') >= 0:
        lat *= -1
    lon = int(longitude[:longitude.find(u'°')])
    if longitude.find('\'') >= 0:
        lon += int(longitude[longitude.find(u'°') + 1:longitude.find('\'')] or 0) / 60.0
    if longitude.find('"') >= 0:
        lon += float(longitude[longitude.find('\'') + 1:longitude.find('"')] or 0) / 3600.0
    if longitude.find('W') >= 0:
        lon *= -1

    # TODO: get precision
    precision = 0.0002
    # there is no zoom information, deduce from precision (error prone)
    # samples :
    # 13 --> 5
    # 1 --> 6
    # 0.016666666666667 --> 9
    # 0.00027777777777778 --> 19
    # wolframalpha :
    # quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777, 19} }
    # 14.1186 - 8.8322 x + 0.625447 x^2
    if precision < 0.0003:
        zoom = 19
    else:
        zoom = int(15 - precision * 8.8322 + precision * precision * 0.625447)

    url = url_map\
        .replace('{latitude}', str(lat))\
        .replace('{longitude}', str(lon))\
        .replace('{zoom}', str(zoom))

    return url
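
# Worked example of the conversion above (hypothetical coordinate text):
#   "48°51'24"N, 2°21'3"E"  ->  lat = 48 + 51/60 + 24/3600 ≈ 48.8567
#                               lon =  2 + 21/60 +  3/3600 ≈  2.3508
# With the hard-coded precision of 0.0002 the zoom stays at 19, so the map
# URL ends up as ...?lat=48.8566...&lon=2.3508...&zoom=19&layers=M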


def get_wikilink(result, wikiid):
    url = result.xpath(wikilink_xpath.replace('{wikiid}', wikiid))
    if not url:
        return None
    url = url[0]
    if url.startswith('http://'):
        url = url.replace('http://', 'https://')
    elif url.startswith('//'):
        url = 'https:' + url
    return url
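
# Offline usage sketch (assumes `dom` already holds the parsed entity HTML
# as an lxml tree, e.g. built with fromstring() from a saved response):
#   wiki_url = get_wikilink(dom, 'enwiki')
# returns the https:// link to the English Wikipedia article, or None when
# the item has no enwiki sitelink.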