wikidata.py

import json
from datetime import datetime

from dateutil.parser import parse as dateutil_parse
from urllib import urlencode

from searx import logger
from searx.poolrequests import get
from searx.utils import format_date_by_locale

logger = logger.getChild('wikidata')
result_count = 1
wikidata_host = 'https://www.wikidata.org'
wikidata_api = wikidata_host + '/w/api.php'

url_search = wikidata_api \
    + '?action=query&list=search&format=json'\
    + '&srnamespace=0&srprop=sectiontitle&{query}'

url_detail = wikidata_api\
    + '?action=wbgetentities&format=json'\
    + '&props=labels%7Cinfo%7Csitelinks'\
    + '%7Csitelinks%2Furls%7Cdescriptions%7Cclaims'\
    + '&{query}'

url_map = 'https://www.openstreetmap.org/'\
    + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
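

# The engine works in two steps: request() queries the MediaWiki search API
# for matching entity ids, then response() fetches the full entities through
# wbgetentities and renders them as infobox results.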
def request(query, params):
    params['url'] = url_search.format(
        query=urlencode({'srsearch': query,
                         'srlimit': result_count}))
    return params
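

# response() resolves the entity ids returned by the search, asking for
# labels in the user's language with an English fallback ('all' maps to 'en').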
def response(resp):
    results = []
    search_res = json.loads(resp.text)

    wikidata_ids = set()
    for r in search_res.get('query', {}).get('search', []):
        wikidata_ids.add(r.get('title', ''))

    language = resp.search_params['language'].split('_')[0]
    if language == 'all':
        language = 'en'

    url = url_detail.format(query=urlencode({'ids': '|'.join(wikidata_ids),
                                             'languages': language + '|en'}))

    htmlresponse = get(url)
    jsonresponse = json.loads(htmlresponse.content)

    for wikidata_id in wikidata_ids:
        results += getDetail(jsonresponse, wikidata_id, language, resp.search_params['language'])

    return results
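

# getDetail() turns one wbgetentities entity into searx results: a plain
# link result for the official website (P856) and an infobox carrying the
# description, sitelinks and selected claims.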
def getDetail(jsonresponse, wikidata_id, language, locale):
    results = []
    urls = []
    attributes = []

    result = jsonresponse.get('entities', {}).get(wikidata_id, {})

    title = result.get('labels', {}).get(language, {}).get('value', None)
    if title is None:
        title = result.get('labels', {}).get('en', {}).get('value', None)
    if title is None:
        return results

    description = result\
        .get('descriptions', {})\
        .get(language, {})\
        .get('value', None)
    if description is None:
        description = result\
            .get('descriptions', {})\
            .get('en', {})\
            .get('value', '')

    claims = result.get('claims', {})
    official_website = get_string(claims, 'P856', None)
    if official_website is not None:
        urls.append({'title': 'Official site', 'url': official_website})
        results.append({'title': title, 'url': official_website})

    wikipedia_link_count = 0
    if language != 'en':
        wikipedia_link_count += add_url(urls,
                                        'Wikipedia (' + language + ')',
                                        get_wikilink(result, language + 'wiki'))
    wikipedia_en_link = get_wikilink(result, 'enwiki')
    wikipedia_link_count += add_url(urls,
                                    'Wikipedia (en)',
                                    wikipedia_en_link)
    if wikipedia_link_count == 0:
        misc_language = get_wiki_firstlanguage(result, 'wiki')
        if misc_language is not None:
            add_url(urls,
                    'Wikipedia (' + misc_language + ')',
                    get_wikilink(result, misc_language + 'wiki'))

    if language != 'en':
        add_url(urls,
                'Wiki voyage (' + language + ')',
                get_wikilink(result, language + 'wikivoyage'))
    add_url(urls,
            'Wiki voyage (en)',
            get_wikilink(result, 'enwikivoyage'))

    if language != 'en':
        add_url(urls,
                'Wikiquote (' + language + ')',
                get_wikilink(result, language + 'wikiquote'))
    add_url(urls,
            'Wikiquote (en)',
            get_wikilink(result, 'enwikiquote'))

    add_url(urls,
            'Commons wiki',
            get_wikilink(result, 'commonswiki'))

    add_url(urls,
            'Location',
            get_geolink(claims, 'P625', None))

    add_url(urls,
            'Wikidata',
            'https://www.wikidata.org/wiki/'
            + wikidata_id + '?uselang=' + language)

    musicbrainz_work_id = get_string(claims, 'P435')
    if musicbrainz_work_id is not None:
        add_url(urls,
                'MusicBrainz',
                'http://musicbrainz.org/work/'
                + musicbrainz_work_id)

    musicbrainz_artist_id = get_string(claims, 'P434')
    if musicbrainz_artist_id is not None:
        add_url(urls,
                'MusicBrainz',
                'http://musicbrainz.org/artist/'
                + musicbrainz_artist_id)

    musicbrainz_release_group_id = get_string(claims, 'P436')
    if musicbrainz_release_group_id is not None:
        add_url(urls,
                'MusicBrainz',
                'http://musicbrainz.org/release-group/'
                + musicbrainz_release_group_id)

    musicbrainz_label_id = get_string(claims, 'P966')
    if musicbrainz_label_id is not None:
        add_url(urls,
                'MusicBrainz',
                'http://musicbrainz.org/label/'
                + musicbrainz_label_id)

    # musicbrainz_area_id = get_string(claims, 'P982')
    # P1407 MusicBrainz series ID
    # P1004 MusicBrainz place ID
    # P1330 MusicBrainz instrument ID

    postal_code = get_string(claims, 'P281', None)
    if postal_code is not None:
        attributes.append({'label': 'Postal code(s)', 'value': postal_code})

    date_of_birth = get_time(claims, 'P569', locale, None)
    if date_of_birth is not None:
        attributes.append({'label': 'Date of birth', 'value': date_of_birth})

    date_of_death = get_time(claims, 'P570', locale, None)
    if date_of_death is not None:
        attributes.append({'label': 'Date of death', 'value': date_of_death})

    # with no attributes, no description and only the two default urls,
    # show a plain link result instead of a mostly empty infobox
    if len(attributes) == 0 and len(urls) == 2 and len(description) == 0:
        results.append({
            'url': urls[0]['url'],
            'title': title,
            'content': description
        })
    else:
        results.append({
            'infobox': title,
            'id': wikipedia_en_link,
            'content': description,
            'attributes': attributes,
            'urls': urls
        })

    return results
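

# add_url() appends the entry only when the url exists and reports whether
# it did, so callers can count how many links were actually added.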
def add_url(urls, title, url):
    if url is not None:
        urls.append({'title': title, 'url': url})
        return 1
    else:
        return 0
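

# claim accessors: a Wikidata claim is a list of statements, each carrying a
# 'mainsnak' whose 'datavalue' holds the actual value.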
def get_mainsnak(claims, propertyName):
    propValue = claims.get(propertyName, [])
    if len(propValue) == 0:
        return None

    return propValue[0].get('mainsnak', None)


def get_string(claims, propertyName, defaultValue=None):
    propValue = claims.get(propertyName, [])
    if len(propValue) == 0:
        return defaultValue

    result = []
    for e in propValue:
        mainsnak = e.get('mainsnak', {})
        datavalue = mainsnak.get('datavalue', {})
        if datavalue is not None:
            result.append(datavalue.get('value', ''))

    if len(result) == 0:
        return defaultValue
    else:
        # TODO handle multiple urls
        return result[0]
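

# Wikidata serializes times as e.g. '+1952-03-11T00:00:00Z'; negative years
# (BCE dates) start with '-' and cannot be parsed by datetime or dateutil.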
def get_time(claims, propertyName, locale, defaultValue=None):
    propValue = claims.get(propertyName, [])
    if len(propValue) == 0:
        return defaultValue

    result = []
    for e in propValue:
        mainsnak = e.get('mainsnak', {})
        datavalue = mainsnak.get('datavalue', {})
        if datavalue is not None:
            value = datavalue.get('value', '')
            result.append(value.get('time', ''))

    if len(result) == 0:
        return defaultValue

    date_string = ', '.join(result)
    try:
        parsed_date = datetime.strptime(date_string, "+%Y-%m-%dT%H:%M:%SZ")
    except ValueError:
        if date_string.startswith('-'):
            # BCE date: give up on parsing and return the raw date part
            return date_string.split('T')[0]
        try:
            parsed_date = dateutil_parse(date_string, fuzzy=False)
        except (ValueError, OverflowError):
            logger.debug('could not parse date %s', date_string)
            return date_string.split('T')[0]

    return format_date_by_locale(parsed_date, locale)
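

# get_geolink() builds an OpenStreetMap link from a globe-coordinate claim
# (P625), deriving a zoom level from the coordinate precision, e.g. a
# precision of 1 degree gives int(15 - 8.8322 + 0.625447) = 6.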
def get_geolink(claims, propertyName, defaultValue=''):
    mainsnak = get_mainsnak(claims, propertyName)
    if mainsnak is None:
        return defaultValue

    datatype = mainsnak.get('datatype', '')
    datavalue = mainsnak.get('datavalue', {})

    if datatype != 'globe-coordinate':
        return defaultValue

    value = datavalue.get('value', {})
    precision = value.get('precision', 0.0002)

    # there is no zoom information, deduce from precision (error prone)
    # samples:
    # 13 --> 5
    # 1 --> 6
    # 0.016666666666667 --> 9
    # 0.00027777777777778 --> 19
    # wolframalpha:
    # quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777, 19} }
    # 14.1186 - 8.8322 x + 0.625447 x^2
    if precision < 0.0003:
        zoom = 19
    else:
        zoom = int(15 - precision * 8.8322 + precision * precision * 0.625447)

    url = url_map\
        .replace('{latitude}', str(value.get('latitude', 0)))\
        .replace('{longitude}', str(value.get('longitude', 0)))\
        .replace('{zoom}', str(zoom))

    return url
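

# sitelink urls may come back protocol-relative or plain http;
# normalize everything to https.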
def get_wikilink(result, wikiid):
    url = result.get('sitelinks', {}).get(wikiid, {}).get('url', None)
    if url is None:
        return url
    elif url.startswith('http://'):
        url = url.replace('http://', 'https://', 1)
    elif url.startswith('//'):
        url = 'https:' + url
    return url


def get_wiki_firstlanguage(result, wikipatternid):
    # return the language code of the first sitelink whose key matches
    # '<xx>' + wikipatternid, e.g. 'frwiki' -> 'fr'
    for k in result.get('sitelinks', {}).keys():
        if k.endswith(wikipatternid) and len(k) == (2 + len(wikipatternid)):
            return k[0:2]
    return None