vmbus_drv.c
  1. /*
  2. * Copyright (c) 2009, Microsoft Corporation.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms and conditions of the GNU General Public License,
  6. * version 2, as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. *
  13. * You should have received a copy of the GNU General Public License along with
  14. * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
  15. * Place - Suite 330, Boston, MA 02111-1307 USA.
  16. *
  17. * Authors:
  18. * Haiyang Zhang <haiyangz@microsoft.com>
  19. * Hank Janssen <hjanssen@microsoft.com>
  20. * K. Y. Srinivasan <kys@microsoft.com>
  21. *
  22. */
  23. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  24. #include <linux/init.h>
  25. #include <linux/module.h>
  26. #include <linux/device.h>
  27. #include <linux/interrupt.h>
  28. #include <linux/sysctl.h>
  29. #include <linux/slab.h>
  30. #include <linux/acpi.h>
  31. #include <linux/completion.h>
  32. #include <linux/hyperv.h>
  33. #include <linux/kernel_stat.h>
  34. #include <linux/clockchips.h>
  35. #include <linux/cpu.h>
  36. #include <linux/sched/task_stack.h>
  37. #include <asm/mshyperv.h>
  38. #include <linux/notifier.h>
  39. #include <linux/ptrace.h>
  40. #include <linux/screen_info.h>
  41. #include <linux/kdebug.h>
  42. #include <linux/efi.h>
  43. #include <linux/random.h>
  44. #include "hyperv_vmbus.h"
  45. struct vmbus_dynid {
  46. struct list_head node;
  47. struct hv_vmbus_device_id id;
  48. };
  49. static struct acpi_device *hv_acpi_dev;
  50. static struct completion probe_event;
  51. static int hyperv_cpuhp_online;
  52. static void *hv_panic_page;
  53. static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
  54. void *args)
  55. {
  56. struct pt_regs *regs;
  57. regs = current_pt_regs();
  58. hyperv_report_panic(regs, val);
  59. return NOTIFY_DONE;
  60. }
  61. static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
  62. void *args)
  63. {
  64. struct die_args *die = (struct die_args *)args;
  65. struct pt_regs *regs = die->regs;
  66. hyperv_report_panic(regs, val);
  67. return NOTIFY_DONE;
  68. }
  69. static struct notifier_block hyperv_die_block = {
  70. .notifier_call = hyperv_die_event,
  71. };
  72. static struct notifier_block hyperv_panic_block = {
  73. .notifier_call = hyperv_panic_event,
  74. };
  75. static const char *fb_mmio_name = "fb_range";
  76. static struct resource *fb_mmio;
  77. static struct resource *hyperv_mmio;
  78. static DEFINE_SEMAPHORE(hyperv_mmio_lock);
  79. static int vmbus_exists(void)
  80. {
  81. if (hv_acpi_dev == NULL)
  82. return -ENODEV;
  83. return 0;
  84. }
  85. #define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
  86. static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
  87. {
  88. int i;
  89. for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
  90. sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
  91. }
  92. static u8 channel_monitor_group(const struct vmbus_channel *channel)
  93. {
  94. return (u8)channel->offermsg.monitorid / 32;
  95. }
  96. static u8 channel_monitor_offset(const struct vmbus_channel *channel)
  97. {
  98. return (u8)channel->offermsg.monitorid % 32;
  99. }
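  /*
   * Worked example for the two helpers above (illustrative): a channel with
   * offermsg.monitorid == 37 falls into trigger group 37 / 32 = 1 at bit
   * offset 37 % 32 = 5. channel_pending() below uses only the group, while
   * channel_latency() and channel_conn_id() use both the group and the
   * offset to index the monitor page.
   */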
  100. static u32 channel_pending(const struct vmbus_channel *channel,
  101. const struct hv_monitor_page *monitor_page)
  102. {
  103. u8 monitor_group = channel_monitor_group(channel);
  104. return monitor_page->trigger_group[monitor_group].pending;
  105. }
  106. static u32 channel_latency(const struct vmbus_channel *channel,
  107. const struct hv_monitor_page *monitor_page)
  108. {
  109. u8 monitor_group = channel_monitor_group(channel);
  110. u8 monitor_offset = channel_monitor_offset(channel);
  111. return monitor_page->latency[monitor_group][monitor_offset];
  112. }
  113. static u32 channel_conn_id(struct vmbus_channel *channel,
  114. struct hv_monitor_page *monitor_page)
  115. {
  116. u8 monitor_group = channel_monitor_group(channel);
  117. u8 monitor_offset = channel_monitor_offset(channel);
  118. return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
  119. }
  120. static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
  121. char *buf)
  122. {
  123. struct hv_device *hv_dev = device_to_hv_device(dev);
  124. if (!hv_dev->channel)
  125. return -ENODEV;
  126. return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
  127. }
  128. static DEVICE_ATTR_RO(id);
  129. static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
  130. char *buf)
  131. {
  132. struct hv_device *hv_dev = device_to_hv_device(dev);
  133. if (!hv_dev->channel)
  134. return -ENODEV;
  135. return sprintf(buf, "%d\n", hv_dev->channel->state);
  136. }
  137. static DEVICE_ATTR_RO(state);
  138. static ssize_t monitor_id_show(struct device *dev,
  139. struct device_attribute *dev_attr, char *buf)
  140. {
  141. struct hv_device *hv_dev = device_to_hv_device(dev);
  142. if (!hv_dev->channel)
  143. return -ENODEV;
  144. return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
  145. }
  146. static DEVICE_ATTR_RO(monitor_id);
  147. static ssize_t class_id_show(struct device *dev,
  148. struct device_attribute *dev_attr, char *buf)
  149. {
  150. struct hv_device *hv_dev = device_to_hv_device(dev);
  151. if (!hv_dev->channel)
  152. return -ENODEV;
  153. return sprintf(buf, "{%pUl}\n",
  154. hv_dev->channel->offermsg.offer.if_type.b);
  155. }
  156. static DEVICE_ATTR_RO(class_id);
  157. static ssize_t device_id_show(struct device *dev,
  158. struct device_attribute *dev_attr, char *buf)
  159. {
  160. struct hv_device *hv_dev = device_to_hv_device(dev);
  161. if (!hv_dev->channel)
  162. return -ENODEV;
  163. return sprintf(buf, "{%pUl}\n",
  164. hv_dev->channel->offermsg.offer.if_instance.b);
  165. }
  166. static DEVICE_ATTR_RO(device_id);
  167. static ssize_t modalias_show(struct device *dev,
  168. struct device_attribute *dev_attr, char *buf)
  169. {
  170. struct hv_device *hv_dev = device_to_hv_device(dev);
  171. char alias_name[VMBUS_ALIAS_LEN + 1];
  172. print_alias_name(hv_dev, alias_name);
  173. return sprintf(buf, "vmbus:%s\n", alias_name);
  174. }
  175. static DEVICE_ATTR_RO(modalias);
  176. #ifdef CONFIG_NUMA
  177. static ssize_t numa_node_show(struct device *dev,
  178. struct device_attribute *attr, char *buf)
  179. {
  180. struct hv_device *hv_dev = device_to_hv_device(dev);
  181. if (!hv_dev->channel)
  182. return -ENODEV;
  183. return sprintf(buf, "%d\n", hv_dev->channel->numa_node);
  184. }
  185. static DEVICE_ATTR_RO(numa_node);
  186. #endif
  187. static ssize_t server_monitor_pending_show(struct device *dev,
  188. struct device_attribute *dev_attr,
  189. char *buf)
  190. {
  191. struct hv_device *hv_dev = device_to_hv_device(dev);
  192. if (!hv_dev->channel)
  193. return -ENODEV;
  194. return sprintf(buf, "%d\n",
  195. channel_pending(hv_dev->channel,
  196. vmbus_connection.monitor_pages[1]));
  197. }
  198. static DEVICE_ATTR_RO(server_monitor_pending);
  199. static ssize_t client_monitor_pending_show(struct device *dev,
  200. struct device_attribute *dev_attr,
  201. char *buf)
  202. {
  203. struct hv_device *hv_dev = device_to_hv_device(dev);
  204. if (!hv_dev->channel)
  205. return -ENODEV;
  206. return sprintf(buf, "%d\n",
  207. channel_pending(hv_dev->channel,
  208. vmbus_connection.monitor_pages[1]));
  209. }
  210. static DEVICE_ATTR_RO(client_monitor_pending);
  211. static ssize_t server_monitor_latency_show(struct device *dev,
  212. struct device_attribute *dev_attr,
  213. char *buf)
  214. {
  215. struct hv_device *hv_dev = device_to_hv_device(dev);
  216. if (!hv_dev->channel)
  217. return -ENODEV;
  218. return sprintf(buf, "%d\n",
  219. channel_latency(hv_dev->channel,
  220. vmbus_connection.monitor_pages[0]));
  221. }
  222. static DEVICE_ATTR_RO(server_monitor_latency);
  223. static ssize_t client_monitor_latency_show(struct device *dev,
  224. struct device_attribute *dev_attr,
  225. char *buf)
  226. {
  227. struct hv_device *hv_dev = device_to_hv_device(dev);
  228. if (!hv_dev->channel)
  229. return -ENODEV;
  230. return sprintf(buf, "%d\n",
  231. channel_latency(hv_dev->channel,
  232. vmbus_connection.monitor_pages[1]));
  233. }
  234. static DEVICE_ATTR_RO(client_monitor_latency);
  235. static ssize_t server_monitor_conn_id_show(struct device *dev,
  236. struct device_attribute *dev_attr,
  237. char *buf)
  238. {
  239. struct hv_device *hv_dev = device_to_hv_device(dev);
  240. if (!hv_dev->channel)
  241. return -ENODEV;
  242. return sprintf(buf, "%d\n",
  243. channel_conn_id(hv_dev->channel,
  244. vmbus_connection.monitor_pages[0]));
  245. }
  246. static DEVICE_ATTR_RO(server_monitor_conn_id);
  247. static ssize_t client_monitor_conn_id_show(struct device *dev,
  248. struct device_attribute *dev_attr,
  249. char *buf)
  250. {
  251. struct hv_device *hv_dev = device_to_hv_device(dev);
  252. if (!hv_dev->channel)
  253. return -ENODEV;
  254. return sprintf(buf, "%d\n",
  255. channel_conn_id(hv_dev->channel,
  256. vmbus_connection.monitor_pages[1]));
  257. }
  258. static DEVICE_ATTR_RO(client_monitor_conn_id);
  259. static ssize_t out_intr_mask_show(struct device *dev,
  260. struct device_attribute *dev_attr, char *buf)
  261. {
  262. struct hv_device *hv_dev = device_to_hv_device(dev);
  263. struct hv_ring_buffer_debug_info outbound;
  264. int ret;
  265. if (!hv_dev->channel)
  266. return -ENODEV;
  267. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
  268. &outbound);
  269. if (ret < 0)
  270. return ret;
  271. return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
  272. }
  273. static DEVICE_ATTR_RO(out_intr_mask);
  274. static ssize_t out_read_index_show(struct device *dev,
  275. struct device_attribute *dev_attr, char *buf)
  276. {
  277. struct hv_device *hv_dev = device_to_hv_device(dev);
  278. struct hv_ring_buffer_debug_info outbound;
  279. int ret;
  280. if (!hv_dev->channel)
  281. return -ENODEV;
  282. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
  283. &outbound);
  284. if (ret < 0)
  285. return ret;
  286. return sprintf(buf, "%d\n", outbound.current_read_index);
  287. }
  288. static DEVICE_ATTR_RO(out_read_index);
  289. static ssize_t out_write_index_show(struct device *dev,
  290. struct device_attribute *dev_attr,
  291. char *buf)
  292. {
  293. struct hv_device *hv_dev = device_to_hv_device(dev);
  294. struct hv_ring_buffer_debug_info outbound;
  295. int ret;
  296. if (!hv_dev->channel)
  297. return -ENODEV;
  298. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
  299. &outbound);
  300. if (ret < 0)
  301. return ret;
  302. return sprintf(buf, "%d\n", outbound.current_write_index);
  303. }
  304. static DEVICE_ATTR_RO(out_write_index);
  305. static ssize_t out_read_bytes_avail_show(struct device *dev,
  306. struct device_attribute *dev_attr,
  307. char *buf)
  308. {
  309. struct hv_device *hv_dev = device_to_hv_device(dev);
  310. struct hv_ring_buffer_debug_info outbound;
  311. int ret;
  312. if (!hv_dev->channel)
  313. return -ENODEV;
  314. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
  315. &outbound);
  316. if (ret < 0)
  317. return ret;
  318. return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
  319. }
  320. static DEVICE_ATTR_RO(out_read_bytes_avail);
  321. static ssize_t out_write_bytes_avail_show(struct device *dev,
  322. struct device_attribute *dev_attr,
  323. char *buf)
  324. {
  325. struct hv_device *hv_dev = device_to_hv_device(dev);
  326. struct hv_ring_buffer_debug_info outbound;
  327. int ret;
  328. if (!hv_dev->channel)
  329. return -ENODEV;
  330. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
  331. &outbound);
  332. if (ret < 0)
  333. return ret;
  334. return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
  335. }
  336. static DEVICE_ATTR_RO(out_write_bytes_avail);
  337. static ssize_t in_intr_mask_show(struct device *dev,
  338. struct device_attribute *dev_attr, char *buf)
  339. {
  340. struct hv_device *hv_dev = device_to_hv_device(dev);
  341. struct hv_ring_buffer_debug_info inbound;
  342. int ret;
  343. if (!hv_dev->channel)
  344. return -ENODEV;
  345. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
  346. if (ret < 0)
  347. return ret;
  348. return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
  349. }
  350. static DEVICE_ATTR_RO(in_intr_mask);
  351. static ssize_t in_read_index_show(struct device *dev,
  352. struct device_attribute *dev_attr, char *buf)
  353. {
  354. struct hv_device *hv_dev = device_to_hv_device(dev);
  355. struct hv_ring_buffer_debug_info inbound;
  356. int ret;
  357. if (!hv_dev->channel)
  358. return -ENODEV;
  359. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
  360. if (ret < 0)
  361. return ret;
  362. return sprintf(buf, "%d\n", inbound.current_read_index);
  363. }
  364. static DEVICE_ATTR_RO(in_read_index);
  365. static ssize_t in_write_index_show(struct device *dev,
  366. struct device_attribute *dev_attr, char *buf)
  367. {
  368. struct hv_device *hv_dev = device_to_hv_device(dev);
  369. struct hv_ring_buffer_debug_info inbound;
  370. int ret;
  371. if (!hv_dev->channel)
  372. return -ENODEV;
  373. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
  374. if (ret < 0)
  375. return ret;
  376. return sprintf(buf, "%d\n", inbound.current_write_index);
  377. }
  378. static DEVICE_ATTR_RO(in_write_index);
  379. static ssize_t in_read_bytes_avail_show(struct device *dev,
  380. struct device_attribute *dev_attr,
  381. char *buf)
  382. {
  383. struct hv_device *hv_dev = device_to_hv_device(dev);
  384. struct hv_ring_buffer_debug_info inbound;
  385. int ret;
  386. if (!hv_dev->channel)
  387. return -ENODEV;
  388. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
  389. if (ret < 0)
  390. return ret;
  391. return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
  392. }
  393. static DEVICE_ATTR_RO(in_read_bytes_avail);
  394. static ssize_t in_write_bytes_avail_show(struct device *dev,
  395. struct device_attribute *dev_attr,
  396. char *buf)
  397. {
  398. struct hv_device *hv_dev = device_to_hv_device(dev);
  399. struct hv_ring_buffer_debug_info inbound;
  400. int ret;
  401. if (!hv_dev->channel)
  402. return -ENODEV;
  403. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
  404. if (ret < 0)
  405. return ret;
  406. return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
  407. }
  408. static DEVICE_ATTR_RO(in_write_bytes_avail);
  409. static ssize_t channel_vp_mapping_show(struct device *dev,
  410. struct device_attribute *dev_attr,
  411. char *buf)
  412. {
  413. struct hv_device *hv_dev = device_to_hv_device(dev);
  414. struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
  415. unsigned long flags;
  416. int buf_size = PAGE_SIZE, n_written, tot_written;
  417. struct list_head *cur;
  418. if (!channel)
  419. return -ENODEV;
  420. tot_written = snprintf(buf, buf_size, "%u:%u\n",
  421. channel->offermsg.child_relid, channel->target_cpu);
  422. spin_lock_irqsave(&channel->lock, flags);
  423. list_for_each(cur, &channel->sc_list) {
  424. if (tot_written >= buf_size - 1)
  425. break;
  426. cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
  427. n_written = scnprintf(buf + tot_written,
  428. buf_size - tot_written,
  429. "%u:%u\n",
  430. cur_sc->offermsg.child_relid,
  431. cur_sc->target_cpu);
  432. tot_written += n_written;
  433. }
  434. spin_unlock_irqrestore(&channel->lock, flags);
  435. return tot_written;
  436. }
  437. static DEVICE_ATTR_RO(channel_vp_mapping);
  438. static ssize_t vendor_show(struct device *dev,
  439. struct device_attribute *dev_attr,
  440. char *buf)
  441. {
  442. struct hv_device *hv_dev = device_to_hv_device(dev);
  443. return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
  444. }
  445. static DEVICE_ATTR_RO(vendor);
  446. static ssize_t device_show(struct device *dev,
  447. struct device_attribute *dev_attr,
  448. char *buf)
  449. {
  450. struct hv_device *hv_dev = device_to_hv_device(dev);
  451. return sprintf(buf, "0x%x\n", hv_dev->device_id);
  452. }
  453. static DEVICE_ATTR_RO(device);
  454. /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
  455. static struct attribute *vmbus_dev_attrs[] = {
  456. &dev_attr_id.attr,
  457. &dev_attr_state.attr,
  458. &dev_attr_monitor_id.attr,
  459. &dev_attr_class_id.attr,
  460. &dev_attr_device_id.attr,
  461. &dev_attr_modalias.attr,
  462. #ifdef CONFIG_NUMA
  463. &dev_attr_numa_node.attr,
  464. #endif
  465. &dev_attr_server_monitor_pending.attr,
  466. &dev_attr_client_monitor_pending.attr,
  467. &dev_attr_server_monitor_latency.attr,
  468. &dev_attr_client_monitor_latency.attr,
  469. &dev_attr_server_monitor_conn_id.attr,
  470. &dev_attr_client_monitor_conn_id.attr,
  471. &dev_attr_out_intr_mask.attr,
  472. &dev_attr_out_read_index.attr,
  473. &dev_attr_out_write_index.attr,
  474. &dev_attr_out_read_bytes_avail.attr,
  475. &dev_attr_out_write_bytes_avail.attr,
  476. &dev_attr_in_intr_mask.attr,
  477. &dev_attr_in_read_index.attr,
  478. &dev_attr_in_write_index.attr,
  479. &dev_attr_in_read_bytes_avail.attr,
  480. &dev_attr_in_write_bytes_avail.attr,
  481. &dev_attr_channel_vp_mapping.attr,
  482. &dev_attr_vendor.attr,
  483. &dev_attr_device.attr,
  484. NULL,
  485. };
  486. ATTRIBUTE_GROUPS(vmbus_dev);
  487. /*
  488. * vmbus_uevent - add uevent for our device
  489. *
  490. * This routine is invoked when a device is added or removed on the vmbus to
  491. * generate a uevent for udev in userspace. Udev then matches the uevent
  492. * against its rules to load the appropriate driver.
  493. *
  494. * The alias string is of the form vmbus:guid, where guid is the string
  495. * representation of the device guid (each byte of the guid is
  496. * represented with two hex characters).
  497. */
  498. static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
  499. {
  500. struct hv_device *dev = device_to_hv_device(device);
  501. int ret;
  502. char alias_name[VMBUS_ALIAS_LEN + 1];
  503. print_alias_name(dev, alias_name);
  504. ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
  505. return ret;
  506. }
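  /*
   * Illustrative example of the uevent built above: a device whose type GUID
   * starts with the bytes 0x63 0x51 0x61 0xf8 ... yields
   * "MODALIAS=vmbus:635161f8...", the same alias format that
   * MODULE_DEVICE_TABLE(vmbus, ...) embeds in child drivers and that udev
   * uses to pick the module to load.
   */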
  507. static const uuid_le null_guid;
  508. static inline bool is_null_guid(const uuid_le *guid)
  509. {
  510. if (uuid_le_cmp(*guid, null_guid))
  511. return false;
  512. return true;
  513. }
  514. /*
  515. * Return a matching hv_vmbus_device_id pointer.
  516. * If there is no match, return NULL.
  517. */
  518. static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
  519. const uuid_le *guid)
  520. {
  521. const struct hv_vmbus_device_id *id = NULL;
  522. struct vmbus_dynid *dynid;
  523. /* Look at the dynamic ids first, before the static ones */
  524. spin_lock(&drv->dynids.lock);
  525. list_for_each_entry(dynid, &drv->dynids.list, node) {
  526. if (!uuid_le_cmp(dynid->id.guid, *guid)) {
  527. id = &dynid->id;
  528. break;
  529. }
  530. }
  531. spin_unlock(&drv->dynids.lock);
  532. if (id)
  533. return id;
  534. id = drv->id_table;
  535. if (id == NULL)
  536. return NULL; /* empty device table */
  537. for (; !is_null_guid(&id->guid); id++)
  538. if (!uuid_le_cmp(id->guid, *guid))
  539. return id;
  540. return NULL;
  541. }
  542. /* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
  543. static int vmbus_add_dynid(struct hv_driver *drv, uuid_le *guid)
  544. {
  545. struct vmbus_dynid *dynid;
  546. dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
  547. if (!dynid)
  548. return -ENOMEM;
  549. dynid->id.guid = *guid;
  550. spin_lock(&drv->dynids.lock);
  551. list_add_tail(&dynid->node, &drv->dynids.list);
  552. spin_unlock(&drv->dynids.lock);
  553. return driver_attach(&drv->driver);
  554. }
  555. static void vmbus_free_dynids(struct hv_driver *drv)
  556. {
  557. struct vmbus_dynid *dynid, *n;
  558. spin_lock(&drv->dynids.lock);
  559. list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
  560. list_del(&dynid->node);
  561. kfree(dynid);
  562. }
  563. spin_unlock(&drv->dynids.lock);
  564. }
  565. /*
  566. * store_new_id - sysfs frontend to vmbus_add_dynid()
  567. *
  568. * Allow GUIDs to be added to an existing driver via sysfs.
  569. */
  570. static ssize_t new_id_store(struct device_driver *driver, const char *buf,
  571. size_t count)
  572. {
  573. struct hv_driver *drv = drv_to_hv_drv(driver);
  574. uuid_le guid;
  575. ssize_t retval;
  576. retval = uuid_le_to_bin(buf, &guid);
  577. if (retval)
  578. return retval;
  579. if (hv_vmbus_get_id(drv, &guid))
  580. return -EEXIST;
  581. retval = vmbus_add_dynid(drv, &guid);
  582. if (retval)
  583. return retval;
  584. return count;
  585. }
  586. static DRIVER_ATTR_WO(new_id);
  587. /*
  588. * store_remove_id - remove a device ID from this driver
  589. *
  590. * Removes a dynamic vmbus device ID from this driver.
  591. */
  592. static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
  593. size_t count)
  594. {
  595. struct hv_driver *drv = drv_to_hv_drv(driver);
  596. struct vmbus_dynid *dynid, *n;
  597. uuid_le guid;
  598. ssize_t retval;
  599. retval = uuid_le_to_bin(buf, &guid);
  600. if (retval)
  601. return retval;
  602. retval = -ENODEV;
  603. spin_lock(&drv->dynids.lock);
  604. list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
  605. struct hv_vmbus_device_id *id = &dynid->id;
  606. if (!uuid_le_cmp(id->guid, guid)) {
  607. list_del(&dynid->node);
  608. kfree(dynid);
  609. retval = count;
  610. break;
  611. }
  612. }
  613. spin_unlock(&drv->dynids.lock);
  614. return retval;
  615. }
  616. static DRIVER_ATTR_WO(remove_id);
  617. static struct attribute *vmbus_drv_attrs[] = {
  618. &driver_attr_new_id.attr,
  619. &driver_attr_remove_id.attr,
  620. NULL,
  621. };
  622. ATTRIBUTE_GROUPS(vmbus_drv);
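  /*
   * Typical use of the new_id/remove_id driver attributes defined above
   * (illustrative; assumes a vmbus driver such as uio_hv_generic is loaded):
   *
   *   echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \
   *       > /sys/bus/vmbus/drivers/uio_hv_generic/new_id
   *
   * new_id_store() parses the GUID with uuid_le_to_bin(), vmbus_add_dynid()
   * records it, and driver_attach() re-probes unbound vmbus devices.
   */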
  623. /*
  624. * vmbus_match - Attempt to match the specified device to the specified driver
  625. */
  626. static int vmbus_match(struct device *device, struct device_driver *driver)
  627. {
  628. struct hv_driver *drv = drv_to_hv_drv(driver);
  629. struct hv_device *hv_dev = device_to_hv_device(device);
  630. /* The hv_sock driver handles all hv_sock offers. */
  631. if (is_hvsock_channel(hv_dev->channel))
  632. return drv->hvsock;
  633. if (hv_vmbus_get_id(drv, &hv_dev->dev_type))
  634. return 1;
  635. return 0;
  636. }
  637. /*
  638. * vmbus_probe - Add the new vmbus child device
  639. */
  640. static int vmbus_probe(struct device *child_device)
  641. {
  642. int ret = 0;
  643. struct hv_driver *drv =
  644. drv_to_hv_drv(child_device->driver);
  645. struct hv_device *dev = device_to_hv_device(child_device);
  646. const struct hv_vmbus_device_id *dev_id;
  647. dev_id = hv_vmbus_get_id(drv, &dev->dev_type);
  648. if (drv->probe) {
  649. ret = drv->probe(dev, dev_id);
  650. if (ret != 0)
  651. pr_err("probe failed for device %s (%d)\n",
  652. dev_name(child_device), ret);
  653. } else {
  654. pr_err("probe not set for driver %s\n",
  655. dev_name(child_device));
  656. ret = -ENODEV;
  657. }
  658. return ret;
  659. }
  660. /*
  661. * vmbus_remove - Remove a vmbus device
  662. */
  663. static int vmbus_remove(struct device *child_device)
  664. {
  665. struct hv_driver *drv;
  666. struct hv_device *dev = device_to_hv_device(child_device);
  667. if (child_device->driver) {
  668. drv = drv_to_hv_drv(child_device->driver);
  669. if (drv->remove)
  670. drv->remove(dev);
  671. }
  672. return 0;
  673. }
  674. /*
  675. * vmbus_shutdown - Shutdown a vmbus device
  676. */
  677. static void vmbus_shutdown(struct device *child_device)
  678. {
  679. struct hv_driver *drv;
  680. struct hv_device *dev = device_to_hv_device(child_device);
  681. /* The device may not be attached yet */
  682. if (!child_device->driver)
  683. return;
  684. drv = drv_to_hv_drv(child_device->driver);
  685. if (drv->shutdown)
  686. drv->shutdown(dev);
  687. }
  688. /*
  689. * vmbus_device_release - Final callback release of the vmbus child device
  690. */
  691. static void vmbus_device_release(struct device *device)
  692. {
  693. struct hv_device *hv_dev = device_to_hv_device(device);
  694. struct vmbus_channel *channel = hv_dev->channel;
  695. mutex_lock(&vmbus_connection.channel_mutex);
  696. hv_process_channel_removal(channel->offermsg.child_relid);
  697. mutex_unlock(&vmbus_connection.channel_mutex);
  698. kfree(hv_dev);
  699. }
  700. /* The one and only one */
  701. static struct bus_type hv_bus = {
  702. .name = "vmbus",
  703. .match = vmbus_match,
  704. .shutdown = vmbus_shutdown,
  705. .remove = vmbus_remove,
  706. .probe = vmbus_probe,
  707. .uevent = vmbus_uevent,
  708. .dev_groups = vmbus_dev_groups,
  709. .drv_groups = vmbus_drv_groups,
  710. };
  711. struct onmessage_work_context {
  712. struct work_struct work;
  713. struct hv_message msg;
  714. };
  715. static void vmbus_onmessage_work(struct work_struct *work)
  716. {
  717. struct onmessage_work_context *ctx;
  718. /* Do not process messages if we're in DISCONNECTED state */
  719. if (vmbus_connection.conn_state == DISCONNECTED)
  720. return;
  721. ctx = container_of(work, struct onmessage_work_context,
  722. work);
  723. vmbus_onmessage(&ctx->msg);
  724. kfree(ctx);
  725. }
  726. static void hv_process_timer_expiration(struct hv_message *msg,
  727. struct hv_per_cpu_context *hv_cpu)
  728. {
  729. struct clock_event_device *dev = hv_cpu->clk_evt;
  730. if (dev->event_handler)
  731. dev->event_handler(dev);
  732. vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
  733. }
  734. void vmbus_on_msg_dpc(unsigned long data)
  735. {
  736. struct hv_per_cpu_context *hv_cpu = (void *)data;
  737. void *page_addr = hv_cpu->synic_message_page;
  738. struct hv_message *msg = (struct hv_message *)page_addr +
  739. VMBUS_MESSAGE_SINT;
  740. struct vmbus_channel_message_header *hdr;
  741. const struct vmbus_channel_message_table_entry *entry;
  742. struct onmessage_work_context *ctx;
  743. u32 message_type = msg->header.message_type;
  744. if (message_type == HVMSG_NONE)
  745. /* no msg */
  746. return;
  747. hdr = (struct vmbus_channel_message_header *)msg->u.payload;
  748. trace_vmbus_on_msg_dpc(hdr);
  749. if (hdr->msgtype >= CHANNELMSG_COUNT) {
  750. WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
  751. goto msg_handled;
  752. }
  753. entry = &channel_message_table[hdr->msgtype];
  754. if (entry->handler_type == VMHT_BLOCKING) {
  755. ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
  756. if (ctx == NULL)
  757. return;
  758. INIT_WORK(&ctx->work, vmbus_onmessage_work);
  759. memcpy(&ctx->msg, msg, sizeof(*msg));
  760. /*
  761. * The host can generate a rescind message while we
  762. * may still be handling the original offer. We deal with
  763. * this condition by ensuring the processing is done on the
  764. * same CPU.
  765. */
  766. switch (hdr->msgtype) {
  767. case CHANNELMSG_RESCIND_CHANNELOFFER:
  768. /*
  769. * If we are handling the rescind message,
  770. * schedule the work on the global work queue.
  771. */
  772. schedule_work_on(vmbus_connection.connect_cpu,
  773. &ctx->work);
  774. break;
  775. case CHANNELMSG_OFFERCHANNEL:
  776. atomic_inc(&vmbus_connection.offer_in_progress);
  777. queue_work_on(vmbus_connection.connect_cpu,
  778. vmbus_connection.work_queue,
  779. &ctx->work);
  780. break;
  781. default:
  782. queue_work(vmbus_connection.work_queue, &ctx->work);
  783. }
  784. } else
  785. entry->message_handler(hdr);
  786. msg_handled:
  787. vmbus_signal_eom(msg, message_type);
  788. }
  789. /*
  790. * Direct callback for channels using other deferred processing
  791. */
  792. static void vmbus_channel_isr(struct vmbus_channel *channel)
  793. {
  794. void (*callback_fn)(void *);
  795. callback_fn = READ_ONCE(channel->onchannel_callback);
  796. if (likely(callback_fn != NULL))
  797. (*callback_fn)(channel->channel_callback_context);
  798. }
  799. /*
  800. * Schedule all channels with events pending
  801. */
  802. static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
  803. {
  804. unsigned long *recv_int_page;
  805. u32 maxbits, relid;
  806. if (vmbus_proto_version < VERSION_WIN8) {
  807. maxbits = MAX_NUM_CHANNELS_SUPPORTED;
  808. recv_int_page = vmbus_connection.recv_int_page;
  809. } else {
  810. /*
  811. * When the host is win8 and beyond, the event page
  812. * can be directly checked to get the id of the channel
  813. * that has the interrupt pending.
  814. */
  815. void *page_addr = hv_cpu->synic_event_page;
  816. union hv_synic_event_flags *event
  817. = (union hv_synic_event_flags *)page_addr +
  818. VMBUS_MESSAGE_SINT;
  819. maxbits = HV_EVENT_FLAGS_COUNT;
  820. recv_int_page = event->flags;
  821. }
  822. if (unlikely(!recv_int_page))
  823. return;
  824. for_each_set_bit(relid, recv_int_page, maxbits) {
  825. struct vmbus_channel *channel;
  826. if (!sync_test_and_clear_bit(relid, recv_int_page))
  827. continue;
  828. /* Special case - vmbus channel protocol msg */
  829. if (relid == 0)
  830. continue;
  831. rcu_read_lock();
  832. /* Find channel based on relid */
  833. list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
  834. if (channel->offermsg.child_relid != relid)
  835. continue;
  836. if (channel->rescind)
  837. continue;
  838. trace_vmbus_chan_sched(channel);
  839. ++channel->interrupts;
  840. switch (channel->callback_mode) {
  841. case HV_CALL_ISR:
  842. vmbus_channel_isr(channel);
  843. break;
  844. case HV_CALL_BATCHED:
  845. hv_begin_read(&channel->inbound);
  846. /* fallthrough */
  847. case HV_CALL_DIRECT:
  848. tasklet_schedule(&channel->callback_event);
  849. }
  850. }
  851. rcu_read_unlock();
  852. }
  853. }
  854. static void vmbus_isr(void)
  855. {
  856. struct hv_per_cpu_context *hv_cpu
  857. = this_cpu_ptr(hv_context.cpu_context);
  858. void *page_addr = hv_cpu->synic_event_page;
  859. struct hv_message *msg;
  860. union hv_synic_event_flags *event;
  861. bool handled = false;
  862. if (unlikely(page_addr == NULL))
  863. return;
  864. event = (union hv_synic_event_flags *)page_addr +
  865. VMBUS_MESSAGE_SINT;
  866. /*
  867. * Check for events before checking for messages. This is the order
  868. * in which events and messages are checked in Windows guests on
  869. * Hyper-V, and the Windows team suggested we do the same.
  870. */
  871. if ((vmbus_proto_version == VERSION_WS2008) ||
  872. (vmbus_proto_version == VERSION_WIN7)) {
  873. /* Since we are a child, we only need to check bit 0 */
  874. if (sync_test_and_clear_bit(0, event->flags))
  875. handled = true;
  876. } else {
  877. /*
  878. * Our host is win8 or above. The signaling mechanism
  879. * has changed and we can directly look at the event page.
  880. * If bit n is set then we have an interrupt on the channel
  881. * whose id is n.
  882. */
  883. handled = true;
  884. }
  885. if (handled)
  886. vmbus_chan_sched(hv_cpu);
  887. page_addr = hv_cpu->synic_message_page;
  888. msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
  889. /* Check if there are actual msgs to be processed */
  890. if (msg->header.message_type != HVMSG_NONE) {
  891. if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
  892. hv_process_timer_expiration(msg, hv_cpu);
  893. else
  894. tasklet_schedule(&hv_cpu->msg_dpc);
  895. }
  896. add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
  897. }
  898. /*
  899. * Boolean to control whether to report panic messages over Hyper-V.
  900. *
  901. * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
  902. */
  903. static int sysctl_record_panic_msg = 1;
  904. /*
  905. * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
  906. * buffer and call into Hyper-V to transfer the data.
  907. */
  908. static void hv_kmsg_dump(struct kmsg_dumper *dumper,
  909. enum kmsg_dump_reason reason)
  910. {
  911. size_t bytes_written;
  912. phys_addr_t panic_pa;
  913. /* We are only interested in panics. */
  914. if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
  915. return;
  916. panic_pa = virt_to_phys(hv_panic_page);
  917. /*
  918. * Write dump contents to the page. No need to synchronize; panic should
  919. * be single-threaded.
  920. */
  921. kmsg_dump_get_buffer(dumper, true, hv_panic_page, PAGE_SIZE,
  922. &bytes_written);
  923. if (bytes_written)
  924. hyperv_report_panic_msg(panic_pa, bytes_written);
  925. }
  926. static struct kmsg_dumper hv_kmsg_dumper = {
  927. .dump = hv_kmsg_dump,
  928. };
  929. static struct ctl_table_header *hv_ctl_table_hdr;
  930. static int zero;
  931. static int one = 1;
  932. /*
  933. * sysctl option to allow the user to control whether kmsg data should be
  934. * reported to Hyper-V on panic.
  935. */
  936. static struct ctl_table hv_ctl_table[] = {
  937. {
  938. .procname = "hyperv_record_panic_msg",
  939. .data = &sysctl_record_panic_msg,
  940. .maxlen = sizeof(int),
  941. .mode = 0644,
  942. .proc_handler = proc_dointvec_minmax,
  943. .extra1 = &zero,
  944. .extra2 = &one
  945. },
  946. {}
  947. };
  948. static struct ctl_table hv_root_table[] = {
  949. {
  950. .procname = "kernel",
  951. .mode = 0555,
  952. .child = hv_ctl_table
  953. },
  954. {}
  955. };
  956. /*
  957. * vmbus_bus_init - Main vmbus driver initialization routine.
  958. *
  959. * Here, we
  960. * - initialize the vmbus driver context
  961. * - invoke the vmbus hv main init routine
  962. * - retrieve the channel offers
  963. */
  964. static int vmbus_bus_init(void)
  965. {
  966. int ret;
  967. /* Hypervisor initialization: set up the hypercall page, etc. */
  968. ret = hv_init();
  969. if (ret != 0) {
  970. pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
  971. return ret;
  972. }
  973. ret = bus_register(&hv_bus);
  974. if (ret)
  975. return ret;
  976. hv_setup_vmbus_irq(vmbus_isr);
  977. ret = hv_synic_alloc();
  978. if (ret)
  979. goto err_alloc;
  980. /*
  981. * Initialize the per-cpu interrupt state and
  982. * connect to the host.
  983. */
  984. ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
  985. hv_synic_init, hv_synic_cleanup);
  986. if (ret < 0)
  987. goto err_alloc;
  988. hyperv_cpuhp_online = ret;
  989. ret = vmbus_connect();
  990. if (ret)
  991. goto err_connect;
  992. /*
  993. * Only register if the crash MSRs are available
  994. */
  995. if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
  996. u64 hyperv_crash_ctl;
  997. /*
  998. * Sysctl registration is not fatal, since by default
  999. * reporting is enabled.
  1000. */
  1001. hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
  1002. if (!hv_ctl_table_hdr)
  1003. pr_err("Hyper-V: sysctl table register error\n");
  1004. /*
  1005. * Register for panic kmsg callback only if the right
  1006. * capability is supported by the hypervisor.
  1007. */
  1008. hv_get_crash_ctl(hyperv_crash_ctl);
  1009. if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
  1010. hv_panic_page = (void *)get_zeroed_page(GFP_KERNEL);
  1011. if (hv_panic_page) {
  1012. ret = kmsg_dump_register(&hv_kmsg_dumper);
  1013. if (ret)
  1014. pr_err("Hyper-V: kmsg dump register "
  1015. "error 0x%x\n", ret);
  1016. } else
  1017. pr_err("Hyper-V: panic message page memory "
  1018. "allocation failed");
  1019. }
  1020. register_die_notifier(&hyperv_die_block);
  1021. atomic_notifier_chain_register(&panic_notifier_list,
  1022. &hyperv_panic_block);
  1023. }
  1024. vmbus_request_offers();
  1025. return 0;
  1026. err_connect:
  1027. cpuhp_remove_state(hyperv_cpuhp_online);
  1028. err_alloc:
  1029. hv_synic_free();
  1030. hv_remove_vmbus_irq();
  1031. bus_unregister(&hv_bus);
  1032. free_page((unsigned long)hv_panic_page);
  1033. unregister_sysctl_table(hv_ctl_table_hdr);
  1034. hv_ctl_table_hdr = NULL;
  1035. return ret;
  1036. }
  1037. /**
  1038. * __vmbus_driver_register() - Register a vmbus driver
  1039. * @hv_driver: Pointer to driver structure you want to register
  1040. * @owner: owner module of the driver
  1041. * @mod_name: module name string
  1042. *
  1043. * Registers the given driver with Linux through the 'driver_register()' call
  1044. * and sets up the hyper-v vmbus handling for this driver.
  1045. * It will return the state of the 'driver_register()' call.
  1046. *
  1047. */
  1048. int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
  1049. {
  1050. int ret;
  1051. pr_info("registering driver %s\n", hv_driver->name);
  1052. ret = vmbus_exists();
  1053. if (ret < 0)
  1054. return ret;
  1055. hv_driver->driver.name = hv_driver->name;
  1056. hv_driver->driver.owner = owner;
  1057. hv_driver->driver.mod_name = mod_name;
  1058. hv_driver->driver.bus = &hv_bus;
  1059. spin_lock_init(&hv_driver->dynids.lock);
  1060. INIT_LIST_HEAD(&hv_driver->dynids.list);
  1061. ret = driver_register(&hv_driver->driver);
  1062. return ret;
  1063. }
  1064. EXPORT_SYMBOL_GPL(__vmbus_driver_register);
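  /*
   * Minimal sketch of a caller (illustrative; the vmbus_driver_register()
   * wrapper in <linux/hyperv.h> calls this function and supplies THIS_MODULE
   * and KBUILD_MODNAME; the "sample" names below are hypothetical):
   *
   *   static struct hv_driver sample_drv = {
   *           .name     = "sample",
   *           .id_table = sample_id_table,
   *           .probe    = sample_probe,
   *           .remove   = sample_remove,
   *   };
   *
   *   static int __init sample_init(void)
   *   {
   *           return vmbus_driver_register(&sample_drv);
   *   }
   */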
  1065. /**
  1066. * vmbus_driver_unregister() - Unregister a vmbus driver
  1067. * @hv_driver: Pointer to driver structure you want to
  1068. * un-register
  1069. *
  1070. * Un-register the given driver that was previously registered with a call to
  1071. * vmbus_driver_register()
  1072. */
  1073. void vmbus_driver_unregister(struct hv_driver *hv_driver)
  1074. {
  1075. pr_info("unregistering driver %s\n", hv_driver->name);
  1076. if (!vmbus_exists()) {
  1077. driver_unregister(&hv_driver->driver);
  1078. vmbus_free_dynids(hv_driver);
  1079. }
  1080. }
  1081. EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
  1082. /*
  1083. * Called when last reference to channel is gone.
  1084. */
  1085. static void vmbus_chan_release(struct kobject *kobj)
  1086. {
  1087. struct vmbus_channel *channel
  1088. = container_of(kobj, struct vmbus_channel, kobj);
  1089. kfree_rcu(channel, rcu);
  1090. }
  1091. struct vmbus_chan_attribute {
  1092. struct attribute attr;
  1093. ssize_t (*show)(const struct vmbus_channel *chan, char *buf);
  1094. ssize_t (*store)(struct vmbus_channel *chan,
  1095. const char *buf, size_t count);
  1096. };
  1097. #define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
  1098. struct vmbus_chan_attribute chan_attr_##_name \
  1099. = __ATTR(_name, _mode, _show, _store)
  1100. #define VMBUS_CHAN_ATTR_RW(_name) \
  1101. struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
  1102. #define VMBUS_CHAN_ATTR_RO(_name) \
  1103. struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
  1104. #define VMBUS_CHAN_ATTR_WO(_name) \
  1105. struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
  1106. static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
  1107. struct attribute *attr, char *buf)
  1108. {
  1109. const struct vmbus_chan_attribute *attribute
  1110. = container_of(attr, struct vmbus_chan_attribute, attr);
  1111. const struct vmbus_channel *chan
  1112. = container_of(kobj, struct vmbus_channel, kobj);
  1113. if (!attribute->show)
  1114. return -EIO;
  1115. if (chan->state != CHANNEL_OPENED_STATE)
  1116. return -EINVAL;
  1117. return attribute->show(chan, buf);
  1118. }
  1119. static const struct sysfs_ops vmbus_chan_sysfs_ops = {
  1120. .show = vmbus_chan_attr_show,
  1121. };
  1122. static ssize_t out_mask_show(const struct vmbus_channel *channel, char *buf)
  1123. {
  1124. const struct hv_ring_buffer_info *rbi = &channel->outbound;
  1125. return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
  1126. }
  1127. static VMBUS_CHAN_ATTR_RO(out_mask);
  1128. static ssize_t in_mask_show(const struct vmbus_channel *channel, char *buf)
  1129. {
  1130. const struct hv_ring_buffer_info *rbi = &channel->inbound;
  1131. return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
  1132. }
  1133. static VMBUS_CHAN_ATTR_RO(in_mask);
  1134. static ssize_t read_avail_show(const struct vmbus_channel *channel, char *buf)
  1135. {
  1136. const struct hv_ring_buffer_info *rbi = &channel->inbound;
  1137. return sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
  1138. }
  1139. static VMBUS_CHAN_ATTR_RO(read_avail);
  1140. static ssize_t write_avail_show(const struct vmbus_channel *channel, char *buf)
  1141. {
  1142. const struct hv_ring_buffer_info *rbi = &channel->outbound;
  1143. return sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
  1144. }
  1145. static VMBUS_CHAN_ATTR_RO(write_avail);
  1146. static ssize_t show_target_cpu(const struct vmbus_channel *channel, char *buf)
  1147. {
  1148. return sprintf(buf, "%u\n", channel->target_cpu);
  1149. }
  1150. static VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);
  1151. static ssize_t channel_pending_show(const struct vmbus_channel *channel,
  1152. char *buf)
  1153. {
  1154. return sprintf(buf, "%d\n",
  1155. channel_pending(channel,
  1156. vmbus_connection.monitor_pages[1]));
  1157. }
  1158. static VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);
  1159. static ssize_t channel_latency_show(const struct vmbus_channel *channel,
  1160. char *buf)
  1161. {
  1162. return sprintf(buf, "%d\n",
  1163. channel_latency(channel,
  1164. vmbus_connection.monitor_pages[1]));
  1165. }
  1166. static VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);
  1167. static ssize_t channel_interrupts_show(const struct vmbus_channel *channel, char *buf)
  1168. {
  1169. return sprintf(buf, "%llu\n", channel->interrupts);
  1170. }
  1171. static VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);
  1172. static ssize_t channel_events_show(const struct vmbus_channel *channel, char *buf)
  1173. {
  1174. return sprintf(buf, "%llu\n", channel->sig_events);
  1175. }
  1176. static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
  1177. static ssize_t subchannel_monitor_id_show(const struct vmbus_channel *channel,
  1178. char *buf)
  1179. {
  1180. return sprintf(buf, "%u\n", channel->offermsg.monitorid);
  1181. }
  1182. static VMBUS_CHAN_ATTR(monitor_id, S_IRUGO, subchannel_monitor_id_show, NULL);
  1183. static ssize_t subchannel_id_show(const struct vmbus_channel *channel,
  1184. char *buf)
  1185. {
  1186. return sprintf(buf, "%u\n",
  1187. channel->offermsg.offer.sub_channel_index);
  1188. }
  1189. static VMBUS_CHAN_ATTR_RO(subchannel_id);
  1190. static struct attribute *vmbus_chan_attrs[] = {
  1191. &chan_attr_out_mask.attr,
  1192. &chan_attr_in_mask.attr,
  1193. &chan_attr_read_avail.attr,
  1194. &chan_attr_write_avail.attr,
  1195. &chan_attr_cpu.attr,
  1196. &chan_attr_pending.attr,
  1197. &chan_attr_latency.attr,
  1198. &chan_attr_interrupts.attr,
  1199. &chan_attr_events.attr,
  1200. &chan_attr_monitor_id.attr,
  1201. &chan_attr_subchannel_id.attr,
  1202. NULL
  1203. };
  1204. static struct kobj_type vmbus_chan_ktype = {
  1205. .sysfs_ops = &vmbus_chan_sysfs_ops,
  1206. .release = vmbus_chan_release,
  1207. .default_attrs = vmbus_chan_attrs,
  1208. };
  1209. /*
  1210. * vmbus_add_channel_kobj - setup a sub-directory under device/channels
  1211. */
  1212. int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
  1213. {
  1214. struct kobject *kobj = &channel->kobj;
  1215. u32 relid = channel->offermsg.child_relid;
  1216. int ret;
  1217. kobj->kset = dev->channels_kset;
  1218. ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
  1219. "%u", relid);
  1220. if (ret)
  1221. return ret;
  1222. kobject_uevent(kobj, KOBJ_ADD);
  1223. return 0;
  1224. }
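  /*
   * Resulting sysfs layout (illustrative): for a channel with relid 15 the
   * code above creates /sys/bus/vmbus/devices/<device>/channels/15/,
   * populated with the per-channel attributes defined earlier (cpu, in_mask,
   * out_mask, read_avail, write_avail, pending, latency, interrupts, events,
   * monitor_id, subchannel_id).
   */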
  1225. /*
  1226. * vmbus_device_create - Creates a new child device object for the vmbus;
  1227. * the caller registers it with vmbus_device_register().
  1228. */
  1229. struct hv_device *vmbus_device_create(const uuid_le *type,
  1230. const uuid_le *instance,
  1231. struct vmbus_channel *channel)
  1232. {
  1233. struct hv_device *child_device_obj;
  1234. child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
  1235. if (!child_device_obj) {
  1236. pr_err("Unable to allocate device object for child device\n");
  1237. return NULL;
  1238. }
  1239. child_device_obj->channel = channel;
  1240. memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
  1241. memcpy(&child_device_obj->dev_instance, instance,
  1242. sizeof(uuid_le));
  1243. child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */
  1244. return child_device_obj;
  1245. }
  1246. /*
  1247. * vmbus_device_register - Register the child device
  1248. */
  1249. int vmbus_device_register(struct hv_device *child_device_obj)
  1250. {
  1251. struct kobject *kobj = &child_device_obj->device.kobj;
  1252. int ret;
  1253. dev_set_name(&child_device_obj->device, "%pUl",
  1254. child_device_obj->channel->offermsg.offer.if_instance.b);
  1255. child_device_obj->device.bus = &hv_bus;
  1256. child_device_obj->device.parent = &hv_acpi_dev->dev;
  1257. child_device_obj->device.release = vmbus_device_release;
  1258. /*
  1259. * Register with the LDM. This will kick off the driver/device
  1260. * binding...which will eventually call vmbus_match() and vmbus_probe()
  1261. */
  1262. ret = device_register(&child_device_obj->device);
  1263. if (ret) {
  1264. pr_err("Unable to register child device\n");
  1265. return ret;
  1266. }
  1267. child_device_obj->channels_kset = kset_create_and_add("channels",
  1268. NULL, kobj);
  1269. if (!child_device_obj->channels_kset) {
  1270. ret = -ENOMEM;
  1271. goto err_dev_unregister;
  1272. }
  1273. ret = vmbus_add_channel_kobj(child_device_obj,
  1274. child_device_obj->channel);
  1275. if (ret) {
  1276. pr_err("Unable to register primary channel\n");
  1277. goto err_kset_unregister;
  1278. }
  1279. return 0;
  1280. err_kset_unregister:
  1281. kset_unregister(child_device_obj->channels_kset);
  1282. err_dev_unregister:
  1283. device_unregister(&child_device_obj->device);
  1284. return ret;
  1285. }
  1286. /*
  1287. * vmbus_device_unregister - Remove the specified child device
  1288. * from the vmbus.
  1289. */
  1290. void vmbus_device_unregister(struct hv_device *device_obj)
  1291. {
  1292. pr_debug("child device %s unregistered\n",
  1293. dev_name(&device_obj->device));
  1294. kset_unregister(device_obj->channels_kset);
  1295. /*
  1296. * Kick off the process of unregistering the device.
  1297. * This will call vmbus_remove() and eventually vmbus_device_release()
  1298. */
  1299. device_unregister(&device_obj->device);
  1300. }
  1301. /*
  1302. * VMBus is an ACPI-enumerated device. Get the information we
  1303. * need from the DSDT.
  1304. */
  1305. #define VTPM_BASE_ADDRESS 0xfed40000
  1306. static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
  1307. {
  1308. resource_size_t start = 0;
  1309. resource_size_t end = 0;
  1310. struct resource *new_res;
  1311. struct resource **old_res = &hyperv_mmio;
  1312. struct resource **prev_res = NULL;
  1313. switch (res->type) {
  1314. /*
  1315. * "Address" descriptors are for bus windows. Ignore
  1316. * "memory" descriptors, which are for registers on
  1317. * devices.
  1318. */
  1319. case ACPI_RESOURCE_TYPE_ADDRESS32:
  1320. start = res->data.address32.address.minimum;
  1321. end = res->data.address32.address.maximum;
  1322. break;
  1323. case ACPI_RESOURCE_TYPE_ADDRESS64:
  1324. start = res->data.address64.address.minimum;
  1325. end = res->data.address64.address.maximum;
  1326. break;
  1327. default:
  1328. /* Unused resource type */
  1329. return AE_OK;
  1330. }
  1331. /*
  1332. * Ignore ranges that are below 1MB, as they're not
  1333. * necessary or useful here.
  1334. */
  1335. if (end < 0x100000)
  1336. return AE_OK;
  1337. new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
  1338. if (!new_res)
  1339. return AE_NO_MEMORY;
  1340. /* If this range overlaps the virtual TPM, truncate it. */
  1341. if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
  1342. end = VTPM_BASE_ADDRESS;
  1343. new_res->name = "hyperv mmio";
  1344. new_res->flags = IORESOURCE_MEM;
  1345. new_res->start = start;
  1346. new_res->end = end;
  1347. /*
  1348. * If two ranges are adjacent, merge them.
  1349. */
  1350. do {
  1351. if (!*old_res) {
  1352. *old_res = new_res;
  1353. break;
  1354. }
  1355. if (((*old_res)->end + 1) == new_res->start) {
  1356. (*old_res)->end = new_res->end;
  1357. kfree(new_res);
  1358. break;
  1359. }
  1360. if ((*old_res)->start == new_res->end + 1) {
  1361. (*old_res)->start = new_res->start;
  1362. kfree(new_res);
  1363. break;
  1364. }
  1365. if ((*old_res)->start > new_res->end) {
  1366. new_res->sibling = *old_res;
  1367. if (prev_res)
  1368. (*prev_res)->sibling = new_res;
  1369. *old_res = new_res;
  1370. break;
  1371. }
  1372. prev_res = old_res;
  1373. old_res = &(*old_res)->sibling;
  1374. } while (1);
  1375. return AE_OK;
  1376. }
static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		if (fb_mmio) {
			__release_region(hyperv_mmio, fb_mmio->start,
					 resource_size(fb_mmio));
			fb_mmio = NULL;
		}

		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}
static void vmbus_reserve_fb(void)
{
	int size;
	/*
	 * Make a claim for the frame buffer in the resource tree under the
	 * first node, which will be the one below 4GB. The length seems to
	 * be underreported, particularly in a Generation 1 VM. So start out
	 * reserving a larger area and make it smaller until it succeeds.
	 */
	if (screen_info.lfb_base) {
		if (efi_enabled(EFI_BOOT))
			size = max_t(__u32, screen_info.lfb_size, 0x800000);
		else
			size = max_t(__u32, screen_info.lfb_size, 0x4000000);

		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
			fb_mmio = __request_region(hyperv_mmio,
						   screen_info.lfb_base, size,
						   fb_mmio_name, 0);
		}
	}
}
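/*
 * Editorial note on vmbus_reserve_fb() above: the claim starts from at least
 * 8 MB on an EFI (Generation 2) boot or 64 MB otherwise (0x800000 and
 * 0x4000000 respectively), and halves the requested size on each failure
 * until either __request_region() succeeds or the size drops below 1 MB
 * (0x100000). As an illustrative sequence, a Generation 1 VM might attempt
 * 64 MB, 32 MB, 16 MB, ... until the claim fits inside the MMIO window that
 * actually contains the frame buffer.
 */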
/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplies a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge", whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case. It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	down(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region. Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {
		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		start = (range_min + align - 1) & ~(align - 1);

		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);

		for (; start + size - 1 <= range_max; start += align) {
			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	up(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be released.
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	down(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	up(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);
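/*
 * Illustrative usage of the two exported helpers above (a sketch, not code
 * from this file): a VMBus child driver holding an hv_device "hdev" that
 * needs a 1 MB, 4 KB-aligned MMIO window anywhere in guest physical address
 * space might do roughly the following, where "res", "hdev" and the size and
 * alignment values are hypothetical:
 *
 *	struct resource *res;
 *	int ret;
 *
 *	ret = vmbus_allocate_mmio(&res, hdev, 0, -1, 0x100000, 0x1000, false);
 *	if (ret)
 *		return ret;
 *	(map and use res->start .. res->end, e.g. with ioremap(), then)
 *	vmbus_free_mmio(res->start, resource_size(res));
 */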
static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
};
static void hv_kexec_handler(void)
{
	hv_synic_clockevents_cleanup();
	vmbus_initiate_unload(false);
	vmbus_connection.conn_state = DISCONNECTED;
	/* Make sure conn_state is set as hv_synic_cleanup checks for it */
	mb();
	cpuhp_remove_state(hyperv_cpuhp_online);
	hyperv_cleanup();
}
static void hv_crash_handler(struct pt_regs *regs)
{
	vmbus_initiate_unload(true);
	/*
	 * In the crash handler we can't schedule synic cleanup for all CPUs,
	 * so do the cleanup for the current CPU only. This should be
	 * sufficient for kdump.
	 */
	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_cleanup(smp_processor_id());
	hyperv_cleanup();
}
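/*
 * Editorial note on module init below: the ACPI driver is registered first
 * so that vmbus_acpi_add() can collect the MMIO ranges from _CRS; only after
 * that probe completes (or the 5 second wait times out) does vmbus_bus_init()
 * bring up the bus itself, followed by installation of the kexec and crash
 * handlers defined above.
 */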
static int __init hv_acpi_init(void)
{
	int ret, t;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get ACPI resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	ret = vmbus_bus_init();
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}
static void __exit vmbus_exit(void)
{
	int cpu;

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_clockevents_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_kill(&hv_cpu->msg_dpc);
	}
	vmbus_free_channels();

	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		kmsg_dump_unregister(&hv_kmsg_dumper);
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}

	free_page((unsigned long)hv_panic_page);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	bus_unregister(&hv_bus);

	cpuhp_remove_state(hyperv_cpuhp_online);
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
}
MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);