xpc_uv.c

  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
  7. */
  8. /*
  9. * Cross Partition Communication (XPC) uv-based functions.
  10. *
  11. * Architecture specific implementation of common functions.
  12. *
  13. */
  14. #include <linux/kernel.h>
  15. #include <linux/mm.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/delay.h>
  18. #include <linux/device.h>
  19. #include <linux/cpu.h>
  20. #include <linux/module.h>
  21. #include <linux/err.h>
  22. #include <linux/slab.h>
  23. #include <asm/uv/uv_hub.h>
  24. #if defined CONFIG_X86_64
  25. #include <asm/uv/bios.h>
  26. #include <asm/uv/uv_irq.h>
  27. #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
  28. #include <asm/sn/intr.h>
  29. #include <asm/sn/sn_sal.h>
  30. #endif
  31. #include "../sgi-gru/gru.h"
  32. #include "../sgi-gru/grukservices.h"
  33. #include "xpc.h"
  34. #if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
  35. struct uv_IO_APIC_route_entry {
  36. __u64 vector : 8,
  37. delivery_mode : 3,
  38. dest_mode : 1,
  39. delivery_status : 1,
  40. polarity : 1,
  41. __reserved_1 : 1,
  42. trigger : 1,
  43. mask : 1,
  44. __reserved_2 : 15,
  45. dest : 32;
  46. };
  47. #endif
  48. static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
  49. #define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES)
  50. #define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
  51. XPC_ACTIVATE_MSG_SIZE_UV)
  52. #define XPC_ACTIVATE_IRQ_NAME "xpc_activate"
  53. #define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES)
  54. #define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
  55. XPC_NOTIFY_MSG_SIZE_UV)
  56. #define XPC_NOTIFY_IRQ_NAME "xpc_notify"
  57. static int xpc_mq_node = -1;
  58. static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
  59. static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
  60. static int
  61. xpc_setup_partitions_uv(void)
  62. {
  63. short partid;
  64. struct xpc_partition_uv *part_uv;
  65. for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
  66. part_uv = &xpc_partitions[partid].sn.uv;
  67. mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
  68. spin_lock_init(&part_uv->flags_lock);
  69. part_uv->remote_act_state = XPC_P_AS_INACTIVE;
  70. }
  71. return 0;
  72. }
  73. static void
  74. xpc_teardown_partitions_uv(void)
  75. {
  76. short partid;
  77. struct xpc_partition_uv *part_uv;
  78. unsigned long irq_flags;
  79. for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
  80. part_uv = &xpc_partitions[partid].sn.uv;
  81. if (part_uv->cached_activate_gru_mq_desc != NULL) {
  82. mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
  83. spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
  84. part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
  85. spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
  86. kfree(part_uv->cached_activate_gru_mq_desc);
  87. part_uv->cached_activate_gru_mq_desc = NULL;
  88. mutex_unlock(&part_uv->
  89. cached_activate_gru_mq_desc_mutex);
  90. }
  91. }
  92. }
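/*
 * Bind a GRU message queue to an interrupt vector so that message delivery
 * raises an IRQ on the chosen cpu.  On x86_64 this goes through
 * uv_setup_irq(); on ia64 the fixed SGI_XPC_ACTIVATE/SGI_XPC_NOTIFY vectors
 * are used and the cpu/vector pair is written into the MMR directly.
 */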
  93. static int
  94. xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
  95. {
  96. int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
  97. #if defined CONFIG_X86_64
  98. mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
  99. UV_AFFINITY_CPU);
  100. if (mq->irq < 0)
  101. return mq->irq;
  102. mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
  103. #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
  104. if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
  105. mq->irq = SGI_XPC_ACTIVATE;
  106. else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
  107. mq->irq = SGI_XPC_NOTIFY;
  108. else
  109. return -EINVAL;
  110. mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
  111. uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
  112. #else
  113. #error not a supported configuration
  114. #endif
  115. return 0;
  116. }
  117. static void
  118. xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
  119. {
  120. #if defined CONFIG_X86_64
  121. uv_teardown_irq(mq->irq);
  122. #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
  123. int mmr_pnode;
  124. unsigned long mmr_value;
  125. mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
  126. mmr_value = 1UL << 16;
  127. uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
  128. #else
  129. #error not a supported configuration
  130. #endif
  131. }
  132. static int
  133. xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
  134. {
  135. int ret;
  136. #if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
  137. int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
  138. ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
  139. mq->order, &mq->mmr_offset);
  140. if (ret < 0) {
  141. dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
  142. ret);
  143. return -EBUSY;
  144. }
  145. #elif defined CONFIG_X86_64
  146. ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
  147. mq->order, &mq->mmr_offset);
  148. if (ret < 0) {
  149. dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
  150. "ret=%d\n", ret);
  151. return ret;
  152. }
  153. #else
  154. #error not a supported configuration
  155. #endif
  156. mq->watchlist_num = ret;
  157. return 0;
  158. }
  159. static void
  160. xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
  161. {
  162. int ret;
  163. int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
  164. #if defined CONFIG_X86_64
  165. ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
  166. BUG_ON(ret != BIOS_STATUS_SUCCESS);
  167. #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
  168. ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
  169. BUG_ON(ret != SALRET_OK);
  170. #else
  171. #error not a supported configuration
  172. #endif
  173. }
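/*
 * Create a GRU message queue: allocate the descriptor and the queue pages on
 * the node of the target cpu, register the queue with the BIOS/SAL watchlist
 * so operations on it generate an interrupt, hook up the IRQ handler,
 * initialize the queue via gru_create_message_queue(), and finally open the
 * memory up to the other partitions with xp_expand_memprotect().
 */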
  174. static struct xpc_gru_mq_uv *
  175. xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
  176. irq_handler_t irq_handler)
  177. {
  178. enum xp_retval xp_ret;
  179. int ret;
  180. int nid;
  181. int nasid;
  182. int pg_order;
  183. struct page *page;
  184. struct xpc_gru_mq_uv *mq;
  185. struct uv_IO_APIC_route_entry *mmr_value;
  186. mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
  187. if (mq == NULL) {
  188. dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
  189. "a xpc_gru_mq_uv structure\n");
  190. ret = -ENOMEM;
  191. goto out_0;
  192. }
  193. mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
  194. GFP_KERNEL);
  195. if (mq->gru_mq_desc == NULL) {
  196. dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
  197. "a gru_message_queue_desc structure\n");
  198. ret = -ENOMEM;
  199. goto out_1;
  200. }
  201. pg_order = get_order(mq_size);
  202. mq->order = pg_order + PAGE_SHIFT;
  203. mq_size = 1UL << mq->order;
  204. mq->mmr_blade = uv_cpu_to_blade_id(cpu);
  205. nid = cpu_to_node(cpu);
  206. page = __alloc_pages_node(nid,
  207. GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
  208. pg_order);
  209. if (page == NULL) {
  210. dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
  211. "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
  212. ret = -ENOMEM;
  213. goto out_2;
  214. }
  215. mq->address = page_address(page);
216. /* enable generation of an irq when a GRU mq operation occurs on this mq */
  217. ret = xpc_gru_mq_watchlist_alloc_uv(mq);
  218. if (ret != 0)
  219. goto out_3;
  220. ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
  221. if (ret != 0)
  222. goto out_4;
  223. ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
  224. if (ret != 0) {
  225. dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
  226. mq->irq, -ret);
  227. goto out_5;
  228. }
  229. nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
  230. mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
  231. ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
  232. nasid, mmr_value->vector, mmr_value->dest);
  233. if (ret != 0) {
  234. dev_err(xpc_part, "gru_create_message_queue() returned "
  235. "error=%d\n", ret);
  236. ret = -EINVAL;
  237. goto out_6;
  238. }
  239. /* allow other partitions to access this GRU mq */
  240. xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
  241. if (xp_ret != xpSuccess) {
  242. ret = -EACCES;
  243. goto out_6;
  244. }
  245. return mq;
  246. /* something went wrong */
  247. out_6:
  248. free_irq(mq->irq, NULL);
  249. out_5:
  250. xpc_release_gru_mq_irq_uv(mq);
  251. out_4:
  252. xpc_gru_mq_watchlist_free_uv(mq);
  253. out_3:
  254. free_pages((unsigned long)mq->address, pg_order);
  255. out_2:
  256. kfree(mq->gru_mq_desc);
  257. out_1:
  258. kfree(mq);
  259. out_0:
  260. return ERR_PTR(ret);
  261. }
  262. static void
  263. xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
  264. {
  265. unsigned int mq_size;
  266. int pg_order;
  267. int ret;
268. /* disallow other partitions from accessing this GRU mq */
  269. mq_size = 1UL << mq->order;
  270. ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
  271. BUG_ON(ret != xpSuccess);
  272. /* unregister irq handler and release mq irq/vector mapping */
  273. free_irq(mq->irq, NULL);
  274. xpc_release_gru_mq_irq_uv(mq);
275. /* disable generation of an irq when a GRU mq op occurs on this mq */
  276. xpc_gru_mq_watchlist_free_uv(mq);
  277. pg_order = mq->order - PAGE_SHIFT;
  278. free_pages((unsigned long)mq->address, pg_order);
  279. kfree(mq);
  280. }
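/*
 * Send a message to a (possibly remote) GRU message queue.  MQE_QUEUE_FULL
 * and MQE_CONGESTION are treated as transient and retried (the former after
 * a short sleep); any other error is reported as xpGruSendMqError.
 */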
  281. static enum xp_retval
  282. xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
  283. size_t msg_size)
  284. {
  285. enum xp_retval xp_ret;
  286. int ret;
  287. while (1) {
  288. ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
  289. if (ret == MQE_OK) {
  290. xp_ret = xpSuccess;
  291. break;
  292. }
  293. if (ret == MQE_QUEUE_FULL) {
  294. dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
  295. "error=MQE_QUEUE_FULL\n");
  296. /* !!! handle QLimit reached; delay & try again */
  297. /* ??? Do we add a limit to the number of retries? */
  298. (void)msleep_interruptible(10);
  299. } else if (ret == MQE_CONGESTION) {
  300. dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
  301. "error=MQE_CONGESTION\n");
  302. /* !!! handle LB Overflow; simply try again */
  303. /* ??? Do we add a limit to the number of retries? */
  304. } else {
  305. /* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
  306. dev_err(xpc_chan, "gru_send_message_gpa() returned "
  307. "error=%d\n", ret);
  308. xp_ret = xpGruSendMqError;
  309. break;
  310. }
  311. }
  312. return xp_ret;
  313. }
  314. static void
  315. xpc_process_activate_IRQ_rcvd_uv(void)
  316. {
  317. unsigned long irq_flags;
  318. short partid;
  319. struct xpc_partition *part;
  320. u8 act_state_req;
  321. DBUG_ON(xpc_activate_IRQ_rcvd == 0);
  322. spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  323. for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
  324. part = &xpc_partitions[partid];
  325. if (part->sn.uv.act_state_req == 0)
  326. continue;
  327. xpc_activate_IRQ_rcvd--;
  328. BUG_ON(xpc_activate_IRQ_rcvd < 0);
  329. act_state_req = part->sn.uv.act_state_req;
  330. part->sn.uv.act_state_req = 0;
  331. spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  332. if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
  333. if (part->act_state == XPC_P_AS_INACTIVE)
  334. xpc_activate_partition(part);
  335. else if (part->act_state == XPC_P_AS_DEACTIVATING)
  336. XPC_DEACTIVATE_PARTITION(part, xpReactivating);
  337. } else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
  338. if (part->act_state == XPC_P_AS_INACTIVE)
  339. xpc_activate_partition(part);
  340. else
  341. XPC_DEACTIVATE_PARTITION(part, xpReactivating);
  342. } else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
  343. XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);
  344. } else {
  345. BUG();
  346. }
  347. spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  348. if (xpc_activate_IRQ_rcvd == 0)
  349. break;
  350. }
  351. spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  352. }
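/*
 * Dispatch one message received on the activate message queue.  Requests
 * that change a partition's activation state are recorded in
 * part_uv->act_state_req and *wakeup_hb_checker is bumped so the caller
 * wakes the heartbeat checker; channel control messages just set the
 * appropriate chctl flag and wake the channel manager.
 */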
  353. static void
  354. xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
  355. struct xpc_activate_mq_msghdr_uv *msg_hdr,
  356. int part_setup,
  357. int *wakeup_hb_checker)
  358. {
  359. unsigned long irq_flags;
  360. struct xpc_partition_uv *part_uv = &part->sn.uv;
  361. struct xpc_openclose_args *args;
  362. part_uv->remote_act_state = msg_hdr->act_state;
  363. switch (msg_hdr->type) {
  364. case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
  365. /* syncing of remote_act_state was just done above */
  366. break;
  367. case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
  368. struct xpc_activate_mq_msg_activate_req_uv *msg;
  369. /*
  370. * ??? Do we deal here with ts_jiffies being different
  371. * ??? if act_state != XPC_P_AS_INACTIVE instead of
  372. * ??? below?
  373. */
  374. msg = container_of(msg_hdr, struct
  375. xpc_activate_mq_msg_activate_req_uv, hdr);
  376. spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  377. if (part_uv->act_state_req == 0)
  378. xpc_activate_IRQ_rcvd++;
  379. part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
  380. part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
  381. part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
  382. part_uv->heartbeat_gpa = msg->heartbeat_gpa;
  383. if (msg->activate_gru_mq_desc_gpa !=
  384. part_uv->activate_gru_mq_desc_gpa) {
  385. spin_lock(&part_uv->flags_lock);
  386. part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
  387. spin_unlock(&part_uv->flags_lock);
  388. part_uv->activate_gru_mq_desc_gpa =
  389. msg->activate_gru_mq_desc_gpa;
  390. }
  391. spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  392. (*wakeup_hb_checker)++;
  393. break;
  394. }
  395. case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
  396. struct xpc_activate_mq_msg_deactivate_req_uv *msg;
  397. msg = container_of(msg_hdr, struct
  398. xpc_activate_mq_msg_deactivate_req_uv, hdr);
  399. spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  400. if (part_uv->act_state_req == 0)
  401. xpc_activate_IRQ_rcvd++;
  402. part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
  403. part_uv->reason = msg->reason;
  404. spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  405. (*wakeup_hb_checker)++;
  406. return;
  407. }
  408. case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
  409. struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
  410. if (!part_setup)
  411. break;
  412. msg = container_of(msg_hdr, struct
  413. xpc_activate_mq_msg_chctl_closerequest_uv,
  414. hdr);
  415. args = &part->remote_openclose_args[msg->ch_number];
  416. args->reason = msg->reason;
  417. spin_lock_irqsave(&part->chctl_lock, irq_flags);
  418. part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
  419. spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
  420. xpc_wakeup_channel_mgr(part);
  421. break;
  422. }
  423. case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
  424. struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
  425. if (!part_setup)
  426. break;
  427. msg = container_of(msg_hdr, struct
  428. xpc_activate_mq_msg_chctl_closereply_uv,
  429. hdr);
  430. spin_lock_irqsave(&part->chctl_lock, irq_flags);
  431. part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
  432. spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
  433. xpc_wakeup_channel_mgr(part);
  434. break;
  435. }
  436. case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
  437. struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
  438. if (!part_setup)
  439. break;
  440. msg = container_of(msg_hdr, struct
  441. xpc_activate_mq_msg_chctl_openrequest_uv,
  442. hdr);
  443. args = &part->remote_openclose_args[msg->ch_number];
  444. args->entry_size = msg->entry_size;
  445. args->local_nentries = msg->local_nentries;
  446. spin_lock_irqsave(&part->chctl_lock, irq_flags);
  447. part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
  448. spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
  449. xpc_wakeup_channel_mgr(part);
  450. break;
  451. }
  452. case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
  453. struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
  454. if (!part_setup)
  455. break;
  456. msg = container_of(msg_hdr, struct
  457. xpc_activate_mq_msg_chctl_openreply_uv, hdr);
  458. args = &part->remote_openclose_args[msg->ch_number];
  459. args->remote_nentries = msg->remote_nentries;
  460. args->local_nentries = msg->local_nentries;
  461. args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;
  462. spin_lock_irqsave(&part->chctl_lock, irq_flags);
  463. part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
  464. spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
  465. xpc_wakeup_channel_mgr(part);
  466. break;
  467. }
  468. case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
  469. struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
  470. if (!part_setup)
  471. break;
  472. msg = container_of(msg_hdr, struct
  473. xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
  474. spin_lock_irqsave(&part->chctl_lock, irq_flags);
  475. part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
  476. spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
477. xpc_wakeup_channel_mgr(part);
break;
478. }
  479. case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
  480. spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
  481. part_uv->flags |= XPC_P_ENGAGED_UV;
  482. spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
  483. break;
  484. case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
  485. spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
  486. part_uv->flags &= ~XPC_P_ENGAGED_UV;
  487. spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
  488. break;
  489. default:
  490. dev_err(xpc_part, "received unknown activate_mq msg type=%d "
  491. "from partition=%d\n", msg_hdr->type, XPC_PARTID(part));
  492. /* get hb checker to deactivate from the remote partition */
  493. spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  494. if (part_uv->act_state_req == 0)
  495. xpc_activate_IRQ_rcvd++;
  496. part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
  497. part_uv->reason = xpBadMsgType;
  498. spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  499. (*wakeup_hb_checker)++;
  500. return;
  501. }
  502. if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
  503. part->remote_rp_ts_jiffies != 0) {
  504. /*
  505. * ??? Does what we do here need to be sensitive to
  506. * ??? act_state or remote_act_state?
  507. */
  508. spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  509. if (part_uv->act_state_req == 0)
  510. xpc_activate_IRQ_rcvd++;
  511. part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
  512. spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  513. (*wakeup_hb_checker)++;
  514. }
  515. }
  516. static irqreturn_t
  517. xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
  518. {
  519. struct xpc_activate_mq_msghdr_uv *msg_hdr;
  520. short partid;
  521. struct xpc_partition *part;
  522. int wakeup_hb_checker = 0;
  523. int part_referenced;
  524. while (1) {
  525. msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
  526. if (msg_hdr == NULL)
  527. break;
  528. partid = msg_hdr->partid;
  529. if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
  530. dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
  531. "received invalid partid=0x%x in message\n",
  532. partid);
  533. } else {
  534. part = &xpc_partitions[partid];
  535. part_referenced = xpc_part_ref(part);
  536. xpc_handle_activate_mq_msg_uv(part, msg_hdr,
  537. part_referenced,
  538. &wakeup_hb_checker);
  539. if (part_referenced)
  540. xpc_part_deref(part);
  541. }
  542. gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
  543. }
  544. if (wakeup_hb_checker)
  545. wake_up_interruptible(&xpc_activate_IRQ_wq);
  546. return IRQ_HANDLED;
  547. }
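/*
 * Pull a copy of a remote partition's GRU message queue descriptor into
 * local memory so that messages can be sent to that queue by gpa.
 */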
  548. static enum xp_retval
  549. xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
  550. unsigned long gru_mq_desc_gpa)
  551. {
  552. enum xp_retval ret;
  553. ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
  554. sizeof(struct gru_message_queue_desc));
  555. if (ret == xpSuccess)
  556. gru_mq_desc->mq = NULL;
  557. return ret;
  558. }
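/*
 * Send a message to a partition's activate message queue.  The remote
 * queue's descriptor is cached lazily under
 * cached_activate_gru_mq_desc_mutex; if a send fails and the cached copy
 * has been invalidated, the descriptor is refetched and the send retried
 * (the "again" path).
 */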
  559. static enum xp_retval
  560. xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
  561. int msg_type)
  562. {
  563. struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
  564. struct xpc_partition_uv *part_uv = &part->sn.uv;
  565. struct gru_message_queue_desc *gru_mq_desc;
  566. unsigned long irq_flags;
  567. enum xp_retval ret;
  568. DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);
  569. msg_hdr->type = msg_type;
  570. msg_hdr->partid = xp_partition_id;
  571. msg_hdr->act_state = part->act_state;
  572. msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;
  573. mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
  574. again:
  575. if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
  576. gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
  577. if (gru_mq_desc == NULL) {
  578. gru_mq_desc = kmalloc(sizeof(struct
  579. gru_message_queue_desc),
  580. GFP_KERNEL);
  581. if (gru_mq_desc == NULL) {
  582. ret = xpNoMemory;
  583. goto done;
  584. }
  585. part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
  586. }
  587. ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
  588. part_uv->
  589. activate_gru_mq_desc_gpa);
  590. if (ret != xpSuccess)
  591. goto done;
  592. spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
  593. part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
  594. spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
  595. }
  596. /* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
  597. ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
  598. msg_size);
  599. if (ret != xpSuccess) {
  600. smp_rmb(); /* ensure a fresh copy of part_uv->flags */
  601. if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
  602. goto again;
  603. }
  604. done:
  605. mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
  606. return ret;
  607. }
  608. static void
  609. xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
  610. size_t msg_size, int msg_type)
  611. {
  612. enum xp_retval ret;
  613. ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
  614. if (unlikely(ret != xpSuccess))
  615. XPC_DEACTIVATE_PARTITION(part, ret);
  616. }
  617. static void
  618. xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
  619. void *msg, size_t msg_size, int msg_type)
  620. {
  621. struct xpc_partition *part = &xpc_partitions[ch->partid];
  622. enum xp_retval ret;
  623. ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
  624. if (unlikely(ret != xpSuccess)) {
  625. if (irq_flags != NULL)
  626. spin_unlock_irqrestore(&ch->lock, *irq_flags);
  627. XPC_DEACTIVATE_PARTITION(part, ret);
  628. if (irq_flags != NULL)
  629. spin_lock_irqsave(&ch->lock, *irq_flags);
  630. }
  631. }
  632. static void
  633. xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
  634. {
  635. unsigned long irq_flags;
  636. struct xpc_partition_uv *part_uv = &part->sn.uv;
  637. /*
  638. * !!! Make our side think that the remote partition sent an activate
  639. * !!! mq message our way by doing what the activate IRQ handler would
  640. * !!! do had one really been sent.
  641. */
  642. spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  643. if (part_uv->act_state_req == 0)
  644. xpc_activate_IRQ_rcvd++;
  645. part_uv->act_state_req = act_state_req;
  646. spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  647. wake_up_interruptible(&xpc_activate_IRQ_wq);
  648. }
  649. static enum xp_retval
  650. xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
  651. size_t *len)
  652. {
  653. s64 status;
  654. enum xp_retval ret;
  655. #if defined CONFIG_X86_64
  656. status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
  657. (u64 *)len);
  658. if (status == BIOS_STATUS_SUCCESS)
  659. ret = xpSuccess;
  660. else if (status == BIOS_STATUS_MORE_PASSES)
  661. ret = xpNeedMoreInfo;
  662. else
  663. ret = xpBiosError;
  664. #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
  665. status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
  666. if (status == SALRET_OK)
  667. ret = xpSuccess;
  668. else if (status == SALRET_MORE_PASSES)
  669. ret = xpNeedMoreInfo;
  670. else
  671. ret = xpSalError;
  672. #else
  673. #error not a supported configuration
  674. #endif
  675. return ret;
  676. }
  677. static int
  678. xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
  679. {
  680. xpc_heartbeat_uv =
  681. &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
  682. rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
  683. rp->sn.uv.activate_gru_mq_desc_gpa =
  684. uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
  685. return 0;
  686. }
  687. static void
  688. xpc_allow_hb_uv(short partid)
  689. {
  690. }
  691. static void
  692. xpc_disallow_hb_uv(short partid)
  693. {
  694. }
  695. static void
  696. xpc_disallow_all_hbs_uv(void)
  697. {
  698. }
  699. static void
  700. xpc_increment_heartbeat_uv(void)
  701. {
  702. xpc_heartbeat_uv->value++;
  703. }
  704. static void
  705. xpc_offline_heartbeat_uv(void)
  706. {
  707. xpc_increment_heartbeat_uv();
  708. xpc_heartbeat_uv->offline = 1;
  709. }
  710. static void
  711. xpc_online_heartbeat_uv(void)
  712. {
  713. xpc_increment_heartbeat_uv();
  714. xpc_heartbeat_uv->offline = 0;
  715. }
  716. static void
  717. xpc_heartbeat_init_uv(void)
  718. {
  719. xpc_heartbeat_uv->value = 1;
  720. xpc_heartbeat_uv->offline = 0;
  721. }
  722. static void
  723. xpc_heartbeat_exit_uv(void)
  724. {
  725. xpc_offline_heartbeat_uv();
  726. }
  727. static enum xp_retval
  728. xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
  729. {
  730. struct xpc_partition_uv *part_uv = &part->sn.uv;
  731. enum xp_retval ret;
  732. ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
  733. part_uv->heartbeat_gpa,
  734. sizeof(struct xpc_heartbeat_uv));
  735. if (ret != xpSuccess)
  736. return ret;
  737. if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
  738. !part_uv->cached_heartbeat.offline) {
  739. ret = xpNoHeartbeat;
  740. } else {
  741. part->last_heartbeat = part_uv->cached_heartbeat.value;
  742. }
  743. return ret;
  744. }
  745. static void
  746. xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
  747. unsigned long remote_rp_gpa, int nasid)
  748. {
  749. short partid = remote_rp->SAL_partid;
  750. struct xpc_partition *part = &xpc_partitions[partid];
  751. struct xpc_activate_mq_msg_activate_req_uv msg;
  752. part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
  753. part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
  754. part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
  755. part->sn.uv.activate_gru_mq_desc_gpa =
  756. remote_rp->sn.uv.activate_gru_mq_desc_gpa;
  757. /*
  758. * ??? Is it a good idea to make this conditional on what is
  759. * ??? potentially stale state information?
  760. */
  761. if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
  762. msg.rp_gpa = uv_gpa(xpc_rsvd_page);
  763. msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
  764. msg.activate_gru_mq_desc_gpa =
  765. xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
  766. xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
  767. XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
  768. }
  769. if (part->act_state == XPC_P_AS_INACTIVE)
  770. xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
  771. }
  772. static void
  773. xpc_request_partition_reactivation_uv(struct xpc_partition *part)
  774. {
  775. xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
  776. }
  777. static void
  778. xpc_request_partition_deactivation_uv(struct xpc_partition *part)
  779. {
  780. struct xpc_activate_mq_msg_deactivate_req_uv msg;
  781. /*
  782. * ??? Is it a good idea to make this conditional on what is
  783. * ??? potentially stale state information?
  784. */
  785. if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
  786. part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {
  787. msg.reason = part->reason;
  788. xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
  789. XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
  790. }
  791. }
  792. static void
  793. xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
  794. {
  795. /* nothing needs to be done */
  796. return;
  797. }
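/*
 * Simple spinlock-protected singly-linked FIFO, used for the per-channel
 * free send msg_slot list and the list of received-but-undelivered messages.
 */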
  798. static void
  799. xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
  800. {
  801. head->first = NULL;
  802. head->last = NULL;
  803. spin_lock_init(&head->lock);
  804. head->n_entries = 0;
  805. }
  806. static void *
  807. xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
  808. {
  809. unsigned long irq_flags;
  810. struct xpc_fifo_entry_uv *first;
  811. spin_lock_irqsave(&head->lock, irq_flags);
  812. first = head->first;
  813. if (head->first != NULL) {
  814. head->first = first->next;
  815. if (head->first == NULL)
  816. head->last = NULL;
  817. head->n_entries--;
  818. BUG_ON(head->n_entries < 0);
  819. first->next = NULL;
  820. }
  821. spin_unlock_irqrestore(&head->lock, irq_flags);
  822. return first;
  823. }
  824. static void
  825. xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
  826. struct xpc_fifo_entry_uv *last)
  827. {
  828. unsigned long irq_flags;
  829. last->next = NULL;
  830. spin_lock_irqsave(&head->lock, irq_flags);
  831. if (head->last != NULL)
  832. head->last->next = last;
  833. else
  834. head->first = last;
  835. head->last = last;
  836. head->n_entries++;
  837. spin_unlock_irqrestore(&head->lock, irq_flags);
  838. }
  839. static int
  840. xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
  841. {
  842. return head->n_entries;
  843. }
  844. /*
845. * Set up the channel structures that are UV specific.
  846. */
  847. static enum xp_retval
  848. xpc_setup_ch_structures_uv(struct xpc_partition *part)
  849. {
  850. struct xpc_channel_uv *ch_uv;
  851. int ch_number;
  852. for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
  853. ch_uv = &part->channels[ch_number].sn.uv;
  854. xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
  855. xpc_init_fifo_uv(&ch_uv->recv_msg_list);
  856. }
  857. return xpSuccess;
  858. }
  859. /*
860. * Tear down the channel structures that are UV specific.
  861. */
  862. static void
  863. xpc_teardown_ch_structures_uv(struct xpc_partition *part)
  864. {
  865. /* nothing needs to be done */
  866. return;
  867. }
  868. static enum xp_retval
  869. xpc_make_first_contact_uv(struct xpc_partition *part)
  870. {
  871. struct xpc_activate_mq_msg_uv msg;
  872. /*
  873. * We send a sync msg to get the remote partition's remote_act_state
  874. * updated to our current act_state which at this point should
  875. * be XPC_P_AS_ACTIVATING.
  876. */
  877. xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
  878. XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
  879. while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
  880. (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {
  881. dev_dbg(xpc_part, "waiting to make first contact with "
  882. "partition %d\n", XPC_PARTID(part));
  883. /* wait a 1/4 of a second or so */
  884. (void)msleep_interruptible(250);
  885. if (part->act_state == XPC_P_AS_DEACTIVATING)
  886. return part->reason;
  887. }
  888. return xpSuccess;
  889. }
  890. static u64
  891. xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
  892. {
  893. unsigned long irq_flags;
  894. union xpc_channel_ctl_flags chctl;
  895. spin_lock_irqsave(&part->chctl_lock, irq_flags);
  896. chctl = part->chctl;
  897. if (chctl.all_flags != 0)
  898. part->chctl.all_flags = 0;
  899. spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
  900. return chctl.all_flags;
  901. }
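/*
 * Allocate the array of send msg_slots for a channel, retrying with
 * progressively fewer entries if memory is tight and shrinking
 * ch->local_nentries to match whatever was actually allocated.
 */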
  902. static enum xp_retval
  903. xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
  904. {
  905. struct xpc_channel_uv *ch_uv = &ch->sn.uv;
  906. struct xpc_send_msg_slot_uv *msg_slot;
  907. unsigned long irq_flags;
  908. int nentries;
  909. int entry;
  910. size_t nbytes;
  911. for (nentries = ch->local_nentries; nentries > 0; nentries--) {
  912. nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
  913. ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
  914. if (ch_uv->send_msg_slots == NULL)
  915. continue;
  916. for (entry = 0; entry < nentries; entry++) {
  917. msg_slot = &ch_uv->send_msg_slots[entry];
  918. msg_slot->msg_slot_number = entry;
  919. xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
  920. &msg_slot->next);
  921. }
  922. spin_lock_irqsave(&ch->lock, irq_flags);
  923. if (nentries < ch->local_nentries)
  924. ch->local_nentries = nentries;
  925. spin_unlock_irqrestore(&ch->lock, irq_flags);
  926. return xpSuccess;
  927. }
  928. return xpNoMemory;
  929. }
  930. static enum xp_retval
  931. xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
  932. {
  933. struct xpc_channel_uv *ch_uv = &ch->sn.uv;
  934. struct xpc_notify_mq_msg_uv *msg_slot;
  935. unsigned long irq_flags;
  936. int nentries;
  937. int entry;
  938. size_t nbytes;
  939. for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
  940. nbytes = nentries * ch->entry_size;
  941. ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
  942. if (ch_uv->recv_msg_slots == NULL)
  943. continue;
  944. for (entry = 0; entry < nentries; entry++) {
  945. msg_slot = ch_uv->recv_msg_slots +
  946. entry * ch->entry_size;
  947. msg_slot->hdr.msg_slot_number = entry;
  948. }
  949. spin_lock_irqsave(&ch->lock, irq_flags);
  950. if (nentries < ch->remote_nentries)
  951. ch->remote_nentries = nentries;
  952. spin_unlock_irqrestore(&ch->lock, irq_flags);
  953. return xpSuccess;
  954. }
  955. return xpNoMemory;
  956. }
  957. /*
  958. * Allocate msg_slots associated with the channel.
  959. */
  960. static enum xp_retval
  961. xpc_setup_msg_structures_uv(struct xpc_channel *ch)
  962. {
963. enum xp_retval ret;
  964. struct xpc_channel_uv *ch_uv = &ch->sn.uv;
  965. DBUG_ON(ch->flags & XPC_C_SETUP);
  966. ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
  967. gru_message_queue_desc),
  968. GFP_KERNEL);
  969. if (ch_uv->cached_notify_gru_mq_desc == NULL)
  970. return xpNoMemory;
  971. ret = xpc_allocate_send_msg_slot_uv(ch);
  972. if (ret == xpSuccess) {
  973. ret = xpc_allocate_recv_msg_slot_uv(ch);
  974. if (ret != xpSuccess) {
  975. kfree(ch_uv->send_msg_slots);
  976. xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
  977. }
  978. }
  979. return ret;
  980. }
  981. /*
982. * Free up msg_slots and clear other stuff that was set up for the specified
  983. * channel.
  984. */
  985. static void
  986. xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
  987. {
  988. struct xpc_channel_uv *ch_uv = &ch->sn.uv;
  989. DBUG_ON(!spin_is_locked(&ch->lock));
  990. kfree(ch_uv->cached_notify_gru_mq_desc);
  991. ch_uv->cached_notify_gru_mq_desc = NULL;
  992. if (ch->flags & XPC_C_SETUP) {
  993. xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
  994. kfree(ch_uv->send_msg_slots);
  995. xpc_init_fifo_uv(&ch_uv->recv_msg_list);
  996. kfree(ch_uv->recv_msg_slots);
  997. }
  998. }
  999. static void
  1000. xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
  1001. {
  1002. struct xpc_activate_mq_msg_chctl_closerequest_uv msg;
  1003. msg.ch_number = ch->number;
  1004. msg.reason = ch->reason;
  1005. xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
  1006. XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
  1007. }
  1008. static void
  1009. xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
  1010. {
  1011. struct xpc_activate_mq_msg_chctl_closereply_uv msg;
  1012. msg.ch_number = ch->number;
  1013. xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
  1014. XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
  1015. }
  1016. static void
  1017. xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
  1018. {
  1019. struct xpc_activate_mq_msg_chctl_openrequest_uv msg;
  1020. msg.ch_number = ch->number;
  1021. msg.entry_size = ch->entry_size;
  1022. msg.local_nentries = ch->local_nentries;
  1023. xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
  1024. XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
  1025. }
  1026. static void
  1027. xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
  1028. {
  1029. struct xpc_activate_mq_msg_chctl_openreply_uv msg;
  1030. msg.ch_number = ch->number;
  1031. msg.local_nentries = ch->local_nentries;
  1032. msg.remote_nentries = ch->remote_nentries;
  1033. msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
  1034. xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
  1035. XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
  1036. }
  1037. static void
  1038. xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
  1039. {
  1040. struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;
  1041. msg.ch_number = ch->number;
  1042. xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
  1043. XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
  1044. }
  1045. static void
  1046. xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
  1047. {
  1048. unsigned long irq_flags;
  1049. spin_lock_irqsave(&part->chctl_lock, irq_flags);
  1050. part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
  1051. spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
  1052. xpc_wakeup_channel_mgr(part);
  1053. }
  1054. static enum xp_retval
  1055. xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
  1056. unsigned long gru_mq_desc_gpa)
  1057. {
  1058. struct xpc_channel_uv *ch_uv = &ch->sn.uv;
  1059. DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
  1060. return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
  1061. gru_mq_desc_gpa);
  1062. }
  1063. static void
  1064. xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
  1065. {
  1066. struct xpc_activate_mq_msg_uv msg;
  1067. xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
  1068. XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
  1069. }
  1070. static void
  1071. xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
  1072. {
  1073. struct xpc_activate_mq_msg_uv msg;
  1074. xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
  1075. XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
  1076. }
  1077. static void
  1078. xpc_assume_partition_disengaged_uv(short partid)
  1079. {
  1080. struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
  1081. unsigned long irq_flags;
  1082. spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
  1083. part_uv->flags &= ~XPC_P_ENGAGED_UV;
  1084. spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
  1085. }
  1086. static int
  1087. xpc_partition_engaged_uv(short partid)
  1088. {
  1089. return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
  1090. }
  1091. static int
  1092. xpc_any_partition_engaged_uv(void)
  1093. {
  1094. struct xpc_partition_uv *part_uv;
  1095. short partid;
  1096. for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
  1097. part_uv = &xpc_partitions[partid].sn.uv;
  1098. if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
  1099. return 1;
  1100. }
  1101. return 0;
  1102. }
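/*
 * Reserve a free send msg_slot, either failing immediately with xpNoWait
 * when XPC_NOWAIT is set or sleeping in xpc_allocate_msg_wait() until one
 * is freed.
 */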
  1103. static enum xp_retval
  1104. xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
  1105. struct xpc_send_msg_slot_uv **address_of_msg_slot)
  1106. {
  1107. enum xp_retval ret;
  1108. struct xpc_send_msg_slot_uv *msg_slot;
  1109. struct xpc_fifo_entry_uv *entry;
  1110. while (1) {
  1111. entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
  1112. if (entry != NULL)
  1113. break;
  1114. if (flags & XPC_NOWAIT)
  1115. return xpNoWait;
  1116. ret = xpc_allocate_msg_wait(ch);
  1117. if (ret != xpInterrupted && ret != xpTimeout)
  1118. return ret;
  1119. }
  1120. msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
  1121. *address_of_msg_slot = msg_slot;
  1122. return xpSuccess;
  1123. }
  1124. static void
  1125. xpc_free_msg_slot_uv(struct xpc_channel *ch,
  1126. struct xpc_send_msg_slot_uv *msg_slot)
  1127. {
  1128. xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);
  1129. /* wakeup anyone waiting for a free msg slot */
  1130. if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
  1131. wake_up(&ch->msg_allocate_wq);
  1132. }
  1133. static void
  1134. xpc_notify_sender_uv(struct xpc_channel *ch,
  1135. struct xpc_send_msg_slot_uv *msg_slot,
  1136. enum xp_retval reason)
  1137. {
  1138. xpc_notify_func func = msg_slot->func;
  1139. if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {
  1140. atomic_dec(&ch->n_to_notify);
  1141. dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
  1142. "msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
  1143. msg_slot->msg_slot_number, ch->partid, ch->number);
  1144. func(reason, ch->partid, ch->number, msg_slot->key);
  1145. dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
  1146. "msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
  1147. msg_slot->msg_slot_number, ch->partid, ch->number);
  1148. }
  1149. }
  1150. static void
  1151. xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
  1152. struct xpc_notify_mq_msg_uv *msg)
  1153. {
  1154. struct xpc_send_msg_slot_uv *msg_slot;
  1155. int entry = msg->hdr.msg_slot_number % ch->local_nentries;
  1156. msg_slot = &ch->sn.uv.send_msg_slots[entry];
  1157. BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
  1158. msg_slot->msg_slot_number += ch->local_nentries;
  1159. if (msg_slot->func != NULL)
  1160. xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);
  1161. xpc_free_msg_slot_uv(ch, msg_slot);
  1162. }
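/*
 * Handle one message arriving on the notify message queue.  A header size
 * of zero is an ACK for a message we sent; anything else is a payload,
 * which is copied into the channel's recv_msg_slots and queued for delivery
 * by an idle kthread or the channel manager.
 */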
  1163. static void
  1164. xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
  1165. struct xpc_notify_mq_msg_uv *msg)
  1166. {
  1167. struct xpc_partition_uv *part_uv = &part->sn.uv;
  1168. struct xpc_channel *ch;
  1169. struct xpc_channel_uv *ch_uv;
  1170. struct xpc_notify_mq_msg_uv *msg_slot;
  1171. unsigned long irq_flags;
  1172. int ch_number = msg->hdr.ch_number;
  1173. if (unlikely(ch_number >= part->nchannels)) {
  1174. dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
  1175. "channel number=0x%x in message from partid=%d\n",
  1176. ch_number, XPC_PARTID(part));
  1177. /* get hb checker to deactivate from the remote partition */
  1178. spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  1179. if (part_uv->act_state_req == 0)
  1180. xpc_activate_IRQ_rcvd++;
  1181. part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
  1182. part_uv->reason = xpBadChannelNumber;
  1183. spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
  1184. wake_up_interruptible(&xpc_activate_IRQ_wq);
  1185. return;
  1186. }
  1187. ch = &part->channels[ch_number];
  1188. xpc_msgqueue_ref(ch);
  1189. if (!(ch->flags & XPC_C_CONNECTED)) {
  1190. xpc_msgqueue_deref(ch);
  1191. return;
  1192. }
  1193. /* see if we're really dealing with an ACK for a previously sent msg */
  1194. if (msg->hdr.size == 0) {
  1195. xpc_handle_notify_mq_ack_uv(ch, msg);
  1196. xpc_msgqueue_deref(ch);
  1197. return;
  1198. }
  1199. /* we're dealing with a normal message sent via the notify_mq */
  1200. ch_uv = &ch->sn.uv;
  1201. msg_slot = ch_uv->recv_msg_slots +
  1202. (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
  1203. BUG_ON(msg_slot->hdr.size != 0);
  1204. memcpy(msg_slot, msg, msg->hdr.size);
  1205. xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);
  1206. if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
  1207. /*
1208. * If there is an existing idle kthread, get it to deliver
1209. * the payload; otherwise we'll have to get the channel mgr
1210. * for this partition to create a kthread to do the delivery.
  1211. */
  1212. if (atomic_read(&ch->kthreads_idle) > 0)
  1213. wake_up_nr(&ch->idle_wq, 1);
  1214. else
  1215. xpc_send_chctl_local_msgrequest_uv(part, ch->number);
  1216. }
  1217. xpc_msgqueue_deref(ch);
  1218. }
  1219. static irqreturn_t
  1220. xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
  1221. {
  1222. struct xpc_notify_mq_msg_uv *msg;
  1223. short partid;
  1224. struct xpc_partition *part;
  1225. while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
  1226. NULL) {
  1227. partid = msg->hdr.partid;
  1228. if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
  1229. dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
  1230. "invalid partid=0x%x in message\n", partid);
  1231. } else {
  1232. part = &xpc_partitions[partid];
  1233. if (xpc_part_ref(part)) {
  1234. xpc_handle_notify_mq_msg_uv(part, msg);
  1235. xpc_part_deref(part);
  1236. }
  1237. }
  1238. gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
  1239. }
  1240. return IRQ_HANDLED;
  1241. }
  1242. static int
  1243. xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
  1244. {
  1245. return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
  1246. }
  1247. static void
  1248. xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
  1249. {
  1250. struct xpc_channel *ch = &part->channels[ch_number];
  1251. int ndeliverable_payloads;
  1252. xpc_msgqueue_ref(ch);
  1253. ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);
  1254. if (ndeliverable_payloads > 0 &&
  1255. (ch->flags & XPC_C_CONNECTED) &&
  1256. (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {
  1257. xpc_activate_kthreads(ch, ndeliverable_payloads);
  1258. }
  1259. xpc_msgqueue_deref(ch);
  1260. }
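/*
 * Send a payload to the remote partition over the channel's notify message
 * queue: reserve a send msg_slot, optionally record the caller's notify
 * func/key, copy the payload into an on-stack message buffer and hand it
 * to xpc_send_gru_msg().
 */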
  1261. static enum xp_retval
  1262. xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
  1263. u16 payload_size, u8 notify_type, xpc_notify_func func,
  1264. void *key)
  1265. {
  1266. enum xp_retval ret = xpSuccess;
  1267. struct xpc_send_msg_slot_uv *msg_slot = NULL;
  1268. struct xpc_notify_mq_msg_uv *msg;
  1269. u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
  1270. size_t msg_size;
  1271. DBUG_ON(notify_type != XPC_N_CALL);
  1272. msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
  1273. if (msg_size > ch->entry_size)
  1274. return xpPayloadTooBig;
  1275. xpc_msgqueue_ref(ch);
  1276. if (ch->flags & XPC_C_DISCONNECTING) {
  1277. ret = ch->reason;
  1278. goto out_1;
  1279. }
  1280. if (!(ch->flags & XPC_C_CONNECTED)) {
  1281. ret = xpNotConnected;
  1282. goto out_1;
  1283. }
  1284. ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
  1285. if (ret != xpSuccess)
  1286. goto out_1;
  1287. if (func != NULL) {
  1288. atomic_inc(&ch->n_to_notify);
  1289. msg_slot->key = key;
  1290. smp_wmb(); /* a non-NULL func must hit memory after the key */
  1291. msg_slot->func = func;
  1292. if (ch->flags & XPC_C_DISCONNECTING) {
  1293. ret = ch->reason;
  1294. goto out_2;
  1295. }
  1296. }
  1297. msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
  1298. msg->hdr.partid = xp_partition_id;
  1299. msg->hdr.ch_number = ch->number;
  1300. msg->hdr.size = msg_size;
  1301. msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
  1302. memcpy(&msg->payload, payload, payload_size);
  1303. ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
  1304. msg_size);
  1305. if (ret == xpSuccess)
  1306. goto out_1;
  1307. XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
  1308. out_2:
  1309. if (func != NULL) {
  1310. /*
  1311. * Try to NULL the msg_slot's func field. If we fail, then
  1312. * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
  1313. * case we need to pretend we succeeded to send the message
  1314. * since the user will get a callout for the disconnect error
  1315. * by xpc_notify_senders_of_disconnect_uv(), and to also get an
  1316. * error returned here will confuse them. Additionally, since
  1317. * in this case the channel is being disconnected we don't need
  1318. * to put the the msg_slot back on the free list.
  1319. */
  1320. if (cmpxchg(&msg_slot->func, func, NULL) != func) {
  1321. ret = xpSuccess;
  1322. goto out_1;
  1323. }
  1324. msg_slot->key = NULL;
  1325. atomic_dec(&ch->n_to_notify);
  1326. }
  1327. xpc_free_msg_slot_uv(ch, msg_slot);
  1328. out_1:
  1329. xpc_msgqueue_deref(ch);
  1330. return ret;
  1331. }
  1332. /*
  1333. * Tell the callers of xpc_send_notify() that the status of their payloads
  1334. * is unknown because the channel is now disconnecting.
  1335. *
  1336. * We don't worry about putting these msg_slots on the free list since the
  1337. * msg_slots themselves are about to be kfree'd.
  1338. */
  1339. static void
  1340. xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
  1341. {
  1342. struct xpc_send_msg_slot_uv *msg_slot;
  1343. int entry;
  1344. DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
  1345. for (entry = 0; entry < ch->local_nentries; entry++) {
  1346. if (atomic_read(&ch->n_to_notify) == 0)
  1347. break;
  1348. msg_slot = &ch->sn.uv.send_msg_slots[entry];
  1349. if (msg_slot->func != NULL)
  1350. xpc_notify_sender_uv(ch, msg_slot, ch->reason);
  1351. }
  1352. }
  1353. /*
  1354. * Get the next deliverable message's payload.
  1355. */
  1356. static void *
  1357. xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
  1358. {
  1359. struct xpc_fifo_entry_uv *entry;
  1360. struct xpc_notify_mq_msg_uv *msg;
  1361. void *payload = NULL;
  1362. if (!(ch->flags & XPC_C_DISCONNECTING)) {
  1363. entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
  1364. if (entry != NULL) {
  1365. msg = container_of(entry, struct xpc_notify_mq_msg_uv,
  1366. hdr.u.next);
  1367. payload = &msg->payload;
  1368. }
  1369. }
  1370. return payload;
  1371. }
  1372. static void
  1373. xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
  1374. {
  1375. struct xpc_notify_mq_msg_uv *msg;
  1376. enum xp_retval ret;
  1377. msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);
  1378. /* return an ACK to the sender of this message */
  1379. msg->hdr.partid = xp_partition_id;
  1380. msg->hdr.size = 0; /* size of zero indicates this is an ACK */
  1381. ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
  1382. sizeof(struct xpc_notify_mq_msghdr_uv));
  1383. if (ret != xpSuccess)
  1384. XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
  1385. }
  1386. static struct xpc_arch_operations xpc_arch_ops_uv = {
  1387. .setup_partitions = xpc_setup_partitions_uv,
  1388. .teardown_partitions = xpc_teardown_partitions_uv,
  1389. .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
  1390. .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
  1391. .setup_rsvd_page = xpc_setup_rsvd_page_uv,
  1392. .allow_hb = xpc_allow_hb_uv,
  1393. .disallow_hb = xpc_disallow_hb_uv,
  1394. .disallow_all_hbs = xpc_disallow_all_hbs_uv,
  1395. .increment_heartbeat = xpc_increment_heartbeat_uv,
  1396. .offline_heartbeat = xpc_offline_heartbeat_uv,
  1397. .online_heartbeat = xpc_online_heartbeat_uv,
  1398. .heartbeat_init = xpc_heartbeat_init_uv,
  1399. .heartbeat_exit = xpc_heartbeat_exit_uv,
  1400. .get_remote_heartbeat = xpc_get_remote_heartbeat_uv,
  1401. .request_partition_activation =
  1402. xpc_request_partition_activation_uv,
  1403. .request_partition_reactivation =
  1404. xpc_request_partition_reactivation_uv,
  1405. .request_partition_deactivation =
  1406. xpc_request_partition_deactivation_uv,
  1407. .cancel_partition_deactivation_request =
  1408. xpc_cancel_partition_deactivation_request_uv,
  1409. .setup_ch_structures = xpc_setup_ch_structures_uv,
  1410. .teardown_ch_structures = xpc_teardown_ch_structures_uv,
  1411. .make_first_contact = xpc_make_first_contact_uv,
  1412. .get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
  1413. .send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
  1414. .send_chctl_closereply = xpc_send_chctl_closereply_uv,
  1415. .send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
  1416. .send_chctl_openreply = xpc_send_chctl_openreply_uv,
  1417. .send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
  1418. .process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,
  1419. .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,
  1420. .setup_msg_structures = xpc_setup_msg_structures_uv,
  1421. .teardown_msg_structures = xpc_teardown_msg_structures_uv,
  1422. .indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
  1423. .indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
  1424. .assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
  1425. .partition_engaged = xpc_partition_engaged_uv,
  1426. .any_partition_engaged = xpc_any_partition_engaged_uv,
  1427. .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
  1428. .send_payload = xpc_send_payload_uv,
  1429. .get_deliverable_payload = xpc_get_deliverable_payload_uv,
  1430. .received_payload = xpc_received_payload_uv,
  1431. .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
  1432. };
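/*
 * Create the activate and notify message queues on the given node, trying
 * each of the node's cpus in turn until creation succeeds.
 */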
  1433. static int
  1434. xpc_init_mq_node(int nid)
  1435. {
  1436. int cpu;
  1437. get_online_cpus();
  1438. for_each_cpu(cpu, cpumask_of_node(nid)) {
  1439. xpc_activate_mq_uv =
1440. xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, cpu,
  1441. XPC_ACTIVATE_IRQ_NAME,
  1442. xpc_handle_activate_IRQ_uv);
  1443. if (!IS_ERR(xpc_activate_mq_uv))
  1444. break;
  1445. }
  1446. if (IS_ERR(xpc_activate_mq_uv)) {
  1447. put_online_cpus();
  1448. return PTR_ERR(xpc_activate_mq_uv);
  1449. }
  1450. for_each_cpu(cpu, cpumask_of_node(nid)) {
  1451. xpc_notify_mq_uv =
1452. xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, cpu,
  1453. XPC_NOTIFY_IRQ_NAME,
  1454. xpc_handle_notify_IRQ_uv);
  1455. if (!IS_ERR(xpc_notify_mq_uv))
  1456. break;
  1457. }
  1458. if (IS_ERR(xpc_notify_mq_uv)) {
  1459. xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
  1460. put_online_cpus();
  1461. return PTR_ERR(xpc_notify_mq_uv);
  1462. }
  1463. put_online_cpus();
  1464. return 0;
  1465. }
  1466. int
  1467. xpc_init_uv(void)
  1468. {
  1469. int nid;
  1470. int ret = 0;
  1471. xpc_arch_ops = xpc_arch_ops_uv;
  1472. if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
  1473. dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
  1474. XPC_MSG_HDR_MAX_SIZE);
  1475. return -E2BIG;
  1476. }
  1477. if (xpc_mq_node < 0)
  1478. for_each_online_node(nid) {
  1479. ret = xpc_init_mq_node(nid);
  1480. if (!ret)
  1481. break;
  1482. }
  1483. else
  1484. ret = xpc_init_mq_node(xpc_mq_node);
  1485. if (ret < 0)
  1486. dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
  1487. -ret);
  1488. return ret;
  1489. }
  1490. void
  1491. xpc_exit_uv(void)
  1492. {
  1493. xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
  1494. xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
  1495. }
  1496. module_param(xpc_mq_node, int, 0);
  1497. MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");