/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>
#include <linux/cpu_rmap.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD)		    | \
			       (1ull << MLX4_EVENT_TYPE_OP_REQUIRED)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)	    | \
			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT)	    | \
			       (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))

static u64 get_async_ev_mask(struct mlx4_dev *dev)
{
	u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);

	return async_ev_mask;
}
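
/*
 * Ring the EQ doorbell: publish the low 24 bits of the consumer index,
 * and when req_not is set, bit 31 asks the HCA to generate an interrupt
 * for the next event that arrives on this EQ.
 */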
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor,
				u8 eqe_size)
{
	/* (entry & (eq->nent - 1)) gives us a cyclic array */
	unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;

	/* CX3 is capable of extending the EQE from 32 to 64 bytes with
	 * strides of 64B,128B and 256B.
	 * When 64B EQE is used, the first (in the lower addresses)
	 * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes
	 * contain the legacy EQE information.
	 * In all other cases, the first 32B contains the legacy EQE info.
	 */
	return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}
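
/*
 * An EQE belongs to software when its owner bit matches the wrap parity
 * of the consumer index: the expected bit flips each time the queue
 * wraps, so a stale entry from the previous pass is reported as NULL.
 */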
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
	struct mlx4_eqe *eqe =
		&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
	return (!!(eqe->owner & 0x80) ^
		!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
		eqe : NULL;
}
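
/*
 * Work handler (master only): drain the software slave event queue and
 * re-generate each queued EQE towards the slave(s) it targets via the
 * GEN_EQE firmware command.
 */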
void mlx4_gen_slave_eqe(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
	struct mlx4_eqe *eqe;
	u8 slave;
	int i, phys_port, slave_port;

	for (eqe = next_slave_event_eqe(slave_eq); eqe;
	     eqe = next_slave_event_eqe(slave_eq)) {
		slave = eqe->slave_id;

		if (eqe->type == MLX4_EVENT_TYPE_PORT_CHANGE &&
		    eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN &&
		    mlx4_is_bonded(dev)) {
			struct mlx4_port_cap port_cap;

			if (!mlx4_QUERY_PORT(dev, 1, &port_cap) && port_cap.link_state)
				goto consume;

			if (!mlx4_QUERY_PORT(dev, 2, &port_cap) && port_cap.link_state)
				goto consume;
		}
		/* All active slaves need to receive the event */
		if (slave == ALL_SLAVES) {
			for (i = 0; i <= dev->persist->num_vfs; i++) {
				phys_port = 0;
				if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT &&
				    eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) {
					phys_port = eqe->event.port_mgmt_change.port;
					slave_port = mlx4_phys_to_slave_port(dev, i, phys_port);
					if (slave_port < 0) /* VF doesn't have this port */
						continue;
					eqe->event.port_mgmt_change.port = slave_port;
				}
				if (mlx4_GEN_EQE(dev, i, eqe))
					mlx4_warn(dev, "Failed to generate event for slave %d\n",
						  i);
				if (phys_port)
					eqe->event.port_mgmt_change.port = phys_port;
			}
		} else {
			if (mlx4_GEN_EQE(dev, slave, eqe))
				mlx4_warn(dev, "Failed to generate event for slave %d\n",
					  slave);
		}
consume:
		++slave_eq->cons;
	}
}
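
/*
 * Producer side of the software slave event queue: copy the EQE into the
 * next free slot, hand ownership to the consumer, and kick the
 * slave_event_work handler above.
 */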
static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
	struct mlx4_eqe *s_eqe;
	unsigned long flags;

	spin_lock_irqsave(&slave_eq->event_lock, flags);
	s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
	if ((!!(s_eqe->owner & 0x80)) ^
	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
			  slave);
		spin_unlock_irqrestore(&slave_eq->event_lock, flags);
		return;
	}
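
	/* Copy everything but the trailing owner byte; that byte is written
	 * last, after the barrier below, to publish the entry.
	 */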
	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
	s_eqe->slave_id = slave;
	/* ensure all information is written before setting the ownership bit */
	dma_wmb();
	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
	++slave_eq->prod;

	queue_work(priv->mfunc.master.comm_wq,
		   &priv->mfunc.master.slave_event_work);
	spin_unlock_irqrestore(&slave_eq->event_lock, flags);
}

static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
			     struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave < 0 || slave > dev->persist->num_vfs ||
	    slave == dev->caps.function ||
	    !priv->mfunc.master.slave_state[slave].active)
		return;

	slave_event(dev, slave, eqe);
}

#if defined(CONFIG_SMP)
static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
{
	int hint_err;
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_eq *eq = &priv->eq_table.eq[vec];

	if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
		return;

	hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
	if (hint_err)
		mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
}
#endif

int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];

	if (!s_slave->active)
		return 0;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
	eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_pkey_eqe);

int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	/* don't send if we don't have that slave */
	if (dev->persist->num_vfs < slave)
		return 0;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
	eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);

int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
				   u8 port_subtype_change)
{
	struct mlx4_eqe eqe;
	u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port);

	/* don't send if we don't have that slave */
	if (dev->persist->num_vfs < slave)
		return 0;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
	eqe.subtype = port_subtype_change;
	eqe.event.port_change.port = cpu_to_be32(slave_port << 28);

	mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
		 port_subtype_change, slave, port);
	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);

enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return SLAVE_PORT_DOWN;
	}
	return s_state[slave].port_state[port];
}
EXPORT_SYMBOL(mlx4_get_slave_port_state);

static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
				     enum slave_port_state state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return -1;
	}
	s_state[slave].port_state[port] = state;

	return 0;
}

static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
{
	int i;
	enum slave_port_gen_event gen_event;
	struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
									  port);

	for (i = 0; i < dev->persist->num_vfs + 1; i++)
		if (test_bit(i, slaves_pport.slaves))
			set_and_calc_slave_port_state(dev, i, port,
						      event, &gen_event);
}

/**************************************************************************
	The function gets as input the new event for a port, and according
	to the port's previous state changes the slave's port state.
	The events are:
		MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
		MLX4_PORT_STATE_DEV_EVENT_PORT_UP
		MLX4_PORT_STATE_IB_EVENT_GID_VALID
		MLX4_PORT_STATE_IB_EVENT_GID_INVALID
***************************************************************************/
int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
				  u8 port, int event,
				  enum slave_port_gen_event *gen_event)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *ctx = NULL;
	unsigned long flags;
	int ret = -1;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	enum slave_port_state cur_state =
		mlx4_get_slave_port_state(dev, slave, port);

	*gen_event = SLAVE_PORT_GEN_EVENT_NONE;

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return ret;
	}

	ctx = &priv->mfunc.master.slave_state[slave];
	spin_lock_irqsave(&ctx->lock, flags);

	switch (cur_state) {
	case SLAVE_PORT_DOWN:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
		break;
	case SLAVE_PENDING_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
		else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_UP;
		}
		break;
	case SLAVE_PORT_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		} else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
				event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		}
		break;
	default:
		pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
		       __func__, slave, port);
		goto out;
	}
	ret = mlx4_get_slave_port_state(dev, slave, port);

out:
	spin_unlock_irqrestore(&ctx->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_and_calc_slave_port_state);

int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
{
	struct mlx4_eqe eqe;

	memset(&eqe, 0, sizeof eqe);

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
	eqe.event.port_mgmt_change.port = port;
	eqe.event.port_mgmt_change.params.port_info.changed_attr =
		cpu_to_be32((u32) attr);

	slave_event(dev, ALL_SLAVES, &eqe);
	return 0;
}
EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);

void mlx4_master_handle_slave_flr(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_flr_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int i;
	int err;
	unsigned long flags;

	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

	for (i = 0 ; i < dev->num_slaves; i++) {

		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
			mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
				 i);
			/* In case of 'Reset flow' FLR can be generated for
			 * a slave before mlx4_load_one is done.
			 * Make sure the interface is up before trying to delete
			 * slave resources which weren't allocated yet.
			 */
			if (dev->persist->interface_state &
			    MLX4_INTERFACE_STATE_UP)
				mlx4_delete_all_resources_for_slave(dev, i);
			/* return the slave to running mode */
			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
			slave_state[i].is_slave_going_down = 0;
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			/* notify the FW: */
			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (err)
				mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
					  i);
		}
	}
}
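
/*
 * Poll one EQ: consume every software-owned EQE, dispatch it by type
 * (completions, async events, master-only forwarding to slaves), update
 * the consumer index and re-arm the EQ.  Returns nonzero if any EQE was
 * consumed.
 */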
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eqe *eqe;
	int cqn = -1;
	int eqes_found = 0;
	int set_ci = 0;
	int port;
	int slave = 0;
	int ret;
	u32 flr_slave;
	u8 update_slave_state;
	int i;
	enum slave_port_gen_event gen_event;
	unsigned long flags;
	struct mlx4_vport_state *s_info;
	int eqe_size = dev->caps.eqe_size;

	while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the QP */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_QP,
						be32_to_cpu(eqe->event.qp.qpn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
				      0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
				 __func__, be32_to_cpu(eqe->event.srq.srqn),
				 eq->eqn);
			/* fall through: SRQ limit events are forwarded the
			 * same way as SRQ catastrophic errors */
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the SRQ */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_SRQ,
						be32_to_cpu(eqe->event.srq.srqn)
						& 0xffffff,
						&slave);
				if (ret && ret != -ENOENT) {
					mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						  eqe->type, eqe->subtype,
						  eq->eqn, eq->cons_index, ret);
					break;
				}
				if (eqe->type ==
				    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
					mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
						  __func__, slave,
						  be32_to_cpu(eqe->event.srq.srqn),
						  eqe->type, eqe->subtype);

				if (!ret && slave != dev->caps.function) {
					if (eqe->type ==
					    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
						mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
							  __func__, eqe->type,
							  eqe->subtype, slave);
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
				       0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE: {
			struct mlx4_slaves_pport slaves_port;
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			slaves_port = mlx4_phys_to_slaves_pport(dev, port);
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
				if (!mlx4_is_master(dev))
					break;
				for (i = 0; i < dev->persist->num_vfs + 1;
				     i++) {
					int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);

					if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
						continue;
					if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
						if (i == mlx4_master_func_num(dev))
							continue;
						mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
							 __func__, i, port);
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (reported_port << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					} else {  /* IB port */
						set_and_calc_slave_port_state(dev, i, port,
									      MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
									      &gen_event);
						/* we can be in pending state, then do not send port_down event */
						if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
							if (i == mlx4_master_func_num(dev))
								continue;
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (mlx4_phys_to_slave_port(dev, i, port) << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					}
				}
			} else {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);

				mlx4_priv(dev)->sense.do_sense_port[port] = 0;

				if (!mlx4_is_master(dev))
					break;
				if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
					for (i = 0;
					     i < dev->persist->num_vfs + 1;
					     i++) {
						int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);

						if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
							continue;
						if (i == mlx4_master_func_num(dev))
							continue;
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (reported_port << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					}
				else /* IB port */
					/* port-up event will be sent to a slave when the
					 * slave's alias-guid is set. This is done in alias_GUID.c
					 */
					set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
			}
			break;
		}

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			if (mlx4_is_master(dev)) {
				ret = mlx4_get_slave_from_resource_id(dev,
					RES_CQ,
					be32_to_cpu(eqe->event.cq_err.cqn)
					& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_cq_event(dev,
				      be32_to_cpu(eqe->event.cq_err.cqn)
				      & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_OP_REQUIRED:
			atomic_inc(&priv->opreq_count);
			/* FW commands can't be executed from interrupt context;
			 * do the work in a deferred task.
			 */
			queue_work(mlx4_wq, &priv->opreq_task);
			break;

		case MLX4_EVENT_TYPE_COMM_CHANNEL:
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Received comm channel event for non master device\n");
				break;
			}
			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
			       eqe->event.comm_channel_arm.bit_vec,
			       sizeof eqe->event.comm_channel_arm.bit_vec);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.comm_work);
			break;

		case MLX4_EVENT_TYPE_FLR_EVENT:
			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Non-master function received FLR event\n");
				break;
			}

			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

			if (flr_slave >= dev->num_slaves) {
				mlx4_warn(dev,
					  "Got FLR for unknown function: %d\n",
					  flr_slave);
				update_slave_state = 0;
			} else
				update_slave_state = 1;

			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			if (update_slave_state) {
				priv->mfunc.master.slave_state[flr_slave].active = false;
				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
			}
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
					    flr_slave);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.slave_flr_event_work);
			break;

		case MLX4_EVENT_TYPE_FATAL_WARNING:
			if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
				if (mlx4_is_master(dev))
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
							 __func__, i);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
					 be16_to_cpu(eqe->event.warming.warning_threshold),
					 be16_to_cpu(eqe->event.warming.current_temperature));
			} else
				mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
					  eqe->type, eqe->subtype, eq->eqn,
					  eq->cons_index, eqe->owner, eq->nent,
					  eqe->slave_id,
					  !!(eqe->owner & 0x80) ^
					  !!(eq->cons_index & eq->nent) ? "HW" : "SW");

			break;

		case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
					    (unsigned long) eqe);
			break;

		case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
			switch (eqe->subtype) {
			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
				mlx4_warn(dev, "Bad cable detected on port %u\n",
					  eqe->event.bad_cable.port);
				break;
			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
				mlx4_warn(dev, "Unsupported cable detected\n");
				break;
			default:
				mlx4_dbg(dev,
					 "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
					 eqe->type, eqe->subtype, eq->eqn,
					 eq->cons_index, eqe->owner, eq->nent,
					 !!(eqe->owner & 0x80) ^
					 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
				break;
			}
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
				  eqe->type, eqe->subtype, eq->eqn,
				  eq->cons_index, eqe->owner, eq->nent,
				  eqe->slave_id,
				  !!(eqe->owner & 0x80) ^
				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	/* cqn was initialized to -1, so its high bits are set.  If any
	 * completion event arrived, cqn holds a valid 24-bit CQN whose high
	 * bits are clear, and we need to schedule the tasklet.
	 */
	if (!(cqn & ~0xffffff))
		tasklet_schedule(&eq->tasklet_ctx.task);

	return eqes_found;
}

static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq  *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}
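
/*
 * Command wrapper for MAP_EQ: pass the command through to firmware only
 * when the master maps its own EQ; in all cases record, per async event
 * type set in in_param, which of the slave's EQs should receive that
 * event (bit 31 of the in_modifier selects unmap).
 */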
int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq =
		priv->mfunc.master.slave_state[slave].event_eq;
	u32 in_modifier = vhcr->in_modifier;
	u32 eqn = in_modifier & 0x3FF;
	u64 in_param = vhcr->in_param;
	int err = 0;
	int i;

	if (slave == dev->caps.function)
		err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
			       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_NATIVE);
	if (!err)
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
			if (in_param & (1LL << i))
				event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;

	return err;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
			MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num)
{
	return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
		dev->caps.reserved_eqs / 4 + 1;
}
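
/*
 * Return the doorbell address for an EQ, lazily ioremap()ing the UAR page
 * that holds it (each UAR page carries the doorbells of four EQs).
 */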
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(
				pci_resource_start(dev->persist->pdev, 2) +
				((eq->eqn / 4) << (dev->uar_page_shift)),
				(1 << (dev->uar_page_shift)));
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

static void mlx4_unmap_uar(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i]) {
			iounmap(priv->eq_table.uar_map[i]);
			priv->eq_table.uar_map[i] = NULL;
		}
}
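
/*
 * Create one EQ: allocate coherent pages for the entries, reserve an EQ
 * number, map the doorbell, write the MTT entries, then hand the queue to
 * firmware with SW2HW_EQ and set up the per-EQ completion tasklet.
 */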
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
	 * strides of 64B,128B and 256B.
	 */
	npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->
							  pdev->dev,
							  PAGE_SIZE, &t,
							  GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	INIT_LIST_HEAD(&eq->tasklet_ctx.list);
	INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
	spin_lock_init(&eq->tasklet_ctx.lock);
	tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb,
		     (unsigned long)&eq->tasklet_ctx);

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}

static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
	 * strides of 64B,128B and 256B
	 */
	int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;

	err = mlx4_HW2SW_EQ(dev, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	synchronize_irq(eq->irq);
	tasklet_disable(&eq->tasklet_ctx.task);

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	int i;

	if (eq_table->have_irq)
		free_irq(dev->persist->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_cpumask_var(eq_table->eq[i].affinity_mask);
#if defined(CONFIG_SMP)
			irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
#endif
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev,
				 priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}
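
/*
 * Bring up the whole EQ table: map the UAR doorbell pages, allocate the
 * EQ-number bitmap, create the async EQ plus one completion EQ per vector,
 * request the interrupt(s), map the async events to the async EQ and arm it.
 */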
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof *priv->eq_table.uar_map,
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap,
			       roundup_pow_of_two(dev->caps.num_eqs),
			       dev->caps.num_eqs - 1,
			       dev->caps.reserved_eqs,
			       roundup_pow_of_two(dev->caps.num_eqs) -
			       dev->caps.num_eqs);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_map_clr_int(dev);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask =
			swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int  = priv->clr_base +
			(priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names =
		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
			GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_clr_int;
	}

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
		if (i == MLX4_EQ_ASYNC) {
			err = mlx4_create_eq(dev,
					     MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
					     0, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
		} else {
			struct mlx4_eq *eq = &priv->eq_table.eq[i];
#ifdef CONFIG_RFS_ACCEL
			int port = find_first_bit(eq->actv_ports.ports,
						  dev->caps.num_ports) + 1;

			if (port <= dev->caps.num_ports) {
				struct mlx4_port_info *info =
					&mlx4_priv(dev)->port[port];

				if (!info->rmap) {
					info->rmap = alloc_irq_cpu_rmap(
						mlx4_get_eqs_per_port(dev, port));
					if (!info->rmap) {
						mlx4_warn(dev, "Failed to allocate cpu rmap\n");
						err = -ENOMEM;
						goto err_out_unmap;
					}
				}

				err = irq_cpu_rmap_add(
					info->rmap, eq->irq);
				if (err)
					mlx4_warn(dev, "Failed adding irq rmap\n");
			}
#endif
			err = mlx4_create_eq(dev, dev->caps.num_cqs -
						  dev->caps.reserved_cqs +
						  MLX4_NUM_SPARE_EQE,
					     (dev->flags & MLX4_FLAG_MSI_X) ?
					     i + 1 - !!(i > MLX4_EQ_ASYNC) : 0,
					     eq);
		}
		if (err)
			goto err_out_unmap;
	}

	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		snprintf(priv->eq_table.irq_names +
			 MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE,
			 MLX4_IRQNAME_SIZE,
			 "mlx4-async@pci:%s",
			 pci_name(dev->persist->pdev));
		eq_name = priv->eq_table.irq_names +
			  MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE;

		err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq,
				  mlx4_msi_x_interrupt, 0, eq_name,
				  priv->eq_table.eq + MLX4_EQ_ASYNC);
		if (err)
			goto err_out_unmap;

		priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1;
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->persist->pdev));
		err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_unmap;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);

	/* arm ASYNC eq */
	eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);

	return 0;

err_out_unmap:
	while (i > 0)
		mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
#ifdef CONFIG_RFS_ACCEL
	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (mlx4_priv(dev)->port[i].rmap) {
			free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
			mlx4_priv(dev)->port[i].rmap = NULL;
		}
	}
#endif
	mlx4_free_irqs(dev);

err_out_clr_int:
	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

err_out_bitmap:
	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);

#ifdef CONFIG_RFS_ACCEL
	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (mlx4_priv(dev)->port[i].rmap) {
			free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
			mlx4_priv(dev)->port[i].rmap = NULL;
		}
	}
#endif
	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts
 * on the vector allocated for asynchronous events
 */
int mlx4_test_async(struct mlx4_dev *dev)
{
	return mlx4_NOP(dev);
}
EXPORT_SYMBOL(mlx4_test_async);

/* A test that verifies that we can accept interrupts
 * on the given irq vector of the tested port.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupt(struct mlx4_dev *dev, int vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	/* Temporarily use polling for command completions */
	mlx4_cmd_use_polling(dev);

	/* Map the new eq to handle all asynchronous events */
	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn);
	if (err) {
		mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
		goto out;
	}

	/* Go back to using events */
	mlx4_cmd_use_events(dev);
	err = mlx4_NOP(dev);

	/* Return to default */
	mlx4_cmd_use_polling(dev);
out:
	/* Restore the async events to their usual EQ */
	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
	mlx4_cmd_use_events(dev);

	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupt);

bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	vector = MLX4_CQ_TO_EQ_VECTOR(vector);
	if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) ||
	    (vector == MLX4_EQ_ASYNC))
		return false;

	return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports);
}
EXPORT_SYMBOL(mlx4_is_eq_vector_valid);

u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned int i;
	unsigned int sum = 0;

	for (i = 0; i < dev->caps.num_comp_vectors + 1; i++)
		sum += !!test_bit(port - 1,
				  priv->eq_table.eq[i].actv_ports.ports);

	return sum;
}
EXPORT_SYMBOL(mlx4_get_eqs_per_port);

int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	vector = MLX4_CQ_TO_EQ_VECTOR(vector);
	if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1))
		return -EINVAL;

	return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports,
				dev->caps.num_ports) > 1);
}
EXPORT_SYMBOL(mlx4_is_eq_shared);

struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port)
{
	return mlx4_priv(dev)->port[port].rmap;
}
EXPORT_SYMBOL(mlx4_get_cpu_rmap);
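
/*
 * Assign a completion vector on 'port' to the caller: honour an explicitly
 * requested vector when it serves that port, otherwise pick the active EQ
 * with the lowest reference count.  The IRQ is requested lazily on first
 * use.  A minimal caller sketch (hypothetical; error handling elided --
 * consumers such as mlx4_en follow this pattern when opening a CQ):
 *
 *	int vec = 0;				// 0 means "any vector"
 *	if (mlx4_assign_eq(dev, port, &vec))
 *		vec = 0;			// fall back to the default vector
 *	...bind the CQ to 'vec'...
 *	mlx4_release_eq(dev, vec);		// drop the reference when done
 */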
int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = 0, i = 0;
	u32 min_ref_count_val = (u32)-1;
	int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector);
	int *prequested_vector = NULL;

	mutex_lock(&priv->msix_ctl.pool_lock);
	if (requested_vector < (dev->caps.num_comp_vectors + 1) &&
	    (requested_vector >= 0) &&
	    (requested_vector != MLX4_EQ_ASYNC)) {
		if (test_bit(port - 1,
			     priv->eq_table.eq[requested_vector].actv_ports.ports)) {
			prequested_vector = &requested_vector;
		} else {
			struct mlx4_eq *eq;

			for (i = 1; i < port;
			     requested_vector += mlx4_get_eqs_per_port(dev, i++))
				;

			eq = &priv->eq_table.eq[requested_vector];
			if (requested_vector < dev->caps.num_comp_vectors + 1 &&
			    test_bit(port - 1, eq->actv_ports.ports)) {
				prequested_vector = &requested_vector;
			}
		}
	}

	if (!prequested_vector) {
		requested_vector = -1;
		for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1;
		     i++) {
			struct mlx4_eq *eq = &priv->eq_table.eq[i];

			if (min_ref_count_val > eq->ref_count &&
			    test_bit(port - 1, eq->actv_ports.ports)) {
				min_ref_count_val = eq->ref_count;
				requested_vector = i;
			}
		}

		if (requested_vector < 0) {
			err = -ENOSPC;
			goto err_unlock;
		}

		prequested_vector = &requested_vector;
	}

	if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) &&
	    dev->flags & MLX4_FLAG_MSI_X) {
		set_bit(*prequested_vector, priv->msix_ctl.pool_bm);
		snprintf(priv->eq_table.irq_names +
			 *prequested_vector * MLX4_IRQNAME_SIZE,
			 MLX4_IRQNAME_SIZE, "mlx4-%d@%s",
			 *prequested_vector, dev_name(&dev->persist->pdev->dev));
		err = request_irq(priv->eq_table.eq[*prequested_vector].irq,
				  mlx4_msi_x_interrupt, 0,
				  &priv->eq_table.irq_names[*prequested_vector << 5],
				  priv->eq_table.eq + *prequested_vector);

		if (err) {
			clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
			*prequested_vector = -1;
		} else {
#if defined(CONFIG_SMP)
			mlx4_set_eq_affinity_hint(priv, *prequested_vector);
#endif
			eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
			priv->eq_table.eq[*prequested_vector].have_irq = 1;
		}
	}

	if (!err && *prequested_vector >= 0)
		priv->eq_table.eq[*prequested_vector].ref_count++;

err_unlock:
	mutex_unlock(&priv->msix_ctl.pool_lock);

	if (!err && *prequested_vector >= 0)
		*vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector);
	else
		*vector = 0;

	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq;
}
EXPORT_SYMBOL(mlx4_eq_get_irq);

void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec);

	mutex_lock(&priv->msix_ctl.pool_lock);
	priv->eq_table.eq[eq_vec].ref_count--;

	/* once we allocated EQ, we don't release it because it might be bound
	 * to cpu_rmap.
	 */
	mutex_unlock(&priv->msix_ctl.pool_lock);
}
EXPORT_SYMBOL(mlx4_release_eq);