mad.c

/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

#include <linux/random.h>
#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>

#include "mlx4_ib.h"

enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};

#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a) (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
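
/*
 * Layout of the 64-bit work-request id used on the tunnel/proxy QPs
 * (derived from the macros above): bit 34 flags a receive completion,
 * bits 33:32 carry the proxy QP index (0 = SMI, 1 = GSI), and the low
 * bits hold the ring index of the posted buffer.
 */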

/* Port mgmt change event handling */

#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
#define NUM_IDX_IN_PKEY_TBL_BLK 32
#define GUID_TBL_ENTRY_SIZE 8	   /* size in bytes */
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)

struct mlx4_mad_rcv_buf {
	struct ib_grh grh;
	u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
	u8 payload[256];
} __packed;

struct mlx4_tunnel_mad {
	struct ib_grh grh;
	struct mlx4_ib_tunnel_header hdr;
	struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
	struct mlx4_rcv_tunnel_hdr hdr;
	struct ib_grh grh;
	struct ib_mad mad;
} __packed;

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap);

__be64 mlx4_ib_gen_node_guid(void)
{
#define NODE_GUID_HI	((u64) (((u64)IB_OPENIB_OUI) << 40))
	return cpu_to_be64(NODE_GUID_HI | prandom_u32());
}

__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
{
	return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
		cpu_to_be64(0xff00000000000000LL);
}
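
/*
 * Execute a MAD through the firmware MAD_IFC command: the 256-byte MAD
 * is copied into a command mailbox (with optional work-completion info
 * at offset 256 for key-check traps) and the 256-byte response is
 * copied back out. op_modifier bits used below: 0x1 ignore MKey,
 * 0x2 ignore BKey, 0x4 WC info present, 0x8 network view (issued as a
 * native rather than wrapped command).
 */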
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
		 int port, const struct ib_wc *in_wc,
		 const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
		op_modifier |= 0x1;
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
		op_modifier |= 0x2;
	if (mlx4_is_mfunc(dev->dev) &&
	    (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
		op_modifier |= 0x8;

	if (in_wc) {
		struct {
			__be32	my_qpn;
			u32	reserved1;
			__be32	rqpn;
			u8	sl;
			u8	g_path;
			u16	reserved2[2];
			__be16	pkey;
			u32	reserved3[11];
			u8	grh[40];
		} *ext_info;

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
		ext_info->rqpn = cpu_to_be32(in_wc->src_qp);
		ext_info->sl = in_wc->sl << 4;
		ext_info->g_path = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		ext_info->pkey = cpu_to_be16(in_wc->pkey_index);

		if (in_grh)
			memcpy(ext_info->grh, in_grh, 40);

		op_modifier |= 0x4;

		in_modifier |= in_wc->slid << 16;
	}

	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
			   mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);

	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
	mlx4_free_cmd_mailbox(dev->dev, outmailbox);

	return err;
}

static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid = lid;
	ah_attr.sl = sl;
	ah_attr.port_num = port_num;

	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
			      &ah_attr);
	if (IS_ERR(new_ah))
		return;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
}

/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_port_info *pinfo;
	u16 lid;
	__be16 *base;
	u32 bn, pkey_change_bitmap;
	int i;

	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(dev, port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			if (pinfo->clientrereg_resv_subnetto & 0x80)
				handle_client_rereg_event(dev, port_num);

			if (prev_lid != lid)
				handle_lid_change_event(dev, port_num);
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			if (!mlx4_is_mfunc(dev->dev)) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				break;
			}

			/* at this point, we are running in the master.
			 * Slaves do not receive SMPs.
			 */
			bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
			base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
			pkey_change_bitmap = 0;
			for (i = 0; i < 32; i++) {
				pr_debug("PKEY[%d] = x%x\n",
					 i + bn*32, be16_to_cpu(base[i]));
				if (be16_to_cpu(base[i]) !=
				    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
					pkey_change_bitmap |= (1 << i);
					dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
						be16_to_cpu(base[i]);
				}
			}
			pr_debug("PKEY Change event: port=%d, "
				 "block=0x%x, change_bitmap=0x%x\n",
				 port_num, bn, pkey_change_bitmap);

			if (pkey_change_bitmap) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				if (!dev->sriov.is_going_down)
					__propagate_pkey_ev(dev, port_num, bn,
							    pkey_change_bitmap);
			}
			break;

		case IB_SMP_ATTR_GUID_INFO:
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_GID_CHANGE);
			/* if master, notify relevant slaves */
			if (mlx4_is_master(dev->dev) &&
			    !dev->sriov.is_going_down) {
				bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
				mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
								    (u8 *)(&((struct ib_smp *)mad)->data));
				mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
								     (u8 *)(&((struct ib_smp *)mad)->data));
			}
			break;

		default:
			break;
		}
}

static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap)
{
	int i, ix, slave, err;
	int have_event = 0;

	for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
		if (slave == mlx4_master_func_num(dev->dev))
			continue;
		if (!mlx4_is_slave_active(dev->dev, slave))
			continue;

		have_event = 0;
		for (i = 0; i < 32; i++) {
			if (!(change_bitmap & (1 << i)))
				continue;
			for (ix = 0;
			     ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
				if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
				    [ix] == i + 32 * block) {
					err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
					pr_debug("propagate_pkey_ev: slave %d,"
						 " port %d, ix %d (%d)\n",
						 slave, port_num, ix, err);
					have_event = 1;
					break;
				}
			}
			if (have_event)
				break;
		}
	}
}
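
/*
 * Replace the NodeDescription payload of outgoing GetResp SMPs with
 * the kernel's node_desc, so queriers see the locally configured
 * description rather than the firmware default.
 */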
static void node_desc_override(struct ib_device *dev,
			       struct ib_mad *mad)
{
	unsigned long flags;

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
	}
}

static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad)
{
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC,
					      IB_MGMT_BASE_VERSION);
		if (IS_ERR(send_buf))
			return;
		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (this is
		 * wrong following the IB spec strictly, but we know
		 * it's OK for our devices).
		 */
		spin_lock_irqsave(&dev->sm_lock, flags);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;
		spin_unlock_irqrestore(&dev->sm_lock, flags);

		if (ret)
			ib_free_send_mad(send_buf);
	}
}

static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
				    struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int i;

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
			return i;
	}
	return -1;
}

static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
				   u8 port, u16 pkey, u16 *ix)
{
	int i, ret;
	u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
	u16 slot_pkey;

	if (slave == mlx4_master_func_num(dev->dev))
		return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);

	unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;

	for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
		if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
			continue;

		pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];

		ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
		if (ret)
			continue;
		if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
			if (slot_pkey & 0x8000) {
				*ix = (u16) pkey_ix;
				return 0;
			} else {
				/* take first partial pkey index found */
				if (partial_ix == 0xFF)
					partial_ix = pkey_ix;
			}
		}
	}

	if (partial_ix < 0xFF) {
		*ix = (u16) partial_ix;
		return 0;
	}

	return -EINVAL;
}
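
/*
 * Tunnel an incoming wire MAD to a slave: wrap it in a
 * mlx4_rcv_tunnel_mad (tunnel header + GRH + MAD), translate the
 * P_Key index and source-QP details into the tunnel header, and post
 * it on the slave's tunnel QP.
 */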
int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct mlx4_ib_demux_pv_ctx *tun_ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_rcv_tunnel_mad *tun_mad;
	struct ib_ah_attr attr;
	struct ib_ah *ah;
	struct ib_qp *src_qp = NULL;
	unsigned tun_tx_ix = 0;
	int dqpn;
	int ret = 0;
	u16 tun_pkey_ix;
	u16 cached_pkey;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	if (dest_qpt > IB_QPT_GSI)
		return -EINVAL;

	tun_ctx = dev->sriov.demux[port-1].tun[slave];

	/* check if proxy qp created */
	if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	if (!dest_qpt)
		tun_qp = &tun_ctx->qp[0];
	else
		tun_qp = &tun_ctx->qp[1];

	/* compute P_Key index to put in tunnel header for slave */
	if (dest_qpt) {
		u16 pkey_ix;
		ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
		if (ret)
			return -EINVAL;

		ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
		if (ret)
			return -EINVAL;
		tun_pkey_ix = pkey_ix;
	} else
		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];

	dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;

	/* get tunnel tx data buf for slave */
	src_qp = tun_qp->qp;

	/* create ah. Just need an empty one with the port num for the post send.
	 * The driver will set the force loopback bit in post_send */
	memset(&attr, 0, sizeof attr);
	attr.port_num = port;
	if (is_eth) {
		memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
		attr.ah_flags = IB_AH_GRH;
	}
	ah = ib_create_ah(tun_ctx->pd, &attr);
	if (IS_ERR(ah))
		return -ENOMEM;

	/* allocate a tunnel tx buffer slot; bail out if the ring is full */
	spin_lock(&tun_qp->tx_lock);
	if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&tun_qp->tx_lock);
	if (ret)
		goto out;

	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
	if (tun_qp->tx_ring[tun_tx_ix].ah)
		ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
	tun_qp->tx_ring[tun_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   tun_qp->tx_ring[tun_tx_ix].buf.map,
				   sizeof (struct mlx4_rcv_tunnel_mad),
				   DMA_TO_DEVICE);

	/* copy over to tunnel buffer */
	if (grh)
		memcpy(&tun_mad->grh, grh, sizeof *grh);
	memcpy(&tun_mad->mad, mad, sizeof *mad);

	/* adjust tunnel data */
	tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
	tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
	tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;

	if (is_eth) {
		u16 vlan = 0;
		if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
						NULL)) {
			/* VST mode */
			if (vlan != wc->vlan_id)
				/* Packet vlan is not the VST-assigned vlan.
				 * Drop the packet.
				 */
				goto out;
			else
				/* Remove the vlan tag before forwarding
				 * the packet to the VF.
				 */
				vlan = 0xffff;
		} else {
			vlan = wc->vlan_id;
		}

		tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
		memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
		memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
	} else {
		tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
		tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
	}

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      tun_qp->tx_ring[tun_tx_ix].buf.map,
				      sizeof (struct mlx4_rcv_tunnel_mad),
				      DMA_TO_DEVICE);

	list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_rcv_tunnel_mad);
	list.lkey = tun_ctx->mr->lkey;

	wr.wr.ud.ah = ah;
	wr.wr.ud.port_num = port;
	wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	wr.wr.ud.remote_qpn = dqpn;
	wr.next = NULL;
	wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(src_qp, &wr, &bad_wr);
out:
	if (ret)
		ib_destroy_ah(ah);
	return ret;
}
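
/*
 * Demultiplex an incoming MAD to its owning function: responses are
 * routed by the slave id stashed in the TID's top byte, GRH-bearing
 * packets by GID, and the rest by management class; the MAD is then
 * forwarded to the chosen slave via mlx4_ib_send_to_slave().
 */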
static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
			     struct ib_wc *wc, struct ib_grh *grh,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	int slave;
	u8 *slave_id;
	int is_eth = 0;

	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		is_eth = 0;
	else
		is_eth = 1;

	if (is_eth) {
		if (!(wc->wc_flags & IB_WC_GRH)) {
			mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
			return -EINVAL;
		}
		if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
			mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
			return -EINVAL;
		}
		if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
		if (slave >= dev->dev->caps.sqp_demux) {
			mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
				     slave, dev->dev->caps.sqp_demux);
			return -ENOENT;
		}

		if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
			return 0;

		err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
		if (err)
			pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
				 slave, err);
		return 0;
	}

	/* Initially assume that this mad is for us */
	slave = mlx4_master_func_num(dev->dev);

	/* See if the slave id is encoded in a response mad */
	if (mad->mad_hdr.method & 0x80) {
		slave_id = (u8 *) &mad->mad_hdr.tid;
		slave = *slave_id;
		if (slave != 255) /* 255 indicates the dom0 */
			*slave_id = 0; /* remap tid */
	}

	/* If a grh is present, we demux according to it */
	if (wc->wc_flags & IB_WC_GRH) {
		slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
		if (slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
	}

	/* Class-specific handling */
	switch (mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
		/* 255 indicates the dom0 */
		if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) {
			if (!mlx4_vf_smi_enabled(dev->dev, slave, port))
				return -EPERM;
			/* for a VF, drop unsolicited MADs */
			if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) {
				mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n",
					     slave, mad->mad_hdr.mgmt_class,
					     mad->mad_hdr.method);
				return -EINVAL;
			}
		}
		break;
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
					     (struct ib_sa_mad *) mad))
			return 0;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
			return 0;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
			return 0;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			pr_debug("dropping unsupported ingress mad from class:%d "
				 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
			return 0;
		}
	}

	/* make sure a slave id of 255 (dom0) did not slip through unhandled */
	if (slave >= dev->dev->caps.sqp_demux) {
		mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
			     slave, dev->dev->caps.sqp_demux);
		return -ENOENT;
	}

	err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
	if (err)
		pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
			 slave, err);
	return 0;
}
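
/*
 * Process an SMI/GSI MAD on the PF: filter out methods the SMA cannot
 * handle, execute the MAD in firmware via mlx4_MAD_IFC(), and snoop
 * successful responses to synthesize port events and override the
 * node description.
 */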
static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			  const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			  const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid, prev_lid = 0;
	int err;
	struct ib_port_attr pattr;

	if (in_wc && in_wc->qp->qp_num) {
		pr_debug("received MAD: slid:%d sqpn:%d "
			 "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
			 in_wc->slid, in_wc->src_qp,
			 in_wc->dlid_path_bits,
			 in_wc->qp->qp_num,
			 in_wc->wc_flags,
			 in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
			 be16_to_cpu(in_mad->mad_hdr.attr_id));
		if (in_wc->wc_flags & IB_WC_GRH) {
			pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->sgid.global.interface_id));
			pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->dgid.global.interface_id));
		}
	}

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2 ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = pattr.lid;

	err = mlx4_MAD_IFC(to_mdev(ibdev),
			   (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
			   (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
			   MLX4_MAD_IFC_NET_VIEW,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
			smp_snoop(ibdev, port_num, in_mad, prev_lid);
		/* slaves get node desc from FW */
		if (!mlx4_is_slave(to_mdev(ibdev)->dev))
			node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
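
/*
 * Translate raw mlx4 hardware counters into PMA PortCounters fields.
 * PortXmitData/PortRcvData count 32-bit words, hence the >> 2 on the
 * byte counters.
 */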
static void edit_counter(struct mlx4_counter *cnt,
			 struct ib_pma_portcounters *pma_cnt)
{
	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
			     (be64_to_cpu(cnt->tx_bytes) >> 2));
	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
			     (be64_to_cpu(cnt->rx_bytes) >> 2));
	ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
			     be64_to_cpu(cnt->tx_frames));
	ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
			     be64_to_cpu(cnt->rx_frames));
}

static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			    const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx4_counter counter_stats;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;

	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return -EINVAL;

	memset(&counter_stats, 0, sizeof(counter_stats));
	err = mlx4_get_counter_stats(dev->dev,
				     dev->counters[port_num - 1].index,
				     &counter_stats, 0);
	if (err)
		err = IB_MAD_RESULT_FAILURE;
	else {
		memset(out_mad->data, 0, sizeof out_mad->data);
		switch (counter_stats.counter_mode & 0xf) {
		case 0:
			edit_counter(&counter_stats,
				     (void *)(out_mad->data + 40));
			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
			break;
		default:
			err = IB_MAD_RESULT_FAILURE;
		}
	}

	return err;
}

int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	BUG_ON(in_mad_size != sizeof(*in_mad) ||
	       *out_mad_size != sizeof(*out_mad));

	switch (rdma_port_get_link_layer(ibdev, port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		if (!mlx4_is_slave(dev->dev))
			return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
					      in_grh, in_mad, out_mad);
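		/*
		 * NOTE: slaves fall through to the Ethernet path here, so
		 * their perf-management MADs are answered from the
		 * paravirtualized per-function counters (iboe_process_mad)
		 * rather than going through MAD_IFC.
		 */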
	case IB_LINK_LAYER_ETHERNET:
		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
					in_grh, in_mad, out_mad);
	default:
		return -EINVAL;
	}
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->send_buf->context[0])
		ib_destroy_ah(mad_send_wc->send_buf->context[0]);
	ib_free_send_mad(mad_send_wc->send_buf);
}

int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;
	enum rdma_link_layer ll;

	for (p = 0; p < dev->num_ports; ++p) {
		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
		for (q = 0; q <= 1; ++q) {
			if (ll == IB_LINK_LAYER_INFINIBAND) {
				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
							      q ? IB_QPT_GSI : IB_QPT_SMI,
							      NULL, 0, send_handler,
							      NULL, NULL, 0);
				if (IS_ERR(agent)) {
					ret = PTR_ERR(agent);
					goto err;
				}
				dev->send_agent[p][q] = agent;
			} else
				dev->send_agent[p][q] = NULL;
		}
	}

	return 0;

err:
	for (p = 0; p < dev->num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}

void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;

	for (p = 0; p < dev->num_ports; ++p) {
		for (q = 0; q <= 1; ++q) {
			agent = dev->send_agent[p][q];
			if (agent) {
				dev->send_agent[p][q] = NULL;
				ib_unregister_mad_agent(agent);
			}
		}

		if (dev->sm_ah[p])
			ib_destroy_ah(dev->sm_ah[p]);
	}
}

static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
		mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
					    MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
}

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	/* re-configure the alias-guid and mcg's */
	if (mlx4_is_master(dev->dev)) {
		mlx4_ib_invalidate_all_guid_record(dev, port_num);

		if (!dev->sriov.is_going_down) {
			mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
			mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
						    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
		}
	}
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
}

static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
			      struct mlx4_eqe *eqe)
{
	__propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
			    GET_MASK_FROM_EQE(eqe));
}

static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
				      u32 guid_tbl_blk_num, u32 change_bitmap)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	u16 i;

	if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
		return;

	in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad) {
		mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
		goto out;
	}

	guid_tbl_blk_num *= 4;

	for (i = 0; i < 4; i++) {
		if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
			continue;
		memset(in_mad, 0, sizeof *in_mad);
		memset(out_mad, 0, sizeof *out_mad);

		in_mad->base_version = 1;
		in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
		in_mad->class_version = 1;
		in_mad->method = IB_MGMT_METHOD_GET;
		in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
		in_mad->attr_mod = cpu_to_be32(guid_tbl_blk_num + i);

		if (mlx4_MAD_IFC(dev,
				 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
				 port_num, NULL, NULL, in_mad, out_mad)) {
			mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
			goto out;
		}

		mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
						    port_num,
						    (u8 *)(&((struct ib_smp *)out_mad)->data));
		mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
						     port_num,
						     (u8 *)(&((struct ib_smp *)out_mad)->data));
	}

out:
	kfree(in_mad);
	kfree(out_mad);
	return;
}

void handle_port_mgmt_change_event(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *dev = ew->ib_dev;
	struct mlx4_eqe *eqe = &(ew->ib_eqe);
	u8 port = eqe->event.port_mgmt_change.port;
	u32 changed_attr;
	u32 tbl_block;
	u32 change_bitmap;

	switch (eqe->subtype) {
	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);

		/* Update the SM ah - This should be done before handling
		   the other changed attributes so that MADs can be sent to the SM */
		if (changed_attr & MSTR_SM_CHANGE_MASK) {
			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
			update_sm_ah(dev, port, lid, sl);
		}

		/* Check if it is a lid change event */
		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
			handle_lid_change_event(dev, port);

		/* Generate GUID changed event */
		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
			/* if master, notify all slaves */
			if (mlx4_is_master(dev->dev))
				mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
							    MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
		}

		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
			handle_client_rereg_event(dev, port);
		break;

	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
		if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
			propagate_pkey_ev(dev, port, eqe);
		break;
	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
		/* paravirtualized master's guid is guid 0 -- does not change */
		if (!mlx4_is_master(dev->dev))
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
		/* if master, notify relevant slaves */
		else if (!dev->sriov.is_going_down) {
			tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
			change_bitmap = GET_MASK_FROM_EQE(eqe);
			handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
		}
		break;
	default:
		pr_warn("Unsupported subtype 0x%x for "
			"Port Management Change event\n", eqe->subtype);
	}

	kfree(ew);
}

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event event;

	event.device = &dev->ib_dev;
	event.element.port_num = port_num;
	event.event = type;

	ib_dispatch_event(&event);
}

static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
{
	unsigned long flags;
	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
		queue_work(ctx->wq, &ctx->work);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
				  struct mlx4_ib_demux_pv_qp *tun_qp,
				  int index)
{
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	int size;

	size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
		sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);

	sg_list.addr = tun_qp->ring[index].map;
	sg_list.length = size;
	sg_list.lkey = ctx->mr->lkey;

	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;
	recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
		MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
	ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
				      size, DMA_FROM_DEVICE);
	return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
}

static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
					int slave, struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}

static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
{
	int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;

	return (qpn >= proxy_start && qpn <= proxy_start + 1);
}
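
/*
 * Send a MAD on behalf of a slave out the real wire: post it on the
 * master-owned proxy SMI/GSI QP for the port, translating the slave's
 * virtual P_Key and SGID indexes to their physical values.
 */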
int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index,
			 u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr,
			 u8 *s_mac, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct mlx4_ib_demux_pv_ctx *sqp_ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct mlx4_mad_snd_buf *sqp_mad;
	struct ib_ah *ah;
	struct ib_qp *send_qp = NULL;
	unsigned wire_tx_ix = 0;
	int ret = 0;
	u16 wire_pkey_ix;
	int src_qpnum;
	u8 sgid_index;

	sqp_ctx = dev->sriov.sqps[port-1];

	/* check if proxy qp created */
	if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	if (dest_qpt == IB_QPT_SMI) {
		src_qpnum = 0;
		sqp = &sqp_ctx->qp[0];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
	} else {
		src_qpnum = 1;
		sqp = &sqp_ctx->qp[1];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
	}

	send_qp = sqp->qp;

	/* create ah */
	sgid_index = attr->grh.sgid_index;
	attr->grh.sgid_index = 0;
	ah = ib_create_ah(sqp_ctx->pd, attr);
	if (IS_ERR(ah))
		return -ENOMEM;
	attr->grh.sgid_index = sgid_index;
	to_mah(ah)->av.ib.gid_index = sgid_index;
	/* get rid of force-loopback bit */
	to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
	spin_lock(&sqp->tx_lock);
	if (sqp->tx_ix_head - sqp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&sqp->tx_lock);
	if (ret)
		goto out;

	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
	if (sqp->tx_ring[wire_tx_ix].ah)
		ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
	sqp->tx_ring[wire_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   sqp->tx_ring[wire_tx_ix].buf.map,
				   sizeof (struct mlx4_mad_snd_buf),
				   DMA_TO_DEVICE);

	memcpy(&sqp_mad->payload, mad, sizeof *mad);

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      sqp->tx_ring[wire_tx_ix].buf.map,
				      sizeof (struct mlx4_mad_snd_buf),
				      DMA_TO_DEVICE);

	list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_mad_snd_buf);
	list.lkey = sqp_ctx->mr->lkey;

	wr.wr.ud.ah = ah;
	wr.wr.ud.port_num = port;
	wr.wr.ud.pkey_index = wire_pkey_ix;
	wr.wr.ud.remote_qkey = qkey;
	wr.wr.ud.remote_qpn = remote_qpn;
	wr.next = NULL;
	wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	if (s_mac)
		memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);

	ret = ib_post_send(send_qp, &wr, &bad_wr);
out:
	if (ret)
		ib_destroy_ah(ah);
	return ret;
}

static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
{
	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
		return slave;
	return mlx4_get_base_gid_ix(dev->dev, slave, port);
}

static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
				    struct ib_ah_attr *ah_attr)
{
	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
		ah_attr->grh.sgid_index = slave;
	else
		ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
}
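
/*
 * Handle a MAD tunneled up from a slave: verify that the source proxy
 * QP really belongs to that slave, stamp the slave id into the TID's
 * top byte so the response can be demuxed back, apply class-specific
 * checks, and forward the MAD to the wire via mlx4_ib_send_to_wire().
 */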
static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
{
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
	struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
	int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
	struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
	struct mlx4_ib_ah ah;
	struct ib_ah_attr ah_attr;
	u8 *slave_id;
	int slave;
	int port;

	/* Get slave that sent this packet */
	if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
	    wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
	    (wc->src_qp & 0x1) != ctx->port - 1 ||
	    wc->src_qp & 0x4) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
		return;
	}
	slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
	if (slave != ctx->slave) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
			     "belongs to another slave\n", wc->src_qp);
		return;
	}

	/* Map transaction ID */
	ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
				   sizeof (struct mlx4_tunnel_mad),
				   DMA_FROM_DEVICE);
	switch (tunnel->mad.mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_REPORT:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_DELETE:
	case IB_SA_METHOD_GET_MULTI:
	case IB_SA_METHOD_GET_TRACE_TBL:
		slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
		if (*slave_id) {
			mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
				     "class:%d slave:%d\n", *slave_id,
				     tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		} else
			*slave_id = slave;
	default:
		/* nothing */;
	}

	/* Class-specific handling */
	switch (tunnel->mad.mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
		if (slave != mlx4_master_func_num(dev->dev) &&
		    !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port))
			return;
		break;
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_sa_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
		    tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
			return;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
				     "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		}
	}

	/* We are using standard ib_core services to send the mad, so generate a
	 * standard address handle by decoding the tunnelled mlx4_ah fields */
	memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
	ah.ibah.device = ctx->ib_dev;

	port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
	port = mlx4_slave_convert_port(dev->dev, slave, port);
	if (port < 0)
		return;
	ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));

	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
	if (ah_attr.ah_flags & IB_AH_GRH)
		fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);

	memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
	ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan);
	/* if the slave has a default vlan, use it */
	mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
				    &ah_attr.vlan_id, &ah_attr.sl);

	mlx4_ib_send_to_wire(dev, slave, ctx->port,
			     is_proxy_qp0(dev, wc->src_qp, slave) ?
			     IB_QPT_SMI : IB_QPT_GSI,
			     be16_to_cpu(tunnel->hdr.pkey_index),
			     be32_to_cpu(tunnel->hdr.remote_qpn),
			     be32_to_cpu(tunnel->hdr.qkey),
			     &ah_attr, wc->smac, &tunnel->mad);
}
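
/*
 * Allocate and DMA-map the receive and transmit rings for one
 * tunnel/proxy QP. Buffer sizes differ between tunnel QPs
 * (mlx4_tunnel_mad / mlx4_rcv_tunnel_mad) and proxy SQPs
 * (mlx4_mad_rcv_buf / mlx4_mad_snd_buf).
 */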
static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
                                 enum ib_qp_type qp_type, int is_tun)
{
        int i;
        struct mlx4_ib_demux_pv_qp *tun_qp;
        int rx_buf_size, tx_buf_size;

        if (qp_type > IB_QPT_GSI)
                return -EINVAL;

        tun_qp = &ctx->qp[qp_type];

        tun_qp->ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
                               sizeof (struct mlx4_ib_buf),
                               GFP_KERNEL);
        if (!tun_qp->ring)
                return -ENOMEM;

        tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
                                  sizeof (struct mlx4_ib_tun_tx_buf),
                                  GFP_KERNEL);
        if (!tun_qp->tx_ring) {
                kfree(tun_qp->ring);
                tun_qp->ring = NULL;
                return -ENOMEM;
        }

        if (is_tun) {
                rx_buf_size = sizeof (struct mlx4_tunnel_mad);
                tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
        } else {
                rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
                tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
        }

        for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
                tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
                if (!tun_qp->ring[i].addr)
                        goto err;
                tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
                                                        tun_qp->ring[i].addr,
                                                        rx_buf_size,
                                                        DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
                        kfree(tun_qp->ring[i].addr);
                        goto err;
                }
        }

        for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
                tun_qp->tx_ring[i].buf.addr =
                        kmalloc(tx_buf_size, GFP_KERNEL);
                if (!tun_qp->tx_ring[i].buf.addr)
                        goto tx_err;
                tun_qp->tx_ring[i].buf.map =
                        ib_dma_map_single(ctx->ib_dev,
                                          tun_qp->tx_ring[i].buf.addr,
                                          tx_buf_size,
                                          DMA_TO_DEVICE);
                if (ib_dma_mapping_error(ctx->ib_dev,
                                         tun_qp->tx_ring[i].buf.map)) {
                        kfree(tun_qp->tx_ring[i].buf.addr);
                        goto tx_err;
                }
                tun_qp->tx_ring[i].ah = NULL;
        }
        spin_lock_init(&tun_qp->tx_lock);
        tun_qp->tx_ix_head = 0;
        tun_qp->tx_ix_tail = 0;
        tun_qp->proxy_qpt = qp_type;

        return 0;

tx_err:
        while (i > 0) {
                --i;
                ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
                                    tx_buf_size, DMA_TO_DEVICE);
                kfree(tun_qp->tx_ring[i].buf.addr);
        }
        kfree(tun_qp->tx_ring);
        tun_qp->tx_ring = NULL;
        i = MLX4_NUM_TUNNEL_BUFS;
err:
        while (i > 0) {
                --i;
                ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
                                    rx_buf_size, DMA_FROM_DEVICE);
                kfree(tun_qp->ring[i].addr);
        }
        kfree(tun_qp->ring);
        tun_qp->ring = NULL;
        return -ENOMEM;
}

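/*
 * Undo mlx4_ib_alloc_pv_bufs(): unmap and free both buffer rings, and
 * destroy any address handles still attached to pending TX entries.
 */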
static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
                                    enum ib_qp_type qp_type, int is_tun)
{
        int i;
        struct mlx4_ib_demux_pv_qp *tun_qp;
        int rx_buf_size, tx_buf_size;

        if (qp_type > IB_QPT_GSI)
                return;

        tun_qp = &ctx->qp[qp_type];
        if (is_tun) {
                rx_buf_size = sizeof (struct mlx4_tunnel_mad);
                tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
        } else {
                rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
                tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
        }

        for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
                ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
                                    rx_buf_size, DMA_FROM_DEVICE);
                kfree(tun_qp->ring[i].addr);
        }

        for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
                ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
                                    tx_buf_size, DMA_TO_DEVICE);
                kfree(tun_qp->tx_ring[i].buf.addr);
                if (tun_qp->tx_ring[i].ah)
                        ib_destroy_ah(tun_qp->tx_ring[i].ah);
        }
        kfree(tun_qp->tx_ring);
        kfree(tun_qp->ring);
}

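/*
 * Completion worker for the tunnel QPs.  Each received tunnelled MAD is
 * unpacked and sent to the wire by mlx4_ib_multiplex_mad() and its
 * receive buffer is reposted; send completions release the AH and
 * advance the TX tail index.
 */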
static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
{
        struct mlx4_ib_demux_pv_ctx *ctx;
        struct mlx4_ib_demux_pv_qp *tun_qp;
        struct ib_wc wc;
        int ret;

        ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
        ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

        while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
                tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
                if (wc.status == IB_WC_SUCCESS) {
                        switch (wc.opcode) {
                        case IB_WC_RECV:
                                mlx4_ib_multiplex_mad(ctx, &wc);
                                ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
                                                             wc.wr_id &
                                                             (MLX4_NUM_TUNNEL_BUFS - 1));
                                if (ret)
                                        pr_err("Failed reposting tunnel buf:%lld\n",
                                               wc.wr_id);
                                break;
                        case IB_WC_SEND:
                                pr_debug("received tunnel send completion:wrid=0x%llx, status=0x%x\n",
                                         wc.wr_id, wc.status);
                                ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
                                              (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
                                tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
                                        = NULL;
                                spin_lock(&tun_qp->tx_lock);
                                tun_qp->tx_ix_tail++;
                                spin_unlock(&tun_qp->tx_lock);
                                break;
                        default:
                                break;
                        }
                } else {
                        pr_debug("mlx4_ib: completion error in tunnel: %d. status = %d, wrid = 0x%llx\n",
                                 ctx->slave, wc.status, wc.wr_id);
                        if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
                                ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
                                              (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
                                tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
                                        = NULL;
                                spin_lock(&tun_qp->tx_lock);
                                tun_qp->tx_ix_tail++;
                                spin_unlock(&tun_qp->tx_lock);
                        }
                }
        }
}

static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
{
        struct mlx4_ib_demux_pv_ctx *sqp = qp_context;

        /* It's worse than that! He's dead, Jim! */
        pr_err("Fatal error (%d) on a MAD QP on port %d\n",
               event->event, sqp->port);
}

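/*
 * Create one para-virtual QP (tunnel QP or proxy SQP), transition it
 * through INIT/RTR/RTS, and post the initial set of receive buffers.
 */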
static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
                         enum ib_qp_type qp_type, int create_tun)
{
        int i, ret;
        struct mlx4_ib_demux_pv_qp *tun_qp;
        struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
        struct ib_qp_attr attr;
        int qp_attr_mask_INIT;

        if (qp_type > IB_QPT_GSI)
                return -EINVAL;

        tun_qp = &ctx->qp[qp_type];

        memset(&qp_init_attr, 0, sizeof qp_init_attr);
        qp_init_attr.init_attr.send_cq = ctx->cq;
        qp_init_attr.init_attr.recv_cq = ctx->cq;
        qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
        qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
        qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
        qp_init_attr.init_attr.cap.max_send_sge = 1;
        qp_init_attr.init_attr.cap.max_recv_sge = 1;
        if (create_tun) {
                qp_init_attr.init_attr.qp_type = IB_QPT_UD;
                qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
                qp_init_attr.port = ctx->port;
                qp_init_attr.slave = ctx->slave;
                qp_init_attr.proxy_qp_type = qp_type;
                qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
                                    IB_QP_QKEY | IB_QP_PORT;
        } else {
                qp_init_attr.init_attr.qp_type = qp_type;
                qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
                qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
        }
        qp_init_attr.init_attr.port_num = ctx->port;
        qp_init_attr.init_attr.qp_context = ctx;
        qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
        tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
        if (IS_ERR(tun_qp->qp)) {
                ret = PTR_ERR(tun_qp->qp);
                tun_qp->qp = NULL;
                pr_err("Couldn't create %s QP (%d)\n",
                       create_tun ? "tunnel" : "special", ret);
                return ret;
        }

        memset(&attr, 0, sizeof attr);
        attr.qp_state = IB_QPS_INIT;
        ret = 0;
        if (create_tun)
                ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
                                              ctx->port, IB_DEFAULT_PKEY_FULL,
                                              &attr.pkey_index);
        if (ret || !create_tun)
                attr.pkey_index =
                        to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
        attr.qkey = IB_QP1_QKEY;
        attr.port_num = ctx->port;
        ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
        if (ret) {
                pr_err("Couldn't change %s qp state to INIT (%d)\n",
                       create_tun ? "tunnel" : "special", ret);
                goto err_qp;
        }
        attr.qp_state = IB_QPS_RTR;
        ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
        if (ret) {
                pr_err("Couldn't change %s qp state to RTR (%d)\n",
                       create_tun ? "tunnel" : "special", ret);
                goto err_qp;
        }
        attr.qp_state = IB_QPS_RTS;
        attr.sq_psn = 0;
        ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
        if (ret) {
                pr_err("Couldn't change %s qp state to RTS (%d)\n",
                       create_tun ? "tunnel" : "special", ret);
                goto err_qp;
        }

        for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
                ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
                if (ret) {
                        pr_err("mlx4_ib_post_pv_qp_buf error (err = %d, i = %d)\n",
                               ret, i);
                        goto err_qp;
                }
        }
        return 0;

err_qp:
        ib_destroy_qp(tun_qp->qp);
        tun_qp->qp = NULL;
        return ret;
}

/*
 * IB MAD completion callback for real SQPs
 */
static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
{
        struct mlx4_ib_demux_pv_ctx *ctx;
        struct mlx4_ib_demux_pv_qp *sqp;
        struct ib_wc wc;
        struct ib_grh *grh;
        struct ib_mad *mad;

        ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
        ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

        while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
                sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
                if (wc.status == IB_WC_SUCCESS) {
                        switch (wc.opcode) {
                        case IB_WC_SEND:
                                ib_destroy_ah(sqp->tx_ring[wc.wr_id &
                                              (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
                                sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
                                        = NULL;
                                spin_lock(&sqp->tx_lock);
                                sqp->tx_ix_tail++;
                                spin_unlock(&sqp->tx_lock);
                                break;
                        case IB_WC_RECV:
                                mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
                                                (sqp->ring[wc.wr_id &
                                                (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
                                grh = &(((struct mlx4_mad_rcv_buf *)
                                                (sqp->ring[wc.wr_id &
                                                (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
                                mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
                                if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
                                                           (MLX4_NUM_TUNNEL_BUFS - 1)))
                                        pr_err("Failed reposting SQP buf:%lld\n",
                                               wc.wr_id);
                                break;
                        default:
                                BUG_ON(1);
                                break;
                        }
                } else {
                        pr_debug("mlx4_ib: completion error on SQP, slave %d: status = %d, wrid = 0x%llx\n",
                                 ctx->slave, wc.status, wc.wr_id);
                        if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
                                ib_destroy_ah(sqp->tx_ring[wc.wr_id &
                                              (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
                                sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
                                        = NULL;
                                spin_lock(&sqp->tx_lock);
                                sqp->tx_ix_tail++;
                                spin_unlock(&sqp->tx_lock);
                        }
                }
        }
}

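/*
 * Allocate a per-slave, per-port para-virtual demux context and hand it
 * back through ret_ctx; free_pv_object() below releases it again.
 */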
static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
                           struct mlx4_ib_demux_pv_ctx **ret_ctx)
{
        struct mlx4_ib_demux_pv_ctx *ctx;

        *ret_ctx = NULL;
        ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
        if (!ctx) {
                pr_err("failed allocating pv resource context for port %d, slave %d\n",
                       port, slave);
                return -ENOMEM;
        }

        ctx->ib_dev = &dev->ib_dev;
        ctx->port = port;
        ctx->slave = slave;
        *ret_ctx = ctx;
        return 0;
}

static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
{
        if (dev->sriov.demux[port - 1].tun[slave]) {
                kfree(dev->sriov.demux[port - 1].tun[slave]);
                dev->sriov.demux[port - 1].tun[slave] = NULL;
        }
}

static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
                               int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
{
        int ret, cq_size;
        struct ib_cq_init_attr cq_attr = {};

        if (ctx->state != DEMUX_PV_STATE_DOWN)
                return -EEXIST;

        ctx->state = DEMUX_PV_STATE_STARTING;
        /* have QP0 only if link layer is IB */
        if (rdma_port_get_link_layer(ibdev, ctx->port) ==
            IB_LINK_LAYER_INFINIBAND)
                ctx->has_smi = 1;

        if (ctx->has_smi) {
                ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
                if (ret) {
                        pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
                        goto err_out;
                }
        }

        ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
        if (ret) {
                pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
                goto err_out_qp0;
        }

        cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
        if (ctx->has_smi)
                cq_size *= 2;

        cq_attr.cqe = cq_size;
        ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
                               NULL, ctx, &cq_attr);
        if (IS_ERR(ctx->cq)) {
                ret = PTR_ERR(ctx->cq);
                pr_err("Couldn't create tunnel CQ (%d)\n", ret);
                goto err_buf;
        }

        ctx->pd = ib_alloc_pd(ctx->ib_dev);
        if (IS_ERR(ctx->pd)) {
                ret = PTR_ERR(ctx->pd);
                pr_err("Couldn't create tunnel PD (%d)\n", ret);
                goto err_cq;
        }

        ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(ctx->mr)) {
                ret = PTR_ERR(ctx->mr);
                pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);
                goto err_pd;
        }

        if (ctx->has_smi) {
                ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
                if (ret) {
                        pr_err("Couldn't create %s QP0 (%d)\n",
                               create_tun ? "tunnel for" : "", ret);
                        goto err_mr;
                }
        }

        ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
        if (ret) {
                pr_err("Couldn't create %s QP1 (%d)\n",
                       create_tun ? "tunnel for" : "", ret);
                goto err_qp0;
        }

        if (create_tun)
                INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
        else
                INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);

        ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;

        ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
        if (ret) {
                pr_err("Couldn't arm tunnel cq (%d)\n", ret);
                goto err_wq;
        }
        ctx->state = DEMUX_PV_STATE_ACTIVE;
        return 0;

err_wq:
        ctx->wq = NULL;
        ib_destroy_qp(ctx->qp[1].qp);
        ctx->qp[1].qp = NULL;

err_qp0:
        if (ctx->has_smi)
                ib_destroy_qp(ctx->qp[0].qp);
        ctx->qp[0].qp = NULL;

err_mr:
        ib_dereg_mr(ctx->mr);
        ctx->mr = NULL;

err_pd:
        ib_dealloc_pd(ctx->pd);
        ctx->pd = NULL;

err_cq:
        ib_destroy_cq(ctx->cq);
        ctx->cq = NULL;

err_buf:
        mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);

err_out_qp0:
        if (ctx->has_smi)
                mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
err_out:
        ctx->state = DEMUX_PV_STATE_DOWN;
        return ret;
}

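/*
 * Tear down everything create_pv_resources() set up.  When flush is set,
 * drain the context's workqueue first so no completion work races with
 * the teardown.
 */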
static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
                                 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
{
        if (!ctx)
                return;
        if (ctx->state > DEMUX_PV_STATE_DOWN) {
                ctx->state = DEMUX_PV_STATE_DOWNING;
                if (flush)
                        flush_workqueue(ctx->wq);
                if (ctx->has_smi) {
                        ib_destroy_qp(ctx->qp[0].qp);
                        ctx->qp[0].qp = NULL;
                        mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
                }
                ib_destroy_qp(ctx->qp[1].qp);
                ctx->qp[1].qp = NULL;
                mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
                ib_dereg_mr(ctx->mr);
                ctx->mr = NULL;
                ib_dealloc_pd(ctx->pd);
                ctx->pd = NULL;
                ib_destroy_cq(ctx->cq);
                ctx->cq = NULL;
                ctx->state = DEMUX_PV_STATE_DOWN;
        }
}

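/*
 * Bring the tunnel QPs for one slave/port up or down.  The master also
 * owns the real special QPs, so those are created/destroyed here too.
 */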
static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
                                  int port, int do_init)
{
        int ret = 0;

        if (!do_init) {
                clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
                /* for master, destroy real sqp resources */
                if (slave == mlx4_master_func_num(dev->dev))
                        destroy_pv_resources(dev, slave, port,
                                             dev->sriov.sqps[port - 1], 1);
                /* destroy the tunnel qp resources */
                destroy_pv_resources(dev, slave, port,
                                     dev->sriov.demux[port - 1].tun[slave], 1);
                return 0;
        }

        /* create the tunnel qp resources */
        ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
                                  dev->sriov.demux[port - 1].tun[slave]);

        /* for master, create the real sqp resources */
        if (!ret && slave == mlx4_master_func_num(dev->dev))
                ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
                                          dev->sriov.sqps[port - 1]);
        return ret;
}

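/* Workqueue wrapper around mlx4_ib_tunnels_update(); frees its work item. */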
void mlx4_ib_tunnels_update_work(struct work_struct *work)
{
        struct mlx4_ib_demux_work *dmxw;

        dmxw = container_of(work, struct mlx4_ib_demux_work, work);
        mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
                               dmxw->do_init);
        kfree(dmxw);
}

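/*
 * Set up the per-port demux context: a pv context slot for every active
 * function, the mcg para-virtualization state, and the two single-threaded
 * workqueues used for tunnelling and for port up/down events.
 */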
static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
                                   struct mlx4_ib_demux_ctx *ctx,
                                   int port)
{
        char name[12];
        int ret = 0;
        int i;

        ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
                           sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
        if (!ctx->tun)
                return -ENOMEM;

        ctx->dev = dev;
        ctx->port = port;
        ctx->ib_dev = &dev->ib_dev;

        for (i = 0;
             i < min(dev->dev->caps.sqp_demux,
                     (u16)(dev->dev->persist->num_vfs + 1));
             i++) {
                struct mlx4_active_ports actv_ports =
                        mlx4_get_active_ports(dev->dev, i);

                if (!test_bit(port - 1, actv_ports.ports))
                        continue;

                ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
                if (ret) {
                        ret = -ENOMEM;
                        goto err_mcg;
                }
        }

        ret = mlx4_ib_mcg_port_init(ctx);
        if (ret) {
                pr_err("Failed initializing mcg para-virt (%d)\n", ret);
                goto err_mcg;
        }

        snprintf(name, sizeof name, "mlx4_ibt%d", port);
        ctx->wq = create_singlethread_workqueue(name);
        if (!ctx->wq) {
                pr_err("Failed to create tunnelling WQ for port %d\n", port);
                ret = -ENOMEM;
                goto err_wq;
        }

        snprintf(name, sizeof name, "mlx4_ibud%d", port);
        ctx->ud_wq = create_singlethread_workqueue(name);
        if (!ctx->ud_wq) {
                pr_err("Failed to create up/down WQ for port %d\n", port);
                ret = -ENOMEM;
                goto err_udwq;
        }

        return 0;

err_udwq:
        destroy_workqueue(ctx->wq);
        ctx->wq = NULL;

err_wq:
        mlx4_ib_mcg_port_cleanup(ctx, 1);
err_mcg:
        for (i = 0; i < dev->dev->caps.sqp_demux; i++)
                free_pv_object(dev, i, port);
        kfree(ctx->tun);
        ctx->tun = NULL;
        return ret;
}

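/*
 * Release the master's real-SQP context: QPs, buffer rings, MR, PD and
 * CQ, after flushing any outstanding completion work.
 */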
static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
{
        if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
                sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
                flush_workqueue(sqp_ctx->wq);
                if (sqp_ctx->has_smi) {
                        ib_destroy_qp(sqp_ctx->qp[0].qp);
                        sqp_ctx->qp[0].qp = NULL;
                        mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
                }
                ib_destroy_qp(sqp_ctx->qp[1].qp);
                sqp_ctx->qp[1].qp = NULL;
                mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
                ib_dereg_mr(sqp_ctx->mr);
                sqp_ctx->mr = NULL;
                ib_dealloc_pd(sqp_ctx->pd);
                sqp_ctx->pd = NULL;
                ib_destroy_cq(sqp_ctx->cq);
                sqp_ctx->cq = NULL;
                sqp_ctx->state = DEMUX_PV_STATE_DOWN;
        }
}

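/*
 * Release a per-port demux context: mark every slave's pv context as
 * going down, flush the tunnel workqueue, then destroy the per-slave
 * resources and the port workqueues.
 */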
static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
{
        int i;

        if (ctx) {
                struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);

                mlx4_ib_mcg_port_cleanup(ctx, 1);
                for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
                        if (!ctx->tun[i])
                                continue;
                        if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
                                ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
                }
                flush_workqueue(ctx->wq);
                for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
                        destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
                        free_pv_object(dev, i, ctx->port);
                }
                kfree(ctx->tun);
                destroy_workqueue(ctx->ud_wq);
                destroy_workqueue(ctx->wq);
        }
}

static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
{
        int i;

        if (!mlx4_is_master(dev->dev))
                return;

        /* initialize or tear down tunnel QPs for the master */
        for (i = 0; i < dev->dev->caps.num_ports; i++)
                mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev),
                                       i + 1, do_init);
}

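/*
 * Entry point for SR-IOV MAD para-virtualization: set up CM paravirt
 * state, per-slave node GUIDs, the alias GUID service, sysfs, and a
 * demux context for each port.  Slaves return early, operating in qp1
 * tunnel mode only.
 */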
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
{
        int i = 0;
        int err;

        if (!mlx4_is_mfunc(dev->dev))
                return 0;

        dev->sriov.is_going_down = 0;
        spin_lock_init(&dev->sriov.going_down_lock);
        mlx4_ib_cm_paravirt_init(dev);

        mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");

        if (mlx4_is_slave(dev->dev)) {
                mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
                return 0;
        }

        for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
                if (i == mlx4_master_func_num(dev->dev))
                        mlx4_put_slave_node_guid(dev->dev, i,
                                                 dev->ib_dev.node_guid);
                else
                        mlx4_put_slave_node_guid(dev->dev, i,
                                                 mlx4_ib_gen_node_guid());
        }

        err = mlx4_ib_init_alias_guid_service(dev);
        if (err) {
                mlx4_ib_warn(&dev->ib_dev, "Failed to init alias guid service\n");
                goto paravirt_err;
        }
        err = mlx4_ib_device_register_sysfs(dev);
        if (err) {
                mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
                goto sysfs_err;
        }

        mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
                     dev->dev->caps.sqp_demux);

        for (i = 0; i < dev->num_ports; i++) {
                union ib_gid gid;

                err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
                if (err)
                        goto demux_err;
                dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
                err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
                                      &dev->sriov.sqps[i]);
                if (err)
                        goto demux_err;
                err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
                if (err)
                        goto free_pv;
        }
        mlx4_ib_master_tunnels(dev, 1);
        return 0;

free_pv:
        free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
demux_err:
        while (--i >= 0) {
                free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
                mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
        }
        mlx4_ib_device_unregister_sysfs(dev);

sysfs_err:
        mlx4_ib_destroy_alias_guid_service(dev);

paravirt_err:
        mlx4_ib_cm_paravirt_clean(dev, -1);

        return err;
}

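/*
 * Counterpart of mlx4_ib_init_sriov(): flag the device as going down,
 * then (on the master) flush the up/down queues and free all SQP and
 * demux contexts, the CM paravirt state, the alias GUID service and
 * the sysfs entries.
 */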
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
{
        int i;
        unsigned long flags;

        if (!mlx4_is_mfunc(dev->dev))
                return;

        spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
        dev->sriov.is_going_down = 1;
        spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);

        if (mlx4_is_master(dev->dev)) {
                for (i = 0; i < dev->num_ports; i++) {
                        flush_workqueue(dev->sriov.demux[i].ud_wq);
                        mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
                        kfree(dev->sriov.sqps[i]);
                        dev->sriov.sqps[i] = NULL;
                        mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
                }

                mlx4_ib_cm_paravirt_clean(dev, -1);
                mlx4_ib_destroy_alias_guid_service(dev);
                mlx4_ib_device_unregister_sysfs(dev);
        }
}