/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/net.h>
#include <rdma/ib_smi.h>

#include "hfi.h"
#include "mad.h"
#include "verbs_txreq.h"
#include "trace_ibhdrs.h"
#include "qp.h"

/* We support only two types - 9B and 16B for now */
static const hfi1_make_req hfi1_make_ud_req_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_ud_req_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_ud_req_16B
};
/**
 * ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from hfi1_make_ud_req() to forward a WQE addressed
 * to the same HFI.
 * Note that the receive interrupt handler may be calling hfi1_ud_rcv()
 * while this is being called.
 */
static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct hfi1_pportdata *ppd;
	struct hfi1_qp_priv *priv = sqp->priv;
	struct rvt_qp *qp;
	struct rdma_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	rcu_read_lock();
	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    swqe->ud_wr.remote_qpn);
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		rcu_read_unlock();
		return;
	}

	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
		IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
		IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		u16 pkey;
		u32 slid;
		u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];

		pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
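		/*
		 * The low LMC bits of the SLID carry the path bits from
		 * the AH, selecting one of the 2^LMC LIDs assigned to
		 * this port.
		 */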
		slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				   ((1 << ppd->lmc) - 1));
		if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
						qp->s_pkey_index,
						slid, false))) {
			hfi1_bad_pkey(ibp, pkey,
				      rdma_ah_get_sl(ah_attr),
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      slid, rdma_ah_get_dlid(ah_attr));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
			sqp->qkey : swqe->ud_wr.remote_qkey;
		if (unlikely(qkey != qp->qkey))
			goto drop; /* silently drop per IBTA spec */
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		struct ib_grh grh;
		struct ib_global_route grd = *(rdma_ah_read_grh(ah_attr));

		/*
		 * For loopback packets with extended LIDs, the
		 * sgid_index in the GRH is 0 and the dgid is
		 * OPA GID of the sender. While creating a response
		 * to the loopback packet, IB core creates the new
		 * sgid_index from the DGID and that will be the
		 * OPA_GID_INDEX. The new dgid is from the sgid
		 * index and that will be in the IB GID format.
		 *
		 * We now have a case where the sent packet had a
		 * different sgid_index and dgid compared to the
		 * one that was received in response.
		 *
		 * Fix this inconsistency.
		 */
		if (priv->hdr_type == HFI1_PKT_TYPE_16B) {
			if (grd.sgid_index == 0)
				grd.sgid_index = OPA_GID_INDEX;

			if (ib_is_opa_gid(&grd.dgid))
				grd.dgid.global.interface_id =
					cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
		}

		hfi1_make_grh(ibp, &grh, &grd, 0, 0);
		hfi1_copy_sge(&qp->r_sge, &grh,
			      sizeof(grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}

	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
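	/*
	 * Copy the payload an SGE at a time: clip each copy to both the
	 * remaining message length and the current SGE, then advance to
	 * the next SGE or, within an lkey-backed MR, to the next mapped
	 * segment.
	 */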
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, true, false);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
		if (sqp->ibqp.qp_type == IB_QPT_GSI ||
		    sqp->ibqp.qp_type == IB_QPT_SMI)
			wc.pkey_index = swqe->ud_wr.pkey_index;
		else
			wc.pkey_index = sqp->s_pkey_index;
	} else {
		wc.pkey_index = 0;
	}
	wc.slid = (ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
			       ((1 << ppd->lmc) - 1))) & U16_MAX;
	/* Check for loopback when the port lid is not set */
	if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
		wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
	wc.sl = rdma_ah_get_sl(ah_attr);
	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}
static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       struct ib_other_headers *ohdr,
			       u16 *pkey, u32 extra_bytes, bool bypass)
{
	u32 bth0;
	struct hfi1_ibport *ibp;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else {
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	}

	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
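	/* The pad count goes in BTH bits 20+ (2 bits for 9B, 3 for 16B). */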
	bth0 |= extra_bytes << 20;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
		*pkey = hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
	else
		*pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	if (!bypass)
		bth0 |= *pkey;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
	ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
					 qp->qkey : wqe->ud_wr.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
}
void hfi1_make_ud_req_9B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			 struct rvt_swqe *wqe)
{
	u32 nwords, extra_bytes;
	u16 len, slid, dlid, pkey;
	u16 lrh0 = 0;
	u8 sc5;
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct ib_grh *grh;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
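	/*
	 * -len & 3 is the number of pad bytes needed to round the payload
	 * up to a dword boundary (e.g. a 5-byte payload pads by 3);
	 * nwords is then the padded payload plus the CRC, in dwords.
	 */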
	extra_bytes = -wqe->length & 3;
	nwords = ((wqe->length + extra_bytes) >> 2) + SIZE_OF_CRC;
	/* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
	ps->s_txreq->hdr_dwords = 7;
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
		ps->s_txreq->hdr_dwords++;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;
		ps->s_txreq->hdr_dwords +=
			hfi1_make_grh(ibp, grh, rdma_ah_read_grh(ah_attr),
				      ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
				      nwords);
		lrh0 = HFI1_LRH_GRH;
		ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
	} else {
		lrh0 = HFI1_LRH_BTH;
		ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
	}

	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
	lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI) {
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
		priv->s_sc = 0xf;
	} else {
		lrh0 |= (sc5 & 0xf) << 12;
		priv->s_sc = sc5;
	}

	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
	if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
		slid = be16_to_cpu(IB_LID_PERMISSIVE);
	} else {
		u16 lid = (u16)ppd->lid;

		if (lid) {
			lid |= rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1);
			slid = lid;
		} else {
			slid = be16_to_cpu(IB_LID_PERMISSIVE);
		}
	}
	hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, false);
	len = ps->s_txreq->hdr_dwords + nwords;

	/* Setup the packet */
	ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_9B;
	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
			 lrh0, len, dlid, slid);
}
void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			  struct rvt_swqe *wqe)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	u32 dlid, slid, nwords, extra_bytes;
	u32 dest_qp = wqe->ud_wr.remote_qpn;
	u32 src_qp = qp->ibqp.qp_num;
	u16 len, pkey;
	u8 l4, sc5;
	bool is_mgmt = false;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;

	/*
	 * Build 16B Management Packet if either the destination
	 * or source queue pair number is 0 or 1.
	 */
	if (dest_qp == 0 || src_qp == 0 || dest_qp == 1 || src_qp == 1) {
		/* header size in dwords 16B LRH+L4_FM = (16+8)/4. */
		ps->s_txreq->hdr_dwords = 6;
		is_mgmt = true;
	} else {
		/* header size in dwords 16B LRH+BTH+DETH = (16+12+8)/4. */
		ps->s_txreq->hdr_dwords = 9;
		if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			ps->s_txreq->hdr_dwords++;
	}

	/* SW provides space for CRC and LT for bypass packets. */
	extra_bytes = hfi1_get_16b_padding((ps->s_txreq->hdr_dwords << 2),
					   wqe->length);
	nwords = ((wqe->length + extra_bytes + SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
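	/*
	 * nwords counts, in dwords, the padded payload plus the trailing
	 * LT byte and ICRC that bypass packets carry on the wire.
	 */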
	if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) {
		struct ib_grh *grh;
		struct ib_global_route *grd = rdma_ah_retrieve_grh(ah_attr);

		/*
		 * Ensure OPA GIDs are transformed to IB GIDs
		 * before creating the GRH.
		 */
		if (grd->sgid_index == OPA_GID_INDEX) {
			dd_dev_warn(ppd->dd, "Bad sgid_index. sgid_index: %d\n",
				    grd->sgid_index);
			grd->sgid_index = 0;
		}
		grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
		ps->s_txreq->hdr_dwords += hfi1_make_grh(
			ibp, grh, grd,
			ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
			nwords);
		ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
	} else {
		ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
		l4 = OPA_16B_L4_IB_LOCAL;
	}

	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		priv->s_sc = 0xf;
	else
		priv->s_sc = sc5;

	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 16B);
	if (!ppd->lid)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	else
		slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
			   ((1 << ppd->lmc) - 1));
	if (is_mgmt) {
		l4 = OPA_16B_L4_FM;
		pkey = hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
		hfi1_16B_set_qpn(&ps->s_txreq->phdr.hdr.opah.u.mgmt,
				 dest_qp, src_qp);
	} else {
		hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, true);
	}
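	/* A 16B flit is 64 bits, i.e. two dwords. */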
	/* Convert dwords to flits */
	len = (ps->s_txreq->hdr_dwords + nwords) >> 1;

	/* Setup the packet */
	ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_16B;
	hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
			  slid, dlid, len, pkey, 0, 0, l4, priv->s_sc);
}
/**
 * hfi1_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @ps: the current packet state
 *
 * Assume s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct rvt_swqe *wqe;
	int next_cur;
	u32 lid;

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (!ps->s_txreq)
		goto bail_no_tx;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done_free_tx;
	}

	/* see post_one_send() */
	if (qp->s_cur == READ_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
	priv->hdr_type = hfi1_get_hdr_type(ppd->lid, ah_attr);
	if ((!hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) ||
	    (rdma_ah_get_dlid(ah_attr) == be32_to_cpu(OPA_LID_PERMISSIVE))) {
		lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
		if (unlikely(!loopback &&
			     ((lid == ppd->lid) ||
			      ((lid == be32_to_cpu(OPA_LID_PERMISSIVE)) &&
			       (qp->ibqp.qp_type == IB_QPT_GSI))))) {
			unsigned long tflags = ps->flags;
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (iowait_sdma_pending(&priv->s_iowait)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, tflags);
			ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, tflags);
			ps->flags = tflags;
			hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done_free_tx;
		}
	}
	qp->s_cur = next_cur;
	ps->s_txreq->s_cur_size = wqe->length;
	ps->s_txreq->ss = &qp->s_sge;
	qp->s_srate = rdma_ah_get_static_rate(ah_attr);
	qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	/* Make the appropriate header */
	hfi1_make_ud_req_tbl[priv->hdr_type](qp, ps, qp->s_wqe);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	ps->s_txreq->sde = priv->s_sde;
	priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	ps->s_txreq->psc = priv->s_sendcontext;
	/* disarm any ahg */
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;
	priv->s_ahg->tx_flags = 0;

	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	return 0;
}
/*
 * Hardware can't check this so we do it here.
 *
 * This is a slightly different algorithm than the standard pkey check. It
 * special cases the management keys and allows for 0x7fff and 0xffff to be in
 * the table at the same time.
 *
 * @returns the index found or -1 if not found
 */
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned i;

	if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
		unsigned lim_idx = -1;

		for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
			/* here we look for an exact match */
			if (ppd->pkeys[i] == pkey)
				return i;
			if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
				lim_idx = i;
		}

		/* did not find 0xffff return 0x7fff idx if found */
		if (pkey == FULL_MGMT_P_KEY)
			return lim_idx;

		/* no match...  */
		return -1;
	}
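	/*
	 * Bit 15 of a pkey is the membership bit (full vs. limited), so
	 * compare only the low 15 bits; e.g. a table entry of 0x8001
	 * matches a lookup of 0x0001.
	 */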
	pkey &= 0x7fff; /* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
		if ((ppd->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 */
	return -1;
}
void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
		    u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
		    u8 sc5, const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 7;
	u16 len;
	u8 l4;
	struct hfi1_opa_header hdr;
	struct ib_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 nwords;

	hdr.hdr_type = HFI1_PKT_TYPE_16B;
	/* Populate length */
	nwords = ((hfi1_get_16b_padding(hwords << 2, 0) +
		   SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
	if (old_grh) {
		struct ib_grh *grh = &hdr.opah.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16(
			(hwords - LRH_16B_DWORDS + nwords) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.opah.u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.opah.u.oth;
		l4 = OPA_16B_L4_IB_LOCAL;
	}

	/* BIT 16 to 19 is TVER. Bit 20 to 22 is pad cnt */
	bth0 = (IB_OPCODE_CNP << 24) | (1 << 16) |
	       (hfi1_get_16b_padding(hwords << 2, 0) << 20);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(remote_qpn);
	ohdr->bth[2] = 0; /* PSN 0 */

	/* Convert dwords to flits */
	len = (hwords + nwords) >> 1;
	hfi1_make_16b_hdr(&hdr.opah, slid, dlid, len, pkey, 1, 0, l4, sc5);
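	/* plen is in dwords; the PBC itself takes two dwords (8 bytes). */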
	plen = 2 /* PBC */ + hwords + nwords;
	pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (!IS_ERR_OR_NULL(pbuf)) {
			trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
		}
	}
}
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
		u16 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 5;
	u16 lrh0;
	u8 sl = ibp->sc_to_sl[sc5];
	struct hfi1_opa_header hdr;
	struct ib_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	hdr.hdr_type = HFI1_PKT_TYPE_9B;
	if (old_grh) {
		struct ib_grh *grh = &hdr.ibh.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16(
			(hwords - LRH_9B_DWORDS + SIZE_OF_CRC) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.ibh.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.ibh.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}

	lrh0 |= (sc5 & 0xf) << 12 | sl << 4;
	bth0 = pkey | (IB_OPCODE_CNP << 24);
	ohdr->bth[0] = cpu_to_be32(bth0);
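	/* A CNP reports congestion back to the sender via the BECN bit. */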
	ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << IB_BECN_SHIFT));
	ohdr->bth[2] = 0; /* PSN 0 */

	hfi1_make_ib_hdr(&hdr.ibh, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
	plen = 2 /* PBC */ + hwords;
	pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (!IS_ERR_OR_NULL(pbuf)) {
			trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
		}
	}
}
/*
 * opa_smp_check() - Do the regular pkey checking, and the additional
 * checks for SMPs specified in OPAv1 rev 1.0, 9/19/2016 update, section
 * 9.10.25 ("SMA Packet Checks").
 *
 * Note that:
 *   - Checks are done using the pkey directly from the packet's BTH,
 *     and specifically _not_ the pkey that we attach to the completion,
 *     which may be different.
 *   - These checks are specifically for "non-local" SMPs (i.e., SMPs
 *     which originated on another node). SMPs which are sent from, and
 *     destined to this node are checked in opa_local_smp_check().
 *
 * At the point where opa_smp_check() is called, we know:
 *   - destination QP is QP0
 *
 * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
 */
static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
			 struct rvt_qp *qp, u16 slid, struct opa_smp *smp)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	/*
	 * I don't think it's possible for us to get here with sc != 0xf,
	 * but check it to be certain.
	 */
	if (sc5 != 0xf)
		return 1;

	if (rcv_pkey_check(ppd, pkey, sc5, slid))
		return 1;

	/*
	 * At this point we know (and so don't need to check again) that
	 * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY
	 * (see ingress_pkey_check).
	 */
	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
	    smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}

	/*
	 * SMPs fall into one of four (disjoint) categories:
	 * SMA request, SMA response, SMA trap, or SMA trap repress.
	 * Our response depends, in part, on which type of SMP we're
	 * processing.
	 *
	 * If this is an SMA response, skip the check here.
	 *
	 * If this is an SMA request or SMA trap repress:
	 *   - pkey != FULL_MGMT_P_KEY =>
	 *       increment port recv constraint errors, drop MAD
	 *
	 * Otherwise:
	 *    - accept if the port is running an SM
	 *    - drop MAD if it's an SMA trap
	 *    - pkey == FULL_MGMT_P_KEY =>
	 *        reply with unsupported method
	 *    - pkey != FULL_MGMT_P_KEY =>
	 *        increment port recv constraint errors, drop MAD
	 */
	switch (smp->method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_MGMT_METHOD_REPORT_RESP:
		break;
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (pkey != FULL_MGMT_P_KEY) {
			ingress_pkey_table_fail(ppd, pkey, slid);
			return 1;
		}
		break;
	default:
		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
			return 0;
		if (smp->method == IB_MGMT_METHOD_TRAP)
			return 1;
		if (pkey == FULL_MGMT_P_KEY) {
			smp->status |= IB_SMP_UNSUP_METHOD;
			return 0;
		}
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}
	return 0;
}
/**
 * hfi1_ud_rcv - receive an incoming UD packet
 * @packet: the packet structure
 *
 * This is called from qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_ud_rcv(struct hfi1_packet *packet)
{
	u32 hdrsize = packet->hlen;
	struct ib_wc wc;
	u32 src_qp;
	u16 pkey;
	int mgmt_pkey_idx = -1;
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	u8 sc5 = packet->sc;
	u8 sl_from_sc;
	u8 opcode = packet->opcode;
	u8 sl = packet->sl;
	u32 dlid = packet->dlid;
	u32 slid = packet->slid;
	u8 extra_bytes;
	u8 l4 = 0;
	bool dlid_is_permissive;
	bool slid_is_permissive;
	bool solicited = false;
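	/*
	 * Bytes that trail the payload: the pad, any per-type extra byte
	 * (for 16B packets this should be the LT byte), and the ICRC
	 * (SIZE_OF_CRC is in dwords, hence the << 2).
	 */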
	extra_bytes = packet->pad + packet->extra_byte + (SIZE_OF_CRC << 2);

	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		u32 permissive_lid =
			opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B);

		l4 = hfi1_16B_get_l4(packet->hdr);
		pkey = hfi1_16B_get_pkey(packet->hdr);
		dlid_is_permissive = (dlid == permissive_lid);
		slid_is_permissive = (slid == permissive_lid);
	} else {
		pkey = ib_bth_get_pkey(packet->ohdr);
		dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE));
		slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE));
	}
	sl_from_sc = ibp->sc_to_sl[sc5];

	if (likely(l4 != OPA_16B_L4_FM)) {
		src_qp = ib_get_sqpn(packet->ohdr);
		solicited = ib_bth_is_solicited(packet->ohdr);
	} else {
		src_qp = hfi1_16B_get_src_qpn(packet->mgmt);
	}

	process_ecn(qp, packet);
	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	if (unlikely(tlen < (hdrsize + extra_bytes)))
		goto drop;

	tlen -= hdrsize + extra_bytes;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(dlid_is_permissive || slid_is_permissive))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
				/*
				 * Traps will not be sent for packets dropped
				 * by the HW. This is fine, as sending trap
				 * for invalid pkeys is optional according to
				 * IB spec (release 1.3, section 10.9.4)
				 */
				hfi1_bad_pkey(ibp,
					      pkey, sl,
					      src_qp, qp->ibqp.qp_num,
					      slid, dlid);
				return;
			}
		} else {
			/* GSI packet */
			mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
			if (mgmt_pkey_idx < 0)
				goto drop;
		}
		if (unlikely(l4 != OPA_16B_L4_FM &&
			     ib_get_qkey(packet->ohdr) != qp->qkey))
			return; /* Silent drop */
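		/*
		 * QP1 traffic is MADs, capped at 2048 bytes here, and it
		 * must never use SC15 (0xF), which is reserved for SMI
		 * traffic on QP0.
		 */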
		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen > 2048 || (sc5 == 0xF))))
			goto drop;
	} else {
		/* Received on QP0, and so by definition, this is an SMP */
		struct opa_smp *smp = (struct opa_smp *)data;

		if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
			goto drop;

		if (tlen > 2048)
			goto drop;
		if ((dlid_is_permissive || slid_is_permissive) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;

		/* look up SMI pkey */
		mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
		if (mgmt_pkey_idx < 0)
			goto drop;
	}

	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		goto drop;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}
	if (packet->grh) {
		hfi1_copy_sge(&qp->r_sge, packet->grh,
			      sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		struct ib_grh grh;
		/*
		 * Assuming we only created 16B on the send side
		 * if we want to use large LIDs, since GRH was stripped
		 * out when creating 16B, add back the GRH here.
		 */
		hfi1_make_ext_grh(packet, &grh, slid, dlid);
		hfi1_copy_sge(&qp->r_sge, &grh,
			      sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}
	hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
		      true, false);
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;

	if (qp->ibqp.qp_type == IB_QPT_GSI ||
	    qp->ibqp.qp_type == IB_QPT_SMI) {
		if (mgmt_pkey_idx < 0) {
			if (net_ratelimit()) {
				struct hfi1_devdata *dd = ppd->dd;

				dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
					   qp->ibqp.qp_type);
				mgmt_pkey_idx = 0;
			}
		}
		wc.pkey_index = (unsigned)mgmt_pkey_idx;
	} else {
		wc.pkey_index = 0;
	}
	if (slid_is_permissive)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	wc.slid = slid & U16_MAX;
	wc.sl = sl_from_sc;

	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = hfi1_check_mcast(dlid) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, solicited);
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}