/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/io.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/* cut down ridiculously long IB macro names */
#define OP(x) RC_OP(x)

static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = delta_psn(psn, wqe->psn) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	rvt_skip_sge(ss, len, false);
	return wqe->length - len;
}
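
/*
 * Worked example for restart_sge() (illustrative): resuming a
 * 16384-byte SEND at its third packet with pmtu = 4096 gives
 * delta_psn(psn, wqe->psn) = 2, so 8192 bytes are skipped and
 * 8192 bytes remain to be (re)transmitted.
 */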

/**
 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @ps: the xmit packet state
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
		       struct ib_other_headers *ohdr,
		       struct hfi1_pkt_state *ps)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	int middle = 0;
	u32 pmtu = qp->pmtu;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	if (priv->hdr_type == HFI1_PKT_TYPE_9B)
		/* header size in 32-bit words LRH+BTH = (8+12)/4. */
		hwords = 5;
	else
		/* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
		hwords = 7;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If an RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester re-sends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			ps->s_txreq->ss = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = mask_psn(qp->s_ack_rdma_psn++);
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			ps->s_txreq->ss = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = rvt_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = mask_psn(e->psn);
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		ps->s_txreq->ss = &qp->s_ack_rdma_sge;
		ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
		if (ps->s_txreq->mr)
			rvt_get_mr(ps->s_txreq->mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
		} else {
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = mask_psn(qp->s_ack_rdma_psn++);
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		ps->s_txreq->ss = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					    (qp->s_nak_state <<
					     IB_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = rvt_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = mask_psn(qp->s_ack_psn);
	}
	qp->s_rdma_ack_cnt++;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->s_cur_size = len;
	ps->s_txreq->hdr_dwords = hwords;
	hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qp->s_flags &= ~(RVT_S_RESP_PENDING
				| RVT_S_ACK_PENDING
				| HFI1_S_AHG_VALID);
	return 0;
}
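
/*
 * Note on the responder ack queue (a reading aid derived from the code
 * above): s_ack_queue is a ring of HFI1_MAX_RDMA_ATOMIC + 1 entries.
 * r_head_ack_queue is advanced by the receive path as new RDMA read or
 * atomic requests arrive; s_tail_ack_queue is advanced here once the
 * last response packet for an entry has been sent, wrapping to 0 past
 * HFI1_MAX_RDMA_ATOMIC. When head == tail the queue is empty and only a
 * plain ACK (the "normal" label) can still be generated.
 */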

/**
 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Assumes s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0 = 0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int middle = 0;
	int delta;

	lockdep_assert_held(&qp->s_lock);
	ps->s_txreq = get_txreq(ps->dev, qp);
	if (!ps->s_txreq)
		goto bail_no_tx;

	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
		/* header size in 32-bit words LRH+BTH = (8+12)/4. */
		hwords = 5;
		if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
	} else {
		/* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
		hwords = 7;
		if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
		    (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
	}

	/* Sending responses has higher priority than sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    make_rc_ack(dev, qp, ohdr, ps))
		return 1;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
				   IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done_free_tx;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == READ_ONCE(qp->s_head)) {
				clear_ahg(qp);
				goto bail;
			}
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			/*
			 * Local operations are processed immediately
			 * after all prior requests have completed
			 */
			if (wqe->wr.opcode == IB_WR_REG_MR ||
			    wqe->wr.opcode == IB_WR_LOCAL_INV) {
				int local_ops = 0;
				int err = 0;

				if (qp->s_last != qp->s_cur)
					goto bail;
				if (++qp->s_cur == qp->s_size)
					qp->s_cur = 0;
				if (++qp->s_tail == qp->s_size)
					qp->s_tail = 0;
				if (!(wqe->wr.send_flags &
				      RVT_SEND_COMPLETION_ONLY)) {
					err = rvt_invalidate_rkey(
						qp,
						wqe->wr.ex.invalidate_rkey);
					local_ops = 1;
				}
				hfi1_send_complete(qp, wqe,
						   err ? IB_WC_LOC_PROT_ERR
						       : IB_WC_SUCCESS);
				if (local_ops)
					atomic_dec(&qp->local_ops_pending);
				goto done_free_tx;
			}

			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = mask_psn(qp->s_psn);
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
		case IB_WR_SEND_WITH_INV:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			} else {
				qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
				/* Invalidate rkey comes after the BTH */
				ohdr->u.ieth = cpu_to_be32(
					wqe->wr.ex.invalidate_rkey);
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			goto no_flow_control;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
no_flow_control:
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See hfi1_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		} else {
			qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
			/* invalidate data comes after the BTH */
			ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate an RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See hfi1_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate an RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See hfi1_restart_rc().
		 */
		len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
		put_ib_reth_vaddr(
			wqe->rdma_wr.remote_addr + len,
			&ohdr->u.rc.reth);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
	delta = delta_psn(bth2, wqe->psn);
	if (delta && delta % HFI1_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	ps->s_txreq->hdr_dwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->ss = ss;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(
		qp,
		ohdr,
		bth0 | (qp->s_state << 24),
		bth2,
		middle,
		ps);
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	return 0;
}
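
/*
 * ACK-request cadence (a reading aid derived from the code above): bth2
 * carries IB_BTH_REQ_ACK explicitly on the only/last packet of each
 * request, and the delta % HFI1_PSN_CREDIT check additionally requests
 * an ACK every HFI1_PSN_CREDIT packets of a multi-packet message,
 * bounding how much data is outstanding before the requester hears
 * back from the responder.
 */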

static inline void hfi1_make_bth_aeth(struct rvt_qp *qp,
				      struct ib_other_headers *ohdr,
				      u32 bth0, u32 bth1)
{
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					   (qp->r_nak_state <<
					    IB_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = rvt_compute_aeth(qp);

	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
}
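
/*
 * AETH layout assumed by the shifts above (per the IBTA AETH format):
 * the top byte [31:24] is the syndrome - bits [31:29] distinguish ACK,
 * RNR NAK, and NAK, and bits [28:24] carry a credit count or NAK code -
 * while bits [23:0] (IB_MSN_MASK) carry the message sequence number.
 * Shifting r_nak_state by IB_AETH_CREDIT_SHIFT places the whole
 * syndrome byte at once.
 */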

static inline void hfi1_queue_rc_ack(struct hfi1_packet *packet, bool is_fecn)
{
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;
	ibp = rcd_to_iport(packet->rcd);
	this_cpu_inc(*ibp->rvp.rc_qacks);
	qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	if (is_fecn)
		qp->s_flags |= RVT_S_ECN;

	/* Schedule the send tasklet. */
	hfi1_schedule_send(qp);
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

static inline void hfi1_make_rc_ack_9B(struct hfi1_packet *packet,
				       struct hfi1_opa_header *opa_hdr,
				       u8 sc5, bool is_fecn,
				       u64 *pbc_flags, u32 *hwords,
				       u32 *nwords)
{
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_header *hdr = &opa_hdr->ibh;
	struct ib_other_headers *ohdr;
	u16 lrh0 = HFI1_LRH_BTH;
	u16 pkey;
	u32 bth0, bth1;

	opa_hdr->hdr_type = HFI1_PKT_TYPE_9B;
	ohdr = &hdr->u.oth;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
	*hwords = 6;

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		*hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
					 rdma_ah_read_grh(&qp->remote_ah_attr),
					 *hwords - 2, SIZE_OF_CRC);
		ohdr = &hdr->u.l.oth;
		lrh0 = HFI1_LRH_GRH;
	}
	/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
	*pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);

	/* read pkey_index w/o lock (it's atomic) */
	pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);

	lrh0 |= (sc5 & IB_SC_MASK) << IB_SC_SHIFT |
		(rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) <<
			IB_SL_SHIFT;

	hfi1_make_ib_hdr(hdr, lrh0, *hwords + SIZE_OF_CRC,
			 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
			 ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr));

	bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	bth1 = (!!is_fecn) << IB_BECN_SHIFT;
	hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
}

static inline void hfi1_make_rc_ack_16B(struct hfi1_packet *packet,
					struct hfi1_opa_header *opa_hdr,
					u8 sc5, bool is_fecn,
					u64 *pbc_flags, u32 *hwords,
					u32 *nwords)
{
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_16b_header *hdr = &opa_hdr->opah;
	struct ib_other_headers *ohdr;
	u32 bth0, bth1 = 0;
	u16 len, pkey;
	bool becn = is_fecn;
	u8 l4 = OPA_16B_L4_IB_LOCAL;
	u8 extra_bytes;

	opa_hdr->hdr_type = HFI1_PKT_TYPE_16B;
	ohdr = &hdr->u.oth;
	/* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */
	*hwords = 8;
	extra_bytes = hfi1_get_16b_padding(*hwords << 2, 0);
	*nwords = SIZE_OF_CRC + ((extra_bytes + SIZE_OF_LT) >> 2);

	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
		*hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
					 rdma_ah_read_grh(&qp->remote_ah_attr),
					 *hwords - 4, *nwords);
		ohdr = &hdr->u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
	}
	*pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;

	/* read pkey_index w/o lock (it's atomic) */
	pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);

	/* Convert dwords to flits */
	len = (*hwords + *nwords) >> 1;

	hfi1_make_16b_hdr(hdr, ppd->lid |
			  (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
			   ((1 << ppd->lmc) - 1)),
			  opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
				      16B), len, pkey, becn, 0, l4, sc5);

	bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth1 = OPA_BTH_MIG_REQ;
	hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
}

typedef void (*hfi1_make_rc_ack)(struct hfi1_packet *packet,
				 struct hfi1_opa_header *opa_hdr,
				 u8 sc5, bool is_fecn,
				 u64 *pbc_flags, u32 *hwords,
				 u32 *nwords);

/* We support only two types - 9B and 16B for now */
static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B
};
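
/*
 * Example dispatch (this is exactly how hfi1_send_rc_ack() below uses
 * the table): priv->hdr_type is either HFI1_PKT_TYPE_9B or
 * HFI1_PKT_TYPE_16B, so it indexes the table directly:
 *
 *	hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5,
 *					     is_fecn, &pbc_flags,
 *					     &hwords, &nwords);
 */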

/**
 * hfi1_send_rc_ack - Construct an ACK packet and send it
 * @packet: the packet state for the packet being ACKed
 * @is_fecn: true if the FECN bit was set in the received packet
 *
 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and send engine.
 */
void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	u64 pbc, pbc_flags = 0;
	u32 hwords = 0;
	u32 nwords = 0;
	u32 plen;
	struct pio_buf *pbuf;
	struct hfi1_opa_header opa_hdr;

	/* clear the defer count */
	qp->r_adefered = 0;

	/* Don't send ACK or NAK if an RDMA read or atomic is pending. */
	if (qp->s_flags & RVT_S_RESP_PENDING) {
		hfi1_queue_rc_ack(packet, is_fecn);
		return;
	}

	/* Ensure s_rdma_ack_cnt changes are committed */
	if (qp->s_rdma_ack_cnt) {
		hfi1_queue_rc_ack(packet, is_fecn);
		return;
	}

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (driver_lstate(ppd) != IB_PORT_ACTIVE)
		return;

	/* Make the appropriate header */
	hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn,
					     &pbc_flags, &hwords, &nwords);

	plen = 2 /* PBC */ + hwords + nwords;
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
			 sc_to_vlt(ppd->dd, sc5), plen);
	pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
	if (IS_ERR_OR_NULL(pbuf)) {
		/*
		 * We have no room to send at the moment. Pass
		 * responsibility for sending the ACK to the send engine
		 * so that when enough buffer space becomes available,
		 * the ACK is sent ahead of other outgoing packets.
		 */
		hfi1_queue_rc_ack(packet, is_fecn);
		return;
	}
	trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
			       &opa_hdr, ib_is_sc5(sc5));

	/* write the pbc and data */
	ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
				 (priv->hdr_type == HFI1_PKT_TYPE_9B ?
				  (void *)&opa_hdr.ibh :
				  (void *)&opa_hdr.opah), hwords);
	return;
}
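
/*
 * Sizing note (derived from the code above): plen counts 32-bit words -
 * 2 for the PBC, plus hwords for the LRH/BTH/AETH (and GRH if present),
 * plus nwords for the 16B pad/LT/ICRC words; nwords is left at its
 * initial 0 for 9B packets since hfi1_make_rc_ack_9B() never sets it.
 */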

/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	lockdep_assert_held(&qp->s_lock);
	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (cmp_psn(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = cmp_psn(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See hfi1_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since there is only
		 * one PSN per request.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
	 * asynchronously before the send engine can get scheduled.
	 * Doing it in hfi1_make_rc_req() is too late.
	 */
	if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
	qp->s_flags &= ~HFI1_S_AHG_VALID;
}
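
/*
 * Example (illustrative): a SEND covering PSNs 5-8 that must restart at
 * PSN 7 takes the IB_WR_SEND arm above, so s_state becomes
 * RDMA_READ_RESPONSE_FIRST; hfi1_make_rc_req() then rebuilds the SGE
 * state via restart_sge() and falls through into the SEND_MIDDLE path.
 */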

/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct hfi1_ibport *ibp;

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			hfi1_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else { /* need to handle delayed completion */
			return;
		}
	} else {
		qp->s_retry--;
	}

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}

/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	lockdep_assert_held(&qp->s_lock);
	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}
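
/*
 * An RDMA read request occupies one PSN on the wire, but its WQE spans
 * the whole range of response PSNs through wqe->lpsn, which is why the
 * loop above jumps s_sending_psn past the entire response range
 * (lpsn + 1) instead of simply using psn + 1.
 */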

/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
{
	struct ib_other_headers *ohdr;
	struct hfi1_qp_priv *priv = qp->priv;
	struct rvt_swqe *wqe;
	struct ib_header *hdr = NULL;
	struct hfi1_16b_header *hdr_16b = NULL;
	u32 opcode;
	u32 psn;

	lockdep_assert_held(&qp->s_lock);
	if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
		return;

	/* Find out where the BTH is */
	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
		hdr = &opah->ibh;
		if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
			ohdr = &hdr->u.oth;
		else
			ohdr = &hdr->u.l.oth;
	} else {
		u8 l4;

		hdr_16b = &opah->opah;
		l4 = hfi1_16B_get_l4(hdr_16b);
		if (l4 == OPA_16B_L4_IB_LOCAL)
			ohdr = &hdr_16b->u.oth;
		else
			ohdr = &hdr_16b->u.l.oth;
	}

	opcode = ib_bth_get_opcode(ohdr);
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = ib_bth_get_psn(ohdr);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags &
	      (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		rvt_add_retry_timer(qp);

	while (qp->s_last != qp->s_acked) {
		u32 s_last;

		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		rvt_qp_wqe_unreserve(qp, wqe);
		s_last = qp->s_last;
		trace_hfi1_qp_send_completion(qp, wqe, s_last);
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_put_swqe(wqe);
		rvt_qp_swqe_complete(qp,
				     wqe,
				     ib_hfi1_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	}
	/*
	 * If we were waiting for sends to complete before re-sending,
	 * and they are now complete, restart sending.
	 */
	trace_hfi1_sendcomplete(qp, psn);
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		hfi1_schedule_send(qp);
	}
}

static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}

/*
 * Generate a SWQE completion.
 * This is similar to hfi1_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct hfi1_ibport *ibp)
{
	lockdep_assert_held(&qp->s_lock);
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		u32 s_last;

		rvt_put_swqe(wqe);
		rvt_qp_wqe_unreserve(qp, wqe);
		s_last = qp->s_last;
		trace_hfi1_qp_send_completion(qp, wqe, s_last);
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_qp_swqe_complete(qp,
				     wqe,
				     ib_hfi1_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	} else {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
		/*
		 * If send progress is not running, attempt to progress
		 * the SDMA queue.
		 */
		if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
			struct sdma_engine *engine;
			u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr);
			u8 sc5;

			/* For now use sc to find engine */
			sc5 = ibp->sl_to_sc[sl];
			engine = qp_to_sdma_engine(qp, sc5);
			sdma_engine_progress_schedule(engine);
		}
	}

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop re-sending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_draining = 0;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}
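
/*
 * Ring-pointer roles in the logic above (a sketch, following the usual
 * rdmavt send-queue conventions): s_last is the oldest WQE not yet
 * reported to the CQ, s_acked the oldest not yet fully ACKed, s_cur the
 * WQE currently being transmitted, and s_tail/s_head bound the started
 * and posted work requests; completion here advances s_acked (and
 * possibly s_cur) without disturbing "furthest progress" state.
 */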

/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @aeth: the AETH from the ACK packet
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 * @val: the data returned by an atomic acknowledge, if any
 * @rcd: the receive context
 *
 * This is called from rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * May be called at interrupt level, with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request. The MSN won't include the NAK'ed
	 * request but will include previously ACK'ed requests.
	 */
	ack_psn = psn;
	if (aeth >> IB_AETH_NAK_SHIFT)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = rcd_to_iport(rcd);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail_stop;
		}
		/*
		 * If this request is an RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic. In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK an RDMA read and likewise for atomic ops. Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					rvt_get_qp(qp);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail_stop;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

	switch (aeth >> IB_AETH_NAK_SHIFT) {
	case 0:         /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * mod the retry timer.
			 */
			rvt_mod_retry_timer(qp);
			/*
			 * We can stop re-sending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (cmp_psn(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else {
			/* No more acks - kill all timers */
			rvt_stop_rc_timers(qp);
			if (cmp_psn(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}
		rvt_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		return 1;

	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail_stop;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

		reset_psn(qp, psn);

		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		rvt_stop_rc_timers(qp);
		rvt_add_rnr_timer(qp, aeth);
		return 0;

	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
			IB_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			hfi1_restart_rc(qp, psn, 0);
			hfi1_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				hfi1_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail_stop;

	default:                /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail_stop;
	}
	/* cannot be reached */
bail_stop:
	rvt_stop_rc_timers(qp);
	return ret;
}

/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
			 struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from retry timer */
	rvt_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (cmp_psn(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

/**
 * rc_rcv_resp - process an incoming RC response packet
 * @packet: data packet information
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void rc_rcv_resp(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp;
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_swqe *wqe;
	enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u64 val;
	u32 aeth;
	u32 psn = ib_bth_get_psn(packet->ohdr);
	u32 pmtu = qp->pmtu;
	u16 hdrsize = packet->hlen;
	u8 opcode = packet->opcode;
	u8 pad = packet->pad;
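	/* pad bytes, any 16B-format extra byte, and the 4-byte ICRC */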
	u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);

	spin_lock_irqsave(&qp->s_lock, flags);
	trace_hfi1_ack(qp, psn);

	/* Ignore invalid responses. */
	if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = cmp_psn(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
				rvt_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		rvt_mod_retry_timer(qp);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, false, false);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + extra_bytes)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + extra_bytes)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + extra_bytes;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, false, false);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void)do_rc_ack(qp, aeth, psn,
				OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	ibp = rcd_to_iport(rcd);
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		hfi1_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}
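
/*
 * rc_defered_ack - defer sending an ACK/NAK for this QP
 *
 * Queue the QP on the receive context's wait list so the response goes
 * out only after the packets already in the receive queue are processed.
 */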
static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct rvt_qp *qp)
{
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}
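
/*
 * rc_cancel_ack - discard any deferred ACK state for this QP
 *
 * Called when an ACK is about to be sent directly, making a previously
 * queued deferred response unnecessary.
 */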
static inline void rc_cancel_ack(struct rvt_qp *qp)
{
	qp->r_adefered = 0;
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
	qp->r_flags &= ~RVT_R_RSP_NAK;
	rvt_put_qp(qp);
}

/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the receive context
 *
 * This is called from hfi1_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
				 struct rvt_qp *qp, u32 opcode, u32 psn,
				 int diff, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 i, prev;
	int old_req;

	trace_hfi1_rcv_error(qp, psn);
	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	/*
	 * Handle a duplicate request. Don't re-execute SEND, RDMA
	 * write or atomic op. Don't NAK errors, just silently drop
	 * the duplicate request. Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK. We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	e = NULL;
	old_req = 1;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = HFI1_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (cmp_psn(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    cmp_psn(psn, e->lpsn) <= 0)
				old_req = 0;
			break;
		}
	}
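
	/*
	 * e now points at the newest queued response whose starting PSN
	 * is at or before the duplicate PSN, or is NULL if no queued
	 * entry covers it.
	 */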
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = delta_psn(psn, e->psn) * qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send engine is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8)opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}

		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	hfi1_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}
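
/*
 * Advance s_tail_ack_queue past entry @n, wrapping at the end of the
 * (HFI1_MAX_RDMA_ATOMIC + 1)-entry ring, and reset the ACK state machine.
 */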
static inline void update_ack_queue(struct rvt_qp *qp, unsigned int n)
{
	unsigned int next;

	next = n + 1;
	if (next > HFI1_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}
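
/*
 * Record a congestion-control event in the per-port log. The log is a
 * ring of OPA_CONG_LOG_ELEMS entries; cc_log_idx wraps back to zero.
 */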
static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
			  u32 lqpn, u32 rqpn, u8 svc_type)
{
	struct opa_hfi1_cong_log_event_internal *cc_event;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	spin_lock_irqsave(&ppd->cc_log_lock, flags);

	ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
	ppd->threshold_event_counter++;

	cc_event = &ppd->cc_events[ppd->cc_log_idx++];
	if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
		ppd->cc_log_idx = 0;
	cc_event->lqpn = lqpn & RVT_QPN_MASK;
	cc_event->rqpn = rqpn & RVT_QPN_MASK;
	cc_event->sl = sl;
	cc_event->svc_type = svc_type;
	cc_event->rlid = rlid;
	/* keep timestamp in units of 1.024 usec */
	cc_event->timestamp = ktime_get_ns() / 1024;

	spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}
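
/**
 * process_becn - handle a Backward Explicit Congestion Notification
 * @ppd: the port data
 * @sl: the service level of the marked packet
 * @rlid: the remote LID
 * @lqpn: the local QP number
 * @rqpn: the remote QP number
 * @svc_type: the service type
 *
 * Increase the CCTI for this SL (bounded by ccti_limit), update the
 * link's inter-packet gap, and start the CCA timer that later decays
 * the CCTI back down.
 */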
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type)
{
	struct cca_timer *cca_timer;
	u16 ccti, ccti_incr, ccti_timer, ccti_limit;
	u8 trigger_threshold;
	struct cc_state *cc_state;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		return;

	/*
	 * 1) increase CCTI (for this SL)
	 * 2) select IPG (i.e., call set_link_ipg())
	 * 3) start timer
	 */
	ccti_limit = cc_state->cct.ccti_limit;
	ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
	trigger_threshold =
		cc_state->cong_setting.entries[sl].trigger_threshold;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	cca_timer = &ppd->cca_timer[sl];
	if (cca_timer->ccti < ccti_limit) {
		if (cca_timer->ccti + ccti_incr <= ccti_limit)
			cca_timer->ccti += ccti_incr;
		else
			cca_timer->ccti = ccti_limit;
		set_link_ipg(ppd);
	}

	ccti = cca_timer->ccti;

	if (!hrtimer_active(&cca_timer->hrtimer)) {
		/* ccti_timer is in units of 1.024 usec */
		unsigned long nsec = 1024 * ccti_timer;

		hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
			      HRTIMER_MODE_REL_PINNED);
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);

	if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
		log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
}

/**
 * hfi1_rc_rcv - process an incoming RC packet
 * @packet: data packet information
 *
 * This is called from qp_rcv() to process an incoming RC packet
 * for the given QP.
 * May be called at interrupt level.
 */
void hfi1_rc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct ib_other_headers *ohdr = packet->ohdr;
	u32 opcode = packet->opcode;
	u32 hdrsize = packet->hlen;
	u32 psn = ib_bth_get_psn(packet->ohdr);
	u32 pad = packet->pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret;
	bool copy_last = false, fecn;
	u32 rkey;
	u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);

	lockdep_assert_held(&qp->r_lock);

	if (hfi1_ruc_check_hdr(ibp, packet))
		return;

	fecn = process_ecn(qp, packet);

	/*
	 * Process responses (ACKs) before anything else. Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		rc_rcv_resp(packet);
		return;
	}

	/* Compute 24 bits worth of difference. */
	diff = delta_psn(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		/*
		 * There will be no padding for 9B packet but 16B packets
		 * will come in with some padding since we always add
		 * CRC and LT bytes which will need to be flit aligned
		 */
		if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
	case OP(SEND_ONLY_WITH_INVALIDATE):
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
			goto send_last_inv;
		/* FALLTHROUGH -- for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST_WITH_INVALIDATE):
send_last_inv:
		rkey = be32_to_cpu(ohdr->u.ieth);
		if (rvt_invalidate_rkey(qp, rkey))
			goto no_immediate_data;
		wc.ex.invalidate_rkey = rkey;
		wc.wc_flags = IB_WC_WITH_INVALIDATE;
		goto send_last;
	case OP(RDMA_WRITE_LAST):
		copy_last = rvt_is_user_qp(qp);
		/* fall through */
	case OP(SEND_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + extra_bytes)))
			goto nack_inv;
		/* Don't count the CRC (and padding and LT byte for 16B). */
		tlen -= (hdrsize + extra_bytes);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, tlen, true, copy_last);
		rvt_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     ib_bth_is_solicited(ohdr));
		break;

	case OP(RDMA_WRITE_ONLY):
		copy_last = rvt_is_user_qp(qp);
		/* fall through */
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* consume RWQE */
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto nack_op_err;
		if (!ret) {
			/* peer will send again */
			rvt_put_ss(&qp->r_sge);
			goto rnr_nak;
		}
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN. We add 1 later
			 * below, so only add the remainder here.
			 */
			qp->r_psn += rvt_div_mtu(qp, len - 1);
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		if (fecn)
			qp->s_flags |= RVT_S_ECN;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = get_ib_ateth_vaddr(ateth);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
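		/*
		 * FETCH_ADD must return the value prior to the add, so
		 * subtract sdata back out of atomic64_add_return()'s
		 * post-add result; cmpxchg() already returns the old value.
		 */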
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = get_ib_ateth_swap(ateth);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     get_ib_ateth_compare(ateth),
				     sdata);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		if (fecn)
			qp->s_flags |= RVT_S_ECN;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;

	/* Send an ACK if requested or required. */
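	/*
	 * ACKs are coalesced: up to HFI1_PSN_CREDIT acknowledgments may
	 * be deferred, but an FECN-marked packet or a full credit window
	 * forces an immediate ACK.
	 */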
	if (psn & IB_BTH_REQ_ACK || fecn) {
		if (packet->numpkt == 0 || fecn ||
		    qp->r_adefered >= HFI1_PSN_CREDIT) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		qp->r_adefered++;
		rc_defered_ack(rcd, qp);
	}
	return;

rnr_nak:
	qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	hfi1_send_rc_ack(packet, fecn);
}
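
/**
 * hfi1_rc_hdrerr - handle a packet received with header errors on an RC QP
 * @rcd: the receive context
 * @packet: the packet data
 * @qp: the QP for this packet
 *
 * Out-of-sequence request packets get a deferred sequence NAK so the
 * requester backs up to the expected PSN.
 */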
void hfi1_rc_hdrerr(
	struct hfi1_ctxtdata *rcd,
	struct hfi1_packet *packet,
	struct rvt_qp *qp)
{
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	int diff;
	u32 opcode;
	u32 psn;

	if (hfi1_ruc_check_hdr(ibp, packet))
		return;

	psn = ib_bth_get_psn(packet->ohdr);
	opcode = ib_bth_get_opcode(packet->ohdr);

	/* Only deal with RDMA Writes for now */
	if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
		diff = delta_psn(psn, qp->r_psn);
		if (!qp->r_nak_state && diff >= 0) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		} /* Out of sequence NAK */
	} /* QP Request NAKs */
}