qib_rc.c

/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/io.h>

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
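/* e.g. OP(SEND_FIRST) expands to IB_OPCODE_RC_SEND_FIRST. */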
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	rvt_skip_sge(ss, len, false);
	return wqe->length - len;
}
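/*
 * Illustrative example: if the restart PSN is 3 packets past wqe->psn and
 * the pmtu is 4096, restart_sge() skips 3 * 4096 = 12288 bytes into the
 * SGE list and reports 12288 fewer bytes left to send.
 */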
/**
 * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
			   struct ib_other_headers *ohdr, u32 pmtu)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;

	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If a RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester resends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			qp->s_rdma_mr = e->rdma_sge.mr;
			if (qp->s_rdma_mr)
				rvt_get_mr(qp->s_rdma_mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = rvt_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = e->psn & QIB_PSN_MASK;
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		qp->s_cur_sge = &qp->s_ack_rdma_sge;
		qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
		if (qp->s_rdma_mr)
			rvt_get_mr(qp->s_rdma_mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu)
			len = pmtu;
		else {
			ohdr->u.aeth = rvt_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					    (qp->s_nak_state <<
					     IB_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = rvt_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = qp->s_ack_psn & QIB_PSN_MASK;
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0, bth2);
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING);
	return 0;
}
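/*
 * Illustrative note: s_ack_queue in qib_make_rc_ack() above is a small
 * ring of QIB_MAX_RDMA_ATOMIC + 1 entries.  r_head_ack_queue indexes the
 * slot the responder will fill next and s_tail_ack_queue the oldest
 * response still being sent, which is why the tail increment wraps at
 * QIB_MAX_RDMA_ATOMIC rather than at a power of two, and why head == tail
 * means the queue is empty.
 */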
/**
 * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 * @flags: unused
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int ret = 0;
	int delta;

	ohdr = &priv->s_hdr->u.oth;
	if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
		ohdr = &priv->s_hdr->u.l.oth;

	/* Sending responses has higher priority than sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    qib_make_rc_ack(dev, qp, ohdr, pmtu))
		goto done;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
			IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}
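	/*
	 * Illustrative note on the check above: [s_sending_psn,
	 * s_sending_hpsn] tracks the PSN range of packets currently being
	 * handed to the hardware.  If the next PSN to build still falls
	 * inside that window we must wait (RVT_S_WAIT_PSN); otherwise the
	 * window is reset to "empty" by making its high end one less than
	 * its low end.
	 */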
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == READ_ONCE(qp->s_head))
				goto bail;
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = qp->s_psn & QIB_PSN_MASK;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			goto no_flow_control;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
no_flow_control:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data =
					wqe->rdma_wr.wr.ex.imm_data;
				hwords += 1;
				if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->rdma_wr.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
	delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
	if (delta && delta % QIB_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
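	/*
	 * Illustrative note: PSNs are 24 bits wide and bth2 may also carry
	 * IB_BTH_REQ_ACK in its top bit.  The "<< 8 >> 8" pair above drops
	 * the high byte and sign-extends the 24-bit PSN difference, so e.g.
	 * a wrapped 24-bit difference of 0xffffff is seen as -1 rather than
	 * 16777215 by the QIB_PSN_CREDIT modulus test.
	 */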
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return ret;
}
/**
 * qib_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from qib_rc_rcv() and qib_kreceive().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */
void qib_send_rc_ack(struct rvt_qp *qp)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc;
	u16 lrh0;
	u32 bth0;
	u32 hwords;
	u32 pbufn;
	u32 __iomem *piobuf;
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	u32 control;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header with s_lock held so APM doesn't change it. */
	ohdr = &hdr.u.oth;
	lrh0 = QIB_LRH_BTH;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
	hwords = 6;
	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
		     IB_AH_GRH)) {
		hwords += qib_make_grh(ibp, &hdr.u.l.grh,
				       rdma_ah_read_grh(&qp->remote_ah_attr),
				       hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = QIB_LRH_GRH;
	}
	/* read pkey_index w/o lock (it's atomic) */
	bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					   (qp->r_nak_state <<
					    IB_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = rvt_compute_aeth(qp);
	lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
		rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid |
				 rdma_ah_get_path_bits(&qp->remote_ah_attr));
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (!(ppd->lflags & QIBL_LINKACTIVE))
		goto done;

	control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
				       qp->s_srate, lrh0 >> 12);
	/* length is + 1 for the control dword */
	pbc = ((u64) control << 32) | (hwords + 1);
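	/*
	 * Illustrative note (assumption about the buffer-control format):
	 * the PBC quadword built above packs the chip control bits in its
	 * upper 32 bits and the send length in dwords in its lower 32 bits,
	 * with the "+ 1" covering the control dword itself, per the comment
	 * above.
	 */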
	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (!piobuf) {
		/*
		 * We are out of PIO buffers at the moment.
		 * Pass responsibility for sending the ACK to the
		 * send tasklet so that when a PIO buffer becomes
		 * available, the ACK is sent ahead of other outgoing
		 * packets.
		 */
		spin_lock_irqsave(&qp->s_lock, flags);
		goto queue_ack;
	}

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness on some cpus
	 * or the WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);

	if (dd->flags & QIB_PIO_FLUSH_WC) {
		u32 *hdrp = (u32 *) &hdr;

		qib_flush_wc();
		qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
		qib_flush_wc();
		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
	} else
		qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);

	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}

	qib_flush_wc();
	qib_sendbuf_done(dd, pbufn);

	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
	goto done;

queue_ack:
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		this_cpu_inc(*ibp->rvp.rc_qacks);
		qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
		qp->s_nak_state = qp->r_nak_state;
		qp->s_ack_psn = qp->r_ack_psn;

		/* Schedule the send tasklet. */
		qib_schedule_send(qp);
	}
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return;
}
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from qib_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (qib_cmp24(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = qib_cmp24(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See qib_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since it's only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as qib_rc_complete() may start the timer
	 * asynchronously before the send tasklet can get scheduled.
	 * Doing it in qib_make_rc_req() is too late.
	 */
	if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
}
/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct qib_ibport *ibp;

	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			qib_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else /* XXX need to handle delayed completion */
			return;
	} else
		qp->s_retry--;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}
/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (qib_cmp24(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}
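/*
 * Illustrative example for reset_sending_psn() above: an RDMA read request
 * is a single request packet but is assigned a PSN for every expected
 * response packet, so a read spanning three pmtu-sized responses covers
 * wqe->psn..wqe->lpsn (three PSNs).  That is why the sending PSN jumps to
 * lpsn + 1 for reads instead of simply psn + 1.
 */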
/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 opcode;
	u32 psn;

	if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		rvt_add_retry_timer(qp);

	while (qp->s_last != qp->s_acked) {
		u32 s_last;

		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_put_swqe(wqe);
		rvt_qp_swqe_complete(qp,
				     wqe,
				     ib_qib_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	}
	/*
	 * If we were waiting for sends to complete before resending,
	 * and they are now complete, restart sending.
	 */
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		qib_schedule_send(qp);
	}
}
static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}

/*
 * Generate a SWQE completion.
 * This is similar to qib_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct qib_ibport *ibp)
{
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		u32 s_last;

		rvt_put_swqe(wqe);
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_qp_swqe_complete(qp,
				     wqe,
				     ib_qib_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	} else
		this_cpu_inc(*ibp->rvp.rc_delayed_comp);

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop resending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_draining = 0;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}
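/*
 * Illustrative note on the AETH fields decoded in do_rc_ack() below:
 * bits 31..29 (IB_AETH_NAK_SHIFT) select ACK (0), RNR NAK (1), or NAK (3);
 * bits 28..24 (IB_AETH_CREDIT_SHIFT / IB_AETH_CREDIT_MASK) carry the
 * credit count or NAK code; and bits 23..0 (IB_MSN_MASK) carry the MSN.
 */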
/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @aeth: the acknowledgment extended transport header of the ACK
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 * @val: the value returned by an atomic ACK, if any
 * @rcd: the receive context
 *
 * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct qib_ctxtdata *rcd)
{
	struct qib_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;

	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include the ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> IB_AETH_NAK_SHIFT)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = to_iport(qp->ibqp.device, qp->port_num);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail;
		}
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK a RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				qib_restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					rvt_get_qp(qp);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				qib_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				qib_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

	switch (aeth >> IB_AETH_NAK_SHIFT) {
	case 0:         /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * reset the retransmit timer.
			 */
			rvt_mod_retry_timer(qp);
			/*
			 * We can stop resending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (qib_cmp24(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else {
			/* No more acks - kill all timers */
			rvt_stop_rc_timers(qp);
			if (qib_cmp24(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			qib_schedule_send(qp);
		}
		rvt_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		return 1;

	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

		reset_psn(qp, psn);

		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		rvt_stop_rc_timers(qp);
		rvt_add_rnr_timer(qp, aeth);
		return 0;

	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
			IB_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			qib_restart_rc(qp, psn, 0);
			qib_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				qib_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail;

	default:                /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail;
	}

bail:
	rvt_stop_rc_timers(qp);
	return ret;
}
/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
			 struct qib_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	/* Remove QP from retry timer */
	rvt_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	while (qib_cmp24(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	qib_restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}
/**
 * qib_rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @rcd: the receive context
 *
 * This is called from qib_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void qib_rc_rcv_resp(struct qib_ibport *ibp,
			    struct ib_other_headers *ohdr,
			    void *data, u32 tlen,
			    struct rvt_qp *qp,
			    u32 opcode,
			    u32 psn, u32 hdrsize, u32 pmtu,
			    struct qib_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u32 pad;
	u32 aeth;
	u64 val;

	if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
		/*
		 * If the ACK'd PSN is on the SDMA busy list, try to make
		 * progress to reclaim SDMA credits.
		 */
		if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
		    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
			/*
			 * If the send tasklet is not running, attempt to
			 * progress the SDMA queue.
			 */
			if (!(qp->s_flags & RVT_S_BUSY)) {
				/* Acquire SDMA Lock */
				spin_lock_irqsave(&ppd->sdma_lock, flags);
				/* Invoke sdma make progress */
				qib_sdma_make_progress(ppd);
				/* Release SDMA Lock */
				spin_unlock_irqrestore(&ppd->sdma_lock,
						       flags);
			}
		}
	}

	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto ack_done;

	/* Ignore invalid responses. */
	if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = qib_cmp24(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
				rvt_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		hdrsize += 4;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		rvt_mod_retry_timer(qp);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			qib_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 8)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 8)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + pad + 8;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void) do_rc_ack(qp, aeth, psn,
				 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		qib_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}
/**
 * qib_rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the receive context
 *
 * This is called from qib_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
  1401. static int qib_rc_rcv_error(struct ib_other_headers *ohdr,
  1402. void *data,
  1403. struct rvt_qp *qp,
  1404. u32 opcode,
  1405. u32 psn,
  1406. int diff,
  1407. struct qib_ctxtdata *rcd)
  1408. {
  1409. struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
  1410. struct rvt_ack_entry *e;
  1411. unsigned long flags;
  1412. u8 i, prev;
  1413. int old_req;
	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			if (list_empty(&qp->rspwait)) {
				qp->r_flags |= RVT_R_RSP_NAK;
				rvt_get_qp(qp);
				list_add_tail(&qp->rspwait,
					      &rcd->qp_wait_list);
			}
		}
		goto done;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
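	/*
	 * For example (illustrative PSNs): if the ack queue holds a
	 * read response covering PSNs 4-7 and a duplicate request
	 * arrives with PSN 5, the scan below stops at that entry so
	 * the response can be restarted mid-read rather than replayed
	 * from PSN 4.
	 */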
	e = NULL;
	old_req = 1;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = QIB_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (qib_cmp24(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    qib_cmp24(psn, e->lpsn) <= 0)
				old_req = 0;
			break;
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = ((psn - e->psn) & QIB_PSN_MASK) * qp->pmtu;
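		/*
		 * Each response packet before the last carries exactly
		 * one PMTU of data, so the byte offset of the restart
		 * point is the PSN delta times the PMTU; e.g. (made-up
		 * numbers) restarting 3 PSNs into a read with a
		 * 2048-byte PMTU gives offset 6144.  The remaining
		 * length must then reach exactly to the end of the
		 * original request, which the check below enforces.
		 */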
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8) opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}
		/*
		 * Try to send a simple ACK to work around a Mellanox bug
		 * which doesn't accept a RDMA read response or atomic
		 * response as an ACK for earlier SENDs or RDMA writes.
		 */
		if (!(qp->s_flags & RVT_S_RESP_PENDING)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
			goto send_ack;
		}
		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	qib_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}

static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
{
	unsigned next;

	next = n + 1;
	if (next > QIB_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}

/**
 * qib_rc_rcv - process an incoming RC packet
 * @rcd: the context pointer
 * @hdr: the header of this packet
 * @has_grh: true if the header has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 *
 * This is called from qib_qp_rcv() to process an incoming RC packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;
	struct ib_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
	}
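	/*
	 * The first BTH dword carries the opcode in bits 31:24 and the
	 * pad count in bits 21:20 (per the IBTA BTH layout), which is
	 * why the opcode is shifted down 24 bits below and the pad is
	 * extracted with (>> 20) & 3 throughout this file.
	 */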
	opcode = be32_to_cpu(ohdr->bth[0]);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode >>= 24;

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
				hdrsize, pmtu, rcd);
		return;
	}

	/* Compute 24 bits worth of difference. */
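	/*
	 * PSNs are 24 bits wide, so qib_cmp24() compares them modulo
	 * 2^24: its sign reflects circular order, e.g. PSN 0 compares
	 * greater than PSN 0xffffff because it is one step past it.
	 */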
	diff = qib_cmp24(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
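		/*
		 * FIRST/MIDDLE packets must carry exactly one PMTU of
		 * payload; the extra 4 bytes in the comparison are the
		 * trailing ICRC, which is included in tlen.
		 */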
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		qib_copy_sge(&qp->r_sge, data, pmtu, 1);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		/* FALLTHROUGH -- for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(SEND_LAST):
	case OP(RDMA_WRITE_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		qib_copy_sge(&qp->r_sge, data, tlen, 1);
		rvt_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     ib_bth_is_solicited(ohdr));
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* consume RWQE */
		reth = &ohdr->u.rc.reth;
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto nack_op_err;
		if (!ret) {
			rvt_put_ss(&qp->r_sge);
			goto rnr_nak;
		}
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > QIB_MAX_RDMA_ATOMIC)
			next = 0;
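		/*
		 * If advancing the head would collide with the tail,
		 * the ack queue is full.  An unsent tail entry may
		 * still be needed, so the request must be NAKed; a
		 * sent one can be retired with qib_update_ack_queue()
		 * and its slot reused.
		 */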
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			qib_update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			qp->r_psn += rvt_div_mtu(qp, len - 1);
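			/*
			 * rvt_div_mtu(qp, len - 1) is (len - 1) >>
			 * log_pmtu, i.e. one less than the number of
			 * response packets the read needs.  E.g.
			 * (made-up numbers) len 5000 with a 2048-byte
			 * PMTU takes 3 packets, so r_psn advances by 2
			 * here and by 1 more below, making e->lpsn the
			 * PSN of the last response packet.
			 */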
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		qib_schedule_send(qp);

		goto sunlock;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > QIB_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			qib_update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = get_ib_ateth_vaddr(ateth);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = get_ib_ateth_swap(ateth);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      get_ib_ateth_compare(ateth),
				      sdata);
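		/*
		 * Either way, e->atomic_data ends up holding the value
		 * the target address held before the operation:
		 * subtracting sdata undoes the add for FETCH_ADD, and
		 * cmpxchg() returns the prior value whether or not it
		 * swapped.  That original value is what gets returned
		 * to the requester in the ATOMIC_ACKNOWLEDGE.
		 */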
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		qib_schedule_send(qp);

		goto sunlock;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
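	/*
	 * Bit 31 of the BTH PSN word is the AckReq bit; this is the
	 * same test qib_rc_rcv_error() spells as psn & IB_BTH_REQ_ACK.
	 */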
	if (psn & (1 << 31))
		goto send_ack;
	return;

rnr_nak:
	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	return;

nack_op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	qib_send_rc_ack(qp);
	return;

sunlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}