iwch_cq.c

/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"
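
/*
 * Translate the next CQE on this CQ into an ib_wc.  qhp may be NULL when no
 * QP matches the CQE's QPID; in that case cxio_poll_cq() is called without a
 * work queue and the entry is consumed without reporting a completion.
 */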
static int __iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
			      struct iwch_qp *qhp, struct ib_wc *wc)
{
	struct t3_wq *wq = qhp ? &qhp->wq : NULL;
	struct t3_cqe cqe;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie;
	int ret = 1;

	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
			   &credit);
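
	/* T3A devices need consumed CQ credits pushed back to the hardware. */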
	if (t3a_device(chp->rhp) && credit) {
		pr_debug("%s updating %d cq credits on id %d\n", __func__,
			 credit, chp->cq.cqid);
		cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
	}
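
	/*
	 * A non-zero return from cxio_poll_cq() means the consumed CQE did
	 * not yield a work completion; report -EAGAIN so the caller polls
	 * again.
	 */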
	if (ret) {
		ret = -EAGAIN;
		goto out;
	}
	ret = 1;

	wc->wr_id = cookie;
	wc->qp = qhp ? &qhp->ibqp : NULL;
	wc->vendor_err = CQE_STATUS(cqe);
	wc->wc_flags = 0;

	pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
		 __func__,
		 CQE_QPID(cqe), CQE_TYPE(cqe),
		 CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
		 CQE_WRID_LOW(cqe), (unsigned long long)cookie);
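
	/* A zero CQE type is a receive completion; anything else completed an SQ WR. */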
	if (CQE_TYPE(cqe) == 0) {
		if (!CQE_STATUS(cqe))
			wc->byte_len = CQE_LEN(cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
		    CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(cqe)) {
		case T3_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case T3_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(cqe);
			break;
		case T3_SEND:
		case T3_SEND_WITH_SE:
		case T3_SEND_WITH_INV:
		case T3_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			break;
		case T3_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case T3_FAST_REGISTER:
			wc->opcode = IB_WC_REG_MR;
			break;
		default:
			pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
			goto out;
		}
	}
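
	/*
	 * Flushed WRs always complete with IB_WC_WR_FLUSH_ERR; otherwise map
	 * the hardware TPT status onto the closest ib_wc_status value.
	 */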
	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {
		switch (CQE_STATUS(cqe)) {
		case TPT_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case TPT_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case TPT_ERR_QPID:
		case TPT_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case TPT_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case TPT_ERR_INVALIDATE_SHARED_MR:
		case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case TPT_ERR_CRC:
		case TPT_ERR_MARKER:
		case TPT_ERR_PDU_LEN_ERR:
		case TPT_ERR_OUT_OF_RQE:
		case TPT_ERR_DDP_VERSION:
		case TPT_ERR_RDMA_VERSION:
		case TPT_ERR_DDP_QUEUE_NUM:
		case TPT_ERR_MSN:
		case TPT_ERR_TBIT:
		case TPT_ERR_MO:
		case TPT_ERR_MSN_RANGE:
		case TPT_ERR_IRD_OVERFLOW:
		case TPT_ERR_OPCODE:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case TPT_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
		}
	}
out:
	return ret;
}

/*
 * Get one cq entry from cxio and map it to openib.
 *
 * Returns:
 *	0			cq empty
 *	1			cqe returned
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
			    struct ib_wc *wc)
{
	struct iwch_qp *qhp;
	struct t3_cqe *rd_cqe;
	int ret;

	rd_cqe = cxio_next_cqe(&chp->cq);
	if (!rd_cqe)
		return 0;

	qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
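
	/*
	 * If the CQE's QP still exists, translate the CQE under the QP lock,
	 * since polling advances the QP's work-queue state.
	 */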
	if (qhp) {
		spin_lock(&qhp->lock);
		ret = __iwch_poll_cq_one(rhp, chp, qhp, wc);
		spin_unlock(&qhp->lock);
	} else {
		ret = __iwch_poll_cq_one(rhp, chp, NULL, wc);
	}
	return ret;
}
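
/*
 * Poll verb: reap up to num_entries completions from the CQ into wc[],
 * holding the CQ lock across the whole batch.
 */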
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		/*
		 * Because T3 can post CQEs that are _not_ associated
		 * with a WR, we might have to poll again after removing
		 * one of these.
		 */
		do {
			err = iwch_poll_cq_one(rhp, chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err <= 0)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);

	if (err < 0)
		return err;

	return npolled;
}