ev.c

/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/mman.h>
#include <net/sock.h>

#include "iw_cxgb4.h"

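/*
 * Read the TPT entry for @stag from adapter memory via the LLD helper
 * cxgb4_read_tpte() and log its decoded fields for debugging.
 */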
static void print_tpte(struct c4iw_dev *dev, u32 stag)
{
	int ret;
	struct fw_ri_tpte tpte;

	ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&dev->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return;
	}
	pr_debug("stag idx 0x%x valid %d key 0x%x state %d pdid %d perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		 stag & 0xffffff00,
		 FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
		 FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
		 FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
		 ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		 ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
}

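/*
 * Log the raw and decoded contents of an error CQE.  For ingress RDMA
 * WRITE and READ_RESP errors, also dump the TPT entry of the offending
 * STAG.
 */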
static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
{
	__be64 *p = (void *)err_cqe;

	dev_err(&dev->rdev.lldi.pdev->dev,
		"AE qpid %d opcode %d status 0x%x "
		"type %d len 0x%x wrid.hi 0x%x wrid.lo 0x%x\n",
		CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
		CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
		CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));

	pr_debug("%016llx %016llx %016llx %016llx - %016llx %016llx %016llx %016llx\n",
		 be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
		 be64_to_cpu(p[3]), be64_to_cpu(p[4]), be64_to_cpu(p[5]),
		 be64_to_cpu(p[6]), be64_to_cpu(p[7]));

	/*
	 * Ingress WRITE and READ_RESP errors provide
	 * the offending stag, so parse and log it.
	 */
	if (RQ_TYPE(err_cqe) && (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE ||
				 CQE_OPCODE(err_cqe) == FW_RI_READ_RESP))
		print_tpte(dev, CQE_WRID_STAG(err_cqe));
}

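/*
 * Move the QP to TERMINATE if it is still in RTS, deliver the affiliated
 * asynchronous error event to the consumer's event handler, and kick the
 * CQ's completion handler if the CQ was armed.
 */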
static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
			  struct c4iw_qp *qhp,
			  struct t4_cqe *err_cqe,
			  enum ib_event_type ib_event)
{
	struct ib_event event;
	struct c4iw_qp_attributes attrs;
	unsigned long flag;

	dump_err_cqe(dev, err_cqe);

	if (qhp->attr.state == C4IW_QP_STATE_RTS) {
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
			       &attrs, 0);
	}

	event.event = ib_event;
	event.device = chp->ibcq.device;
	if (ib_event == IB_EVENT_CQ_ERR)
		event.element.cq = &chp->ibcq;
	else
		event.element.qp = &qhp->ibqp;
	if (qhp->ibqp.event_handler)
		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);

	if (t4_clear_cq_armed(&chp->cq)) {
		spin_lock_irqsave(&chp->comp_handler_lock, flag);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
	}
}

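/*
 * Dispatch an async error CQE from the hardware: look up the QP and the
 * CQ it refers to, take references on both, and post an IB event whose
 * type is derived from the CQE opcode and status.
 */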
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
{
	struct c4iw_cq *chp;
	struct c4iw_qp *qhp;
	u32 cqid;

	spin_lock_irq(&dev->lock);
	qhp = get_qhp(dev, CQE_QPID(err_cqe));
	if (!qhp) {
		pr_err("BAD AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
		       CQE_WRID_LOW(err_cqe));
		spin_unlock_irq(&dev->lock);
		goto out;
	}

	if (SQ_TYPE(err_cqe))
		cqid = qhp->attr.scq;
	else
		cqid = qhp->attr.rcq;
	chp = get_chp(dev, cqid);
	if (!chp) {
		pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       cqid, CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
		       CQE_WRID_LOW(err_cqe));
		spin_unlock_irq(&dev->lock);
		goto out;
	}

	c4iw_qp_add_ref(&qhp->ibqp);
	atomic_inc(&chp->refcnt);
	spin_unlock_irq(&dev->lock);

	/* Bad incoming write */
	if (RQ_TYPE(err_cqe) &&
	    (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
		goto done;
	}

	switch (CQE_STATUS(err_cqe)) {

	/* Completion Events */
	case T4_ERR_SUCCESS:
		pr_err("AE with status 0!\n");
		break;

	case T4_ERR_STAG:
	case T4_ERR_PDID:
	case T4_ERR_QPID:
	case T4_ERR_ACCESS:
	case T4_ERR_WRAP:
	case T4_ERR_BOUND:
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
		break;

	/* Device Fatal Errors */
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
		break;

	/* QP Fatal Errors */
	case T4_ERR_OUT_OF_RQE:
	case T4_ERR_PBL_ADDR_BOUND:
	case T4_ERR_CRC:
	case T4_ERR_MARKER:
	case T4_ERR_PDU_LEN_ERR:
	case T4_ERR_DDP_VERSION:
	case T4_ERR_RDMA_VERSION:
	case T4_ERR_OPCODE:
	case T4_ERR_DDP_QUEUE_NUM:
	case T4_ERR_MSN:
	case T4_ERR_TBIT:
	case T4_ERR_MO:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_RQE_ADDR_BOUND:
	case T4_ERR_IRD_OVERFLOW:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
		break;

	default:
		pr_err("Unknown T4 status 0x%x QPID 0x%x\n",
		       CQE_STATUS(err_cqe), qhp->wq.sq.qid);
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
		break;
	}
done:
	if (atomic_dec_and_test(&chp->refcnt))
		wake_up(&chp->wait);
	c4iw_qp_rem_ref(&qhp->ibqp);
out:
	return;
}

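/*
 * CQ interrupt handler: look up the CQ for @qid, disarm it, and invoke
 * the consumer's completion handler.
 */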
int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
{
	struct c4iw_cq *chp;
	unsigned long flag;

	spin_lock_irqsave(&dev->lock, flag);
	chp = get_chp(dev, qid);
	if (chp) {
		atomic_inc(&chp->refcnt);
		spin_unlock_irqrestore(&dev->lock, flag);
		t4_clear_cq_armed(&chp->cq);
		spin_lock_irqsave(&chp->comp_handler_lock, flag);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
		if (atomic_dec_and_test(&chp->refcnt))
			wake_up(&chp->wait);
	} else {
		pr_debug("unknown cqid 0x%x\n", qid);
		spin_unlock_irqrestore(&dev->lock, flag);
	}
	return 0;
}