/* cq.c - IB completion queue processing (direct, softirq and workqueue polling) */
  1. /*
  2. * Copyright (c) 2015 HGST, a Western Digital Company.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms and conditions of the GNU General Public License,
  6. * version 2, as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. */
  13. #include <linux/module.h>
  14. #include <linux/err.h>
  15. #include <linux/slab.h>
  16. #include <rdma/ib_verbs.h>
  17. /* # of WCs to poll for with a single call to ib_poll_cq */
  18. #define IB_POLL_BATCH 16
  19. /* # of WCs to iterate over before yielding */
  20. #define IB_POLL_BUDGET_IRQ 256
  21. #define IB_POLL_BUDGET_WORKQUEUE 65536
  22. #define IB_POLL_FLAGS \
  23. (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
  24. static int __ib_process_cq(struct ib_cq *cq, int budget)
  25. {
  26. int i, n, completed = 0;
  27. while ((n = ib_poll_cq(cq, IB_POLL_BATCH, cq->wc)) > 0) {
  28. for (i = 0; i < n; i++) {
  29. struct ib_wc *wc = &cq->wc[i];
  30. if (wc->wr_cqe)
  31. wc->wr_cqe->done(cq, wc);
  32. else
  33. WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);
  34. }
  35. completed += n;
  36. if (n != IB_POLL_BATCH ||
  37. (budget != -1 && completed >= budget))
  38. break;
  39. }
  40. return completed;
  41. }
  42. /**
  43. * ib_process_direct_cq - process a CQ in caller context
  44. * @cq: CQ to process
  45. * @budget: number of CQEs to poll for
  46. *
  47. * This function is used to process all outstanding CQ entries on a
  48. * %IB_POLL_DIRECT CQ. It does not offload CQ processing to a different
  49. * context and does not ask for completion interrupts from the HCA.
  50. *
  51. * Note: for compatibility reasons -1 can be passed in %budget for unlimited
  52. * polling. Do not use this feature in new code, it will be removed soon.
  53. */
  54. int ib_process_cq_direct(struct ib_cq *cq, int budget)
  55. {
  56. WARN_ON_ONCE(cq->poll_ctx != IB_POLL_DIRECT);
  57. return __ib_process_cq(cq, budget);
  58. }
  59. EXPORT_SYMBOL(ib_process_cq_direct);
  60. static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
  61. {
  62. WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
  63. }
  64. static int ib_poll_handler(struct irq_poll *iop, int budget)
  65. {
  66. struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
  67. int completed;
  68. completed = __ib_process_cq(cq, budget);
  69. if (completed < budget) {
  70. irq_poll_complete(&cq->iop);
  71. if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
  72. irq_poll_sched(&cq->iop);
  73. }
  74. return completed;
  75. }
  76. static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
  77. {
  78. irq_poll_sched(&cq->iop);
  79. }
  80. static void ib_cq_poll_work(struct work_struct *work)
  81. {
  82. struct ib_cq *cq = container_of(work, struct ib_cq, work);
  83. int completed;
  84. completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE);
  85. if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
  86. ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
  87. queue_work(ib_comp_wq, &cq->work);
  88. }
  89. static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
  90. {
  91. queue_work(ib_comp_wq, &cq->work);
  92. }
  93. /**
  94. * ib_alloc_cq - allocate a completion queue
  95. * @dev: device to allocate the CQ for
  96. * @private: driver private data, accessible from cq->cq_context
  97. * @nr_cqe: number of CQEs to allocate
  98. * @comp_vector: HCA completion vectors for this CQ
  99. * @poll_ctx: context to poll the CQ from.
  100. *
  101. * This is the proper interface to allocate a CQ for in-kernel users. A
  102. * CQ allocated with this interface will automatically be polled from the
  103. * specified context. The ULP needs must use wr->wr_cqe instead of wr->wr_id
  104. * to use this CQ abstraction.
  105. */
  106. struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
  107. int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx)
  108. {
  109. struct ib_cq_init_attr cq_attr = {
  110. .cqe = nr_cqe,
  111. .comp_vector = comp_vector,
  112. };
  113. struct ib_cq *cq;
  114. int ret = -ENOMEM;
  115. cq = dev->create_cq(dev, &cq_attr, NULL, NULL);
  116. if (IS_ERR(cq))
  117. return cq;
  118. cq->device = dev;
  119. cq->uobject = NULL;
  120. cq->event_handler = NULL;
  121. cq->cq_context = private;
  122. cq->poll_ctx = poll_ctx;
  123. atomic_set(&cq->usecnt, 0);
  124. cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
  125. if (!cq->wc)
  126. goto out_destroy_cq;
  127. switch (cq->poll_ctx) {
  128. case IB_POLL_DIRECT:
  129. cq->comp_handler = ib_cq_completion_direct;
  130. break;
  131. case IB_POLL_SOFTIRQ:
  132. cq->comp_handler = ib_cq_completion_softirq;
  133. irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
  134. ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
  135. break;
  136. case IB_POLL_WORKQUEUE:
  137. cq->comp_handler = ib_cq_completion_workqueue;
  138. INIT_WORK(&cq->work, ib_cq_poll_work);
  139. ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
  140. break;
  141. default:
  142. ret = -EINVAL;
  143. goto out_free_wc;
  144. }
  145. return cq;
  146. out_free_wc:
  147. kfree(cq->wc);
  148. out_destroy_cq:
  149. cq->device->destroy_cq(cq);
  150. return ERR_PTR(ret);
  151. }
  152. EXPORT_SYMBOL(ib_alloc_cq);
  153. /**
  154. * ib_free_cq - free a completion queue
  155. * @cq: completion queue to free.
  156. */
  157. void ib_free_cq(struct ib_cq *cq)
  158. {
  159. int ret;
  160. if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
  161. return;
  162. switch (cq->poll_ctx) {
  163. case IB_POLL_DIRECT:
  164. break;
  165. case IB_POLL_SOFTIRQ:
  166. irq_poll_disable(&cq->iop);
  167. break;
  168. case IB_POLL_WORKQUEUE:
  169. cancel_work_sync(&cq->work);
  170. break;
  171. default:
  172. WARN_ON_ONCE(1);
  173. }
  174. kfree(cq->wc);
  175. ret = cq->device->destroy_cq(cq);
  176. WARN_ON_ONCE(ret);
  177. }
  178. EXPORT_SYMBOL(ib_free_cq);