vnic_sdma.c

/*
 * Copyright(c) 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
/*
 * This file contains HFI1 support for VNIC SDMA functionality
 */

#include "sdma.h"
#include "vnic.h"

#define HFI1_VNIC_SDMA_Q_ACTIVE    BIT(0)
#define HFI1_VNIC_SDMA_Q_DEFERRED  BIT(1)

#define HFI1_VNIC_TXREQ_NAME_LEN   32
#define HFI1_VNIC_SDMA_DESC_WTRMRK 64

/**
 * struct vnic_txreq - VNIC transmit descriptor
 * @txreq: sdma transmit request
 * @sdma: vnic sdma pointer
 * @skb: skb to send
 * @pad: pad buffer
 * @plen: pad length
 * @pbc_val: pbc value
 */
struct vnic_txreq {
        struct sdma_txreq     txreq;
        struct hfi1_vnic_sdma *sdma;

        struct sk_buff        *skb;
        unsigned char         pad[HFI1_VNIC_MAX_PAD];
        u16                   plen;
        __le64                pbc_val;
};
static void vnic_sdma_complete(struct sdma_txreq *txreq,
                               int status)
{
        struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
        struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;

        sdma_txclean(vnic_sdma->dd, txreq);
        /* completion may run in interrupt context, hence the _any variant */
        dev_kfree_skb_any(tx->skb);
        kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
}
static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
                                           struct vnic_txreq *tx)
{
        int i, ret = 0;

        ret = sdma_txadd_kvaddr(
                sde->dd,
                &tx->txreq,
                tx->skb->data,
                skb_headlen(tx->skb));
        if (unlikely(ret))
                goto bail_txadd;

        for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
                struct skb_frag_struct *frag = &skb_shinfo(tx->skb)->frags[i];

                /* combine physically contiguous fragments later? */
                ret = sdma_txadd_page(sde->dd,
                                      &tx->txreq,
                                      skb_frag_page(frag),
                                      frag->page_offset,
                                      skb_frag_size(frag));
                if (unlikely(ret))
                        goto bail_txadd;
        }

        if (tx->plen)
                ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
                                        tx->pad + HFI1_VNIC_MAX_PAD - tx->plen,
                                        tx->plen);

bail_txadd:
        return ret;
}
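
/*
 * Note on descriptor budget (informational sketch derived from the code
 * above, not additional driver logic): one packet consumes at most one
 * descriptor for the PBC, one for the linear skb head, one per page
 * fragment (bounded by MAX_SKB_FRAGS), and one for the pad tail. That
 * worst case stays well below the 64-descriptor wakeup watermark
 * (HFI1_VNIC_SDMA_DESC_WTRMRK) installed in hfi1_vnic_sdma_init().
 */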
static int build_vnic_tx_desc(struct sdma_engine *sde,
                              struct vnic_txreq *tx,
                              u64 pbc)
{
        int ret = 0;
        u16 hdrbytes = 2 << 2; /* PBC: 2 dwords, i.e. 8 bytes */

        ret = sdma_txinit_ahg(
                &tx->txreq,
                0,
                hdrbytes + tx->skb->len + tx->plen,
                0,
                0,
                NULL,
                0,
                vnic_sdma_complete);
        if (unlikely(ret))
                goto bail_txadd;

        /* add pbc */
        tx->pbc_val = cpu_to_le64(pbc);
        ret = sdma_txadd_kvaddr(
                sde->dd,
                &tx->txreq,
                &tx->pbc_val,
                hdrbytes);
        if (unlikely(ret))
                goto bail_txadd;

        /* add the ulp payload */
        ret = build_vnic_ulp_payload(sde, tx);

bail_txadd:
        return ret;
}
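
/*
 * Resulting SDMA descriptor chain for one packet (a sketch of the layout
 * the two builders above produce, shown here for orientation only):
 *
 *   [PBC, 8 bytes] -> [skb linear head] -> [page frag 0..n] -> [pad tail]
 */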
/* setup the last plen bytes of pad */
static inline void hfi1_vnic_update_pad(unsigned char *pad, u8 plen)
{
        pad[HFI1_VNIC_MAX_PAD - 1] = plen - OPA_VNIC_ICRC_TAIL_LEN;
}
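
/*
 * Worked example (assuming OPA_VNIC_ICRC_TAIL_LEN covers the 4-byte ICRC
 * plus the 1-byte tail, i.e. 5): for plen = 7, the packet carries 2 bytes
 * of true padding followed by the ICRC and tail, so the tail byte (the
 * last byte of the pad buffer) is written as 7 - 5 = 2, telling the
 * receiver how many pad bytes to strip.
 */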
int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
                       struct hfi1_vnic_vport_info *vinfo,
                       struct sk_buff *skb, u64 pbc, u8 plen)
{
        struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
        struct sdma_engine *sde = vnic_sdma->sde;
        struct vnic_txreq *tx;
        int ret = -ECOMM;

        if (unlikely(READ_ONCE(vnic_sdma->state) != HFI1_VNIC_SDMA_Q_ACTIVE))
                goto tx_err;

        if (unlikely(!sde || !sdma_running(sde)))
                goto tx_err;

        tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
        if (unlikely(!tx)) {
                ret = -ENOMEM;
                goto tx_err;
        }

        tx->sdma = vnic_sdma;
        tx->skb = skb;
        hfi1_vnic_update_pad(tx->pad, plen);
        tx->plen = plen;
        ret = build_vnic_tx_desc(sde, tx, pbc);
        if (unlikely(ret))
                goto free_desc;

        ret = sdma_send_txreq(sde, &vnic_sdma->wait, &tx->txreq,
                              vnic_sdma->pkts_sent);
        /* When -ECOMM, sdma callback will be called with ABORT status */
        if (unlikely(ret && ret != -ECOMM))
                goto free_desc;

        if (!ret) {
                vnic_sdma->pkts_sent = true;
                iowait_starve_clear(vnic_sdma->pkts_sent, &vnic_sdma->wait);
        }

        return ret;

free_desc:
        sdma_txclean(dd, &tx->txreq);
        kmem_cache_free(dd->vnic.txreq_cache, tx);
tx_err:
        if (ret != -EBUSY)
                dev_kfree_skb_any(skb);
        else
                vnic_sdma->pkts_sent = false;

        return ret;
}
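
/*
 * Hypothetical caller sketch (illustration only, not part of this file):
 * how a netdev xmit handler might drive hfi1_vnic_send_dma(). The function
 * name example_vnic_xmit and its parameter plumbing are assumptions for
 * the example; the -EBUSY handling mirrors the contract above, where the
 * skb is retained by the driver and the subqueue should be stopped until
 * hfi1_vnic_sdma_wakeup() reports descriptors are available again.
 */
static netdev_tx_t __maybe_unused
example_vnic_xmit(struct hfi1_vnic_vport_info *vinfo, struct sk_buff *skb,
                  u8 q_idx, u64 pbc, u8 plen)
{
        int err;

        err = hfi1_vnic_send_dma(vinfo->dd, q_idx, vinfo, skb, pbc, plen);
        if (err == -EBUSY) {
                /* skb was kept; stop the subqueue and let the stack requeue */
                netif_stop_subqueue(vinfo->netdev, q_idx);
                return NETDEV_TX_BUSY;
        }

        /* any other failure already freed the skb internally */
        return NETDEV_TX_OK;
}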
/*
 * hfi1_vnic_sdma_sleep - vnic sdma sleep function
 *
 * This function gets called from sdma_send_txreq() when there are not enough
 * sdma descriptors available to send the packet. It adds the Tx queue's wait
 * structure to the sdma engine's dmawait list, to be woken up when
 * descriptors become available.
 */
static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
                                struct iowait *wait,
                                struct sdma_txreq *txreq,
                                uint seq,
                                bool pkts_sent)
{
        struct hfi1_vnic_sdma *vnic_sdma =
                container_of(wait, struct hfi1_vnic_sdma, wait);
        struct hfi1_ibdev *dev = &vnic_sdma->dd->verbs_dev;

        write_seqlock(&dev->iowait_lock);
        if (sdma_progress(sde, seq, txreq)) {
                write_sequnlock(&dev->iowait_lock);
                return -EAGAIN;
        }

        vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
        if (list_empty(&vnic_sdma->wait.list))
                iowait_queue(pkts_sent, wait, &sde->dmawait);

        write_sequnlock(&dev->iowait_lock);
        return -EBUSY;
}
/*
 * hfi1_vnic_sdma_wakeup - vnic sdma wakeup function
 *
 * This function gets called when SDMA descriptors become available and the
 * Tx queue's wait structure was previously added to the sdma engine's
 * dmawait list. It notifies the upper driver about the Tx queue wakeup.
 */
static void hfi1_vnic_sdma_wakeup(struct iowait *wait, int reason)
{
        struct hfi1_vnic_sdma *vnic_sdma =
                container_of(wait, struct hfi1_vnic_sdma, wait);
        struct hfi1_vnic_vport_info *vinfo = vnic_sdma->vinfo;

        vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;
        if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx))
                netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx);
}
inline bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,
                                       u8 q_idx)
{
        struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];

        return (READ_ONCE(vnic_sdma->state) == HFI1_VNIC_SDMA_Q_ACTIVE);
}
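
/*
 * Illustrative use (hypothetical, not part of this file): the upper driver
 * can gate its xmit path on this check so a deferred queue is stopped
 * before sdma_send_txreq() has to fail, e.g.:
 *
 *      if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx))
 *              netif_stop_subqueue(vinfo->netdev, q_idx);
 */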
void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
{
        int i;

        for (i = 0; i < vinfo->num_tx_q; i++) {
                struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[i];

                iowait_init(&vnic_sdma->wait, 0, NULL, hfi1_vnic_sdma_sleep,
                            hfi1_vnic_sdma_wakeup, NULL);
                vnic_sdma->sde = &vinfo->dd->per_sdma[i];
                vnic_sdma->dd = vinfo->dd;
                vnic_sdma->vinfo = vinfo;
                vnic_sdma->q_idx = i;
                vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;

                /*
                 * Add a free descriptor watermark for wakeups: defer the
                 * wakeup until the engine has at least
                 * HFI1_VNIC_SDMA_DESC_WTRMRK descriptors free.
                 */
                if (vnic_sdma->sde->descq_cnt > HFI1_VNIC_SDMA_DESC_WTRMRK) {
                        INIT_LIST_HEAD(&vnic_sdma->stx.list);
                        vnic_sdma->stx.num_desc = HFI1_VNIC_SDMA_DESC_WTRMRK;
                        list_add_tail(&vnic_sdma->stx.list,
                                      &vnic_sdma->wait.tx_head);
                }
        }
}
int hfi1_vnic_txreq_init(struct hfi1_devdata *dd)
{
        char buf[HFI1_VNIC_TXREQ_NAME_LEN];

        snprintf(buf, sizeof(buf), "hfi1_%u_vnic_txreq_cache", dd->unit);
        dd->vnic.txreq_cache = kmem_cache_create(buf,
                                                 sizeof(struct vnic_txreq),
                                                 0, SLAB_HWCACHE_ALIGN,
                                                 NULL);
        if (!dd->vnic.txreq_cache)
                return -ENOMEM;

        return 0;
}

void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd)
{
        kmem_cache_destroy(dd->vnic.txreq_cache);
        dd->vnic.txreq_cache = NULL;
}