bnxt_xdp.c

/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
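
/* Queue one packet on the XDP TX ring.  The caller has already
 * DMA-mapped the buffer; only a single TX BD is filled in here.  The
 * current RX producer is stashed in the software TX buffer so that the
 * completion path (bnxt_tx_int_xdp() below) knows how far the RX ring
 * can be replenished once this BD completes.
 */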
void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
		   dma_addr_t mapping, u32 len, u16 rx_prod)
{
	struct bnxt_sw_tx_bd *tx_buf;
	struct tx_bd *txbd;
	u32 flags;
	u16 prod;

	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->rx_prod = rx_prod;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
	flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
		TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	txbd->tx_bd_opaque = prod;
	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;
}
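
/* TX completion handler for the XDP TX ring.  Walk past the completed
 * BDs, then ring the RX doorbell to recycle RX buffers: if the TX ring
 * has completely drained, the live RX producer can be used; otherwise
 * only buffers up to the RX producer recorded in the last completed BD
 * are known to be safe to hand back to the hardware.
 */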
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct bnxt_sw_tx_bd *tx_buf;
	u16 tx_cons = txr->tx_cons;
	u16 last_tx_cons = tx_cons;
	u16 rx_prod;
	int i;

	for (i = 0; i < nr_pkts; i++) {
		last_tx_cons = tx_cons;
		tx_cons = NEXT_TX(tx_cons);
	}
	txr->tx_cons = tx_cons;
	if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
		rx_prod = rxr->rx_prod;
	} else {
		tx_buf = &txr->tx_buf_ring[last_tx_cons];
		rx_prod = tx_buf->rx_prod;
	}
	bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rx_prod);
}
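
/* Run the attached XDP program on one received buffer.  The buffer
 * remains mapped for the device; it is synced to the CPU only for the
 * program run and synced back if the verdict is XDP_TX.  For every
 * verdict that consumes the packet, the page is recycled into the RX
 * ring via bnxt_reuse_rx_data().
 */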
/* returns the following:
 * true    - packet consumed by XDP and the RX buffer is recycled.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	struct xdp_buff xdp;
	dma_addr_t mapping;
	void *orig_data;
	u32 tx_avail;
	u32 offset;
	u32 act;

	if (!xdp_prog)
		return false;

	pdev = bp->pdev;
	txr = rxr->bnapi->tx_ring;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;

	xdp.data_hard_start = *data_ptr - offset;
	xdp.data = *data_ptr;
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = *data_ptr + *len;
	xdp.rxq = &rxr->xdp_rxq;
	orig_data = xdp.data;
	mapping = rx_buf->mapping - bp->rx_dma_offset;

	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);

	rcu_read_lock();
	act = bpf_prog_run_xdp(xdp_prog, &xdp);
	rcu_read_unlock();

	tx_avail = bnxt_tx_avail(bp, txr);
	/* If the TX ring has not fully drained, we must not update the RX
	 * producer yet because we may still be transmitting on some BDs.
	 */
	if (tx_avail != bp->tx_ring_size)
		*event &= ~BNXT_RX_EVENT;

	*len = xdp.data_end - xdp.data;
	if (orig_data != xdp.data) {
		offset = xdp.data - xdp.data_hard_start;
		*data_ptr = xdp.data_hard_start + offset;
	}
	switch (act) {
	case XDP_PASS:
		return false;

	case XDP_TX:
		if (tx_avail < 1) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		*event = BNXT_TX_EVENT;
		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
					   bp->rx_dir);
		bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
			      NEXT_RX(rxr->rx_prod));
		bnxt_reuse_rx_data(rxr, cons, page);
		return true;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* Fall thru */
	case XDP_ABORTED:
		trace_xdp_exception(bp->dev, xdp_prog, act);
		/* Fall thru */
	case XDP_DROP:
		bnxt_reuse_rx_data(rxr, cons, page);
		break;
	}
	return true;
}
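
/* Install or remove an XDP program.  XDP needs dedicated TX rings, one
 * per RX ring, so the channel layout is revalidated and, if the device
 * is running, the NIC is closed and reopened around the program swap.
 */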
/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	struct net_device *dev = bp->dev;
	int tx_xdp = 0, rc, tc;
	struct bpf_prog *old;

	if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
		netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
		return -EOPNOTSUPP;
	}
	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
		return -EOPNOTSUPP;
	}
	if (prog)
		tx_xdp = bp->rx_nr_rings;

	tc = netdev_get_num_tc(dev);
	if (!tc)
		tc = 1;
	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      true, tc, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
		return rc;
	}
	if (netif_running(dev))
		bnxt_close_nic(bp, true, false);

	old = xchg(&bp->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	if (prog) {
		bnxt_set_rx_skb_mode(bp, true);
	} else {
		int rx, tx;

		bnxt_set_rx_skb_mode(bp, false);
		bnxt_get_max_rings(bp, &rx, &tx, true);
		if (rx > 1) {
			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features |= NETIF_F_LRO;
		}
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
	bp->num_stat_ctxs = bp->cp_nr_rings;
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}
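
/* Entry point for the kernel's XDP commands (presumably wired up as the
 * driver's ndo_bpf hook in bnxt.c, which is not part of this file):
 * installs a program via bnxt_xdp_set() or reports the id of the
 * currently attached one.
 */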
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rc = bnxt_xdp_set(bp, xdp->prog);
		break;
	case XDP_QUERY_PROG:
		xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
		rc = 0;
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}