xsk_queue.h

/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
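
/* RX_BATCH_SIZE bounds how many entries a consumer grabs per refresh of its
 * cached cons_head (see xskq_peek_addr()/xskq_peek_desc()).
 * LAZY_UPDATE_THRESHOLD controls how eagerly xskq_produce_addr_lazy()
 * re-reads the shared consumer index: it is refreshed only once fewer than
 * this many slots appear free in the cached view (see xskq_nb_free()).
 */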
#define RX_BATCH_SIZE 16
#define LAZY_UPDATE_THRESHOLD 128
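
/* The shared producer and consumer indexes are kept on separate cache lines
 * so that the producing and consuming CPUs do not false-share them.
 */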
struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[0] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[0] ____cacheline_aligned_in_smp;
};
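
/* Kernel-side queue state.  prod_head/prod_tail and cons_head/cons_tail are
 * cached copies of the ring indexes: the shared ring->producer and
 * ring->consumer words are only touched via READ_ONCE()/WRITE_ONCE() when a
 * batch is refreshed or published.  invalid_descs counts descriptors that
 * were dropped by the validation helpers below.
 */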
struct xsk_queue {
	struct xdp_umem_props umem_props;
	u32 ring_mask;
	u32 nentries;
	u32 prod_head;
	u32 prod_tail;
	u32 cons_head;
	u32 cons_tail;
	struct xdp_ring *ring;
	u64 invalid_descs;
};

/* Common functions operating for both RXTX and umem queues */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}
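
/* Return min(dcnt, number of entries available for consumption).  The shared
 * producer index is re-read only when the cached view looks empty.
 */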
static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
	u32 entries = q->prod_tail - q->cons_tail;

	if (entries == 0) {
		/* Refresh the local pointer */
		q->prod_tail = READ_ONCE(q->ring->producer);
		entries = q->prod_tail - q->cons_tail;
	}

	return (entries > dcnt) ? dcnt : entries;
}
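
/* Return the number of free slots as seen from @producer.  The shared
 * consumer index is re-read only when the cached view shows fewer than @dcnt
 * free slots, which keeps traffic on the shared cache line low.
 */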
static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
	u32 free_entries = q->nentries - (producer - q->cons_tail);

	if (free_entries >= dcnt)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cons_tail = READ_ONCE(q->ring->consumer);
	return q->nentries - (producer - q->cons_tail);
}

/* UMEM queue */

static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
{
	if (addr >= q->umem_props.size) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*addr = READ_ONCE(ring->desc[idx]) & q->umem_props.chunk_mask;
		if (xskq_is_valid_addr(q, *addr))
			return addr;

		q->cons_tail++;
	}

	return NULL;
}
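
/* Peek at the next valid address without consuming it.  When the cached
 * batch is exhausted, the entries consumed so far are published through the
 * shared consumer index and up to RX_BATCH_SIZE new entries are cached.
 * Invalid addresses are skipped (and counted) by xskq_validate_addr().
 * Returns NULL when nothing is available; xskq_discard_addr() below consumes
 * the peeked entry locally.
 */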
static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr)
{
	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();
	}

	return xskq_validate_addr(q, addr);
}

static inline void xskq_discard_addr(struct xsk_queue *q)
{
	q->cons_tail++;
}
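
/* Illustrative consumer-side pattern (a sketch, not a verbatim caller;
 * "fq" stands for a fill queue owned by the caller):
 *
 *	u64 addr;
 *
 *	if (!xskq_peek_addr(fq, &addr))
 *		return -ENOSPC;
 *	... use the chunk at addr, then consume it locally ...
 *	xskq_discard_addr(fq);
 *
 * The shared consumer index is published on the next peek that has to
 * refresh its cached batch.
 */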
static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_nb_free(q, q->prod_tail, 1) == 0)
		return -ENOSPC;

	ring->desc[q->prod_tail++ & q->ring_mask] = addr;

	/* Order producer and data */
	smp_wmb();

	WRITE_ONCE(q->ring->producer, q->prod_tail);
	return 0;
}
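
/* Lazy variant: the address is written at the cached prod_head but not
 * published.  The caller batches several of these and then makes them
 * visible with xskq_produce_flush_addr_n(), which issues the write barrier
 * and advances the shared producer index by nb_entries.
 */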
static inline int xskq_produce_addr_lazy(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_nb_free(q, q->prod_head, LAZY_UPDATE_THRESHOLD) == 0)
		return -ENOSPC;

	ring->desc[q->prod_head++ & q->ring_mask] = addr;
	return 0;
}

static inline void xskq_produce_flush_addr_n(struct xsk_queue *q,
					     u32 nb_entries)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail += nb_entries;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}
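
/* Reserve one slot by advancing the cached prod_head without writing any
 * data or publishing anything.  This guarantees that a matching later
 * xskq_produce_addr(), which fills and publishes at prod_tail, will find a
 * free slot.
 */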
static inline int xskq_reserve_addr(struct xsk_queue *q)
{
	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	q->prod_head++;
	return 0;
}

/* Rx/Tx queue */
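
/* A Tx/Rx descriptor is valid if its address lies inside the umem and the
 * frame does not cross a chunk boundary, i.e. addr and addr + len resolve
 * to the same chunk under umem_props.chunk_mask.
 */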
static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
	if (!xskq_is_valid_addr(q, d->addr))
		return false;

	if (((d->addr + d->len) & q->umem_props.chunk_mask) !=
	    (d->addr & q->umem_props.chunk_mask)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
						  struct xdp_desc *desc)
{
	while (q->cons_tail != q->cons_head) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		unsigned int idx = q->cons_tail & q->ring_mask;

		*desc = READ_ONCE(ring->desc[idx]);
		if (xskq_is_valid_desc(q, desc))
			return desc;

		q->cons_tail++;
	}

	return NULL;
}

static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
					      struct xdp_desc *desc)
{
	if (q->cons_tail == q->cons_head) {
		WRITE_ONCE(q->ring->consumer, q->cons_tail);
		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

		/* Order consumer and data */
		smp_rmb();
	}

	return xskq_validate_desc(q, desc);
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
	q->cons_tail++;
}
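
/* Two-phase Rx produce: xskq_produce_batch_desc() writes descriptors at the
 * cached prod_head only; xskq_produce_flush_desc() then orders the stores
 * and publishes everything written so far by advancing the shared producer
 * index to prod_head.
 */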
static inline int xskq_produce_batch_desc(struct xsk_queue *q,
					  u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	unsigned int idx;

	if (xskq_nb_free(q, q->prod_head, 1) == 0)
		return -ENOSPC;

	idx = (q->prod_head++) & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
	/* Order producer and data */
	smp_wmb();

	q->prod_tail = q->prod_head;
	WRITE_ONCE(q->ring->producer, q->prod_tail);
}
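
/* xskq_full_desc() reports whether every slot holds an unconsumed
 * descriptor; xskq_empty_desc() reports whether every slot is free from the
 * producer's point of view.
 */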
static inline bool xskq_full_desc(struct xsk_queue *q)
{
	return xskq_nb_avail(q, q->nentries) == q->nentries;
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
	return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
}

void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */