gro_cells.h

#ifndef _NET_GRO_CELLS_H
#define _NET_GRO_CELLS_H

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

struct gro_cell {
	struct sk_buff_head	napi_skbs;
	struct napi_struct	napi;
};

struct gro_cells {
	struct gro_cell __percpu	*cells;
};
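/*
 * gro_cells_receive() is meant to be called from a device's receive path
 * (BH context): it queues the skb on the current CPU's cell and schedules
 * that cell's NAPI instance, so the packet is later handed to GRO by
 * gro_cell_poll().  If GRO is disabled on the device, or the skb is cloned,
 * it falls back to netif_rx().  Packets are dropped (and accounted in
 * dev->rx_dropped) once the per-cpu queue exceeds netdev_max_backlog.
 */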
static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
	struct gro_cell *cell;
	struct net_device *dev = skb->dev;

	if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
		netif_rx(skb);
		return;
	}

	cell = this_cpu_ptr(gcells->cells);

	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return;
	}

	/* We run in BH context */
	spin_lock(&cell->napi_skbs.lock);

	__skb_queue_tail(&cell->napi_skbs, skb);
	if (skb_queue_len(&cell->napi_skbs) == 1)
		napi_schedule(&cell->napi);

	spin_unlock(&cell->napi_skbs.lock);
}
/* called under BH context: drain the per-cpu queue and feed packets to GRO */
static inline int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	spin_lock(&cell->napi_skbs.lock);
	while (work_done < budget) {
		skb = __skb_dequeue(&cell->napi_skbs);
		if (!skb)
			break;
		spin_unlock(&cell->napi_skbs.lock);
		napi_gro_receive(napi, skb);
		work_done++;
		spin_lock(&cell->napi_skbs.lock);
	}

	if (work_done < budget)
		napi_complete(napi);
	spin_unlock(&cell->napi_skbs.lock);
	return work_done;
}
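/*
 * gro_cells_init() allocates one gro_cell per possible CPU and registers a
 * NAPI instance (weight 64) for each of them, so every CPU gets a private
 * queue that is drained independently.  Returns 0 on success or -ENOMEM.
 */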
static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
{
	int i;

	gcells->cells = alloc_percpu(struct gro_cell);
	if (!gcells->cells)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		skb_queue_head_init(&cell->napi_skbs);
		netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
		napi_enable(&cell->napi);
	}
	return 0;
}
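/*
 * gro_cells_destroy() undoes gro_cells_init(): it unregisters each NAPI
 * instance, frees any skbs still sitting in the per-cpu queues and releases
 * the percpu memory.  Callers must make sure no more packets can reach
 * gro_cells_receive() before calling this.
 */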
static inline void gro_cells_destroy(struct gro_cells *gcells)
{
	int i;

	if (!gcells->cells)
		return;

	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		netif_napi_del(&cell->napi);
		skb_queue_purge(&cell->napi_skbs);
	}
	free_percpu(gcells->cells);
	gcells->cells = NULL;
}

#endif
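/*
 * Usage sketch (not part of the header): a virtual device such as a tunnel
 * would typically embed struct gro_cells in its private data, set the cells
 * up when the device is initialised, push decapsulated packets through
 * gro_cells_receive() instead of netif_rx(), and tear everything down when
 * the device goes away.  All names below (my_tunnel, my_tunnel_dev_init,
 * my_tunnel_rcv, ...) are hypothetical and only illustrate the calling
 * convention, hence the #if 0 guard.
 */
#if 0
struct my_tunnel {
	struct net_device	*dev;
	struct gro_cells	gro_cells;
};

static int my_tunnel_dev_init(struct net_device *dev)
{
	struct my_tunnel *t = netdev_priv(dev);

	t->dev = dev;
	return gro_cells_init(&t->gro_cells, dev);
}

static void my_tunnel_dev_uninit(struct net_device *dev)
{
	struct my_tunnel *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
}

/* decapsulation path, runs in BH context */
static int my_tunnel_rcv(struct my_tunnel *t, struct sk_buff *skb)
{
	skb->dev = t->dev;
	gro_cells_receive(&t->gro_cells, skb);
	return 0;
}
#endif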