/* net/bpf/test_run.c */
/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
/* Run @prog once against @ctx and return its 32-bit return value.
 *
 * The statement order is significant: preemption is disabled before
 * entering the RCU read-side critical section, and the cgroup storage
 * pointer is installed via bpf_cgroup_storage_set() before the program
 * runs — presumably so cgroup-storage helpers invoked by the program
 * can find it (TODO confirm against bpf_get_local_storage()).
 * Unwinding happens in the reverse order after BPF_PROG_RUN().
 */
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
					    struct bpf_cgroup_storage *storage)
{
	u32 ret;

	preempt_disable();
	rcu_read_lock();
	bpf_cgroup_storage_set(storage);
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
	preempt_enable();

	return ret;
}
/* Run @prog @repeat times (at least once) against @ctx.
 *
 * On success, returns the program's return value from the last
 * iteration and stores the average wall-clock time per run, in
 * nanoseconds clamped to U32_MAX, in *time.  Time spent yielding in
 * cond_resched() is excluded from the measurement.
 *
 * If the per-program cgroup storage cannot be allocated, the negative
 * errno from bpf_cgroup_storage_alloc() is returned (note: through the
 * u32 return type, so callers see it as a large unsigned value).
 */
static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
	struct bpf_cgroup_storage *storage = NULL;
	u64 time_start, time_spent = 0;
	u32 ret = 0, i;

	storage = bpf_cgroup_storage_alloc(prog);
	if (IS_ERR(storage))
		return PTR_ERR(storage);

	/* repeat == 0 means "run once". */
	if (!repeat)
		repeat = 1;

	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		ret = bpf_test_run_one(prog, ctx, storage);
		if (need_resched()) {
			/* Stop early on a pending signal; otherwise yield
			 * the CPU and pause the clock across the reschedule
			 * so it is not charged to the program.
			 *
			 * NOTE(review): signal_pending() is only consulted
			 * when need_resched() fires, so a long repeat loop
			 * may be slow to react to signals — confirm whether
			 * a per-iteration check is wanted here.
			 */
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);	/* average ns per iteration */
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	bpf_cgroup_storage_free(storage);

	return ret;
}
  52. static int bpf_test_finish(const union bpf_attr *kattr,
  53. union bpf_attr __user *uattr, const void *data,
  54. u32 size, u32 retval, u32 duration)
  55. {
  56. void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
  57. int err = -EFAULT;
  58. if (data_out && copy_to_user(data_out, data, size))
  59. goto out;
  60. if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
  61. goto out;
  62. if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
  63. goto out;
  64. if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
  65. goto out;
  66. err = 0;
  67. out:
  68. return err;
  69. }
  70. static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
  71. u32 headroom, u32 tailroom)
  72. {
  73. void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
  74. void *data;
  75. if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
  76. return ERR_PTR(-EINVAL);
  77. data = kzalloc(size + headroom + tailroom, GFP_USER);
  78. if (!data)
  79. return ERR_PTR(-ENOMEM);
  80. if (copy_from_user(data + headroom, data_in, size)) {
  81. kfree(data);
  82. return ERR_PTR(-EFAULT);
  83. }
  84. return data;
  85. }
/* BPF_PROG_TEST_RUN handler for skb-based program types: build an
 * sk_buff around a copy of the user-supplied packet, run @prog
 * @repeat times via bpf_test_run(), and copy the resulting packet,
 * return value and average duration back through @uattr.
 *
 * Returns 0 on success or a negative errno.
 */
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	void *data;
	int ret;

	/* Reserve standard skb headroom and shared-info tailroom so the
	 * raw buffer can be handed to build_skb() below.
	 */
	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* Classify the program type: TC programs (SCHED_CLS/ACT) see the
	 * packet with its L2 header in place; they and the LWT types get
	 * direct packet access pointers computed before the run.
	 */
	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		return -ENOMEM;
	}
	/* From here on the skb owns @data: free only via kfree_skb(). */

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	/* eth_type_trans() sets skb->protocol and pulls the Ethernet
	 * header; the current netns loopback device serves as the
	 * nominal input interface.
	 */
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);	/* restore MAC header for L2 programs */
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);

	retval = bpf_test_run(prog, skb, repeat, &duration);

	if (!is_l2) {
		/* Non-L2 run: prepend a zeroed MAC header before copying
		 * the packet back out, growing headroom first if needed.
		 */
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				kfree_skb(skb);
				return -ENOMEM;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);

	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	return ret;
}
  146. int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
  147. union bpf_attr __user *uattr)
  148. {
  149. u32 size = kattr->test.data_size_in;
  150. u32 repeat = kattr->test.repeat;
  151. struct netdev_rx_queue *rxqueue;
  152. struct xdp_buff xdp = {};
  153. u32 retval, duration;
  154. void *data;
  155. int ret;
  156. data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
  157. if (IS_ERR(data))
  158. return PTR_ERR(data);
  159. xdp.data_hard_start = data;
  160. xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
  161. xdp.data_meta = xdp.data;
  162. xdp.data_end = xdp.data + size;
  163. rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
  164. xdp.rxq = &rxqueue->xdp_rxq;
  165. retval = bpf_test_run(prog, &xdp, repeat, &duration);
  166. if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
  167. xdp.data_end != xdp.data + size)
  168. size = xdp.data_end - xdp.data;
  169. ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
  170. kfree(data);
  171. return ret;
  172. }