/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
/* Run @prog exactly once against @ctx and return the program's return
 * code.  @storage is installed as the current cgroup storage before the
 * run.  Preemption is disabled and the RCU read lock is held for the
 * duration, matching the execution context BPF programs are attached in.
 */
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
					    struct bpf_cgroup_storage *storage)
{
	u32 ret;

	preempt_disable();
	rcu_read_lock();
	bpf_cgroup_storage_set(storage);
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
	preempt_enable();

	return ret;
}
/* Run @prog against @ctx @repeat times (a repeat count of 0 is treated
 * as 1) and store the average per-run duration, in nanoseconds and
 * clamped to U32_MAX, through @time.  Time spent in cond_resched() is
 * excluded from the measurement.  Returns the return code of the last
 * completed run; the loop is cut short if a signal becomes pending.
 *
 * NOTE(review): on cgroup-storage allocation failure the negative errno
 * from PTR_ERR() is returned through the u32 return type, so the caller
 * reports it as the program's retval rather than as an error — confirm
 * whether that is intended.
 */
static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
	struct bpf_cgroup_storage *storage = NULL;
	u64 time_start, time_spent = 0;
	u32 ret = 0, i;

	storage = bpf_cgroup_storage_alloc(prog);
	if (IS_ERR(storage))
		return PTR_ERR(storage);

	if (!repeat)
		repeat = 1;

	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		ret = bpf_test_run_one(prog, ctx, storage);
		if (need_resched()) {
			/* Bail out early on a pending signal; otherwise
			 * pause the clock while we reschedule so the
			 * averaged duration only counts program time.
			 */
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	bpf_cgroup_storage_free(storage);

	return ret;
}
- static int bpf_test_finish(const union bpf_attr *kattr,
- union bpf_attr __user *uattr, const void *data,
- u32 size, u32 retval, u32 duration)
- {
- void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
- int err = -EFAULT;
- if (data_out && copy_to_user(data_out, data, size))
- goto out;
- if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
- goto out;
- if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
- goto out;
- if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
- goto out;
- err = 0;
- out:
- return err;
- }
- static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
- u32 headroom, u32 tailroom)
- {
- void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
- void *data;
- if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
- return ERR_PTR(-EINVAL);
- data = kzalloc(size + headroom + tailroom, GFP_USER);
- if (!data)
- return ERR_PTR(-ENOMEM);
- if (copy_from_user(data + headroom, data_in, size)) {
- kfree(data);
- return ERR_PTR(-EFAULT);
- }
- return data;
- }
/* BPF_PROG_TEST_RUN handler for skb-based program types.  Builds a
 * linear skb around the user-supplied packet data, runs @prog on it
 * @repeat times, then copies the (possibly modified) packet, the
 * program's return value and the averaged duration back to userspace
 * via bpf_test_finish().  Returns 0 on success or a negative errno.
 */
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	void *data;
	int ret;

	/* Reserve standard skb headroom plus shared-info tailroom so the
	 * buffer can be handed to build_skb() below.
	 */
	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		/* tc programs see the packet starting at the L2 header. */
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	/* build_skb() takes ownership of data on success. */
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		return -ENOMEM;
	}

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	/* Parse the Ethernet header against the netns loopback device;
	 * eth_type_trans() pulls the L2 header and sets skb->protocol.
	 */
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		/* Push the L2 header back so the program sees it. */
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	retval = bpf_test_run(prog, skb, repeat, &duration);
	if (!is_l2) {
		/* Re-add a zeroed L2 header before copying the packet
		 * out, expanding headroom first if the program consumed
		 * it; presumably so userspace always gets a full frame —
		 * TODO(review) confirm.
		 */
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				kfree_skb(skb);
				return -ENOMEM;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	return ret;
}
/* BPF_PROG_TEST_RUN handler for XDP programs.  Wraps the user-supplied
 * packet in an xdp_buff backed by the netns loopback device's rx queue,
 * runs @prog @repeat times, and copies the resulting packet (honoring
 * any head/tail adjustment the program made), retval and averaged
 * duration back to userspace.  Returns 0 on success or a negative errno.
 */
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	/* Reserve XDP headroom so the program can bpf_xdp_adjust_head(). */
	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;	/* no metadata to start with */
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	retval = bpf_test_run(prog, &xdp, repeat, &duration);
	/* If the program moved data or data_end, report the new length. */
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
	kfree(data);
	return ret;
}