- /*
- * Functions to manage eBPF programs attached to cgroups
- *
- * Copyright (c) 2016 Daniel Mack
- *
- * This file is subject to the terms and conditions of version 2 of the GNU
- * General Public License. See the file COPYING in the main directory of the
- * Linux distribution for more details.
- */
- #include <linux/kernel.h>
- #include <linux/atomic.h>
- #include <linux/cgroup.h>
- #include <linux/slab.h>
- #include <linux/bpf.h>
- #include <linux/bpf-cgroup.h>
- #include <net/sock.h>
- DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
- EXPORT_SYMBOL(cgroup_bpf_enabled_key);
- /**
- * cgroup_bpf_put() - put references of all bpf programs
- * @cgrp: the cgroup to modify
- */
- void cgroup_bpf_put(struct cgroup *cgrp)
- {
- unsigned int type;
- for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
- struct list_head *progs = &cgrp->bpf.progs[type];
- struct bpf_prog_list *pl, *tmp;
- list_for_each_entry_safe(pl, tmp, progs, node) {
- list_del(&pl->node);
- bpf_prog_put(pl->prog);
- bpf_cgroup_storage_unlink(pl->storage);
- bpf_cgroup_storage_free(pl->storage);
- kfree(pl);
- static_branch_dec(&cgroup_bpf_enabled_key);
- }
- bpf_prog_array_free(cgrp->bpf.effective[type]);
- }
- }
- /* count number of elements in the list.
- * it's slow but the list cannot be long
- */
- static u32 prog_list_length(struct list_head *head)
- {
- struct bpf_prog_list *pl;
- u32 cnt = 0;
- list_for_each_entry(pl, head, node) {
- if (!pl->prog)
- continue;
- cnt++;
- }
- return cnt;
- }
- /* if parent has non-overridable prog attached,
- * disallow attaching new programs to the descendant cgroup.
- * if parent has overridable or multi-prog, allow attaching
- */
- static bool hierarchy_allows_attach(struct cgroup *cgrp,
- enum bpf_attach_type type,
- u32 new_flags)
- {
- struct cgroup *p;
- p = cgroup_parent(cgrp);
- if (!p)
- return true;
- do {
- u32 flags = p->bpf.flags[type];
- u32 cnt;
- if (flags & BPF_F_ALLOW_MULTI)
- return true;
- cnt = prog_list_length(&p->bpf.progs[type]);
- WARN_ON_ONCE(cnt > 1);
- if (cnt == 1)
- return !!(flags & BPF_F_ALLOW_OVERRIDE);
- p = cgroup_parent(p);
- } while (p);
- return true;
- }
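- /* Worked example (editor's note, not part of the original source):
- * parent attached with flags == 0 (non-overridable)
- * -> any attach in a descendant fails with -EPERM
- * parent attached with BPF_F_ALLOW_OVERRIDE
- * -> descendants may attach; the descendant's program takes precedence
- * parent attached with BPF_F_ALLOW_MULTI
- * -> descendants may attach; all programs run (see effective array below)
- */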
- /* compute a chain of effective programs for a given cgroup:
- * start from the list of programs in this cgroup and add
- * all parent programs.
- * Note that a parent's F_ALLOW_OVERRIDE-type program yields
- * to programs in this cgroup
- */
- static int compute_effective_progs(struct cgroup *cgrp,
- enum bpf_attach_type type,
- struct bpf_prog_array __rcu **array)
- {
- struct bpf_prog_array *progs;
- struct bpf_prog_list *pl;
- struct cgroup *p = cgrp;
- int cnt = 0;
- /* count number of effective programs by walking parents */
- do {
- if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
- cnt += prog_list_length(&p->bpf.progs[type]);
- p = cgroup_parent(p);
- } while (p);
- progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
- if (!progs)
- return -ENOMEM;
- /* populate the array with effective progs */
- cnt = 0;
- p = cgrp;
- do {
- if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
- continue;
- list_for_each_entry(pl, &p->bpf.progs[type], node) {
- if (!pl->prog)
- continue;
- progs->items[cnt].prog = pl->prog;
- progs->items[cnt].cgroup_storage = pl->storage;
- cnt++;
- }
- } while ((p = cgroup_parent(p)));
- rcu_assign_pointer(*array, progs);
- return 0;
- }
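- /* Worked example (editor's note, not part of the original source):
- * for root(prog A, BPF_F_ALLOW_MULTI) -> child(prog B, BPF_F_ALLOW_MULTI),
- * the effective array computed above for child is [B, A]: the cgroup's
- * own programs fill the low indices first, then each ancestor's, walking
- * toward the root.
- */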
- static void activate_effective_progs(struct cgroup *cgrp,
- enum bpf_attach_type type,
- struct bpf_prog_array __rcu *array)
- {
- struct bpf_prog_array __rcu *old_array;
- old_array = xchg(&cgrp->bpf.effective[type], array);
- /* free prog array after grace period, since __cgroup_bpf_run_*()
- * might still be walking the array
- */
- bpf_prog_array_free(old_array);
- }
- /**
- * cgroup_bpf_inherit() - inherit effective programs from parent
- * @cgrp: the cgroup to modify
- */
- int cgroup_bpf_inherit(struct cgroup *cgrp)
- {
- /* has to use a macro instead of a const int, since the compiler thinks
- * the array below is variable length
- */
- #define NR ARRAY_SIZE(cgrp->bpf.effective)
- struct bpf_prog_array __rcu *arrays[NR] = {};
- int i;
- for (i = 0; i < NR; i++)
- INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
- for (i = 0; i < NR; i++)
- if (compute_effective_progs(cgrp, i, &arrays[i]))
- goto cleanup;
- for (i = 0; i < NR; i++)
- activate_effective_progs(cgrp, i, arrays[i]);
- return 0;
- cleanup:
- for (i = 0; i < NR; i++)
- bpf_prog_array_free(arrays[i]);
- return -ENOMEM;
- }
- static int update_effective_progs(struct cgroup *cgrp,
- enum bpf_attach_type type)
- {
- struct cgroup_subsys_state *css;
- int err;
- /* allocate and recompute effective prog arrays */
- css_for_each_descendant_pre(css, &cgrp->self) {
- struct cgroup *desc = container_of(css, struct cgroup, self);
- err = compute_effective_progs(desc, type, &desc->bpf.inactive);
- if (err)
- goto cleanup;
- }
- /* all allocations were successful. Activate all prog arrays */
- css_for_each_descendant_pre(css, &cgrp->self) {
- struct cgroup *desc = container_of(css, struct cgroup, self);
- activate_effective_progs(desc, type, desc->bpf.inactive);
- desc->bpf.inactive = NULL;
- }
- return 0;
- cleanup:
- /* OOM while computing effective arrays. Free all computed arrays,
- * since they were not activated
- */
- css_for_each_descendant_pre(css, &cgrp->self) {
- struct cgroup *desc = container_of(css, struct cgroup, self);
- bpf_prog_array_free(desc->bpf.inactive);
- desc->bpf.inactive = NULL;
- }
- return err;
- }
- #define BPF_CGROUP_MAX_PROGS 64
- /**
- * __cgroup_bpf_attach() - Attach the program to a cgroup, and
- * propagate the change to descendants
- * @cgrp: The cgroup whose descendants to traverse
- * @prog: A program to attach
- * @type: Type of attach operation
- * @flags: Attach flags (0, BPF_F_ALLOW_OVERRIDE or BPF_F_ALLOW_MULTI)
- *
- * Must be called with cgroup_mutex held.
- */
- int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type, u32 flags)
- {
- struct list_head *progs = &cgrp->bpf.progs[type];
- struct bpf_prog *old_prog = NULL;
- struct bpf_cgroup_storage *storage, *old_storage = NULL;
- struct bpf_prog_list *pl;
- bool pl_was_allocated;
- int err;
- if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI))
- /* invalid combination */
- return -EINVAL;
- if (!hierarchy_allows_attach(cgrp, type, flags))
- return -EPERM;
- if (!list_empty(progs) && cgrp->bpf.flags[type] != flags)
- /* Disallow attaching non-overridable on top
- * of existing overridable in this cgroup.
- * Disallow attaching multi-prog if overridable or none
- */
- return -EPERM;
- if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
- return -E2BIG;
- storage = bpf_cgroup_storage_alloc(prog);
- if (IS_ERR(storage))
- return -ENOMEM;
- if (flags & BPF_F_ALLOW_MULTI) {
- list_for_each_entry(pl, progs, node) {
- if (pl->prog == prog) {
- /* disallow attaching the same prog twice */
- bpf_cgroup_storage_free(storage);
- return -EINVAL;
- }
- }
- pl = kmalloc(sizeof(*pl), GFP_KERNEL);
- if (!pl) {
- bpf_cgroup_storage_free(storage);
- return -ENOMEM;
- }
- pl_was_allocated = true;
- pl->prog = prog;
- pl->storage = storage;
- list_add_tail(&pl->node, progs);
- } else {
- if (list_empty(progs)) {
- pl = kmalloc(sizeof(*pl), GFP_KERNEL);
- if (!pl) {
- bpf_cgroup_storage_free(storage);
- return -ENOMEM;
- }
- pl_was_allocated = true;
- list_add_tail(&pl->node, progs);
- } else {
- pl = list_first_entry(progs, typeof(*pl), node);
- old_prog = pl->prog;
- old_storage = pl->storage;
- bpf_cgroup_storage_unlink(old_storage);
- pl_was_allocated = false;
- }
- pl->prog = prog;
- pl->storage = storage;
- }
- cgrp->bpf.flags[type] = flags;
- err = update_effective_progs(cgrp, type);
- if (err)
- goto cleanup;
- static_branch_inc(&cgroup_bpf_enabled_key);
- if (old_storage)
- bpf_cgroup_storage_free(old_storage);
- if (old_prog) {
- bpf_prog_put(old_prog);
- static_branch_dec(&cgroup_bpf_enabled_key);
- }
- bpf_cgroup_storage_link(storage, cgrp, type);
- return 0;
- cleanup:
- /* and cleanup the prog list */
- pl->prog = old_prog;
- bpf_cgroup_storage_free(pl->storage);
- pl->storage = old_storage;
- bpf_cgroup_storage_link(old_storage, cgrp, type);
- if (pl_was_allocated) {
- list_del(&pl->node);
- kfree(pl);
- }
- return err;
- }
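- /* Editor's sketch, not part of the original source: the userspace side of
- * the attach path above, via the raw bpf(2) syscall. cgrp_fd is an open fd
- * of a cgroup v2 directory and prog_fd a loaded program of a matching
- * type; both are hypothetical here.
- */
- #include <linux/bpf.h>
- #include <string.h>
- #include <sys/syscall.h>
- #include <unistd.h>
- static int attach_egress(int cgrp_fd, int prog_fd, __u32 flags)
- {
- union bpf_attr attr;
- memset(&attr, 0, sizeof(attr));
- attr.target_fd = cgrp_fd;
- attr.attach_bpf_fd = prog_fd;
- attr.attach_type = BPF_CGROUP_INET_EGRESS;
- attr.attach_flags = flags; /* 0, BPF_F_ALLOW_OVERRIDE or BPF_F_ALLOW_MULTI */
- return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
- }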
- /**
- * __cgroup_bpf_detach() - Detach the program from a cgroup, and
- * propagate the change to descendants
- * @cgrp: The cgroup whose descendants to traverse
- * @prog: A program to detach or NULL
- * @type: Type of detach operation
- *
- * Must be called with cgroup_mutex held.
- */
- int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type, u32 unused_flags)
- {
- struct list_head *progs = &cgrp->bpf.progs[type];
- u32 flags = cgrp->bpf.flags[type];
- struct bpf_prog *old_prog = NULL;
- struct bpf_prog_list *pl;
- int err;
- if (flags & BPF_F_ALLOW_MULTI) {
- if (!prog)
- /* to detach a MULTI prog the user has to specify a valid FD
- * of the program to be detached
- */
- return -EINVAL;
- } else {
- if (list_empty(progs))
- /* report error when trying to detach and nothing is attached */
- return -ENOENT;
- }
- if (flags & BPF_F_ALLOW_MULTI) {
- /* find the prog and detach it */
- list_for_each_entry(pl, progs, node) {
- if (pl->prog != prog)
- continue;
- old_prog = prog;
- /* mark it deleted, so it's ignored while
- * recomputing effective
- */
- pl->prog = NULL;
- break;
- }
- if (!old_prog)
- return -ENOENT;
- } else {
- /* to maintain backward compatibility, NONE and OVERRIDE cgroups
- * allow detaching with an invalid FD (prog == NULL)
- */
- pl = list_first_entry(progs, typeof(*pl), node);
- old_prog = pl->prog;
- pl->prog = NULL;
- }
- err = update_effective_progs(cgrp, type);
- if (err)
- goto cleanup;
- /* now can actually delete it from this cgroup list */
- list_del(&pl->node);
- bpf_cgroup_storage_unlink(pl->storage);
- bpf_cgroup_storage_free(pl->storage);
- kfree(pl);
- if (list_empty(progs))
- /* last program was detached, reset flags to zero */
- cgrp->bpf.flags[type] = 0;
- bpf_prog_put(old_prog);
- static_branch_dec(&cgroup_bpf_enabled_key);
- return 0;
- cleanup:
- /* and restore back old_prog */
- pl->prog = old_prog;
- return err;
- }
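- /* Editor's sketch, not part of the original source (reuses the includes
- * from the attach sketch above): detaching from userspace. As enforced
- * above, MULTI cgroups require a valid prog fd, while NONE/OVERRIDE
- * cgroups also accept an invalid fd.
- */
- static int detach_egress(int cgrp_fd, int prog_fd)
- {
- union bpf_attr attr;
- memset(&attr, 0, sizeof(attr));
- attr.target_fd = cgrp_fd;
- attr.attach_bpf_fd = prog_fd; /* only consulted for MULTI cgroups */
- attr.attach_type = BPF_CGROUP_INET_EGRESS;
- return syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
- }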
- /* Must be called with cgroup_mutex held to avoid races. */
- int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
- union bpf_attr __user *uattr)
- {
- __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
- enum bpf_attach_type type = attr->query.attach_type;
- struct list_head *progs = &cgrp->bpf.progs[type];
- u32 flags = cgrp->bpf.flags[type];
- int cnt, ret = 0, i;
- if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
- cnt = bpf_prog_array_length(cgrp->bpf.effective[type]);
- else
- cnt = prog_list_length(progs);
- if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
- return -EFAULT;
- if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
- return -EFAULT;
- if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
- /* return early if user requested only program count + flags */
- return 0;
- if (attr->query.prog_cnt < cnt) {
- cnt = attr->query.prog_cnt;
- ret = -ENOSPC;
- }
- if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
- return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type],
- prog_ids, cnt);
- } else {
- struct bpf_prog_list *pl;
- u32 id;
- i = 0;
- list_for_each_entry(pl, progs, node) {
- id = pl->prog->aux->id;
- if (copy_to_user(prog_ids + i, &id, sizeof(id)))
- return -EFAULT;
- if (++i == cnt)
- break;
- }
- }
- return ret;
- }
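- /* Editor's sketch, not part of the original source (same includes as the
- * sketches above): querying from userspace. With BPF_F_QUERY_EFFECTIVE the
- * kernel reports the effective array rather than only this cgroup's own
- * list; prog_cnt is written back with the total program count, and
- * -ENOSPC means the ids buffer held fewer than that.
- */
- static int query_egress(int cgrp_fd, __u32 *ids, __u32 *cnt)
- {
- union bpf_attr attr;
- int err;
- memset(&attr, 0, sizeof(attr));
- attr.query.target_fd = cgrp_fd;
- attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
- attr.query.query_flags = BPF_F_QUERY_EFFECTIVE;
- attr.query.prog_ids = (__u64)(unsigned long)ids;
- attr.query.prog_cnt = *cnt;
- err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
- if (!err)
- *cnt = attr.query.prog_cnt;
- return err;
- }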
- int cgroup_bpf_prog_attach(const union bpf_attr *attr,
- enum bpf_prog_type ptype, struct bpf_prog *prog)
- {
- struct cgroup *cgrp;
- int ret;
- cgrp = cgroup_get_from_fd(attr->target_fd);
- if (IS_ERR(cgrp))
- return PTR_ERR(cgrp);
- ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
- attr->attach_flags);
- cgroup_put(cgrp);
- return ret;
- }
- int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
- {
- struct bpf_prog *prog;
- struct cgroup *cgrp;
- int ret;
- cgrp = cgroup_get_from_fd(attr->target_fd);
- if (IS_ERR(cgrp))
- return PTR_ERR(cgrp);
- prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
- if (IS_ERR(prog))
- prog = NULL;
- ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
- if (prog)
- bpf_prog_put(prog);
- cgroup_put(cgrp);
- return ret;
- }
- int cgroup_bpf_prog_query(const union bpf_attr *attr,
- union bpf_attr __user *uattr)
- {
- struct cgroup *cgrp;
- int ret;
- cgrp = cgroup_get_from_fd(attr->query.target_fd);
- if (IS_ERR(cgrp))
- return PTR_ERR(cgrp);
- ret = cgroup_bpf_query(cgrp, attr, uattr);
- cgroup_put(cgrp);
- return ret;
- }
- /**
- * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
- * @sk: The socket sending or receiving traffic
- * @skb: The skb that is being sent or received
- * @type: The type of program to be executed
- *
- * If no socket is passed, or the socket is not of type INET or INET6,
- * this function does nothing and returns 0.
- *
- * The program type passed in via @type must be suitable for network
- * filtering. No further check is performed to assert that.
- *
- * This function will return %-EPERM if an attached program was found
- * and it returned != 1 during execution. In all other cases, 0 is returned.
- */
- int __cgroup_bpf_run_filter_skb(struct sock *sk,
- struct sk_buff *skb,
- enum bpf_attach_type type)
- {
- unsigned int offset = skb->data - skb_network_header(skb);
- struct sock *save_sk;
- struct cgroup *cgrp;
- int ret;
- if (!sk || !sk_fullsock(sk))
- return 0;
- if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
- return 0;
- cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
- save_sk = skb->sk;
- skb->sk = sk;
- __skb_push(skb, offset);
- ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
- bpf_prog_run_save_cb);
- __skb_pull(skb, offset);
- skb->sk = save_sk;
- return ret == 1 ? 0 : -EPERM;
- }
- EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
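- /* Editor's sketch, not part of the original source: a minimal program of
- * the kind executed by the array above. Returning 1 lets the packet pass;
- * any other return value makes this function report -EPERM. Assumes clang
- * -target bpf and libbpf's <bpf/bpf_helpers.h> for SEC().
- */
- #include <linux/bpf.h>
- #include <bpf/bpf_helpers.h>
- SEC("cgroup_skb/egress")
- int cg_allow_all(struct __sk_buff *skb)
- {
- return 1; /* 1 == allow; anything else is treated as deny */
- }
- char _license[] SEC("license") = "GPL";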
- /**
- * __cgroup_bpf_run_filter_sk() - Run a program on a sock
- * @sk: sock structure to manipulate
- * @type: The type of program to be executed
- *
- * The socket passed is expected to be of type INET or INET6.
- *
- * The program type passed in via @type must be suitable for sock
- * filtering. No further check is performed to assert that.
- *
- * This function will return %-EPERM if an attached program was found
- * and it returned != 1 during execution. In all other cases, 0 is returned.
- */
- int __cgroup_bpf_run_filter_sk(struct sock *sk,
- enum bpf_attach_type type)
- {
- struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
- int ret;
- ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
- return ret == 1 ? 0 : -EPERM;
- }
- EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
- /**
- * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
- * provided by user sockaddr
- * @sk: sock struct that will use sockaddr
- * @uaddr: sockaddr struct provided by user
- * @type: The type of program to be executed
- * @t_ctx: Pointer to attach type specific context
- *
- * The socket is expected to be of type INET or INET6.
- *
- * This function will return %-EPERM if an attached program is found and
- * its return value was != 1 during execution. In all other cases, 0 is returned.
- */
- int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
- struct sockaddr *uaddr,
- enum bpf_attach_type type,
- void *t_ctx)
- {
- struct bpf_sock_addr_kern ctx = {
- .sk = sk,
- .uaddr = uaddr,
- .t_ctx = t_ctx,
- };
- struct sockaddr_storage unspec;
- struct cgroup *cgrp;
- int ret;
- /* Check socket family since not all sockets represent a network
- * endpoint (e.g. AF_UNIX).
- */
- if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
- return 0;
- if (!ctx.uaddr) {
- memset(&unspec, 0, sizeof(unspec));
- ctx.uaddr = (struct sockaddr *)&unspec;
- }
- cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
- ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
- return ret == 1 ? 0 : -EPERM;
- }
- EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
- /**
- * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
- * @sk: socket to get cgroup from
- * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
- * sk with connection information (IP addresses, etc.). May not contain
- * cgroup info if it is a req sock.
- * @type: The type of program to be executed
- *
- * The socket passed is expected to be of type INET or INET6.
- *
- * The program type passed in via @type must be suitable for sock_ops
- * filtering. No further check is performed to assert that.
- *
- * This function will return %-EPERM if an attached program was found
- * and it returned != 1 during execution. In all other cases, 0 is returned.
- */
- int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
- struct bpf_sock_ops_kern *sock_ops,
- enum bpf_attach_type type)
- {
- struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
- int ret;
- ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
- BPF_PROG_RUN);
- return ret == 1 ? 0 : -EPERM;
- }
- EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
- int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
- short access, enum bpf_attach_type type)
- {
- struct cgroup *cgrp;
- struct bpf_cgroup_dev_ctx ctx = {
- .access_type = (access << 16) | dev_type,
- .major = major,
- .minor = minor,
- };
- int allow = 1;
- rcu_read_lock();
- cgrp = task_dfl_cgroup(current);
- allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
- BPF_PROG_RUN);
- rcu_read_unlock();
- return !allow;
- }
- EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);
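- /* Editor's sketch, not part of the original source (same BPF build
- * assumptions as the skb sketch above): a device-control program consuming
- * the ctx built here; access_type packs (access << 16) | dev_type. It
- * allows character device 1:3 (/dev/null) and denies everything else; a 0
- * return makes __cgroup_bpf_check_dev_permission() return 1 (denied).
- */
- SEC("cgroup/dev")
- int cg_dev_null_only(struct bpf_cgroup_dev_ctx *ctx)
- {
- __u32 dev_type = ctx->access_type & 0xffff;
- if (dev_type == BPF_DEVCG_DEV_CHAR && ctx->major == 1 && ctx->minor == 3)
- return 1; /* allow */
- return 0; /* deny */
- }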
- static const struct bpf_func_proto *
- cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
- {
- switch (func_id) {
- case BPF_FUNC_map_lookup_elem:
- return &bpf_map_lookup_elem_proto;
- case BPF_FUNC_map_update_elem:
- return &bpf_map_update_elem_proto;
- case BPF_FUNC_map_delete_elem:
- return &bpf_map_delete_elem_proto;
- case BPF_FUNC_get_current_uid_gid:
- return &bpf_get_current_uid_gid_proto;
- case BPF_FUNC_get_local_storage:
- return &bpf_get_local_storage_proto;
- case BPF_FUNC_trace_printk:
- if (capable(CAP_SYS_ADMIN))
- return bpf_get_trace_printk_proto();
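- /* fall through */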
- default:
- return NULL;
- }
- }
- static bool cgroup_dev_is_valid_access(int off, int size,
- enum bpf_access_type type,
- const struct bpf_prog *prog,
- struct bpf_insn_access_aux *info)
- {
- const int size_default = sizeof(__u32);
- if (type == BPF_WRITE)
- return false;
- if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
- return false;
- /* The verifier guarantees that size > 0. */
- if (off % size != 0)
- return false;
- switch (off) {
- case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
- bpf_ctx_record_field_size(info, size_default);
- if (!bpf_ctx_narrow_access_ok(off, size, size_default))
- return false;
- break;
- default:
- if (size != size_default)
- return false;
- }
- return true;
- }
- const struct bpf_prog_ops cg_dev_prog_ops = {
- };
- const struct bpf_verifier_ops cg_dev_verifier_ops = {
- .get_func_proto = cgroup_dev_func_proto,
- .is_valid_access = cgroup_dev_is_valid_access,
- };