act_bpf.c

/*
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>

#define ACT_BPF_NAME_LEN	256

struct tcf_bpf_cfg {
	struct bpf_prog *filter;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	u16 bpf_num_ops;
	bool is_ebpf;
};

static unsigned int bpf_net_id;
static struct tc_action_ops act_bpf_ops;

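/* Per-packet action handler: run the attached BPF program on the skb under
 * RCU protection. At ingress, skb->data sits just past the MAC header, so the
 * header is pushed back before the program runs (and pulled again afterwards)
 * to give the program the same view of the packet it would get at egress.
 * The program's return code is then mapped to a TC verdict below.
 */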
static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
		       struct tcf_result *res)
{
	bool at_ingress = skb_at_tc_ingress(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct bpf_prog *filter;
	int action, filter_res;

	tcf_lastuse_update(&prog->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

	rcu_read_lock();
	filter = rcu_dereference(prog->filter);
	if (at_ingress) {
		__skb_push(skb, skb->mac_len);
		bpf_compute_data_pointers(skb);
		filter_res = BPF_PROG_RUN(filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		bpf_compute_data_pointers(skb);
		filter_res = BPF_PROG_RUN(filter, skb);
	}
	rcu_read_unlock();

	/* A BPF program may overwrite the default action opcode.
	 * As in cls_bpf, if filter_res == -1 we use the default
	 * action specified from tc.
	 *
	 * In case a different well-known TC_ACT opcode has been
	 * returned, it will overwrite the default one.
	 *
	 * For everything else that is unknown, TC_ACT_UNSPEC is
	 * returned.
	 */
	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
	case TC_ACT_REDIRECT:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
		break;
	case TC_ACT_UNSPEC:
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	return action;
}

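/* Only classic BPF actions keep a copy of the original sock_filter ops around
 * (so they can be dumped back to user space); a NULL bpf_ops therefore
 * identifies an eBPF-backed action.
 */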
static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
{
	return !prog->bpf_ops;
}

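/* Dump helpers: a classic BPF action is dumped as its instruction count and
 * raw ops array; an eBPF action is dumped as its (optional) name plus the
 * program id and tag.
 */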
static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_ACT_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

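/* Serialize the action's parameters, program info and timestamps into a
 * netlink dump, holding tcf_lock against concurrent updates. On failure the
 * skb is trimmed back to where this dump started.
 */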
static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
			int bind, int ref)
{
	unsigned char *tp = skb_tail_pointer(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct tc_act_bpf opt = {
		.index   = prog->tcf_index,
		.refcnt  = refcount_read(&prog->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;
	int ret;

	spin_lock_bh(&prog->tcf_lock);
	opt.action = prog->tcf_action;
	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (tcf_bpf_is_ebpf(prog))
		ret = tcf_bpf_dump_ebpf_info(prog, skb);
	else
		ret = tcf_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	tcf_tm_dump(&tm, &prog->tcf_tm);
	if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
			  TCA_ACT_BPF_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&prog->tcf_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&prog->tcf_lock);
	nlmsg_trim(skb, tp);
	return -1;
}

static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
	[TCA_ACT_BPF_PARMS]	= { .len = sizeof(struct tc_act_bpf) },
	[TCA_ACT_BPF_FD]	= { .type = NLA_U32 },
	[TCA_ACT_BPF_NAME]	= { .type = NLA_NUL_STRING,
				    .len = ACT_BPF_NAME_LEN },
	[TCA_ACT_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_ACT_BPF_OPS]	= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

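/* Build a classic BPF program from the TCA_ACT_BPF_OPS{,_LEN} attributes:
 * validate that the instruction count is within 1..BPF_MAXINSNS and matches
 * the attribute length, keep a copy of the raw ops for later dumps, and let
 * bpf_prog_create() translate (and possibly JIT) them.
 */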
static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	cfg->bpf_ops = bpf_ops;
	cfg->bpf_num_ops = bpf_num_ops;
	cfg->filter = fp;
	cfg->is_ebpf = false;

	return 0;
}

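/* Take a reference on an already-loaded eBPF program identified by the fd in
 * TCA_ACT_BPF_FD; the program must be of type BPF_PROG_TYPE_SCHED_ACT. The
 * user-supplied name, if any, is duplicated purely for later dumps.
 */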
static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_ACT_BPF_NAME]) {
		name = nla_memdup(tb[TCA_ACT_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	cfg->bpf_name = name;
	cfg->filter = fp;
	cfg->is_ebpf = true;

	return 0;
}

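/* Release whatever a cfg holds: drop the reference on an eBPF program or
 * destroy a classic one, then free the ops copy and the name.
 */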
static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *filter = cfg->filter;

	if (filter) {
		if (cfg->is_ebpf)
			bpf_prog_put(filter);
		else
			bpf_prog_destroy(filter);
	}

	kfree(cfg->bpf_ops);
	kfree(cfg->bpf_name);
}

static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
				  struct tcf_bpf_cfg *cfg)
{
	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
	/* Updates to prog->filter cannot race with us: this is called either
	 * with the tcf lock held or during final cleanup from the RCU
	 * callback.
	 */
	cfg->filter = rcu_dereference_protected(prog->filter, 1);

	cfg->bpf_ops = prog->bpf_ops;
	cfg->bpf_name = prog->bpf_name;
}

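/* .init callback: create a new bpf action or update an existing one. Exactly
 * one of classic BPF ops or an eBPF program fd must be supplied. The new
 * program is published with rcu_assign_pointer() under tcf_lock; when
 * replacing, the old program is only torn down after synchronize_rcu(), so
 * the packet path can never run a freed program.
 */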
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **act,
			int replace, int bind, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);
	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
	struct tcf_bpf_cfg cfg, old;
	struct tc_act_bpf *parm;
	struct tcf_bpf *prog;
	bool is_bpf, is_ebpf;
	int ret, res = 0;
	u32 index;

	if (!nla)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy, NULL);
	if (ret < 0)
		return ret;

	if (!tb[TCA_ACT_BPF_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
	index = parm->index;
	ret = tcf_idr_check_alloc(tn, &index, act, bind);
	if (!ret) {
		ret = tcf_idr_create(tn, index, est, act,
				     &act_bpf_ops, bind, true);
		if (ret < 0) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		res = ACT_P_CREATED;
	} else if (ret > 0) {
		/* Don't override defaults. */
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*act, bind);
			return -EEXIST;
		}
	} else {
		return ret;
	}

	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
	is_ebpf = tb[TCA_ACT_BPF_FD];

	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&cfg, 0, sizeof(cfg));

	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
		       tcf_bpf_init_from_efd(tb, &cfg);
	if (ret < 0)
		goto out;

	prog = to_bpf(*act);

	spin_lock_bh(&prog->tcf_lock);
	if (res != ACT_P_CREATED)
		tcf_bpf_prog_fill_cfg(prog, &old);

	prog->bpf_ops = cfg.bpf_ops;
	prog->bpf_name = cfg.bpf_name;

	if (cfg.bpf_num_ops)
		prog->bpf_num_ops = cfg.bpf_num_ops;

	prog->tcf_action = parm->action;
	rcu_assign_pointer(prog->filter, cfg.filter);
	spin_unlock_bh(&prog->tcf_lock);

	if (res == ACT_P_CREATED) {
		tcf_idr_insert(tn, *act);
	} else {
		/* Make sure the program being replaced is no longer executing. */
		synchronize_rcu();
		tcf_bpf_cfg_cleanup(&old);
	}

	return res;

out:
	tcf_idr_release(*act, bind);
	return ret;
}

static void tcf_bpf_cleanup(struct tc_action *act)
{
	struct tcf_bpf_cfg tmp;

	tcf_bpf_prog_fill_cfg(to_bpf(act), &tmp);
	tcf_bpf_cfg_cleanup(&tmp);
}

static int tcf_bpf_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_idr_search(tn, a, index);
}

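/* Glue: register the callbacks above as the "bpf" action kind, and give each
 * network namespace its own action index via the pernet operations.
 */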
static struct tc_action_ops act_bpf_ops __read_mostly = {
	.kind		= "bpf",
	.type		= TCA_ACT_BPF,
	.owner		= THIS_MODULE,
	.act		= tcf_bpf_act,
	.dump		= tcf_bpf_dump,
	.cleanup	= tcf_bpf_cleanup,
	.init		= tcf_bpf_init,
	.walk		= tcf_bpf_walker,
	.lookup		= tcf_bpf_search,
	.size		= sizeof(struct tcf_bpf),
};

static __net_init int bpf_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tc_action_net_init(net, tn, &act_bpf_ops);
}

static void __net_exit bpf_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, bpf_net_id);
}

static struct pernet_operations bpf_net_ops = {
	.init		= bpf_init_net,
	.exit_batch	= bpf_exit_net,
	.id		= &bpf_net_id,
	.size		= sizeof(struct tc_action_net),
};

static int __init bpf_init_module(void)
{
	return tcf_register_action(&act_bpf_ops, &bpf_net_ops);
}

static void __exit bpf_cleanup_module(void)
{
	tcf_unregister_action(&act_bpf_ops, &bpf_net_ops);
}

module_init(bpf_init_module);
module_exit(bpf_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("TC BPF based action");
MODULE_LICENSE("GPL v2");