offload.c
/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

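/* A minimal ordering sketch (matching the callers below): when both locks are
 * needed, RTNL must already be held before this semaphore is taken, e.g.:
 *
 *	rtnl_lock();
 *	down_write(&bpf_devs_lock);
 *	...
 *	up_write(&bpf_devs_lock);
 *	rtnl_unlock();
 */
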
struct bpf_offload_dev {
	struct list_head netdevs;
};

struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

static struct rhashtable offdevs;
static bool offdevs_inited;

static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	if (!offdevs_inited)
		return NULL;
	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

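/* Set up offload state for a program being loaded; attr->prog_ifindex names
 * the target netdev, which must already have been registered for offload via
 * bpf_offload_dev_netdev_register().
 */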
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}

static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
			     struct netdev_bpf *data)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct net_device *netdev;

	ASSERT_RTNL();

	if (!offload)
		return -ENODEV;
	netdev = offload->netdev;

	data->command = cmd;

	return netdev->netdev_ops->ndo_bpf(netdev, data);
}

int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
	struct netdev_bpf data = {};
	int err;

	data.verifier.prog = env->prog;

	rtnl_lock();
	err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
	if (err)
		goto exit_unlock;

	env->prog->aux->offload->dev_ops = data.verifier.ops;
	env->prog->aux->offload->dev_state = true;
exit_unlock:
	rtnl_unlock();
	return err;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};

	data.offload.prog = prog;

	if (offload->dev_state)
		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct netdev_bpf data = {};
	int ret;

	data.offload.prog = prog;

	rtnl_lock();
	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
	rtnl_unlock();

	return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	void *res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

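/* Create a device-bound map; attr->map_ifindex names the target netdev and
 * the device allocates its state through the BPF_OFFLOAD_MAP_ALLOC command.
 */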
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = kzalloc(sizeof(*offmap), GFP_USER);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	kfree(offmap);
	return ERR_PTR(err);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	kfree(offmap);
}

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	void *res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_dev_bound(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

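/* Register @netdev as one of the ports of @offdev so that programs and maps
 * can be bound to it; the entry lives in the offdevs hashtable keyed by the
 * netdev pointer.
 */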
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	down_write(&bpf_devs_lock);
	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_unlock_free;
	}

	list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	up_write(&bpf_devs_lock);
	return 0;

err_unlock_free:
	up_write(&bpf_devs_lock);
	kfree(ondev);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

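/* Unregister @netdev from @offdev. Offloaded objects are migrated to another
 * netdev of the same device if one exists, otherwise they are destroyed.
 */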
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		goto unlock;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
	list_del(&ondev->offdev_netdevs);

	/* Try to move the objects to another netdev of the device */
	altdev = list_first_entry_or_null(&offdev->netdevs,
					  struct bpf_offload_netdev,
					  offdev_netdevs);
	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
unlock:
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

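/* Allocate a bpf_offload_dev representing one offload-capable device; the
 * global offdevs hashtable is initialized lazily on the first call.
 */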
struct bpf_offload_dev *bpf_offload_dev_create(void)
{
	struct bpf_offload_dev *offdev;
	int err;

	down_write(&bpf_devs_lock);
	if (!offdevs_inited) {
		err = rhashtable_init(&offdevs, &offdevs_params);
		if (err) {
			up_write(&bpf_devs_lock);
			return ERR_PTR(err);
		}
		offdevs_inited = true;
	}
	up_write(&bpf_devs_lock);

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);
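
/* A minimal driver-side usage sketch of the exported API above (illustrative
 * only, not taken from a real driver; error handling elided). Note that
 * bpf_offload_dev_netdev_unregister() asserts RTNL is held:
 *
 *	bdev = bpf_offload_dev_create();			at device probe
 *	bpf_offload_dev_netdev_register(bdev, netdev);		for each port
 *	...
 *	bpf_offload_dev_netdev_unregister(bdev, netdev);	per port, under RTNL
 *	bpf_offload_dev_destroy(bdev);				at device removal
 */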