/*
 * net/pkt_cls.h - packet classifier frontend definitions.
 */
  1. #ifndef __NET_PKT_CLS_H
  2. #define __NET_PKT_CLS_H
  3. #include <linux/pkt_cls.h>
  4. #include <net/sch_generic.h>
  5. #include <net/act_api.h>
  6. /* Basic packet classifier frontend definitions. */
/* Control structure handed to a classifier's walk callback while
 * iterating over all filters of a tcf_proto instance.
 */
struct tcf_walker {
	int	stop;	/* set non-zero to abort the walk — presumably by fn(); confirm against callers */
	int	skip;	/* NOTE(review): looks like "entries to skip before calling fn()" — confirm */
	int	count;	/* NOTE(review): looks like "entries visited so far" — confirm */
	int	(*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *);
};

/* Register/unregister a classifier implementation with the core. */
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
/* Atomically store @cl in *@clp and return the previous value.
 * Lockless building block; cls_set_class() wraps it with the qdisc
 * tree lock for callers that need that protection.
 */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}
  20. static inline unsigned long
  21. cls_set_class(struct tcf_proto *tp, unsigned long *clp,
  22. unsigned long cl)
  23. {
  24. unsigned long old_cl;
  25. tcf_tree_lock(tp);
  26. old_cl = __cls_set_class(clp, cl);
  27. tcf_tree_unlock(tp);
  28. return old_cl;
  29. }
  30. static inline void
  31. tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
  32. {
  33. unsigned long cl;
  34. cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, r->classid);
  35. cl = cls_set_class(tp, &r->class, cl);
  36. if (cl)
  37. tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
  38. }
  39. static inline void
  40. tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
  41. {
  42. unsigned long cl;
  43. if ((cl = __cls_set_class(&r->class, 0)) != 0)
  44. tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
  45. }
/* Filter extensions: the actions/policing attached to one classifier
 * filter instance.
 */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type;			/* for backward compat(TCA_OLD_COMPAT) */
	int	nr_actions;		/* number of valid entries in @actions */
	struct tc_action **actions;	/* array allocated by tcf_exts_init() */
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int	action;
	int	police;
};
  58. static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
  59. {
  60. #ifdef CONFIG_NET_CLS_ACT
  61. exts->type = 0;
  62. exts->nr_actions = 0;
  63. exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
  64. GFP_KERNEL);
  65. if (!exts->actions)
  66. return -ENOMEM;
  67. #endif
  68. exts->action = action;
  69. exts->police = police;
  70. return 0;
  71. }
/**
 * tcf_exts_is_predicative - check if a predicative extension is present
 * @exts: tc filter extensions handle
 *
 * Returns 1 if a predicative extension is present, i.e. an extension which
 * might cause further actions and thus overrule the regular tcf_result.
 */
static inline int
tcf_exts_is_predicative(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	/* Returns the raw action count (non-zero == predicative),
	 * not a normalised 0/1 value.
	 */
	return exts->nr_actions;
#else
	return 0;
#endif
}
/**
 * tcf_exts_is_available - check if at least one extension is present
 * @exts: tc filter extensions handle
 *
 * Returns 1 if at least one extension is present.
 */
static inline int
tcf_exts_is_available(struct tcf_exts *exts)
{
	/* All non-predicative extensions must be added here. */
	return tcf_exts_is_predicative(exts);
}
  100. static inline void tcf_exts_to_list(const struct tcf_exts *exts,
  101. struct list_head *actions)
  102. {
  103. #ifdef CONFIG_NET_CLS_ACT
  104. int i;
  105. for (i = 0; i < exts->nr_actions; i++) {
  106. struct tc_action *a = exts->actions[i];
  107. list_add_tail(&a->list, actions);
  108. }
  109. #endif
  110. }
  111. /**
  112. * tcf_exts_exec - execute tc filter extensions
  113. * @skb: socket buffer
  114. * @exts: tc filter extensions handle
  115. * @res: desired result
  116. *
  117. * Executes all configured extensions. Returns 0 on a normal execution,
  118. * a negative number if the filter must be considered unmatched or
  119. * a positive action code (TC_ACT_*) which must be returned to the
  120. * underlying layer.
  121. */
  122. static inline int
  123. tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
  124. struct tcf_result *res)
  125. {
  126. #ifdef CONFIG_NET_CLS_ACT
  127. if (exts->nr_actions)
  128. return tcf_action_exec(skb, exts->actions, exts->nr_actions,
  129. res);
  130. #endif
  131. return 0;
  132. }
#ifdef CONFIG_NET_CLS_ACT
/* True when no actions are attached to the extension block. */
#define tc_no_actions(_exts)	((_exts)->nr_actions == 0)
/* True when exactly one action is attached. */
#define tc_single_action(_exts)	((_exts)->nr_actions == 1)
#else /* CONFIG_NET_CLS_ACT */
/* Without CONFIG_NET_CLS_ACT there are never any actions. */
#define tc_no_actions(_exts)	true
#define tc_single_action(_exts)	false
#endif /* CONFIG_NET_CLS_ACT */
/* Extension-block management API; implementations live outside this
 * header. validate parses netlink attributes into @exts, destroy frees
 * it, change swaps @src into @dst, and the dump helpers emit it (and
 * its stats) to a netlink skb.
 */
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
		     struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
/**
 * struct tcf_pkt_info - packet information
 * @ptr: current position within the packet data
 *	(NOTE(review): assumed from ematch usage — confirm against users)
 * @nexthdr: next-header value/offset; exact semantics not derivable
 *	from this header — confirm against the ematch implementations
 */
struct tcf_pkt_info {
	unsigned char *	ptr;
	int		nexthdr;
};
#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: network namespace associated with this match
 *	(NOTE(review): assumed — confirm against tcf_em_tree_validate)
 */
struct tcf_ematch {
	struct tcf_ematch_ops *	ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};
  174. static inline int tcf_em_is_container(struct tcf_ematch *em)
  175. {
  176. return !em->ops;
  177. }
/* TCF_EM_SIMPLE set? Returns the raw flag bit, not a normalised 0/1. */
static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

/* TCF_EM_INVERT set (match result must be inverted)? Raw bit value. */
static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

/* True (0/1) if this is the last match in its relation chain. */
static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}
  190. static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
  191. {
  192. if (tcf_em_last_match(em))
  193. return 1;
  194. if (result == 0 && em->flags & TCF_EM_REL_AND)
  195. return 1;
  196. if (result != 0 && em->flags & TCF_EM_REL_OR)
  197. return 1;
  198. return 0;
  199. }
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches (hdr.nmatches entries — see
 *	tcf_em_tree_match(); confirm allocation in tree_validate)
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};
  210. /**
  211. * struct tcf_ematch_ops - ematch module operations
  212. *
  213. * @kind: identifier (kind) of this ematch module
  214. * @datalen: length of expected configuration data (optional)
  215. * @change: called during validation (optional)
  216. * @match: called during ematch tree evaluation, must return 1/0
  217. * @destroy: called during destroyage (optional)
  218. * @dump: called during dumping process (optional)
  219. * @owner: owner, must be set to THIS_MODULE
  220. * @link: link to previous/next ematch module (internal use)
  221. */
  222. struct tcf_ematch_ops {
  223. int kind;
  224. int datalen;
  225. int (*change)(struct net *net, void *,
  226. int, struct tcf_ematch *);
  227. int (*match)(struct sk_buff *, struct tcf_ematch *,
  228. struct tcf_pkt_info *);
  229. void (*destroy)(struct tcf_ematch *);
  230. int (*dump)(struct sk_buff *, struct tcf_ematch *);
  231. struct module *owner;
  232. struct list_head link;
  233. };
  234. int tcf_em_register(struct tcf_ematch_ops *);
  235. void tcf_em_unregister(struct tcf_ematch_ops *);
  236. int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
  237. struct tcf_ematch_tree *);
  238. void tcf_em_tree_destroy(struct tcf_ematch_tree *);
  239. int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
  240. int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
  241. struct tcf_pkt_info *);
  242. /**
  243. * tcf_em_tree_change - replace ematch tree of a running classifier
  244. *
  245. * @tp: classifier kind handle
  246. * @dst: destination ematch tree variable
  247. * @src: source ematch tree (temporary tree from tcf_em_tree_validate)
  248. *
  249. * This functions replaces the ematch tree in @dst with the ematch
  250. * tree in @src. The classifier in charge of the ematch tree may be
  251. * running.
  252. */
  253. static inline void tcf_em_tree_change(struct tcf_proto *tp,
  254. struct tcf_ematch_tree *dst,
  255. struct tcf_ematch_tree *src)
  256. {
  257. tcf_tree_lock(tp);
  258. memcpy(dst, src, sizeof(*dst));
  259. tcf_tree_unlock(tp);
  260. }
  261. /**
  262. * tcf_em_tree_match - evaulate an ematch tree
  263. *
  264. * @skb: socket buffer of the packet in question
  265. * @tree: ematch tree to be used for evaluation
  266. * @info: packet information examined by classifier
  267. *
  268. * This function matches @skb against the ematch tree in @tree by going
  269. * through all ematches respecting their logic relations returning
  270. * as soon as the result is obvious.
  271. *
  272. * Returns 1 if the ematch tree as-one matches, no ematches are configured
  273. * or ematch is not enabled in the kernel, otherwise 0 is returned.
  274. */
  275. static inline int tcf_em_tree_match(struct sk_buff *skb,
  276. struct tcf_ematch_tree *tree,
  277. struct tcf_pkt_info *info)
  278. {
  279. if (tree->hdr.nmatches)
  280. return __tcf_em_tree_match(skb, tree, info);
  281. else
  282. return 1;
  283. }
/* Module alias so ematch modules can be auto-loaded by kind. */
#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

/* CONFIG_NET_EMATCH disabled: empty tree type plus no-op stubs. The
 * (void) casts keep the arguments "used" without evaluating effects,
 * and tcf_em_tree_match always reports a match (1).
 */
struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_change(tp, dst, src) do { } while(0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */
  294. static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
  295. {
  296. switch (layer) {
  297. case TCF_LAYER_LINK:
  298. return skb->data;
  299. case TCF_LAYER_NETWORK:
  300. return skb_network_header(skb);
  301. case TCF_LAYER_TRANSPORT:
  302. return skb_transport_header(skb);
  303. }
  304. return NULL;
  305. }
/* Check that the @len bytes starting at @ptr lie entirely within
 * @skb's linear data area, i.e. in [skb->head, skb_tail_pointer()].
 */
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	/* NOTE(review): the final (ptr <= ptr + len) test appears to
	 * guard against ptr + len wrapping past the address space end;
	 * it must stay last so the bounds checks above see sane values.
	 */
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
  313. #ifdef CONFIG_NET_CLS_IND
  314. #include <net/net_namespace.h>
  315. static inline int
  316. tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
  317. {
  318. char indev[IFNAMSIZ];
  319. struct net_device *dev;
  320. if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
  321. return -EINVAL;
  322. dev = __dev_get_by_name(net, indev);
  323. if (!dev)
  324. return -ENODEV;
  325. return dev->ifindex;
  326. }
  327. static inline bool
  328. tcf_match_indev(struct sk_buff *skb, int ifindex)
  329. {
  330. if (!ifindex)
  331. return true;
  332. if (!skb->skb_iif)
  333. return false;
  334. return ifindex == skb->skb_iif;
  335. }
  336. #endif /* CONFIG_NET_CLS_IND */
/* Description of one u32 key node for hardware offload. */
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8  fshift;
};

/* Description of one u32 hash node (bucket table) for offload. */
struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

/* Commands a driver may receive for cls_u32 offload. */
enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

/* Offload request for cls_u32; which union member is valid is
 * determined by @command (*_KNODE vs *_HNODE).
 */
struct tc_cls_u32_offload {
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
  367. static inline bool tc_should_offload(const struct net_device *dev,
  368. const struct tcf_proto *tp, u32 flags)
  369. {
  370. const struct Qdisc *sch = tp->q;
  371. const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
  372. if (!(dev->features & NETIF_F_HW_TC))
  373. return false;
  374. if (flags & TCA_CLS_FLAGS_SKIP_HW)
  375. return false;
  376. if (!dev->netdev_ops->ndo_setup_tc)
  377. return false;
  378. if (cops && cops->tcf_cl_offload)
  379. return cops->tcf_cl_offload(tp->classid);
  380. return true;
  381. }
  382. static inline bool tc_skip_sw(u32 flags)
  383. {
  384. return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
  385. }
/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	/* Reject any bit outside the two known flags. */
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
		return false;

	/* After the check above only the two flag bits can be set, so
	 * flags ^ (HW|SW) is zero exactly when BOTH are set — the
	 * mutually exclusive combination, which is invalid.
	 */
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}
/* Commands a driver may receive for cls_flower offload. */
enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

/* Offload request for cls_flower; @cookie identifies the filter
 * instance to the driver across commands.
 */
struct tc_cls_flower_offload {
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
};

/* Commands a driver may receive for cls_matchall offload. */
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

/* Offload request for cls_matchall. */
struct tc_cls_matchall_offload {
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

/* Commands a driver may receive for cls_bpf offload. */
enum tc_clsbpf_command {
	TC_CLSBPF_ADD,
	TC_CLSBPF_REPLACE,
	TC_CLSBPF_DESTROY,
	TC_CLSBPF_STATS,
};

/* Offload request for cls_bpf.  @exts_integrated mirrors the
 * direct-action flag of the filter.
 * NOTE(review): exact @gen_flags semantics not derivable from this
 * header — confirm against cls_bpf.c before relying on them.
 */
struct tc_cls_bpf_offload {
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	const char *name;
	bool exts_integrated;
	u32 gen_flags;
};

#endif /* __NET_PKT_CLS_H */