spectrum_flower.c

// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"
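
/* Translate the rule's TC actions (tcf_exts) into mlxsw ACL actions on
 * rulei. A hardware counter action is always appended first so that
 * flower statistics can be read back later.
 */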
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct tcf_exts *exts,
					 struct netlink_ext_ack *extack)
{
	const struct tc_action *a;
	int err, i;

	if (!tcf_exts_has_actions(exts))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
	if (err)
		return err;

	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_gact_ok(a)) {
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
		} else if (is_tcf_gact_shot(a)) {
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}
		} else if (is_tcf_gact_trap(a)) {
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
		} else if (is_tcf_gact_goto_chain(a)) {
			u32 chain_index = tcf_gact_goto_chain_index(a);
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = tcf_mirred_dev(a);
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
		} else if (is_tcf_mirred_egress_mirror(a)) {
			struct net_device *out_dev = tcf_mirred_dev(a);

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);

			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  action, vid,
							  proto, prio, extack);
			if (err)
				return err;
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
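
/* Copy the IPv4 source/destination addresses and their masks into the
 * 32-bit IP key elements.
 */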
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv4_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv4_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->mask);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &key->src,
				       (char *) &mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &key->dst,
				       (char *) &mask->dst, 4);
}
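
/* An IPv6 address does not fit in a single key element, so each 128-bit
 * address is split into four 32-bit chunks, most significant chunk first.
 */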
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv6_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv6_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->mask);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &key->src.s6_addr[0x0],
				       &mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &key->src.s6_addr[0x4],
				       &mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &key->src.s6_addr[0x8],
				       &mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &key->src.s6_addr[0xC],
				       &mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &key->dst.s6_addr[0x0],
				       &mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &key->dst.s6_addr[0x4],
				       &mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &key->dst.s6_addr[0x8],
				       &mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &key->dst.s6_addr[0xC],
				       &mask->dst.s6_addr[0xC], 4);
}
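
/* Match on L4 source/destination ports. Port keys only make sense for
 * TCP and UDP, so anything else is rejected.
 */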
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f,
				       u8 ip_proto)
{
	struct flow_dissector_key_ports *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_PORTS,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_PORTS,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(key->dst), ntohs(mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(key->src), ntohs(mask->src));
	return 0;
}
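
/* Match on TCP flags; valid only when the rule already matches TCP. */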
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct tc_cls_flower_offload *f,
				     u8 ip_proto)
{
	struct flow_dissector_key_tcp *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_TCP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_TCP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(key->flags), ntohs(mask->flags));
	return 0;
}
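
/* Match on TTL, ECN and DSCP. The TOS byte carries ECN in its two low
 * bits and DSCP in its upper six bits.
 */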
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct tc_cls_flower_offload *f,
				    u16 n_proto)
{
	struct flow_dissector_key_ip *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_IP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_IP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       key->ttl, mask->ttl);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       key->tos & 0x3, mask->tos & 0x3);
	/* DSCP is the upper six bits of TOS, so shift by 2; the original
	 * ">> 6" kept only two of the six DSCP bits and was later fixed
	 * upstream.
	 */
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       key->tos >> 2, mask->tos >> 2);
	return 0;
}
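
/* Top-level classifier parser: reject dissector keys the device cannot
 * offload, translate the supported ones into flex-key elements and then
 * parse the actions.
 */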
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct tc_cls_flower_offload *f)
{
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);
		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       key->ip_proto, mask->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       key->dst, mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       key->dst + 2, mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       key->src, mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       key->src + 2, mask->src + 2, 4);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);

		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}
		if (mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       key->vlan_id,
						       mask->vlan_id);
		if (mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       key->vlan_priority,
						       mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);
	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f,
				       n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts,
					     f->common.extack);
}
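
/* Offload a new flower rule: look up the ruleset for the chain, create
 * the rule, parse match and actions, commit it and install it in
 * hardware.
 */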
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}
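
/* Remove an offloaded rule identified by the flower cookie. */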
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
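
/* Read the rule's hardware counter and report packets, bytes and last
 * use back to the TC core.
 */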
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}
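
/* A chain template pins down the set of key elements the chain will use;
 * parse it into a temporary rule info and keep a ruleset reference so the
 * backing group is created with the right key usage.
 */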
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);
	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}
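
/* Drop the ruleset reference taken when the template was created. */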
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* Two puts: one for the reference taken by the get above, one for
	 * the reference kept in mlxsw_sp_flower_tmplt_create().
	 */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}