bnxt_tc.c

  1. /* Broadcom NetXtreme-C/E network driver.
  2. *
  3. * Copyright (c) 2017 Broadcom Limited
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation.
  8. */
  9. #include <linux/netdevice.h>
  10. #include <linux/inetdevice.h>
  11. #include <linux/if_vlan.h>
  12. #include <net/flow_dissector.h>
  13. #include <net/pkt_cls.h>
  14. #include <net/tc_act/tc_gact.h>
  15. #include <net/tc_act/tc_skbedit.h>
  16. #include <net/tc_act/tc_mirred.h>
  17. #include <net/tc_act/tc_vlan.h>
  18. #include <net/tc_act/tc_tunnel_key.h>
  19. #include "bnxt_hsi.h"
  20. #include "bnxt.h"
  21. #include "bnxt_sriov.h"
  22. #include "bnxt_tc.h"
  23. #include "bnxt_vfr.h"
  24. #define BNXT_FID_INVALID 0xffff
  25. #define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT))
  26. #define is_vlan_pcp_wildcarded(vlan_tci_mask) \
  27. ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
  28. #define is_vlan_pcp_exactmatch(vlan_tci_mask) \
  29. ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
  30. #define is_vlan_pcp_zero(vlan_tci) \
  31. ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
  32. #define is_vid_exactmatch(vlan_tci_mask) \
  33. ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)
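/* Editorial note (illustrative example, not in the original source):
 * with the standard 802.1Q TCI layout (PCP in bits 15-13, VID in bits
 * 11-0) and VLAN_PRIO_SHIFT == 13, VLAN_TCI(100, 3) == 100 | (3 << 13)
 * == 0x6064. For a TCI mask that is 0x0fff in host byte order (i.e.
 * after ntohs()), is_vid_exactmatch() and is_vlan_pcp_wildcarded() are
 * both true, while is_vlan_pcp_exactmatch() is false.
 */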
  34. /* Return the dst fid of the func for flow forwarding
  35. * For PFs: src_fid is the fid of the PF
   36. * For VF-reps: src_fid is the fid of the VF
  37. */
  38. static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
  39. {
  40. struct bnxt *bp;
  41. /* check if dev belongs to the same switch */
  42. if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
  43. netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
  44. dev->ifindex);
  45. return BNXT_FID_INVALID;
  46. }
  47. /* Is dev a VF-rep? */
  48. if (bnxt_dev_is_vf_rep(dev))
  49. return bnxt_vf_rep_get_fid(dev);
  50. bp = netdev_priv(dev);
  51. return bp->pf.fw_fid;
  52. }
  53. static int bnxt_tc_parse_redir(struct bnxt *bp,
  54. struct bnxt_tc_actions *actions,
  55. const struct tc_action *tc_act)
  56. {
  57. struct net_device *dev = tcf_mirred_dev(tc_act);
  58. if (!dev) {
  59. netdev_info(bp->dev, "no dev in mirred action");
  60. return -EINVAL;
  61. }
  62. actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
  63. actions->dst_dev = dev;
  64. return 0;
  65. }
  66. static int bnxt_tc_parse_vlan(struct bnxt *bp,
  67. struct bnxt_tc_actions *actions,
  68. const struct tc_action *tc_act)
  69. {
  70. switch (tcf_vlan_action(tc_act)) {
  71. case TCA_VLAN_ACT_POP:
  72. actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
  73. break;
  74. case TCA_VLAN_ACT_PUSH:
  75. actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
  76. actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
  77. actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
  78. break;
  79. default:
  80. return -EOPNOTSUPP;
  81. }
  82. return 0;
  83. }
  84. static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
  85. struct bnxt_tc_actions *actions,
  86. const struct tc_action *tc_act)
  87. {
  88. struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
  89. struct ip_tunnel_key *tun_key = &tun_info->key;
  90. if (ip_tunnel_info_af(tun_info) != AF_INET) {
  91. netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
  92. return -EOPNOTSUPP;
  93. }
  94. actions->tun_encap_key = *tun_key;
  95. actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
  96. return 0;
  97. }
  98. static int bnxt_tc_parse_actions(struct bnxt *bp,
  99. struct bnxt_tc_actions *actions,
  100. struct tcf_exts *tc_exts)
  101. {
  102. const struct tc_action *tc_act;
  103. int i, rc;
  104. if (!tcf_exts_has_actions(tc_exts)) {
  105. netdev_info(bp->dev, "no actions");
  106. return -EINVAL;
  107. }
  108. tcf_exts_for_each_action(i, tc_act, tc_exts) {
  109. /* Drop action */
  110. if (is_tcf_gact_shot(tc_act)) {
  111. actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
  112. return 0; /* don't bother with other actions */
  113. }
  114. /* Redirect action */
  115. if (is_tcf_mirred_egress_redirect(tc_act)) {
  116. rc = bnxt_tc_parse_redir(bp, actions, tc_act);
  117. if (rc)
  118. return rc;
  119. continue;
  120. }
  121. /* Push/pop VLAN */
  122. if (is_tcf_vlan(tc_act)) {
  123. rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
  124. if (rc)
  125. return rc;
  126. continue;
  127. }
  128. /* Tunnel encap */
  129. if (is_tcf_tunnel_set(tc_act)) {
  130. rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
  131. if (rc)
  132. return rc;
  133. continue;
  134. }
  135. /* Tunnel decap */
  136. if (is_tcf_tunnel_release(tc_act)) {
  137. actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
  138. continue;
  139. }
  140. }
  141. if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
  142. if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
  143. /* dst_fid is PF's fid */
  144. actions->dst_fid = bp->pf.fw_fid;
  145. } else {
  146. /* find the FID from dst_dev */
  147. actions->dst_fid =
  148. bnxt_flow_get_dst_fid(bp, actions->dst_dev);
  149. if (actions->dst_fid == BNXT_FID_INVALID)
  150. return -EINVAL;
  151. }
  152. }
  153. return 0;
  154. }
  155. #define GET_KEY(flow_cmd, key_type) \
  156. skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
  157. (flow_cmd)->key)
  158. #define GET_MASK(flow_cmd, key_type) \
  159. skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
  160. (flow_cmd)->mask)
  161. static int bnxt_tc_parse_flow(struct bnxt *bp,
  162. struct tc_cls_flower_offload *tc_flow_cmd,
  163. struct bnxt_tc_flow *flow)
  164. {
  165. struct flow_dissector *dissector = tc_flow_cmd->dissector;
  166. u16 addr_type = 0;
  167. /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
  168. if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
  169. (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
  170. netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
  171. dissector->used_keys);
  172. return -EOPNOTSUPP;
  173. }
  174. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
  175. struct flow_dissector_key_control *key =
  176. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);
  177. addr_type = key->addr_type;
  178. }
  179. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
  180. struct flow_dissector_key_basic *key =
  181. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
  182. struct flow_dissector_key_basic *mask =
  183. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
  184. flow->l2_key.ether_type = key->n_proto;
  185. flow->l2_mask.ether_type = mask->n_proto;
  186. if (key->n_proto == htons(ETH_P_IP) ||
  187. key->n_proto == htons(ETH_P_IPV6)) {
  188. flow->l4_key.ip_proto = key->ip_proto;
  189. flow->l4_mask.ip_proto = mask->ip_proto;
  190. }
  191. }
  192. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
  193. struct flow_dissector_key_eth_addrs *key =
  194. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
  195. struct flow_dissector_key_eth_addrs *mask =
  196. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
  197. flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
  198. ether_addr_copy(flow->l2_key.dmac, key->dst);
  199. ether_addr_copy(flow->l2_mask.dmac, mask->dst);
  200. ether_addr_copy(flow->l2_key.smac, key->src);
  201. ether_addr_copy(flow->l2_mask.smac, mask->src);
  202. }
  203. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
  204. struct flow_dissector_key_vlan *key =
  205. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
  206. struct flow_dissector_key_vlan *mask =
  207. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
  208. flow->l2_key.inner_vlan_tci =
  209. cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
  210. flow->l2_mask.inner_vlan_tci =
  211. cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
  212. flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
  213. flow->l2_mask.inner_vlan_tpid = htons(0xffff);
  214. flow->l2_key.num_vlans = 1;
  215. }
  216. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
  217. struct flow_dissector_key_ipv4_addrs *key =
  218. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
  219. struct flow_dissector_key_ipv4_addrs *mask =
  220. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
  221. flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
  222. flow->l3_key.ipv4.daddr.s_addr = key->dst;
  223. flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
  224. flow->l3_key.ipv4.saddr.s_addr = key->src;
  225. flow->l3_mask.ipv4.saddr.s_addr = mask->src;
  226. } else if (dissector_uses_key(dissector,
  227. FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
  228. struct flow_dissector_key_ipv6_addrs *key =
  229. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
  230. struct flow_dissector_key_ipv6_addrs *mask =
  231. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
  232. flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
  233. flow->l3_key.ipv6.daddr = key->dst;
  234. flow->l3_mask.ipv6.daddr = mask->dst;
  235. flow->l3_key.ipv6.saddr = key->src;
  236. flow->l3_mask.ipv6.saddr = mask->src;
  237. }
  238. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
  239. struct flow_dissector_key_ports *key =
  240. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
  241. struct flow_dissector_key_ports *mask =
  242. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
  243. flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
  244. flow->l4_key.ports.dport = key->dst;
  245. flow->l4_mask.ports.dport = mask->dst;
  246. flow->l4_key.ports.sport = key->src;
  247. flow->l4_mask.ports.sport = mask->src;
  248. }
  249. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
  250. struct flow_dissector_key_icmp *key =
  251. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
  252. struct flow_dissector_key_icmp *mask =
  253. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
  254. flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
  255. flow->l4_key.icmp.type = key->type;
  256. flow->l4_key.icmp.code = key->code;
  257. flow->l4_mask.icmp.type = mask->type;
  258. flow->l4_mask.icmp.code = mask->code;
  259. }
  260. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
  261. struct flow_dissector_key_control *key =
  262. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);
  263. addr_type = key->addr_type;
  264. }
  265. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
  266. struct flow_dissector_key_ipv4_addrs *key =
  267. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
  268. struct flow_dissector_key_ipv4_addrs *mask =
  269. GET_MASK(tc_flow_cmd,
  270. FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
  271. flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
  272. flow->tun_key.u.ipv4.dst = key->dst;
  273. flow->tun_mask.u.ipv4.dst = mask->dst;
  274. flow->tun_key.u.ipv4.src = key->src;
  275. flow->tun_mask.u.ipv4.src = mask->src;
  276. } else if (dissector_uses_key(dissector,
  277. FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
  278. return -EOPNOTSUPP;
  279. }
  280. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
  281. struct flow_dissector_key_keyid *key =
  282. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
  283. struct flow_dissector_key_keyid *mask =
  284. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
  285. flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
  286. flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
  287. flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
  288. }
  289. if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
  290. struct flow_dissector_key_ports *key =
  291. GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
  292. struct flow_dissector_key_ports *mask =
  293. GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
  294. flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
  295. flow->tun_key.tp_dst = key->dst;
  296. flow->tun_mask.tp_dst = mask->dst;
  297. flow->tun_key.tp_src = key->src;
  298. flow->tun_mask.tp_src = mask->src;
  299. }
  300. return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
  301. }
  302. static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
  303. {
  304. struct hwrm_cfa_flow_free_input req = { 0 };
  305. int rc;
  306. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
  307. req.flow_handle = flow_handle;
  308. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  309. if (rc)
  310. netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
  311. __func__, flow_handle, rc);
  312. if (rc)
  313. rc = -EIO;
  314. return rc;
  315. }
  316. static int ipv6_mask_len(struct in6_addr *mask)
  317. {
  318. int mask_len = 0, i;
  319. for (i = 0; i < 4; i++)
  320. mask_len += inet_mask_len(mask->s6_addr32[i]);
  321. return mask_len;
  322. }
  323. static bool is_wildcard(void *mask, int len)
  324. {
  325. const u8 *p = mask;
  326. int i;
  327. for (i = 0; i < len; i++) {
  328. if (p[i] != 0)
  329. return false;
  330. }
  331. return true;
  332. }
  333. static bool is_exactmatch(void *mask, int len)
  334. {
  335. const u8 *p = mask;
  336. int i;
  337. for (i = 0; i < len; i++)
  338. if (p[i] != 0xff)
  339. return false;
  340. return true;
  341. }
  342. static bool is_vlan_tci_allowed(__be16 vlan_tci_mask,
  343. __be16 vlan_tci)
  344. {
  345. /* VLAN priority must be either exactly zero or fully wildcarded and
  346. * VLAN id must be exact match.
  347. */
  348. if (is_vid_exactmatch(vlan_tci_mask) &&
  349. ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
  350. is_vlan_pcp_zero(vlan_tci)) ||
  351. is_vlan_pcp_wildcarded(vlan_tci_mask)))
  352. return true;
  353. return false;
  354. }
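/* Editorial note (illustrative example, not in the original source):
 * with masks/TCIs given in host byte order (after ntohs()), a mask of
 * 0x0fff (VID exact, PCP wildcarded) is allowed for any TCI, a mask of
 * 0xffff is allowed only when the TCI's PCP bits are zero, and a mask
 * of 0xe0ff is rejected because the VID is not an exact match.
 */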
  355. static bool bits_set(void *key, int len)
  356. {
  357. const u8 *p = key;
  358. int i;
  359. for (i = 0; i < len; i++)
  360. if (p[i] != 0)
  361. return true;
  362. return false;
  363. }
  364. static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
  365. __le16 ref_flow_handle,
  366. __le32 tunnel_handle, __le16 *flow_handle)
  367. {
  368. struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  369. struct bnxt_tc_actions *actions = &flow->actions;
  370. struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
  371. struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
  372. struct hwrm_cfa_flow_alloc_input req = { 0 };
  373. u16 flow_flags = 0, action_flags = 0;
  374. int rc;
  375. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);
  376. req.src_fid = cpu_to_le16(flow->src_fid);
  377. req.ref_flow_handle = ref_flow_handle;
  378. if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
  379. actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
  380. req.tunnel_handle = tunnel_handle;
  381. flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
  382. action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
  383. }
  384. req.ethertype = flow->l2_key.ether_type;
  385. req.ip_proto = flow->l4_key.ip_proto;
  386. if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
  387. memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
  388. memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
  389. }
  390. if (flow->l2_key.num_vlans > 0) {
  391. flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
  392. /* FW expects the inner_vlan_tci value to be set
  393. * in outer_vlan_tci when num_vlans is 1 (which is
  394. * always the case in TC.)
  395. */
  396. req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
  397. }
  398. /* If all IP and L4 fields are wildcarded then this is an L2 flow */
  399. if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
  400. is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
  401. flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
  402. } else {
  403. flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
  404. CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
  405. CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;
  406. if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
  407. req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
  408. req.ip_dst_mask_len =
  409. inet_mask_len(l3_mask->ipv4.daddr.s_addr);
  410. req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
  411. req.ip_src_mask_len =
  412. inet_mask_len(l3_mask->ipv4.saddr.s_addr);
  413. } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
  414. memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
  415. sizeof(req.ip_dst));
  416. req.ip_dst_mask_len =
  417. ipv6_mask_len(&l3_mask->ipv6.daddr);
  418. memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
  419. sizeof(req.ip_src));
  420. req.ip_src_mask_len =
  421. ipv6_mask_len(&l3_mask->ipv6.saddr);
  422. }
  423. }
  424. if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
  425. req.l4_src_port = flow->l4_key.ports.sport;
  426. req.l4_src_port_mask = flow->l4_mask.ports.sport;
  427. req.l4_dst_port = flow->l4_key.ports.dport;
  428. req.l4_dst_port_mask = flow->l4_mask.ports.dport;
  429. } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
  430. /* l4 ports serve as type/code when ip_proto is ICMP */
  431. req.l4_src_port = htons(flow->l4_key.icmp.type);
  432. req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
  433. req.l4_dst_port = htons(flow->l4_key.icmp.code);
  434. req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
  435. }
  436. req.flags = cpu_to_le16(flow_flags);
  437. if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
  438. action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
  439. } else {
  440. if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
  441. action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
  442. req.dst_fid = cpu_to_le16(actions->dst_fid);
  443. }
  444. if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
  445. action_flags |=
  446. CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
  447. req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
  448. req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
  449. memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
  450. memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
  451. }
  452. if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
  453. action_flags |=
  454. CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
   455. /* A rewrite config with tpid = 0 implies VLAN pop */
  456. req.l2_rewrite_vlan_tpid = 0;
  457. memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
  458. memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
  459. }
  460. }
  461. req.action_flags = cpu_to_le16(action_flags);
  462. mutex_lock(&bp->hwrm_cmd_lock);
  463. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  464. if (!rc)
  465. *flow_handle = resp->flow_handle;
  466. mutex_unlock(&bp->hwrm_cmd_lock);
  467. if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
  468. rc = -ENOSPC;
  469. else if (rc)
  470. rc = -EIO;
  471. return rc;
  472. }
  473. static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
  474. struct bnxt_tc_flow *flow,
  475. struct bnxt_tc_l2_key *l2_info,
  476. __le32 ref_decap_handle,
  477. __le32 *decap_filter_handle)
  478. {
  479. struct hwrm_cfa_decap_filter_alloc_output *resp =
  480. bp->hwrm_cmd_resp_addr;
  481. struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
  482. struct ip_tunnel_key *tun_key = &flow->tun_key;
  483. u32 enables = 0;
  484. int rc;
  485. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);
  486. req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
  487. enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
  488. CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
  489. req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
  490. req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;
  491. if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
  492. enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
   493. /* tunnel_id is wrongly defined in the HSI definition as __le32 */
  494. req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
  495. }
  496. if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
  497. enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
  498. ether_addr_copy(req.dst_macaddr, l2_info->dmac);
  499. }
  500. if (l2_info->num_vlans) {
  501. enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
  502. req.t_ivlan_vid = l2_info->inner_vlan_tci;
  503. }
  504. enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
  505. req.ethertype = htons(ETH_P_IP);
  506. if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
  507. enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
  508. CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
  509. CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
  510. req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
  511. req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
  512. req.src_ipaddr[0] = tun_key->u.ipv4.src;
  513. }
  514. if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
  515. enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
  516. req.dst_port = tun_key->tp_dst;
  517. }
   518. /* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
  519. * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
  520. */
  521. req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
  522. req.enables = cpu_to_le32(enables);
  523. mutex_lock(&bp->hwrm_cmd_lock);
  524. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  525. if (!rc)
  526. *decap_filter_handle = resp->decap_filter_id;
  527. else
  528. netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
  529. mutex_unlock(&bp->hwrm_cmd_lock);
  530. if (rc)
  531. rc = -EIO;
  532. return rc;
  533. }
  534. static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
  535. __le32 decap_filter_handle)
  536. {
  537. struct hwrm_cfa_decap_filter_free_input req = { 0 };
  538. int rc;
  539. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
  540. req.decap_filter_id = decap_filter_handle;
  541. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  542. if (rc)
  543. netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
  544. if (rc)
  545. rc = -EIO;
  546. return rc;
  547. }
  548. static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
  549. struct ip_tunnel_key *encap_key,
  550. struct bnxt_tc_l2_key *l2_info,
  551. __le32 *encap_record_handle)
  552. {
  553. struct hwrm_cfa_encap_record_alloc_output *resp =
  554. bp->hwrm_cmd_resp_addr;
  555. struct hwrm_cfa_encap_record_alloc_input req = { 0 };
  556. struct hwrm_cfa_encap_data_vxlan *encap =
  557. (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
  558. struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
  559. (struct hwrm_vxlan_ipv4_hdr *)encap->l3;
  560. int rc;
  561. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);
  562. req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
  563. ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
  564. ether_addr_copy(encap->src_mac_addr, l2_info->smac);
  565. if (l2_info->num_vlans) {
  566. encap->num_vlan_tags = l2_info->num_vlans;
  567. encap->ovlan_tci = l2_info->inner_vlan_tci;
  568. encap->ovlan_tpid = l2_info->inner_vlan_tpid;
  569. }
  570. encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
  571. encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
  572. encap_ipv4->ttl = encap_key->ttl;
  573. encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
  574. encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
  575. encap_ipv4->protocol = IPPROTO_UDP;
  576. encap->dst_port = encap_key->tp_dst;
  577. encap->vni = tunnel_id_to_key32(encap_key->tun_id);
  578. mutex_lock(&bp->hwrm_cmd_lock);
  579. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  580. if (!rc)
  581. *encap_record_handle = resp->encap_record_id;
  582. else
  583. netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
  584. mutex_unlock(&bp->hwrm_cmd_lock);
  585. if (rc)
  586. rc = -EIO;
  587. return rc;
  588. }
  589. static int hwrm_cfa_encap_record_free(struct bnxt *bp,
  590. __le32 encap_record_handle)
  591. {
  592. struct hwrm_cfa_encap_record_free_input req = { 0 };
  593. int rc;
  594. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
  595. req.encap_record_id = encap_record_handle;
  596. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  597. if (rc)
  598. netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
  599. if (rc)
  600. rc = -EIO;
  601. return rc;
  602. }
  603. static int bnxt_tc_put_l2_node(struct bnxt *bp,
  604. struct bnxt_tc_flow_node *flow_node)
  605. {
  606. struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
  607. struct bnxt_tc_info *tc_info = bp->tc_info;
  608. int rc;
  609. /* remove flow_node from the L2 shared flow list */
  610. list_del(&flow_node->l2_list_node);
  611. if (--l2_node->refcount == 0) {
  612. rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
  613. tc_info->l2_ht_params);
  614. if (rc)
  615. netdev_err(bp->dev,
  616. "Error: %s: rhashtable_remove_fast: %d",
  617. __func__, rc);
  618. kfree_rcu(l2_node, rcu);
  619. }
  620. return 0;
  621. }
  622. static struct bnxt_tc_l2_node *
  623. bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
  624. struct rhashtable_params ht_params,
  625. struct bnxt_tc_l2_key *l2_key)
  626. {
  627. struct bnxt_tc_l2_node *l2_node;
  628. int rc;
  629. l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
  630. if (!l2_node) {
  631. l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
  632. if (!l2_node) {
  633. rc = -ENOMEM;
  634. return NULL;
  635. }
  636. l2_node->key = *l2_key;
  637. rc = rhashtable_insert_fast(l2_table, &l2_node->node,
  638. ht_params);
  639. if (rc) {
  640. kfree_rcu(l2_node, rcu);
  641. netdev_err(bp->dev,
  642. "Error: %s: rhashtable_insert_fast: %d",
  643. __func__, rc);
  644. return NULL;
  645. }
  646. INIT_LIST_HEAD(&l2_node->common_l2_flows);
  647. }
  648. return l2_node;
  649. }
  650. /* Get the ref_flow_handle for a flow by checking if there are any other
  651. * flows that share the same L2 key as this flow.
  652. */
  653. static int
  654. bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
  655. struct bnxt_tc_flow_node *flow_node,
  656. __le16 *ref_flow_handle)
  657. {
  658. struct bnxt_tc_info *tc_info = bp->tc_info;
  659. struct bnxt_tc_flow_node *ref_flow_node;
  660. struct bnxt_tc_l2_node *l2_node;
  661. l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
  662. tc_info->l2_ht_params,
  663. &flow->l2_key);
  664. if (!l2_node)
  665. return -1;
   666. /* If any other flow is using this l2_node, use its flow_handle
  667. * as the ref_flow_handle
  668. */
  669. if (l2_node->refcount > 0) {
  670. ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
  671. struct bnxt_tc_flow_node,
  672. l2_list_node);
  673. *ref_flow_handle = ref_flow_node->flow_handle;
  674. } else {
  675. *ref_flow_handle = cpu_to_le16(0xffff);
  676. }
  677. /* Insert the l2_node into the flow_node so that subsequent flows
  678. * with a matching l2 key can use the flow_handle of this flow
  679. * as their ref_flow_handle
  680. */
  681. flow_node->l2_node = l2_node;
  682. list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
  683. l2_node->refcount++;
  684. return 0;
  685. }
  686. /* After the flow parsing is done, this routine is used for checking
  687. * if there are any aspects of the flow that prevent it from being
  688. * offloaded.
  689. */
  690. static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
  691. {
  692. /* If L4 ports are specified then ip_proto must be TCP or UDP */
  693. if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
  694. (flow->l4_key.ip_proto != IPPROTO_TCP &&
  695. flow->l4_key.ip_proto != IPPROTO_UDP)) {
  696. netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
  697. flow->l4_key.ip_proto);
  698. return false;
  699. }
  700. /* Currently source/dest MAC cannot be partial wildcard */
  701. if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
  702. !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
  703. netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
  704. return false;
  705. }
  706. if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
  707. !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
  708. netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
  709. return false;
  710. }
  711. /* Currently VLAN fields cannot be partial wildcard */
  712. if (bits_set(&flow->l2_key.inner_vlan_tci,
  713. sizeof(flow->l2_key.inner_vlan_tci)) &&
  714. !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
  715. flow->l2_key.inner_vlan_tci)) {
  716. netdev_info(bp->dev, "Unsupported VLAN TCI\n");
  717. return false;
  718. }
  719. if (bits_set(&flow->l2_key.inner_vlan_tpid,
  720. sizeof(flow->l2_key.inner_vlan_tpid)) &&
  721. !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
  722. sizeof(flow->l2_mask.inner_vlan_tpid))) {
  723. netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
  724. return false;
  725. }
  726. /* Currently Ethertype must be set */
  727. if (!is_exactmatch(&flow->l2_mask.ether_type,
  728. sizeof(flow->l2_mask.ether_type))) {
  729. netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
  730. return false;
  731. }
  732. return true;
  733. }
  734. /* Returns the final refcount of the node on success
   735. * or a negative error code on failure
  736. */
  737. static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
  738. struct rhashtable *tunnel_table,
  739. struct rhashtable_params *ht_params,
  740. struct bnxt_tc_tunnel_node *tunnel_node)
  741. {
  742. int rc;
  743. if (--tunnel_node->refcount == 0) {
  744. rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
  745. *ht_params);
  746. if (rc) {
  747. netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
  748. rc = -1;
  749. }
  750. kfree_rcu(tunnel_node, rcu);
  751. return rc;
  752. } else {
  753. return tunnel_node->refcount;
  754. }
  755. }
  756. /* Get (or add) either encap or decap tunnel node from/to the supplied
  757. * hash table.
  758. */
  759. static struct bnxt_tc_tunnel_node *
  760. bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
  761. struct rhashtable_params *ht_params,
  762. struct ip_tunnel_key *tun_key)
  763. {
  764. struct bnxt_tc_tunnel_node *tunnel_node;
  765. int rc;
  766. tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
  767. if (!tunnel_node) {
  768. tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
  769. if (!tunnel_node) {
  770. rc = -ENOMEM;
  771. goto err;
  772. }
  773. tunnel_node->key = *tun_key;
  774. tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
  775. rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
  776. *ht_params);
  777. if (rc) {
  778. kfree_rcu(tunnel_node, rcu);
  779. goto err;
  780. }
  781. }
  782. tunnel_node->refcount++;
  783. return tunnel_node;
  784. err:
  785. netdev_info(bp->dev, "error rc=%d", rc);
  786. return NULL;
  787. }
  788. static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
  789. struct bnxt_tc_flow *flow,
  790. struct bnxt_tc_l2_key *l2_key,
  791. struct bnxt_tc_flow_node *flow_node,
  792. __le32 *ref_decap_handle)
  793. {
  794. struct bnxt_tc_info *tc_info = bp->tc_info;
  795. struct bnxt_tc_flow_node *ref_flow_node;
  796. struct bnxt_tc_l2_node *decap_l2_node;
  797. decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
  798. tc_info->decap_l2_ht_params,
  799. l2_key);
  800. if (!decap_l2_node)
  801. return -1;
   802. /* If any other flow is using this decap_l2_node, use its decap_handle
  803. * as the ref_decap_handle
  804. */
  805. if (decap_l2_node->refcount > 0) {
  806. ref_flow_node =
  807. list_first_entry(&decap_l2_node->common_l2_flows,
  808. struct bnxt_tc_flow_node,
  809. decap_l2_list_node);
  810. *ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
  811. } else {
  812. *ref_decap_handle = INVALID_TUNNEL_HANDLE;
  813. }
  814. /* Insert the l2_node into the flow_node so that subsequent flows
  815. * with a matching decap l2 key can use the decap_filter_handle of
  816. * this flow as their ref_decap_handle
  817. */
  818. flow_node->decap_l2_node = decap_l2_node;
  819. list_add(&flow_node->decap_l2_list_node,
  820. &decap_l2_node->common_l2_flows);
  821. decap_l2_node->refcount++;
  822. return 0;
  823. }
  824. static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
  825. struct bnxt_tc_flow_node *flow_node)
  826. {
  827. struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
  828. struct bnxt_tc_info *tc_info = bp->tc_info;
  829. int rc;
  830. /* remove flow_node from the decap L2 sharing flow list */
  831. list_del(&flow_node->decap_l2_list_node);
  832. if (--decap_l2_node->refcount == 0) {
  833. rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
  834. &decap_l2_node->node,
  835. tc_info->decap_l2_ht_params);
  836. if (rc)
  837. netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
  838. kfree_rcu(decap_l2_node, rcu);
  839. }
  840. }
  841. static void bnxt_tc_put_decap_handle(struct bnxt *bp,
  842. struct bnxt_tc_flow_node *flow_node)
  843. {
  844. __le32 decap_handle = flow_node->decap_node->tunnel_handle;
  845. struct bnxt_tc_info *tc_info = bp->tc_info;
  846. int rc;
  847. if (flow_node->decap_l2_node)
  848. bnxt_tc_put_decap_l2_node(bp, flow_node);
  849. rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
  850. &tc_info->decap_ht_params,
  851. flow_node->decap_node);
  852. if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
  853. hwrm_cfa_decap_filter_free(bp, decap_handle);
  854. }
  855. static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
  856. struct ip_tunnel_key *tun_key,
  857. struct bnxt_tc_l2_key *l2_info)
  858. {
  859. #ifdef CONFIG_INET
  860. struct net_device *real_dst_dev = bp->dev;
  861. struct flowi4 flow = { {0} };
  862. struct net_device *dst_dev;
  863. struct neighbour *nbr;
  864. struct rtable *rt;
  865. int rc;
  866. flow.flowi4_proto = IPPROTO_UDP;
  867. flow.fl4_dport = tun_key->tp_dst;
  868. flow.daddr = tun_key->u.ipv4.dst;
  869. rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
  870. if (IS_ERR(rt)) {
  871. netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
  872. return -EOPNOTSUPP;
  873. }
  874. /* The route must either point to the real_dst_dev or a dst_dev that
  875. * uses the real_dst_dev.
  876. */
  877. dst_dev = rt->dst.dev;
  878. if (is_vlan_dev(dst_dev)) {
  879. #if IS_ENABLED(CONFIG_VLAN_8021Q)
  880. struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);
  881. if (vlan->real_dev != real_dst_dev) {
  882. netdev_info(bp->dev,
  883. "dst_dev(%s) doesn't use PF-if(%s)",
  884. netdev_name(dst_dev),
  885. netdev_name(real_dst_dev));
  886. rc = -EOPNOTSUPP;
  887. goto put_rt;
  888. }
  889. l2_info->inner_vlan_tci = htons(vlan->vlan_id);
  890. l2_info->inner_vlan_tpid = vlan->vlan_proto;
  891. l2_info->num_vlans = 1;
  892. #endif
  893. } else if (dst_dev != real_dst_dev) {
  894. netdev_info(bp->dev,
  895. "dst_dev(%s) for %pI4b is not PF-if(%s)",
  896. netdev_name(dst_dev), &flow.daddr,
  897. netdev_name(real_dst_dev));
  898. rc = -EOPNOTSUPP;
  899. goto put_rt;
  900. }
  901. nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
  902. if (!nbr) {
  903. netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
  904. &flow.daddr);
  905. rc = -EOPNOTSUPP;
  906. goto put_rt;
  907. }
  908. tun_key->u.ipv4.src = flow.saddr;
  909. tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
  910. neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
  911. ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
  912. neigh_release(nbr);
  913. ip_rt_put(rt);
  914. return 0;
  915. put_rt:
  916. ip_rt_put(rt);
  917. return rc;
  918. #else
  919. return -EOPNOTSUPP;
  920. #endif
  921. }
  922. static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
  923. struct bnxt_tc_flow_node *flow_node,
  924. __le32 *decap_filter_handle)
  925. {
  926. struct ip_tunnel_key *decap_key = &flow->tun_key;
  927. struct bnxt_tc_info *tc_info = bp->tc_info;
  928. struct bnxt_tc_l2_key l2_info = { {0} };
  929. struct bnxt_tc_tunnel_node *decap_node;
  930. struct ip_tunnel_key tun_key = { 0 };
  931. struct bnxt_tc_l2_key *decap_l2_info;
  932. __le32 ref_decap_handle;
  933. int rc;
  934. /* Check if there's another flow using the same tunnel decap.
  935. * If not, add this tunnel to the table and resolve the other
   936. * tunnel header fields. Ignore src_port in the tunnel_key,
  937. * since it is not required for decap filters.
  938. */
  939. decap_key->tp_src = 0;
  940. decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
  941. &tc_info->decap_ht_params,
  942. decap_key);
  943. if (!decap_node)
  944. return -ENOMEM;
  945. flow_node->decap_node = decap_node;
  946. if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
  947. goto done;
  948. /* Resolve the L2 fields for tunnel decap
   949. * Resolve the route for the remote VTEP (saddr) of the decap key
   950. * Find its next-hop MAC address
  951. */
  952. tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
  953. tun_key.tp_dst = flow->tun_key.tp_dst;
  954. rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
  955. if (rc)
  956. goto put_decap;
  957. decap_l2_info = &decap_node->l2_info;
  958. /* decap smac is wildcarded */
  959. ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
  960. if (l2_info.num_vlans) {
  961. decap_l2_info->num_vlans = l2_info.num_vlans;
  962. decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
  963. decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
  964. }
  965. flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;
  966. /* For getting a decap_filter_handle we first need to check if
  967. * there are any other decap flows that share the same tunnel L2
  968. * key and if so, pass that flow's decap_filter_handle as the
  969. * ref_decap_handle for this flow.
  970. */
  971. rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
  972. &ref_decap_handle);
  973. if (rc)
  974. goto put_decap;
  975. /* Issue the hwrm cmd to allocate a decap filter handle */
  976. rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
  977. ref_decap_handle,
  978. &decap_node->tunnel_handle);
  979. if (rc)
  980. goto put_decap_l2;
  981. done:
  982. *decap_filter_handle = decap_node->tunnel_handle;
  983. return 0;
  984. put_decap_l2:
  985. bnxt_tc_put_decap_l2_node(bp, flow_node);
  986. put_decap:
  987. bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
  988. &tc_info->decap_ht_params,
  989. flow_node->decap_node);
  990. return rc;
  991. }
  992. static void bnxt_tc_put_encap_handle(struct bnxt *bp,
  993. struct bnxt_tc_tunnel_node *encap_node)
  994. {
  995. __le32 encap_handle = encap_node->tunnel_handle;
  996. struct bnxt_tc_info *tc_info = bp->tc_info;
  997. int rc;
  998. rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
  999. &tc_info->encap_ht_params, encap_node);
  1000. if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
  1001. hwrm_cfa_encap_record_free(bp, encap_handle);
  1002. }
  1003. /* Lookup the tunnel encap table and check if there's an encap_handle
  1004. * alloc'd already.
  1005. * If not, query L2 info via a route lookup and issue an encap_record_alloc
  1006. * cmd to FW.
  1007. */
  1008. static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
  1009. struct bnxt_tc_flow_node *flow_node,
  1010. __le32 *encap_handle)
  1011. {
  1012. struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
  1013. struct bnxt_tc_info *tc_info = bp->tc_info;
  1014. struct bnxt_tc_tunnel_node *encap_node;
  1015. int rc;
  1016. /* Check if there's another flow using the same tunnel encap.
  1017. * If not, add this tunnel to the table and resolve the other
   1018. * tunnel header fields
  1019. */
  1020. encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
  1021. &tc_info->encap_ht_params,
  1022. encap_key);
  1023. if (!encap_node)
  1024. return -ENOMEM;
  1025. flow_node->encap_node = encap_node;
  1026. if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
  1027. goto done;
  1028. rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
  1029. if (rc)
  1030. goto put_encap;
  1031. /* Allocate a new tunnel encap record */
  1032. rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
  1033. &encap_node->tunnel_handle);
  1034. if (rc)
  1035. goto put_encap;
  1036. done:
  1037. *encap_handle = encap_node->tunnel_handle;
  1038. return 0;
  1039. put_encap:
  1040. bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
  1041. &tc_info->encap_ht_params, encap_node);
  1042. return rc;
  1043. }
  1044. static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
  1045. struct bnxt_tc_flow *flow,
  1046. struct bnxt_tc_flow_node *flow_node)
  1047. {
  1048. if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
  1049. bnxt_tc_put_decap_handle(bp, flow_node);
  1050. else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
  1051. bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
  1052. }
  1053. static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
  1054. struct bnxt_tc_flow *flow,
  1055. struct bnxt_tc_flow_node *flow_node,
  1056. __le32 *tunnel_handle)
  1057. {
  1058. if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
  1059. return bnxt_tc_get_decap_handle(bp, flow, flow_node,
  1060. tunnel_handle);
  1061. else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
  1062. return bnxt_tc_get_encap_handle(bp, flow, flow_node,
  1063. tunnel_handle);
  1064. else
  1065. return 0;
  1066. }
  1067. static int __bnxt_tc_del_flow(struct bnxt *bp,
  1068. struct bnxt_tc_flow_node *flow_node)
  1069. {
  1070. struct bnxt_tc_info *tc_info = bp->tc_info;
  1071. int rc;
  1072. /* send HWRM cmd to free the flow-id */
  1073. bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);
  1074. mutex_lock(&tc_info->lock);
  1075. /* release references to any tunnel encap/decap nodes */
  1076. bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);
  1077. /* release reference to l2 node */
  1078. bnxt_tc_put_l2_node(bp, flow_node);
  1079. mutex_unlock(&tc_info->lock);
  1080. rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
  1081. tc_info->flow_ht_params);
  1082. if (rc)
  1083. netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
  1084. __func__, rc);
  1085. kfree_rcu(flow_node, rcu);
  1086. return 0;
  1087. }
  1088. static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
  1089. u16 src_fid)
  1090. {
  1091. if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
  1092. flow->src_fid = bp->pf.fw_fid;
  1093. else
  1094. flow->src_fid = src_fid;
  1095. }
  1096. /* Add a new flow or replace an existing flow.
  1097. * Notes on locking:
  1098. * There are essentially two critical sections here.
  1099. * 1. while adding a new flow
  1100. * a) lookup l2-key
  1101. * b) issue HWRM cmd and get flow_handle
  1102. * c) link l2-key with flow
  1103. * 2. while deleting a flow
   1104. * a) unlink l2-key from flow
  1105. * A lock is needed to protect these two critical sections.
  1106. *
  1107. * The hash-tables are already protected by the rhashtable API.
  1108. */
  1109. static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
  1110. struct tc_cls_flower_offload *tc_flow_cmd)
  1111. {
  1112. struct bnxt_tc_flow_node *new_node, *old_node;
  1113. struct bnxt_tc_info *tc_info = bp->tc_info;
  1114. struct bnxt_tc_flow *flow;
  1115. __le32 tunnel_handle = 0;
  1116. __le16 ref_flow_handle;
  1117. int rc;
   1118. /* allocate memory for the new flow and its node */
  1119. new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
  1120. if (!new_node) {
  1121. rc = -ENOMEM;
  1122. goto done;
  1123. }
  1124. new_node->cookie = tc_flow_cmd->cookie;
  1125. flow = &new_node->flow;
  1126. rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
  1127. if (rc)
  1128. goto free_node;
  1129. bnxt_tc_set_src_fid(bp, flow, src_fid);
  1130. if (!bnxt_tc_can_offload(bp, flow)) {
  1131. rc = -ENOSPC;
  1132. goto free_node;
  1133. }
  1134. /* If a flow exists with the same cookie, delete it */
  1135. old_node = rhashtable_lookup_fast(&tc_info->flow_table,
  1136. &tc_flow_cmd->cookie,
  1137. tc_info->flow_ht_params);
  1138. if (old_node)
  1139. __bnxt_tc_del_flow(bp, old_node);
  1140. /* Check if the L2 part of the flow has been offloaded already.
   1141. * If so, bump up its refcount and get its reference handle.
  1142. */
  1143. mutex_lock(&tc_info->lock);
  1144. rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
  1145. if (rc)
  1146. goto unlock;
  1147. /* If the flow involves tunnel encap/decap, get tunnel_handle */
  1148. rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
  1149. if (rc)
  1150. goto put_l2;
  1151. /* send HWRM cmd to alloc the flow */
  1152. rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
  1153. tunnel_handle, &new_node->flow_handle);
  1154. if (rc)
  1155. goto put_tunnel;
  1156. flow->lastused = jiffies;
  1157. spin_lock_init(&flow->stats_lock);
  1158. /* add new flow to flow-table */
  1159. rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
  1160. tc_info->flow_ht_params);
  1161. if (rc)
  1162. goto hwrm_flow_free;
  1163. mutex_unlock(&tc_info->lock);
  1164. return 0;
  1165. hwrm_flow_free:
  1166. bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
  1167. put_tunnel:
  1168. bnxt_tc_put_tunnel_handle(bp, flow, new_node);
  1169. put_l2:
  1170. bnxt_tc_put_l2_node(bp, new_node);
  1171. unlock:
  1172. mutex_unlock(&tc_info->lock);
  1173. free_node:
  1174. kfree_rcu(new_node, rcu);
  1175. done:
  1176. netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
  1177. __func__, tc_flow_cmd->cookie, rc);
  1178. return rc;
  1179. }
  1180. static int bnxt_tc_del_flow(struct bnxt *bp,
  1181. struct tc_cls_flower_offload *tc_flow_cmd)
  1182. {
  1183. struct bnxt_tc_info *tc_info = bp->tc_info;
  1184. struct bnxt_tc_flow_node *flow_node;
  1185. flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
  1186. &tc_flow_cmd->cookie,
  1187. tc_info->flow_ht_params);
  1188. if (!flow_node)
  1189. return -EINVAL;
  1190. return __bnxt_tc_del_flow(bp, flow_node);
  1191. }
  1192. static int bnxt_tc_get_flow_stats(struct bnxt *bp,
  1193. struct tc_cls_flower_offload *tc_flow_cmd)
  1194. {
  1195. struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
  1196. struct bnxt_tc_info *tc_info = bp->tc_info;
  1197. struct bnxt_tc_flow_node *flow_node;
  1198. struct bnxt_tc_flow *flow;
  1199. unsigned long lastused;
  1200. flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
  1201. &tc_flow_cmd->cookie,
  1202. tc_info->flow_ht_params);
  1203. if (!flow_node)
  1204. return -1;
  1205. flow = &flow_node->flow;
  1206. curr_stats = &flow->stats;
  1207. prev_stats = &flow->prev_stats;
  1208. spin_lock(&flow->stats_lock);
  1209. stats.packets = curr_stats->packets - prev_stats->packets;
  1210. stats.bytes = curr_stats->bytes - prev_stats->bytes;
  1211. *prev_stats = *curr_stats;
  1212. lastused = flow->lastused;
  1213. spin_unlock(&flow->stats_lock);
  1214. tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
  1215. lastused);
  1216. return 0;
  1217. }
  1218. static int
  1219. bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
  1220. struct bnxt_tc_stats_batch stats_batch[])
  1221. {
  1222. struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
  1223. struct hwrm_cfa_flow_stats_input req = { 0 };
  1224. __le16 *req_flow_handles = &req.flow_handle_0;
  1225. int rc, i;
  1226. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
  1227. req.num_flows = cpu_to_le16(num_flows);
  1228. for (i = 0; i < num_flows; i++) {
  1229. struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
  1230. req_flow_handles[i] = flow_node->flow_handle;
  1231. }
  1232. mutex_lock(&bp->hwrm_cmd_lock);
  1233. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  1234. if (!rc) {
  1235. __le64 *resp_packets = &resp->packet_0;
  1236. __le64 *resp_bytes = &resp->byte_0;
  1237. for (i = 0; i < num_flows; i++) {
  1238. stats_batch[i].hw_stats.packets =
  1239. le64_to_cpu(resp_packets[i]);
  1240. stats_batch[i].hw_stats.bytes =
  1241. le64_to_cpu(resp_bytes[i]);
  1242. }
  1243. } else {
  1244. netdev_info(bp->dev, "error rc=%d", rc);
  1245. }
  1246. mutex_unlock(&bp->hwrm_cmd_lock);
  1247. if (rc)
  1248. rc = -EIO;
  1249. return rc;
  1250. }
  1251. /* Add val to accum while handling a possible wraparound
   1252. * of val. Even though val is of type u64, its actual width
   1253. * is denoted by mask and will wrap around beyond that width.
  1254. */
  1255. static void accumulate_val(u64 *accum, u64 val, u64 mask)
  1256. {
  1257. #define low_bits(x, mask) ((x) & (mask))
  1258. #define high_bits(x, mask) ((x) & ~(mask))
  1259. bool wrapped = val < low_bits(*accum, mask);
  1260. *accum = high_bits(*accum, mask) + val;
  1261. if (wrapped)
  1262. *accum += (mask + 1);
  1263. }
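/* Editorial note (illustrative example, not in the original source):
 * for a 28-bit counter, mask == 0x0fffffff. If *accum is 0x0ffffff0
 * and the next hardware reading val is 0x5, val is smaller than the
 * low bits of *accum, so the counter must have wrapped; the result is
 * 0x5 + (mask + 1) == 0x10000005, i.e. the true 64-bit total.
 */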
   1264. /* The HW counters' width is much less than 64 bits.
  1265. * Handle possible wrap-around while updating the stat counters
  1266. */
  1267. static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
  1268. struct bnxt_tc_flow_stats *acc_stats,
  1269. struct bnxt_tc_flow_stats *hw_stats)
  1270. {
  1271. accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
  1272. accumulate_val(&acc_stats->packets, hw_stats->packets,
  1273. tc_info->packets_mask);
  1274. }
  1275. static int
  1276. bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
  1277. struct bnxt_tc_stats_batch stats_batch[])
  1278. {
  1279. struct bnxt_tc_info *tc_info = bp->tc_info;
  1280. int rc, i;
  1281. rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
  1282. if (rc)
  1283. return rc;
  1284. for (i = 0; i < num_flows; i++) {
  1285. struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
  1286. struct bnxt_tc_flow *flow = &flow_node->flow;
  1287. spin_lock(&flow->stats_lock);
  1288. bnxt_flow_stats_accum(tc_info, &flow->stats,
  1289. &stats_batch[i].hw_stats);
  1290. if (flow->stats.packets != flow->prev_stats.packets)
  1291. flow->lastused = jiffies;
  1292. spin_unlock(&flow->stats_lock);
  1293. }
  1294. return 0;
  1295. }
  1296. static int
  1297. bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
  1298. struct bnxt_tc_stats_batch stats_batch[],
  1299. int *num_flows)
  1300. {
  1301. struct bnxt_tc_info *tc_info = bp->tc_info;
  1302. struct rhashtable_iter *iter = &tc_info->iter;
  1303. void *flow_node;
  1304. int rc, i;
  1305. rhashtable_walk_start(iter);
  1306. rc = 0;
  1307. for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
  1308. flow_node = rhashtable_walk_next(iter);
  1309. if (IS_ERR(flow_node)) {
  1310. i = 0;
  1311. if (PTR_ERR(flow_node) == -EAGAIN) {
  1312. continue;
  1313. } else {
  1314. rc = PTR_ERR(flow_node);
  1315. goto done;
  1316. }
  1317. }
  1318. /* No more flows */
  1319. if (!flow_node)
  1320. goto done;
  1321. stats_batch[i].flow_node = flow_node;
  1322. }
  1323. done:
  1324. rhashtable_walk_stop(iter);
  1325. *num_flows = i;
  1326. return rc;
  1327. }
  1328. void bnxt_tc_flow_stats_work(struct bnxt *bp)
  1329. {
  1330. struct bnxt_tc_info *tc_info = bp->tc_info;
  1331. int num_flows, rc;
  1332. num_flows = atomic_read(&tc_info->flow_table.nelems);
  1333. if (!num_flows)
  1334. return;
  1335. rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);
  1336. for (;;) {
  1337. rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
  1338. &num_flows);
  1339. if (rc) {
  1340. if (rc == -EAGAIN)
  1341. continue;
  1342. break;
  1343. }
  1344. if (!num_flows)
  1345. break;
  1346. bnxt_tc_flow_stats_batch_update(bp, num_flows,
  1347. tc_info->stats_batch);
  1348. }
  1349. rhashtable_walk_exit(&tc_info->iter);
  1350. }
  1351. int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
  1352. struct tc_cls_flower_offload *cls_flower)
  1353. {
  1354. switch (cls_flower->command) {
  1355. case TC_CLSFLOWER_REPLACE:
  1356. return bnxt_tc_add_flow(bp, src_fid, cls_flower);
  1357. case TC_CLSFLOWER_DESTROY:
  1358. return bnxt_tc_del_flow(bp, cls_flower);
  1359. case TC_CLSFLOWER_STATS:
  1360. return bnxt_tc_get_flow_stats(bp, cls_flower);
  1361. default:
  1362. return -EOPNOTSUPP;
  1363. }
  1364. }
  1365. static const struct rhashtable_params bnxt_tc_flow_ht_params = {
  1366. .head_offset = offsetof(struct bnxt_tc_flow_node, node),
  1367. .key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
  1368. .key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
  1369. .automatic_shrinking = true
  1370. };
  1371. static const struct rhashtable_params bnxt_tc_l2_ht_params = {
  1372. .head_offset = offsetof(struct bnxt_tc_l2_node, node),
  1373. .key_offset = offsetof(struct bnxt_tc_l2_node, key),
  1374. .key_len = BNXT_TC_L2_KEY_LEN,
  1375. .automatic_shrinking = true
  1376. };
  1377. static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
  1378. .head_offset = offsetof(struct bnxt_tc_l2_node, node),
  1379. .key_offset = offsetof(struct bnxt_tc_l2_node, key),
  1380. .key_len = BNXT_TC_L2_KEY_LEN,
  1381. .automatic_shrinking = true
  1382. };
  1383. static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
  1384. .head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
  1385. .key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
  1386. .key_len = sizeof(struct ip_tunnel_key),
  1387. .automatic_shrinking = true
  1388. };
  1389. /* convert counter width in bits to a mask */
  1390. #define mask(width) ((u64)~0 >> (64 - (width)))
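/* Editorial note (illustrative example, not in the original source):
 * mask(28) == (u64)~0 >> 36 == 0x0fffffff and mask(36) == 0xfffffffff,
 * matching the 28-bit packet and 36-bit byte counter widths programmed
 * below in bnxt_init_tc().
 */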
  1391. int bnxt_init_tc(struct bnxt *bp)
  1392. {
  1393. struct bnxt_tc_info *tc_info;
  1394. int rc;
  1395. if (bp->hwrm_spec_code < 0x10803) {
  1396. netdev_warn(bp->dev,
  1397. "Firmware does not support TC flower offload.\n");
  1398. return -ENOTSUPP;
  1399. }
  1400. tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
  1401. if (!tc_info)
  1402. return -ENOMEM;
  1403. mutex_init(&tc_info->lock);
  1404. /* Counter widths are programmed by FW */
  1405. tc_info->bytes_mask = mask(36);
  1406. tc_info->packets_mask = mask(28);
  1407. tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
  1408. rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
  1409. if (rc)
  1410. goto free_tc_info;
  1411. tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
  1412. rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
  1413. if (rc)
  1414. goto destroy_flow_table;
  1415. tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
  1416. rc = rhashtable_init(&tc_info->decap_l2_table,
  1417. &tc_info->decap_l2_ht_params);
  1418. if (rc)
  1419. goto destroy_l2_table;
  1420. tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
  1421. rc = rhashtable_init(&tc_info->decap_table,
  1422. &tc_info->decap_ht_params);
  1423. if (rc)
  1424. goto destroy_decap_l2_table;
  1425. tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
  1426. rc = rhashtable_init(&tc_info->encap_table,
  1427. &tc_info->encap_ht_params);
  1428. if (rc)
  1429. goto destroy_decap_table;
  1430. tc_info->enabled = true;
  1431. bp->dev->hw_features |= NETIF_F_HW_TC;
  1432. bp->dev->features |= NETIF_F_HW_TC;
  1433. bp->tc_info = tc_info;
  1434. return 0;
  1435. destroy_decap_table:
  1436. rhashtable_destroy(&tc_info->decap_table);
  1437. destroy_decap_l2_table:
  1438. rhashtable_destroy(&tc_info->decap_l2_table);
  1439. destroy_l2_table:
  1440. rhashtable_destroy(&tc_info->l2_table);
  1441. destroy_flow_table:
  1442. rhashtable_destroy(&tc_info->flow_table);
  1443. free_tc_info:
  1444. kfree(tc_info);
  1445. return rc;
  1446. }
  1447. void bnxt_shutdown_tc(struct bnxt *bp)
  1448. {
  1449. struct bnxt_tc_info *tc_info = bp->tc_info;
  1450. if (!bnxt_tc_flower_enabled(bp))
  1451. return;
  1452. rhashtable_destroy(&tc_info->flow_table);
  1453. rhashtable_destroy(&tc_info->l2_table);
  1454. rhashtable_destroy(&tc_info->decap_l2_table);
  1455. rhashtable_destroy(&tc_info->decap_table);
  1456. rhashtable_destroy(&tc_info->encap_table);
  1457. kfree(tc_info);
  1458. bp->tc_info = NULL;
  1459. }