  1. /*
  2. * This file is part of the Chelsio T4 Ethernet driver for Linux.
  3. *
  4. * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
  5. *
  6. * This software is available to you under a choice of one of two
  7. * licenses. You may choose to be licensed under the terms of the GNU
  8. * General Public License (GPL) Version 2, available from the file
  9. * COPYING in the main directory of this source tree, or the
  10. * OpenIB.org BSD license below:
  11. *
  12. * Redistribution and use in source and binary forms, with or
  13. * without modification, are permitted provided that the following
  14. * conditions are met:
  15. *
  16. * - Redistributions of source code must retain the above
  17. * copyright notice, this list of conditions and the following
  18. * disclaimer.
  19. *
  20. * - Redistributions in binary form must reproduce the above
  21. * copyright notice, this list of conditions and the following
  22. * disclaimer in the documentation and/or other materials
  23. * provided with the distribution.
  24. *
  25. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32. * SOFTWARE.
  33. */
  34. #include <net/tc_act/tc_gact.h>
  35. #include <net/tc_act/tc_mirred.h>
  36. #include "cxgb4.h"
  37. #include "cxgb4_tc_u32_parse.h"
  38. #include "cxgb4_tc_u32.h"
  39. /* Fill ch_filter_specification with parsed match value/mask pair. */
  40. static int fill_match_fields(struct adapter *adap,
  41. struct ch_filter_specification *fs,
  42. struct tc_cls_u32_offload *cls,
  43. const struct cxgb4_match_field *entry,
  44. bool next_header)
  45. {
  46. unsigned int i, j;
  47. u32 val, mask;
  48. int off, err;
  49. bool found;
  50. for (i = 0; i < cls->knode.sel->nkeys; i++) {
  51. off = cls->knode.sel->keys[i].off;
  52. val = cls->knode.sel->keys[i].val;
  53. mask = cls->knode.sel->keys[i].mask;
  54. if (next_header) {
  55. /* For next headers, parse only keys with offmask */
  56. if (!cls->knode.sel->keys[i].offmask)
  57. continue;
  58. } else {
  59. /* For the remaining, parse only keys without offmask */
  60. if (cls->knode.sel->keys[i].offmask)
  61. continue;
  62. }
  63. found = false;
  64. for (j = 0; entry[j].val; j++) {
  65. if (off == entry[j].off) {
  66. found = true;
  67. err = entry[j].val(fs, val, mask);
  68. if (err)
  69. return err;
  70. break;
  71. }
  72. }
  73. if (!found)
  74. return -EINVAL;
  75. }
  76. return 0;
  77. }
  78. /* Fill ch_filter_specification with parsed action. */
  79. static int fill_action_fields(struct adapter *adap,
  80. struct ch_filter_specification *fs,
  81. struct tc_cls_u32_offload *cls)
  82. {
  83. unsigned int num_actions = 0;
  84. const struct tc_action *a;
  85. struct tcf_exts *exts;
  86. int i;
  87. exts = cls->knode.exts;
  88. if (!tcf_exts_has_actions(exts))
  89. return -EINVAL;
  90. tcf_exts_for_each_action(i, a, exts) {
  91. /* Don't allow more than one action per rule. */
  92. if (num_actions)
  93. return -EINVAL;
  94. /* Drop in hardware. */
  95. if (is_tcf_gact_shot(a)) {
  96. fs->action = FILTER_DROP;
  97. num_actions++;
  98. continue;
  99. }
  100. /* Re-direct to specified port in hardware. */
  101. if (is_tcf_mirred_egress_redirect(a)) {
  102. struct net_device *n_dev, *target_dev;
  103. bool found = false;
  104. unsigned int i;
  105. target_dev = tcf_mirred_dev(a);
  106. for_each_port(adap, i) {
  107. n_dev = adap->port[i];
  108. if (target_dev == n_dev) {
  109. fs->action = FILTER_SWITCH;
  110. fs->eport = i;
  111. found = true;
  112. break;
  113. }
  114. }
  115. /* Interface doesn't belong to any port of
  116. * the underlying hardware.
  117. */
  118. if (!found)
  119. return -EINVAL;
  120. num_actions++;
  121. continue;
  122. }
  123. /* Un-supported action. */
  124. return -EINVAL;
  125. }
  126. return 0;
  127. }
  128. int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
  129. {
  130. const struct cxgb4_match_field *start, *link_start = NULL;
  131. struct adapter *adapter = netdev2adap(dev);
  132. __be16 protocol = cls->common.protocol;
  133. struct ch_filter_specification fs;
  134. struct cxgb4_tc_u32_table *t;
  135. struct cxgb4_link *link;
  136. unsigned int filter_id;
  137. u32 uhtid, link_uhtid;
  138. bool is_ipv6 = false;
  139. int ret;
  140. if (!can_tc_u32_offload(dev))
  141. return -EOPNOTSUPP;
  142. if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
  143. return -EOPNOTSUPP;
  144. /* Fetch the location to insert the filter. */
  145. filter_id = cls->knode.handle & 0xFFFFF;
  146. if (filter_id > adapter->tids.nftids) {
  147. dev_err(adapter->pdev_dev,
  148. "Location %d out of range for insertion. Max: %d\n",
  149. filter_id, adapter->tids.nftids);
  150. return -ERANGE;
  151. }
  152. t = adapter->tc_u32;
  153. uhtid = TC_U32_USERHTID(cls->knode.handle);
  154. link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
  155. /* Ensure that uhtid is either root u32 (i.e. 0x800)
  156. * or a a valid linked bucket.
  157. */
  158. if (uhtid != 0x800 && uhtid >= t->size)
  159. return -EINVAL;
  160. /* Ensure link handle uhtid is sane, if specified. */
  161. if (link_uhtid >= t->size)
  162. return -EINVAL;
  163. memset(&fs, 0, sizeof(fs));
  164. if (protocol == htons(ETH_P_IPV6)) {
  165. start = cxgb4_ipv6_fields;
  166. is_ipv6 = true;
  167. } else {
  168. start = cxgb4_ipv4_fields;
  169. is_ipv6 = false;
  170. }
  171. if (uhtid != 0x800) {
  172. /* Link must exist from root node before insertion. */
  173. if (!t->table[uhtid - 1].link_handle)
  174. return -EINVAL;
  175. /* Link must have a valid supported next header. */
  176. link_start = t->table[uhtid - 1].match_field;
  177. if (!link_start)
  178. return -EINVAL;
  179. }
  180. /* Parse links and record them for subsequent jumps to valid
  181. * next headers.
  182. */
  183. if (link_uhtid) {
  184. const struct cxgb4_next_header *next;
  185. bool found = false;
  186. unsigned int i, j;
  187. u32 val, mask;
  188. int off;
  189. if (t->table[link_uhtid - 1].link_handle) {
  190. dev_err(adapter->pdev_dev,
  191. "Link handle exists for: 0x%x\n",
  192. link_uhtid);
  193. return -EINVAL;
  194. }
  195. next = is_ipv6 ? cxgb4_ipv6_jumps : cxgb4_ipv4_jumps;
  196. /* Try to find matches that allow jumps to next header. */
  197. for (i = 0; next[i].jump; i++) {
  198. if (next[i].offoff != cls->knode.sel->offoff ||
  199. next[i].shift != cls->knode.sel->offshift ||
  200. next[i].mask != cls->knode.sel->offmask ||
  201. next[i].offset != cls->knode.sel->off)
  202. continue;
  203. /* Found a possible candidate. Find a key that
  204. * matches the corresponding offset, value, and
  205. * mask to jump to next header.
  206. */
  207. for (j = 0; j < cls->knode.sel->nkeys; j++) {
  208. off = cls->knode.sel->keys[j].off;
  209. val = cls->knode.sel->keys[j].val;
  210. mask = cls->knode.sel->keys[j].mask;
  211. if (next[i].match_off == off &&
  212. next[i].match_val == val &&
  213. next[i].match_mask == mask) {
  214. found = true;
  215. break;
  216. }
  217. }
  218. if (!found)
  219. continue; /* Try next candidate. */
  220. /* Candidate to jump to next header found.
  221. * Translate all keys to internal specification
  222. * and store them in jump table. This spec is copied
  223. * later to set the actual filters.
  224. */
  225. ret = fill_match_fields(adapter, &fs, cls,
  226. start, false);
  227. if (ret)
  228. goto out;
  229. link = &t->table[link_uhtid - 1];
  230. link->match_field = next[i].jump;
  231. link->link_handle = cls->knode.handle;
  232. memcpy(&link->fs, &fs, sizeof(fs));
  233. break;
  234. }
  235. /* No candidate found to jump to next header. */
  236. if (!found)
  237. return -EINVAL;
  238. return 0;
  239. }
  240. /* Fill ch_filter_specification match fields to be shipped to hardware.
  241. * Copy the linked spec (if any) first. And then update the spec as
  242. * needed.
  243. */
  244. if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
  245. /* Copy linked ch_filter_specification */
  246. memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
  247. ret = fill_match_fields(adapter, &fs, cls,
  248. link_start, true);
  249. if (ret)
  250. goto out;
  251. }
  252. ret = fill_match_fields(adapter, &fs, cls, start, false);
  253. if (ret)
  254. goto out;
  255. /* Fill ch_filter_specification action fields to be shipped to
  256. * hardware.
  257. */
  258. ret = fill_action_fields(adapter, &fs, cls);
  259. if (ret)
  260. goto out;
  261. /* The filter spec has been completely built from the info
  262. * provided from u32. We now set some default fields in the
  263. * spec for sanity.
  264. */
  265. /* Match only packets coming from the ingress port where this
  266. * filter will be created.
  267. */
  268. fs.val.iport = netdev2pinfo(dev)->port_id;
  269. fs.mask.iport = ~0;
  270. /* Enable filter hit counts. */
  271. fs.hitcnts = 1;
  272. /* Set type of filter - IPv6 or IPv4 */
  273. fs.type = is_ipv6 ? 1 : 0;
  274. /* Set the filter */
  275. ret = cxgb4_set_filter(dev, filter_id, &fs);
  276. if (ret)
  277. goto out;
  278. /* If this is a linked bucket, then set the corresponding
  279. * entry in the bitmap to mark it as belonging to this linked
  280. * bucket.
  281. */
  282. if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
  283. set_bit(filter_id, t->table[uhtid - 1].tid_map);
  284. out:
  285. return ret;
  286. }
  287. int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
  288. {
  289. struct adapter *adapter = netdev2adap(dev);
  290. unsigned int filter_id, max_tids, i, j;
  291. struct cxgb4_link *link = NULL;
  292. struct cxgb4_tc_u32_table *t;
  293. u32 handle, uhtid;
  294. int ret;
  295. if (!can_tc_u32_offload(dev))
  296. return -EOPNOTSUPP;
  297. /* Fetch the location to delete the filter. */
  298. filter_id = cls->knode.handle & 0xFFFFF;
  299. if (filter_id > adapter->tids.nftids) {
  300. dev_err(adapter->pdev_dev,
  301. "Location %d out of range for deletion. Max: %d\n",
  302. filter_id, adapter->tids.nftids);
  303. return -ERANGE;
  304. }
  305. t = adapter->tc_u32;
  306. handle = cls->knode.handle;
  307. uhtid = TC_U32_USERHTID(cls->knode.handle);
  308. /* Ensure that uhtid is either root u32 (i.e. 0x800)
  309. * or a a valid linked bucket.
  310. */
  311. if (uhtid != 0x800 && uhtid >= t->size)
  312. return -EINVAL;
  313. /* Delete the specified filter */
  314. if (uhtid != 0x800) {
  315. link = &t->table[uhtid - 1];
  316. if (!link->link_handle)
  317. return -EINVAL;
  318. if (!test_bit(filter_id, link->tid_map))
  319. return -EINVAL;
  320. }
  321. ret = cxgb4_del_filter(dev, filter_id, NULL);
  322. if (ret)
  323. goto out;
  324. if (link)
  325. clear_bit(filter_id, link->tid_map);
  326. /* If a link is being deleted, then delete all filters
  327. * associated with the link.
  328. */
  329. max_tids = adapter->tids.nftids;
  330. for (i = 0; i < t->size; i++) {
  331. link = &t->table[i];
  332. if (link->link_handle == handle) {
  333. for (j = 0; j < max_tids; j++) {
  334. if (!test_bit(j, link->tid_map))
  335. continue;
  336. ret = __cxgb4_del_filter(dev, j, NULL, NULL);
  337. if (ret)
  338. goto out;
  339. clear_bit(j, link->tid_map);
  340. }
  341. /* Clear the link state */
  342. link->match_field = NULL;
  343. link->link_handle = 0;
  344. memset(&link->fs, 0, sizeof(link->fs));
  345. break;
  346. }
  347. }
  348. out:
  349. return ret;
  350. }
  351. void cxgb4_cleanup_tc_u32(struct adapter *adap)
  352. {
  353. struct cxgb4_tc_u32_table *t;
  354. unsigned int i;
  355. if (!adap->tc_u32)
  356. return;
  357. /* Free up all allocated memory. */
  358. t = adap->tc_u32;
  359. for (i = 0; i < t->size; i++) {
  360. struct cxgb4_link *link = &t->table[i];
  361. kvfree(link->tid_map);
  362. }
  363. kvfree(adap->tc_u32);
  364. }
  365. struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
  366. {
  367. unsigned int max_tids = adap->tids.nftids;
  368. struct cxgb4_tc_u32_table *t;
  369. unsigned int i;
  370. if (!max_tids)
  371. return NULL;
  372. t = kvzalloc(sizeof(*t) +
  373. (max_tids * sizeof(struct cxgb4_link)), GFP_KERNEL);
  374. if (!t)
  375. return NULL;
  376. t->size = max_tids;
  377. for (i = 0; i < t->size; i++) {
  378. struct cxgb4_link *link = &t->table[i];
  379. unsigned int bmap_size;
  380. bmap_size = BITS_TO_LONGS(max_tids);
  381. link->tid_map = kvcalloc(bmap_size, sizeof(unsigned long),
  382. GFP_KERNEL);
  383. if (!link->tid_map)
  384. goto out_no_mem;
  385. bitmap_zero(link->tid_map, max_tids);
  386. }
  387. return t;
  388. out_no_mem:
  389. for (i = 0; i < t->size; i++) {
  390. struct cxgb4_link *link = &t->table[i];
  391. if (link->tid_map)
  392. kvfree(link->tid_map);
  393. }
  394. if (t)
  395. kvfree(t);
  396. return NULL;
  397. }