  1. // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
  2. /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
  3. #include <linux/kernel.h>
  4. #include <linux/slab.h>
  5. #include <linux/errno.h>
  6. #include <linux/bitops.h>
  7. #include <linux/list.h>
  8. #include <linux/rhashtable.h>
  9. #include <linux/netdevice.h>
  10. #include "reg.h"
  11. #include "core.h"
  12. #include "resources.h"
  13. #include "spectrum.h"
  14. #include "spectrum_acl_tcam.h"
  15. #include "core_acl_flex_keys.h"
  16. size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
  17. {
  18. const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
  19. return ops->priv_size;
  20. }
  21. int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
  22. struct mlxsw_sp_acl_tcam *tcam)
  23. {
  24. const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
  25. u64 max_tcam_regions;
  26. u64 max_regions;
  27. u64 max_groups;
  28. size_t alloc_size;
  29. int err;
  30. max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
  31. ACL_MAX_TCAM_REGIONS);
  32. max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
  33. /* Use 1:1 mapping between ACL region and TCAM region */
  34. if (max_tcam_regions < max_regions)
  35. max_regions = max_tcam_regions;
  36. alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
  37. tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
  38. if (!tcam->used_regions)
  39. return -ENOMEM;
  40. tcam->max_regions = max_regions;
  41. max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
  42. alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
  43. tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
  44. if (!tcam->used_groups) {
  45. err = -ENOMEM;
  46. goto err_alloc_used_groups;
  47. }
  48. tcam->max_groups = max_groups;
  49. tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
  50. ACL_MAX_GROUP_SIZE);
  51. err = ops->init(mlxsw_sp, tcam->priv, tcam);
  52. if (err)
  53. goto err_tcam_init;
  54. return 0;
  55. err_tcam_init:
  56. kfree(tcam->used_groups);
  57. err_alloc_used_groups:
  58. kfree(tcam->used_regions);
  59. return err;
  60. }
  61. void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
  62. struct mlxsw_sp_acl_tcam *tcam)
  63. {
  64. const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
  65. ops->fini(mlxsw_sp, tcam->priv);
  66. kfree(tcam->used_groups);
  67. kfree(tcam->used_regions);
  68. }
  69. int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
  70. struct mlxsw_sp_acl_rule_info *rulei,
  71. u32 *priority, bool fillup_priority)
  72. {
  73. u64 max_priority;
  74. if (!fillup_priority) {
  75. *priority = 0;
  76. return 0;
  77. }
  78. if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
  79. return -EIO;
  80. /* Priority range is 1..cap_kvd_size-1. */
  81. max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
  82. if (rulei->priority >= max_priority)
  83. return -EINVAL;
  84. /* Unlike in TC, in HW, higher number means higher priority. */
  85. *priority = max_priority - rulei->priority;
  86. return 0;
  87. }
  88. static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
  89. u16 *p_id)
  90. {
  91. u16 id;
  92. id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
  93. if (id < tcam->max_regions) {
  94. __set_bit(id, tcam->used_regions);
  95. *p_id = id;
  96. return 0;
  97. }
  98. return -ENOBUFS;
  99. }
  100. static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
  101. u16 id)
  102. {
  103. __clear_bit(id, tcam->used_regions);
  104. }
  105. static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
  106. u16 *p_id)
  107. {
  108. u16 id;
  109. id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
  110. if (id < tcam->max_groups) {
  111. __set_bit(id, tcam->used_groups);
  112. *p_id = id;
  113. return 0;
  114. }
  115. return -ENOBUFS;
  116. }
  117. static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
  118. u16 id)
  119. {
  120. __clear_bit(id, tcam->used_groups);
  121. }
  122. struct mlxsw_sp_acl_tcam_pattern {
  123. const enum mlxsw_afk_element *elements;
  124. unsigned int elements_count;
  125. };
  126. struct mlxsw_sp_acl_tcam_group {
  127. struct mlxsw_sp_acl_tcam *tcam;
  128. u16 id;
  129. struct list_head region_list;
  130. unsigned int region_count;
  131. struct rhashtable chunk_ht;
  132. struct mlxsw_sp_acl_tcam_group_ops *ops;
  133. const struct mlxsw_sp_acl_tcam_pattern *patterns;
  134. unsigned int patterns_count;
  135. bool tmplt_elusage_set;
  136. struct mlxsw_afk_element_usage tmplt_elusage;
  137. };
  138. struct mlxsw_sp_acl_tcam_chunk {
  139. struct list_head list; /* Member of a TCAM region */
  140. struct rhash_head ht_node; /* Member of a chunk HT */
  141. unsigned int priority; /* Priority within the region and group */
  142. struct mlxsw_sp_acl_tcam_group *group;
  143. struct mlxsw_sp_acl_tcam_region *region;
  144. unsigned int ref_count;
  145. unsigned long priv[0];
  146. /* priv has to be always the last item */
  147. };
  148. struct mlxsw_sp_acl_tcam_entry {
  149. struct mlxsw_sp_acl_tcam_chunk *chunk;
  150. unsigned long priv[0];
  151. /* priv has to be always the last item */
  152. };
  153. static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
  154. .key_len = sizeof(unsigned int),
  155. .key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
  156. .head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
  157. .automatic_shrinking = true,
  158. };
  159. static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
  160. struct mlxsw_sp_acl_tcam_group *group)
  161. {
  162. struct mlxsw_sp_acl_tcam_region *region;
  163. char pagt_pl[MLXSW_REG_PAGT_LEN];
  164. int acl_index = 0;
  165. mlxsw_reg_pagt_pack(pagt_pl, group->id);
  166. list_for_each_entry(region, &group->region_list, list)
  167. mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
  168. mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
  169. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
  170. }
  171. static int
  172. mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
  173. struct mlxsw_sp_acl_tcam *tcam,
  174. struct mlxsw_sp_acl_tcam_group *group,
  175. const struct mlxsw_sp_acl_tcam_pattern *patterns,
  176. unsigned int patterns_count,
  177. struct mlxsw_afk_element_usage *tmplt_elusage)
  178. {
  179. int err;
  180. group->tcam = tcam;
  181. group->patterns = patterns;
  182. group->patterns_count = patterns_count;
  183. if (tmplt_elusage) {
  184. group->tmplt_elusage_set = true;
  185. memcpy(&group->tmplt_elusage, tmplt_elusage,
  186. sizeof(group->tmplt_elusage));
  187. }
  188. INIT_LIST_HEAD(&group->region_list);
  189. err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
  190. if (err)
  191. return err;
  192. err = rhashtable_init(&group->chunk_ht,
  193. &mlxsw_sp_acl_tcam_chunk_ht_params);
  194. if (err)
  195. goto err_rhashtable_init;
  196. return 0;
  197. err_rhashtable_init:
  198. mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
  199. return err;
  200. }
  201. static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
  202. struct mlxsw_sp_acl_tcam_group *group)
  203. {
  204. struct mlxsw_sp_acl_tcam *tcam = group->tcam;
  205. rhashtable_destroy(&group->chunk_ht);
  206. mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
  207. WARN_ON(!list_empty(&group->region_list));
  208. }
  209. static int
  210. mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
  211. struct mlxsw_sp_acl_tcam_group *group,
  212. struct mlxsw_sp_port *mlxsw_sp_port,
  213. bool ingress)
  214. {
  215. char ppbt_pl[MLXSW_REG_PPBT_LEN];
  216. mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
  217. MLXSW_REG_PXBT_E_EACL,
  218. MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
  219. group->id);
  220. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
  221. }
  222. static void
  223. mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
  224. struct mlxsw_sp_acl_tcam_group *group,
  225. struct mlxsw_sp_port *mlxsw_sp_port,
  226. bool ingress)
  227. {
  228. char ppbt_pl[MLXSW_REG_PPBT_LEN];
  229. mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
  230. MLXSW_REG_PXBT_E_EACL,
  231. MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
  232. group->id);
  233. mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
  234. }
  235. static u16
  236. mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
  237. {
  238. return group->id;
  239. }
  240. static unsigned int
  241. mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
  242. {
  243. struct mlxsw_sp_acl_tcam_chunk *chunk;
  244. if (list_empty(&region->chunk_list))
  245. return 0;
  246. /* As a priority of a region, return priority of the first chunk */
  247. chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list);
  248. return chunk->priority;
  249. }
  250. static unsigned int
  251. mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
  252. {
  253. struct mlxsw_sp_acl_tcam_chunk *chunk;
  254. if (list_empty(&region->chunk_list))
  255. return 0;
  256. chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list);
  257. return chunk->priority;
  258. }
  259. static void
  260. mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
  261. struct mlxsw_sp_acl_tcam_region *region)
  262. {
  263. struct mlxsw_sp_acl_tcam_region *region2;
  264. struct list_head *pos;
  265. /* Position the region inside the list according to priority */
  266. list_for_each(pos, &group->region_list) {
  267. region2 = list_entry(pos, typeof(*region2), list);
  268. if (mlxsw_sp_acl_tcam_region_prio(region2) >
  269. mlxsw_sp_acl_tcam_region_prio(region))
  270. break;
  271. }
  272. list_add_tail(&region->list, pos);
  273. group->region_count++;
  274. }
  275. static void
  276. mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
  277. struct mlxsw_sp_acl_tcam_region *region)
  278. {
  279. group->region_count--;
  280. list_del(&region->list);
  281. }
  282. static int
  283. mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
  284. struct mlxsw_sp_acl_tcam_group *group,
  285. struct mlxsw_sp_acl_tcam_region *region)
  286. {
  287. int err;
  288. if (group->region_count == group->tcam->max_group_size)
  289. return -ENOBUFS;
  290. mlxsw_sp_acl_tcam_group_list_add(group, region);
  291. err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
  292. if (err)
  293. goto err_group_update;
  294. region->group = group;
  295. return 0;
  296. err_group_update:
  297. mlxsw_sp_acl_tcam_group_list_del(group, region);
  298. mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
  299. return err;
  300. }
  301. static void
  302. mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
  303. struct mlxsw_sp_acl_tcam_region *region)
  304. {
  305. struct mlxsw_sp_acl_tcam_group *group = region->group;
  306. mlxsw_sp_acl_tcam_group_list_del(group, region);
  307. mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
  308. }
  309. static struct mlxsw_sp_acl_tcam_region *
  310. mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
  311. unsigned int priority,
  312. struct mlxsw_afk_element_usage *elusage,
  313. bool *p_need_split)
  314. {
  315. struct mlxsw_sp_acl_tcam_region *region, *region2;
  316. struct list_head *pos;
  317. bool issubset;
  318. list_for_each(pos, &group->region_list) {
  319. region = list_entry(pos, typeof(*region), list);
  320. /* First, check if the requested priority does not rather belong
  321. * under some of the next regions.
  322. */
  323. if (pos->next != &group->region_list) { /* not last */
  324. region2 = list_entry(pos->next, typeof(*region2), list);
  325. if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
  326. continue;
  327. }
  328. issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);
  329. /* If requested element usage would not fit and the priority
  330. * is lower than the currently inspected region we cannot
  331. * use this region, so return NULL to indicate new region has
  332. * to be created.
  333. */
  334. if (!issubset &&
  335. priority < mlxsw_sp_acl_tcam_region_prio(region))
  336. return NULL;
  337. /* If requested element usage would not fit and the priority
  338. * is higher than the currently inspected region we cannot
  339. * use this region. There is still some hope that the next
  340. * region would be the fit. So let it be processed and
  341. * eventually break at the check right above this.
  342. */
  343. if (!issubset &&
  344. priority > mlxsw_sp_acl_tcam_region_max_prio(region))
  345. continue;
  346. /* Indicate if the region needs to be split in order to add
  347. * the requested priority. Split is needed when requested
  348. * element usage won't fit into the found region.
  349. */
  350. *p_need_split = !issubset;
  351. return region;
  352. }
  353. return NULL; /* New region has to be created. */
  354. }
  355. static void
  356. mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
  357. struct mlxsw_afk_element_usage *elusage,
  358. struct mlxsw_afk_element_usage *out)
  359. {
  360. const struct mlxsw_sp_acl_tcam_pattern *pattern;
  361. int i;
  362. /* In case the template is set, we don't have to look up the pattern
  363. * and just use the template.
  364. */
  365. if (group->tmplt_elusage_set) {
  366. memcpy(out, &group->tmplt_elusage, sizeof(*out));
  367. WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
  368. return;
  369. }
  370. for (i = 0; i < group->patterns_count; i++) {
  371. pattern = &group->patterns[i];
  372. mlxsw_afk_element_usage_fill(out, pattern->elements,
  373. pattern->elements_count);
  374. if (mlxsw_afk_element_usage_subset(elusage, out))
  375. return;
  376. }
  377. memcpy(out, elusage, sizeof(*out));
  378. }
  379. static int
  380. mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
  381. struct mlxsw_sp_acl_tcam_region *region)
  382. {
  383. struct mlxsw_afk_key_info *key_info = region->key_info;
  384. char ptar_pl[MLXSW_REG_PTAR_LEN];
  385. unsigned int encodings_count;
  386. int i;
  387. int err;
  388. mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
  389. region->key_type,
  390. MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
  391. region->id, region->tcam_region_info);
  392. encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
  393. for (i = 0; i < encodings_count; i++) {
  394. u16 encoding;
  395. encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
  396. mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
  397. }
  398. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
  399. if (err)
  400. return err;
  401. mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
  402. return 0;
  403. }
  404. static void
  405. mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
  406. struct mlxsw_sp_acl_tcam_region *region)
  407. {
  408. char ptar_pl[MLXSW_REG_PTAR_LEN];
  409. mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
  410. region->key_type, 0, region->id,
  411. region->tcam_region_info);
  412. mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
  413. }
  414. static int
  415. mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
  416. struct mlxsw_sp_acl_tcam_region *region)
  417. {
  418. char pacl_pl[MLXSW_REG_PACL_LEN];
  419. mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
  420. region->tcam_region_info);
  421. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
  422. }
  423. static void
  424. mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
  425. struct mlxsw_sp_acl_tcam_region *region)
  426. {
  427. char pacl_pl[MLXSW_REG_PACL_LEN];
  428. mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
  429. region->tcam_region_info);
  430. mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
  431. }
  432. static struct mlxsw_sp_acl_tcam_region *
  433. mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
  434. struct mlxsw_sp_acl_tcam *tcam,
  435. struct mlxsw_afk_element_usage *elusage)
  436. {
  437. const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
  438. struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
  439. struct mlxsw_sp_acl_tcam_region *region;
  440. int err;
  441. region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
  442. if (!region)
  443. return ERR_PTR(-ENOMEM);
  444. INIT_LIST_HEAD(&region->chunk_list);
  445. region->mlxsw_sp = mlxsw_sp;
  446. region->key_info = mlxsw_afk_key_info_get(afk, elusage);
  447. if (IS_ERR(region->key_info)) {
  448. err = PTR_ERR(region->key_info);
  449. goto err_key_info_get;
  450. }
  451. err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
  452. if (err)
  453. goto err_region_id_get;
  454. err = ops->region_associate(mlxsw_sp, region);
  455. if (err)
  456. goto err_tcam_region_associate;
  457. region->key_type = ops->key_type;
  458. err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
  459. if (err)
  460. goto err_tcam_region_alloc;
  461. err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
  462. if (err)
  463. goto err_tcam_region_enable;
  464. err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, region);
  465. if (err)
  466. goto err_tcam_region_init;
  467. return region;
  468. err_tcam_region_init:
  469. mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
  470. err_tcam_region_enable:
  471. mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
  472. err_tcam_region_alloc:
  473. err_tcam_region_associate:
  474. mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
  475. err_region_id_get:
  476. mlxsw_afk_key_info_put(region->key_info);
  477. err_key_info_get:
  478. kfree(region);
  479. return ERR_PTR(err);
  480. }
  481. static void
  482. mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
  483. struct mlxsw_sp_acl_tcam_region *region)
  484. {
  485. const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
  486. ops->region_fini(mlxsw_sp, region->priv);
  487. mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
  488. mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
  489. mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
  490. mlxsw_afk_key_info_put(region->key_info);
  491. kfree(region);
  492. }
  493. static int
  494. mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
  495. struct mlxsw_sp_acl_tcam_group *group,
  496. unsigned int priority,
  497. struct mlxsw_afk_element_usage *elusage,
  498. struct mlxsw_sp_acl_tcam_chunk *chunk)
  499. {
  500. struct mlxsw_sp_acl_tcam_region *region;
  501. bool region_created = false;
  502. bool need_split;
  503. int err;
  504. region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
  505. &need_split);
  506. if (region && need_split) {
  507. /* According to priority, the chunk should belong to an
  508. * existing region. However, this chunk needs elements
  509. * that region does not contain. We need to split the existing
  510. * region into two and create a new region for this chunk
  511. * in between. This is not supported now.
  512. */
  513. return -EOPNOTSUPP;
  514. }
  515. if (!region) {
  516. struct mlxsw_afk_element_usage region_elusage;
  517. mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
  518. &region_elusage);
  519. region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
  520. &region_elusage);
  521. if (IS_ERR(region))
  522. return PTR_ERR(region);
  523. region_created = true;
  524. }
  525. chunk->region = region;
  526. list_add_tail(&chunk->list, &region->chunk_list);
  527. if (!region_created)
  528. return 0;
  529. err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
  530. if (err)
  531. goto err_group_region_attach;
  532. return 0;
  533. err_group_region_attach:
  534. mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
  535. return err;
  536. }
  537. static void
  538. mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
  539. struct mlxsw_sp_acl_tcam_chunk *chunk)
  540. {
  541. struct mlxsw_sp_acl_tcam_region *region = chunk->region;
  542. list_del(&chunk->list);
  543. if (list_empty(&region->chunk_list)) {
  544. mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
  545. mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
  546. }
  547. }
  548. static struct mlxsw_sp_acl_tcam_chunk *
  549. mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
  550. struct mlxsw_sp_acl_tcam_group *group,
  551. unsigned int priority,
  552. struct mlxsw_afk_element_usage *elusage)
  553. {
  554. const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
  555. struct mlxsw_sp_acl_tcam_chunk *chunk;
  556. int err;
  557. if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
  558. return ERR_PTR(-EINVAL);
  559. chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
  560. if (!chunk)
  561. return ERR_PTR(-ENOMEM);
  562. chunk->priority = priority;
  563. chunk->group = group;
  564. chunk->ref_count = 1;
  565. err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
  566. elusage, chunk);
  567. if (err)
  568. goto err_chunk_assoc;
  569. ops->chunk_init(chunk->region->priv, chunk->priv, priority);
  570. err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
  571. mlxsw_sp_acl_tcam_chunk_ht_params);
  572. if (err)
  573. goto err_rhashtable_insert;
  574. return chunk;
  575. err_rhashtable_insert:
  576. ops->chunk_fini(chunk->priv);
  577. mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
  578. err_chunk_assoc:
  579. kfree(chunk);
  580. return ERR_PTR(err);
  581. }
  582. static void
  583. mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
  584. struct mlxsw_sp_acl_tcam_chunk *chunk)
  585. {
  586. const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
  587. struct mlxsw_sp_acl_tcam_group *group = chunk->group;
  588. rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
  589. mlxsw_sp_acl_tcam_chunk_ht_params);
  590. ops->chunk_fini(chunk->priv);
  591. mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
  592. kfree(chunk);
  593. }
  594. static struct mlxsw_sp_acl_tcam_chunk *
  595. mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
  596. struct mlxsw_sp_acl_tcam_group *group,
  597. unsigned int priority,
  598. struct mlxsw_afk_element_usage *elusage)
  599. {
  600. struct mlxsw_sp_acl_tcam_chunk *chunk;
  601. chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
  602. mlxsw_sp_acl_tcam_chunk_ht_params);
  603. if (chunk) {
  604. if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
  605. elusage)))
  606. return ERR_PTR(-EINVAL);
  607. chunk->ref_count++;
  608. return chunk;
  609. }
  610. return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
  611. priority, elusage);
  612. }
  613. static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
  614. struct mlxsw_sp_acl_tcam_chunk *chunk)
  615. {
  616. if (--chunk->ref_count)
  617. return;
  618. mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
  619. }
  620. static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp)
  621. {
  622. const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
  623. return ops->entry_priv_size;
  624. }
  625. static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
  626. struct mlxsw_sp_acl_tcam_group *group,
  627. struct mlxsw_sp_acl_tcam_entry *entry,
  628. struct mlxsw_sp_acl_rule_info *rulei)
  629. {
  630. const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
  631. struct mlxsw_sp_acl_tcam_chunk *chunk;
  632. struct mlxsw_sp_acl_tcam_region *region;
  633. int err;
  634. chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
  635. &rulei->values.elusage);
  636. if (IS_ERR(chunk))
  637. return PTR_ERR(chunk);
  638. region = chunk->region;
  639. err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv,
  640. entry->priv, rulei);
  641. if (err)
  642. goto err_entry_add;
  643. entry->chunk = chunk;
  644. return 0;
  645. err_entry_add:
  646. mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
  647. return err;
  648. }
  649. static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
  650. struct mlxsw_sp_acl_tcam_entry *entry)
  651. {
  652. const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
  653. struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
  654. struct mlxsw_sp_acl_tcam_region *region = chunk->region;
  655. ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv);
  656. mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
  657. }
  658. static int
  659. mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
  660. struct mlxsw_sp_acl_tcam_entry *entry,
  661. bool *activity)
  662. {
  663. const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
  664. struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
  665. struct mlxsw_sp_acl_tcam_region *region = chunk->region;
  666. return ops->entry_activity_get(mlxsw_sp, region->priv,
  667. entry->priv, activity);
  668. }
  669. static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
  670. MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
  671. MLXSW_AFK_ELEMENT_DMAC_32_47,
  672. MLXSW_AFK_ELEMENT_DMAC_0_31,
  673. MLXSW_AFK_ELEMENT_SMAC_32_47,
  674. MLXSW_AFK_ELEMENT_SMAC_0_31,
  675. MLXSW_AFK_ELEMENT_ETHERTYPE,
  676. MLXSW_AFK_ELEMENT_IP_PROTO,
  677. MLXSW_AFK_ELEMENT_SRC_IP_0_31,
  678. MLXSW_AFK_ELEMENT_DST_IP_0_31,
  679. MLXSW_AFK_ELEMENT_DST_L4_PORT,
  680. MLXSW_AFK_ELEMENT_SRC_L4_PORT,
  681. MLXSW_AFK_ELEMENT_VID,
  682. MLXSW_AFK_ELEMENT_PCP,
  683. MLXSW_AFK_ELEMENT_TCP_FLAGS,
  684. MLXSW_AFK_ELEMENT_IP_TTL_,
  685. MLXSW_AFK_ELEMENT_IP_ECN,
  686. MLXSW_AFK_ELEMENT_IP_DSCP,
  687. };
  688. static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
  689. MLXSW_AFK_ELEMENT_ETHERTYPE,
  690. MLXSW_AFK_ELEMENT_IP_PROTO,
  691. MLXSW_AFK_ELEMENT_SRC_IP_96_127,
  692. MLXSW_AFK_ELEMENT_SRC_IP_64_95,
  693. MLXSW_AFK_ELEMENT_SRC_IP_32_63,
  694. MLXSW_AFK_ELEMENT_SRC_IP_0_31,
  695. MLXSW_AFK_ELEMENT_DST_IP_96_127,
  696. MLXSW_AFK_ELEMENT_DST_IP_64_95,
  697. MLXSW_AFK_ELEMENT_DST_IP_32_63,
  698. MLXSW_AFK_ELEMENT_DST_IP_0_31,
  699. MLXSW_AFK_ELEMENT_DST_L4_PORT,
  700. MLXSW_AFK_ELEMENT_SRC_L4_PORT,
  701. };
  702. static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
  703. {
  704. .elements = mlxsw_sp_acl_tcam_pattern_ipv4,
  705. .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
  706. },
  707. {
  708. .elements = mlxsw_sp_acl_tcam_pattern_ipv6,
  709. .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
  710. },
  711. };
  712. #define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
  713. ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
  714. struct mlxsw_sp_acl_tcam_flower_ruleset {
  715. struct mlxsw_sp_acl_tcam_group group;
  716. };
  717. struct mlxsw_sp_acl_tcam_flower_rule {
  718. struct mlxsw_sp_acl_tcam_entry entry;
  719. };
  720. static int
  721. mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
  722. struct mlxsw_sp_acl_tcam *tcam,
  723. void *ruleset_priv,
  724. struct mlxsw_afk_element_usage *tmplt_elusage)
  725. {
  726. struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
  727. return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
  728. mlxsw_sp_acl_tcam_patterns,
  729. MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
  730. tmplt_elusage);
  731. }
  732. static void
  733. mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
  734. void *ruleset_priv)
  735. {
  736. struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
  737. mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
  738. }
  739. static int
  740. mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
  741. void *ruleset_priv,
  742. struct mlxsw_sp_port *mlxsw_sp_port,
  743. bool ingress)
  744. {
  745. struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
  746. return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
  747. mlxsw_sp_port, ingress);
  748. }
  749. static void
  750. mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
  751. void *ruleset_priv,
  752. struct mlxsw_sp_port *mlxsw_sp_port,
  753. bool ingress)
  754. {
  755. struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
  756. mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
  757. mlxsw_sp_port, ingress);
  758. }
  759. static u16
  760. mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
  761. {
  762. struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
  763. return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
  764. }
  765. static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
  766. {
  767. return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) +
  768. mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
  769. }
  770. static int
  771. mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
  772. void *ruleset_priv, void *rule_priv,
  773. struct mlxsw_sp_acl_rule_info *rulei)
  774. {
  775. struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
  776. struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
  777. return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
  778. &rule->entry, rulei);
  779. }
  780. static void
  781. mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
  782. {
  783. struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
  784. mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
  785. }
  786. static int
  787. mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
  788. void *rule_priv, bool *activity)
  789. {
  790. struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
  791. return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
  792. activity);
  793. }
  794. static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
  795. .ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
  796. .ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
  797. .ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del,
  798. .ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
  799. .ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
  800. .ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
  801. .rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size,
  802. .rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
  803. .rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
  804. .rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
  805. };
  806. static const struct mlxsw_sp_acl_profile_ops *
  807. mlxsw_sp_acl_tcam_profile_ops_arr[] = {
  808. [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
  809. };
  810. const struct mlxsw_sp_acl_profile_ops *
  811. mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
  812. enum mlxsw_sp_acl_profile profile)
  813. {
  814. const struct mlxsw_sp_acl_profile_ops *ops;
  815. if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
  816. return NULL;
  817. ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
  818. if (WARN_ON(!ops))
  819. return NULL;
  820. return ops;
  821. }