mvpp2_cls.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * RSS and Classifier helpers for Marvell PPv2 Network Controller
  4. *
  5. * Copyright (C) 2014 Marvell
  6. *
  7. * Marcin Wojtas <mw@semihalf.com>
  8. */
  9. #include "mvpp2.h"
  10. #include "mvpp2_cls.h"
  11. #include "mvpp2_prs.h"
/* Declare one struct mvpp2_cls_flow entry: ties an ethtool flow type and a
 * classifier flow id to the hash options this flow supports and to the
 * parser Result Info value/mask (_ri/_ri_mask) that identifies it.
 */
#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask) \
{ \
	.flow_type = _type, \
	.flow_id = _id, \
	.supported_hash_opts = _opts, \
	.prs_ri = { \
		.ri = _ri, \
		.ri_mask = _ri_mask \
	} \
}
  22. static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
  23. /* TCP over IPv4 flows, Not fragmented, no vlan tag */
  24. MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
  25. MVPP22_CLS_HEK_IP4_5T,
  26. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
  27. MVPP2_PRS_RI_L4_TCP,
  28. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  29. MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
  30. MVPP22_CLS_HEK_IP4_5T,
  31. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
  32. MVPP2_PRS_RI_L4_TCP,
  33. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  34. MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
  35. MVPP22_CLS_HEK_IP4_5T,
  36. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
  37. MVPP2_PRS_RI_L4_TCP,
  38. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  39. /* TCP over IPv4 flows, Not fragmented, with vlan tag */
  40. MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
  41. MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
  42. MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
  43. MVPP2_PRS_IP_MASK),
  44. MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
  45. MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
  46. MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
  47. MVPP2_PRS_IP_MASK),
  48. MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
  49. MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
  50. MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
  51. MVPP2_PRS_IP_MASK),
  52. /* TCP over IPv4 flows, fragmented, no vlan tag */
  53. MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
  54. MVPP22_CLS_HEK_IP4_2T,
  55. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
  56. MVPP2_PRS_RI_L4_TCP,
  57. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  58. MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
  59. MVPP22_CLS_HEK_IP4_2T,
  60. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
  61. MVPP2_PRS_RI_L4_TCP,
  62. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  63. MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
  64. MVPP22_CLS_HEK_IP4_2T,
  65. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
  66. MVPP2_PRS_RI_L4_TCP,
  67. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  68. /* TCP over IPv4 flows, fragmented, with vlan tag */
  69. MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
  70. MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
  71. MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
  72. MVPP2_PRS_IP_MASK),
  73. MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
  74. MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
  75. MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
  76. MVPP2_PRS_IP_MASK),
  77. MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
  78. MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
  79. MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
  80. MVPP2_PRS_IP_MASK),
  81. /* UDP over IPv4 flows, Not fragmented, no vlan tag */
  82. MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
  83. MVPP22_CLS_HEK_IP4_5T,
  84. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
  85. MVPP2_PRS_RI_L4_UDP,
  86. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  87. MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
  88. MVPP22_CLS_HEK_IP4_5T,
  89. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
  90. MVPP2_PRS_RI_L4_UDP,
  91. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  92. MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
  93. MVPP22_CLS_HEK_IP4_5T,
  94. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
  95. MVPP2_PRS_RI_L4_UDP,
  96. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  97. /* UDP over IPv4 flows, Not fragmented, with vlan tag */
  98. MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
  99. MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
  100. MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
  101. MVPP2_PRS_IP_MASK),
  102. MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
  103. MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
  104. MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
  105. MVPP2_PRS_IP_MASK),
  106. MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
  107. MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
  108. MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
  109. MVPP2_PRS_IP_MASK),
  110. /* UDP over IPv4 flows, fragmented, no vlan tag */
  111. MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
  112. MVPP22_CLS_HEK_IP4_2T,
  113. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
  114. MVPP2_PRS_RI_L4_UDP,
  115. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  116. MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
  117. MVPP22_CLS_HEK_IP4_2T,
  118. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
  119. MVPP2_PRS_RI_L4_UDP,
  120. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  121. MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
  122. MVPP22_CLS_HEK_IP4_2T,
  123. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
  124. MVPP2_PRS_RI_L4_UDP,
  125. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  126. /* UDP over IPv4 flows, fragmented, with vlan tag */
  127. MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
  128. MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
  129. MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
  130. MVPP2_PRS_IP_MASK),
  131. MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
  132. MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
  133. MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
  134. MVPP2_PRS_IP_MASK),
  135. MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
  136. MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
  137. MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
  138. MVPP2_PRS_IP_MASK),
  139. /* TCP over IPv6 flows, not fragmented, no vlan tag */
  140. MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
  141. MVPP22_CLS_HEK_IP6_5T,
  142. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
  143. MVPP2_PRS_RI_L4_TCP,
  144. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  145. MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
  146. MVPP22_CLS_HEK_IP6_5T,
  147. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
  148. MVPP2_PRS_RI_L4_TCP,
  149. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  150. /* TCP over IPv6 flows, not fragmented, with vlan tag */
  151. MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
  152. MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
  153. MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
  154. MVPP2_PRS_IP_MASK),
  155. MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
  156. MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
  157. MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
  158. MVPP2_PRS_IP_MASK),
  159. /* TCP over IPv6 flows, fragmented, no vlan tag */
  160. MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
  161. MVPP22_CLS_HEK_IP6_2T,
  162. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
  163. MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
  164. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  165. MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
  166. MVPP22_CLS_HEK_IP6_2T,
  167. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
  168. MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
  169. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  170. /* TCP over IPv6 flows, fragmented, with vlan tag */
  171. MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
  172. MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
  173. MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
  174. MVPP2_PRS_RI_L4_TCP,
  175. MVPP2_PRS_IP_MASK),
  176. MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
  177. MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
  178. MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
  179. MVPP2_PRS_RI_L4_TCP,
  180. MVPP2_PRS_IP_MASK),
  181. /* UDP over IPv6 flows, not fragmented, no vlan tag */
  182. MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
  183. MVPP22_CLS_HEK_IP6_5T,
  184. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
  185. MVPP2_PRS_RI_L4_UDP,
  186. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  187. MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
  188. MVPP22_CLS_HEK_IP6_5T,
  189. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
  190. MVPP2_PRS_RI_L4_UDP,
  191. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  192. /* UDP over IPv6 flows, not fragmented, with vlan tag */
  193. MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
  194. MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
  195. MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
  196. MVPP2_PRS_IP_MASK),
  197. MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
  198. MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
  199. MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
  200. MVPP2_PRS_IP_MASK),
  201. /* UDP over IPv6 flows, fragmented, no vlan tag */
  202. MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
  203. MVPP22_CLS_HEK_IP6_2T,
  204. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
  205. MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
  206. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  207. MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
  208. MVPP22_CLS_HEK_IP6_2T,
  209. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
  210. MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
  211. MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
  212. /* UDP over IPv6 flows, fragmented, with vlan tag */
  213. MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
  214. MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
  215. MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
  216. MVPP2_PRS_RI_L4_UDP,
  217. MVPP2_PRS_IP_MASK),
  218. MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
  219. MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
  220. MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
  221. MVPP2_PRS_RI_L4_UDP,
  222. MVPP2_PRS_IP_MASK),
  223. /* IPv4 flows, no vlan tag */
  224. MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
  225. MVPP22_CLS_HEK_IP4_2T,
  226. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
  227. MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
  228. MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
  229. MVPP22_CLS_HEK_IP4_2T,
  230. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
  231. MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
  232. MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
  233. MVPP22_CLS_HEK_IP4_2T,
  234. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
  235. MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
  236. /* IPv4 flows, with vlan tag */
  237. MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
  238. MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
  239. MVPP2_PRS_RI_L3_IP4,
  240. MVPP2_PRS_RI_L3_PROTO_MASK),
  241. MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
  242. MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
  243. MVPP2_PRS_RI_L3_IP4_OPT,
  244. MVPP2_PRS_RI_L3_PROTO_MASK),
  245. MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
  246. MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
  247. MVPP2_PRS_RI_L3_IP4_OTHER,
  248. MVPP2_PRS_RI_L3_PROTO_MASK),
  249. /* IPv6 flows, no vlan tag */
  250. MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
  251. MVPP22_CLS_HEK_IP6_2T,
  252. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
  253. MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
  254. MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
  255. MVPP22_CLS_HEK_IP6_2T,
  256. MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
  257. MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
  258. /* IPv6 flows, with vlan tag */
  259. MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
  260. MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
  261. MVPP2_PRS_RI_L3_IP6,
  262. MVPP2_PRS_RI_L3_PROTO_MASK),
  263. MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
  264. MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
  265. MVPP2_PRS_RI_L3_IP6,
  266. MVPP2_PRS_RI_L3_PROTO_MASK),
  267. /* Non IP flow, no vlan tag */
  268. MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
  269. 0,
  270. MVPP2_PRS_RI_VLAN_NONE,
  271. MVPP2_PRS_RI_VLAN_MASK),
  272. /* Non IP flow, with vlan tag */
  273. MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
  274. MVPP22_CLS_HEK_OPT_VLAN,
  275. 0, 0),
  276. };
/* Read the hit counter of a flow table entry: select the counter through the
 * counter index register, then read it back.
 */
u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR);
}
/* Read the flow table entry at @index into @fe: the index register selects
 * the entry, then the three data words are read back.
 */
void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
			 struct mvpp2_cls_flow_entry *fe)
{
	fe->index = index;
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index);
	fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG);
	fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG);
	fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG);
}
/* Update classification flow table registers: select the entry via the index
 * register, then write the three data words of @fe.
 */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
/* Read the hit counter of a lookup (decoding) table entry: select it through
 * the counter index register, then read it back.
 */
u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR);
}
/* Read the lookup table entry selected by @lkpid and @way into @le.
 * The index register takes the way in its upper bits and the lookup id in
 * its lower bits.
 */
void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
			   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	le->way = way;
	le->lkpid = lkpid;
	le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG);
}
/* Update classification lookup table register: select the entry by way and
 * lookup id, then write the data word of @le.
 */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}
/* Operations on flow entry */

/* Return the number of extracted HEK fields configured in this flow entry */
static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe)
{
	return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
}
  329. static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe,
  330. int num_of_fields)
  331. {
  332. fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
  333. fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields);
  334. }
  335. static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe,
  336. int field_index)
  337. {
  338. return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) &
  339. MVPP2_CLS_FLOW_TBL2_FLD_MASK;
  340. }
  341. static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe,
  342. int field_index, int field_id)
  343. {
  344. fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index,
  345. MVPP2_CLS_FLOW_TBL2_FLD_MASK);
  346. fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id);
  347. }
  348. static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe,
  349. int engine)
  350. {
  351. fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK);
  352. fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine);
  353. }
  354. int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe)
  355. {
  356. return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) &
  357. MVPP2_CLS_FLOW_TBL0_ENG_MASK;
  358. }
  359. static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
  360. bool from_packet)
  361. {
  362. if (from_packet)
  363. fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
  364. else
  365. fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
  366. }
  367. static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq)
  368. {
  369. fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK);
  370. fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq);
  371. }
  372. static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
  373. bool is_last)
  374. {
  375. fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST;
  376. fe->data[0] |= !!is_last;
  377. }
  378. static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
  379. {
  380. fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
  381. fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio);
  382. }
/* Add @port (a bit in the port bitmap) to the ports matched by this entry */
static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
				    u32 port)
{
	fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
}
/* Initialize the parser entry for the given flow: the parser Result Info
 * value/mask of @flow is what routes matching packets to this flow id.
 */
static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
				    struct mvpp2_cls_flow *flow)
{
	mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
			   flow->prs_ri.ri_mask);
}
  395. /* Initialize the Lookup Id table entry for the given flow */
  396. static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
  397. struct mvpp2_cls_flow *flow)
  398. {
  399. struct mvpp2_cls_lookup_entry le;
  400. le.way = 0;
  401. le.lkpid = flow->flow_id;
  402. /* The default RxQ for this port is set in the C2 lookup */
  403. le.data = 0;
  404. /* We point on the first lookup in the sequence for the flow, that is
  405. * the C2 lookup.
  406. */
  407. le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
  408. /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
  409. le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
  410. mvpp2_cls_lookup_write(priv, &le);
  411. }
/* Initialize the flow table entries for the given flow.
 *
 * The lookup sequence for one flow is: a single C2 entry matching all ports
 * (first in sequence), then one C3Hx hashing entry per port. The last
 * per-port entry is rewritten with the LAST flag and SEQ_LAST so the
 * sequence terminates.
 */
static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
{
	struct mvpp2_cls_flow_entry fe;
	int i;

	/* C2 lookup */
	memset(&fe, 0, sizeof(fe));
	fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);
	mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
	mvpp2_cls_flow_port_id_sel(&fe, true);
	mvpp2_cls_flow_last_set(&fe, 0);
	mvpp2_cls_flow_pri_set(&fe, 0);
	mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1);
	/* Add all ports */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_cls_flow_port_add(&fe, BIT(i));
	mvpp2_cls_flow_write(priv, &fe);
	/* C3Hx lookups */
	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
		memset(&fe, 0, sizeof(fe));
		fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id);
		mvpp2_cls_flow_port_id_sel(&fe, true);
		/* Priority increases with the port number so entries don't
		 * collide across ports
		 */
		mvpp2_cls_flow_pri_set(&fe, i + 1);
		mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE);
		mvpp2_cls_flow_port_add(&fe, BIT(i));
		mvpp2_cls_flow_write(priv, &fe);
	}
	/* Update the last entry: fe still holds the last port's entry, which
	 * is re-written with the LAST/SEQ_LAST markers
	 */
	mvpp2_cls_flow_last_set(&fe, 1);
	mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST);
	mvpp2_cls_flow_write(priv, &fe);
}
  444. /* Adds a field to the Header Extracted Key generation parameters*/
  445. static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
  446. u32 field_id)
  447. {
  448. int nb_fields = mvpp2_cls_flow_hek_num_get(fe);
  449. if (nb_fields == MVPP2_FLOW_N_FIELDS)
  450. return -EINVAL;
  451. mvpp2_cls_flow_hek_set(fe, nb_fields, field_id);
  452. mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1);
  453. return 0;
  454. }
  455. static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
  456. unsigned long hash_opts)
  457. {
  458. u32 field_id;
  459. int i;
  460. /* Clear old fields */
  461. mvpp2_cls_flow_hek_num_set(fe, 0);
  462. fe->data[2] = 0;
  463. for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
  464. switch (BIT(i)) {
  465. case MVPP22_CLS_HEK_OPT_VLAN:
  466. field_id = MVPP22_CLS_FIELD_VLAN;
  467. break;
  468. case MVPP22_CLS_HEK_OPT_IP4SA:
  469. field_id = MVPP22_CLS_FIELD_IP4SA;
  470. break;
  471. case MVPP22_CLS_HEK_OPT_IP4DA:
  472. field_id = MVPP22_CLS_FIELD_IP4DA;
  473. break;
  474. case MVPP22_CLS_HEK_OPT_IP6SA:
  475. field_id = MVPP22_CLS_FIELD_IP6SA;
  476. break;
  477. case MVPP22_CLS_HEK_OPT_IP6DA:
  478. field_id = MVPP22_CLS_FIELD_IP6DA;
  479. break;
  480. case MVPP22_CLS_HEK_OPT_L4SIP:
  481. field_id = MVPP22_CLS_FIELD_L4SIP;
  482. break;
  483. case MVPP22_CLS_HEK_OPT_L4DIP:
  484. field_id = MVPP22_CLS_FIELD_L4DIP;
  485. break;
  486. default:
  487. return -EINVAL;
  488. }
  489. if (mvpp2_flow_add_hek_field(fe, field_id))
  490. return -EINVAL;
  491. }
  492. return 0;
  493. }
  494. struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
  495. {
  496. if (flow >= MVPP2_N_FLOWS)
  497. return NULL;
  498. return &cls_flows[flow];
  499. }
  500. /* Set the hash generation options for the given traffic flow.
  501. * One traffic flow (in the ethtool sense) has multiple classification flows,
  502. * to handle specific cases such as fragmentation, or the presence of a
  503. * VLAN / DSA Tag.
  504. *
  505. * Each of these individual flows has different constraints, for example we
  506. * can't hash fragmented packets on L4 data (else we would risk having packet
  507. * re-ordering), so each classification flows masks the options with their
  508. * supported ones.
  509. *
  510. */
  511. static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
  512. u16 requested_opts)
  513. {
  514. struct mvpp2_cls_flow_entry fe;
  515. struct mvpp2_cls_flow *flow;
  516. int i, engine, flow_index;
  517. u16 hash_opts;
  518. for (i = 0; i < MVPP2_N_FLOWS; i++) {
  519. flow = mvpp2_cls_flow_get(i);
  520. if (!flow)
  521. return -EINVAL;
  522. if (flow->flow_type != flow_type)
  523. continue;
  524. flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
  525. flow->flow_id);
  526. mvpp2_cls_flow_read(port->priv, flow_index, &fe);
  527. hash_opts = flow->supported_hash_opts & requested_opts;
  528. /* Use C3HB engine to access L4 infos. This adds L4 infos to the
  529. * hash parameters
  530. */
  531. if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
  532. engine = MVPP22_CLS_ENGINE_C3HB;
  533. else
  534. engine = MVPP22_CLS_ENGINE_C3HA;
  535. if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
  536. return -EINVAL;
  537. mvpp2_cls_flow_eng_set(&fe, engine);
  538. mvpp2_cls_flow_write(port->priv, &fe);
  539. }
  540. return 0;
  541. }
  542. u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
  543. {
  544. u16 hash_opts = 0;
  545. int n_fields, i, field;
  546. n_fields = mvpp2_cls_flow_hek_num_get(fe);
  547. for (i = 0; i < n_fields; i++) {
  548. field = mvpp2_cls_flow_hek_get(fe, i);
  549. switch (field) {
  550. case MVPP22_CLS_FIELD_MAC_DA:
  551. hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
  552. break;
  553. case MVPP22_CLS_FIELD_VLAN:
  554. hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
  555. break;
  556. case MVPP22_CLS_FIELD_L3_PROTO:
  557. hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
  558. break;
  559. case MVPP22_CLS_FIELD_IP4SA:
  560. hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
  561. break;
  562. case MVPP22_CLS_FIELD_IP4DA:
  563. hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
  564. break;
  565. case MVPP22_CLS_FIELD_IP6SA:
  566. hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
  567. break;
  568. case MVPP22_CLS_FIELD_IP6DA:
  569. hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
  570. break;
  571. case MVPP22_CLS_FIELD_L4SIP:
  572. hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
  573. break;
  574. case MVPP22_CLS_FIELD_L4DIP:
  575. hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
  576. break;
  577. default:
  578. break;
  579. }
  580. }
  581. return hash_opts;
  582. }
  583. /* Returns the hash opts for this flow. There are several classifier flows
  584. * for one traffic flow, this returns an aggregation of all configurations.
  585. */
  586. static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
  587. {
  588. struct mvpp2_cls_flow_entry fe;
  589. struct mvpp2_cls_flow *flow;
  590. int i, flow_index;
  591. u16 hash_opts = 0;
  592. for (i = 0; i < MVPP2_N_FLOWS; i++) {
  593. flow = mvpp2_cls_flow_get(i);
  594. if (!flow)
  595. return 0;
  596. if (flow->flow_type != flow_type)
  597. continue;
  598. flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
  599. flow->flow_id);
  600. mvpp2_cls_flow_read(port->priv, flow_index, &fe);
  601. hash_opts |= mvpp2_flow_get_hek_fields(&fe);
  602. }
  603. return hash_opts;
  604. }
  605. static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
  606. {
  607. struct mvpp2_cls_flow *flow;
  608. int i;
  609. for (i = 0; i < MVPP2_N_FLOWS; i++) {
  610. flow = mvpp2_cls_flow_get(i);
  611. if (!flow)
  612. break;
  613. mvpp2_cls_flow_prs_init(priv, flow);
  614. mvpp2_cls_flow_lkp_init(priv, flow);
  615. mvpp2_cls_flow_init(priv, flow);
  616. }
  617. }
/* Write a C2 TCAM entry: select the entry through the TCAM index register,
 * then write the five TCAM words, the action word and the four attribute
 * words of @c2.
 */
static void mvpp2_cls_c2_write(struct mvpp2 *priv,
			       struct mvpp2_cls_c2_entry *c2)
{
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
	/* Write TCAM */
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
	mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
}
/* Read the C2 TCAM entry at @index into @c2: select the entry through the
 * TCAM index register, then read back the TCAM, action and attribute words.
 */
void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
		       struct mvpp2_cls_c2_entry *c2)
{
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
	c2->index = index;
	c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
	c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
	c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
	c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
	c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
	c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
	c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
	c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
	c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
	c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
}
/* Build and write the per-port C2 entry used for RSS: it matches packets
 * from this port only, enables the RSS update action, marks packets as
 * forwarded to software, and sets the port's default rx queue.
 */
static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
{
	struct mvpp2_cls_c2_entry c2;
	u8 qh, ql, pmap;

	memset(&c2, 0, sizeof(c2));
	c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);
	/* Match on this port only (single bit in the port bitmap) */
	pmap = BIT(port->id);
	c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
	/* Update RSS status after matching this entry */
	c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
	/* Mark packet as "forwarded to software", needed for RSS */
	c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
	/* Configure the default rx queue : Update Queue Low and Queue High, but
	 * don't lock, since the rx queue selection might be overridden by RSS
	 */
	c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
		  MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);
	/* Split first_rxq into the high/low queue number fields */
	qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
	ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
	c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
		     MVPP22_CLS_C2_ATTR0_QLOW(ql);
	mvpp2_cls_c2_write(port->priv, &c2);
}
/* Classifier default initialization: enable the block, clear both hardware
 * tables (flow table, and both ways of the lookup table), then program the
 * entries for all default flows.
 */
void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}
	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);
		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
	mvpp2_cls_port_init_flows(priv);
}
  699. void mvpp2_cls_port_config(struct mvpp2_port *port)
  700. {
  701. struct mvpp2_cls_lookup_entry le;
  702. u32 val;
  703. /* Set way for the port */
  704. val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
  705. val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
  706. mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
  707. /* Pick the entry to be accessed in lookup ID decoding table
  708. * according to the way and lkpid.
  709. */
  710. le.lkpid = port->id;
  711. le.way = 0;
  712. le.data = 0;
  713. /* Set initial CPU queue for receiving packets */
  714. le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
  715. le.data |= port->first_rxq;
  716. /* Disable classification engines */
  717. le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
  718. /* Update lookup ID table entry */
  719. mvpp2_cls_lookup_write(port->priv, &le);
  720. mvpp2_port_c2_cls_init(port);
  721. }
  722. u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
  723. {
  724. mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index);
  725. return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
  726. }
  727. static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
  728. {
  729. struct mvpp2_cls_c2_entry c2;
  730. mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
  731. c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
  732. mvpp2_cls_c2_write(port->priv, &c2);
  733. }
  734. static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
  735. {
  736. struct mvpp2_cls_c2_entry c2;
  737. mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
  738. c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
  739. mvpp2_cls_c2_write(port->priv, &c2);
  740. }
/* Enable RSS on the port by setting the RSS bit of its C2 entry */
void mvpp22_rss_enable(struct mvpp2_port *port)
{
	mvpp2_rss_port_c2_enable(port);
}
/* Disable RSS on the port by clearing the RSS bit of its C2 entry */
void mvpp22_rss_disable(struct mvpp2_port *port)
{
	mvpp2_rss_port_c2_disable(port);
}
  749. /* Set CPU queue number for oversize packets */
  750. void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
  751. {
  752. u32 val;
  753. mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
  754. port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
  755. mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
  756. (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
  757. val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
  758. val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
  759. mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
  760. }
  761. static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
  762. {
  763. int nrxqs, cpu, cpus = num_possible_cpus();
  764. /* Number of RXQs per CPU */
  765. nrxqs = port->nrxqs / cpus;
  766. /* CPU that will handle this rx queue */
  767. cpu = rxq / nrxqs;
  768. if (!cpu_online(cpu))
  769. return port->first_rxq;
  770. /* Indirection to better distribute the paquets on the CPUs when
  771. * configuring the RSS queues.
  772. */
  773. return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
  774. }
  775. void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
  776. {
  777. struct mvpp2 *priv = port->priv;
  778. int i;
  779. for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
  780. u32 sel = MVPP22_RSS_INDEX_TABLE(table) |
  781. MVPP22_RSS_INDEX_TABLE_ENTRY(i);
  782. mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
  783. mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
  784. mvpp22_rxfh_indir(port, port->indir[i]));
  785. }
  786. }
  787. int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
  788. {
  789. u16 hash_opts = 0;
  790. switch (info->flow_type) {
  791. case TCP_V4_FLOW:
  792. case UDP_V4_FLOW:
  793. case TCP_V6_FLOW:
  794. case UDP_V6_FLOW:
  795. if (info->data & RXH_L4_B_0_1)
  796. hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
  797. if (info->data & RXH_L4_B_2_3)
  798. hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
  799. /* Fallthrough */
  800. case IPV4_FLOW:
  801. case IPV6_FLOW:
  802. if (info->data & RXH_L2DA)
  803. hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
  804. if (info->data & RXH_VLAN)
  805. hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
  806. if (info->data & RXH_L3_PROTO)
  807. hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
  808. if (info->data & RXH_IP_SRC)
  809. hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA |
  810. MVPP22_CLS_HEK_OPT_IP6SA);
  811. if (info->data & RXH_IP_DST)
  812. hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA |
  813. MVPP22_CLS_HEK_OPT_IP6DA);
  814. break;
  815. default: return -EOPNOTSUPP;
  816. }
  817. return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
  818. }
  819. int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
  820. {
  821. unsigned long hash_opts;
  822. int i;
  823. hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
  824. info->data = 0;
  825. for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
  826. switch (BIT(i)) {
  827. case MVPP22_CLS_HEK_OPT_MAC_DA:
  828. info->data |= RXH_L2DA;
  829. break;
  830. case MVPP22_CLS_HEK_OPT_VLAN:
  831. info->data |= RXH_VLAN;
  832. break;
  833. case MVPP22_CLS_HEK_OPT_L3_PROTO:
  834. info->data |= RXH_L3_PROTO;
  835. break;
  836. case MVPP22_CLS_HEK_OPT_IP4SA:
  837. case MVPP22_CLS_HEK_OPT_IP6SA:
  838. info->data |= RXH_IP_SRC;
  839. break;
  840. case MVPP22_CLS_HEK_OPT_IP4DA:
  841. case MVPP22_CLS_HEK_OPT_IP6DA:
  842. info->data |= RXH_IP_DST;
  843. break;
  844. case MVPP22_CLS_HEK_OPT_L4SIP:
  845. info->data |= RXH_L4_B_0_1;
  846. break;
  847. case MVPP22_CLS_HEK_OPT_L4DIP:
  848. info->data |= RXH_L4_B_2_3;
  849. break;
  850. default:
  851. return -EINVAL;
  852. }
  853. }
  854. return 0;
  855. }
  856. void mvpp22_rss_port_init(struct mvpp2_port *port)
  857. {
  858. struct mvpp2 *priv = port->priv;
  859. int i;
  860. /* Set the table width: replace the whole classifier Rx queue number
  861. * with the ones configured in RSS table entries.
  862. */
  863. mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
  864. mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
  865. /* The default RxQ is used as a key to select the RSS table to use.
  866. * We use one RSS table per port.
  867. */
  868. mvpp2_write(priv, MVPP22_RSS_INDEX,
  869. MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
  870. mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
  871. MVPP22_RSS_TABLE_POINTER(port->id));
  872. /* Configure the first table to evenly distribute the packets across
  873. * real Rx Queues. The table entries map a hash to a port Rx Queue.
  874. */
  875. for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
  876. port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
  877. mvpp22_rss_fill_table(port, port->id);
  878. /* Configure default flows */
  879. mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
  880. mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
  881. mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
  882. mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
  883. mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
  884. mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
  885. }