rocker_ofdpa.c

/*
 * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
 * implementation
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <net/neighbour.h>
#include <net/switchdev.h>
#include <net/ip_fib.h>
#include <net/arp.h>

#include "rocker.h"
#include "rocker_tlv.h"

struct ofdpa_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};

struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct ofdpa_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
	struct fib_info *fi;
};

struct ofdpa_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};

struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	unsigned long touched;
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};

struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};

struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};

enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,
	OFDPA_CTRL_IPV6_MCAST,
	OFDPA_CTRL_DFLT_BRIDGING,
	OFDPA_CTRL_DFLT_OVS,
	OFDPA_CTRL_MAX,
};

#define OFDPA_INTERNAL_VLAN_ID_BASE	0x0f00
#define OFDPA_N_INTERNAL_VLANS		255
#define OFDPA_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0
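
/* Internal VLAN IDs live in the range 0x0f00-0x0ffe (OFDPA_N_INTERNAL_VLANS
 * of them, see ofdpa_vlan_id_is_internal() below).  Untagged traffic
 * (VID 0) on a port is mapped to that port's internal_vlan_id by
 * ofdpa_port_vid_to_vlan(), which also reports pop_vlan so callers can
 * undo the mapping on egress, and ofdpa_port_vlan_to_vid() translates any
 * internal VLAN ID back to VID 0 when talking to the stack.
 */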

struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
	unsigned long ageing_time;
	bool fib_aborted;
};

struct ofdpa_port {
	struct ofdpa *ofdpa;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u32 pport;
	struct net_device *bridge_dev;
	__be16 internal_vlan_id;
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[OFDPA_CTRL_MAX];
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
};

static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */
enum {
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};

static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
	u16 end = 0xffe;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}

static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
				     u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = ofdpa_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}

static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
				  __be16 vlan_id)
{
	if (ofdpa_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
				const char *kind)
{
	return ofdpa_port->bridge_dev &&
	       !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
}

static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "bridge");
}

static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
}

#define OFDPA_OP_FLAG_REMOVE	BIT(0)
#define OFDPA_OP_FLAG_NOWAIT	BIT(1)
#define OFDPA_OP_FLAG_LEARNED	BIT(2)
#define OFDPA_OP_FLAG_REFRESH	BIT(3)

static bool ofdpa_flags_nowait(int flags)
{
	return flags & OFDPA_OP_FLAG_NOWAIT;
}

static void *__ofdpa_mem_alloc(struct switchdev_trans *trans, int flags,
			       size_t size)
{
	struct switchdev_trans_item *elem = NULL;
	gfp_t gfp_flags = (flags & OFDPA_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a transaction.  If in transaction
	 * commit phase, dequeue the memory from the transaction
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	if (!trans) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
	} else if (switchdev_trans_ph_prepare(trans)) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
	} else {
		elem = switchdev_trans_item_dequeue(trans);
	}

	return elem ? elem + 1 : NULL;
}

static void *ofdpa_kzalloc(struct switchdev_trans *trans, int flags,
			   size_t size)
{
	return __ofdpa_mem_alloc(trans, flags, size);
}

static void *ofdpa_kcalloc(struct switchdev_trans *trans, int flags,
			   size_t n, size_t size)
{
	return __ofdpa_mem_alloc(trans, flags, n * size);
}

static void ofdpa_kfree(struct switchdev_trans *trans, const void *mem)
{
	struct switchdev_trans_item *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (switchdev_trans_ph_prepare(trans))
		return;

	elem = (struct switchdev_trans_item *) mem - 1;
	kfree(elem);
}
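
/* A minimal usage sketch of the helpers above (hypothetical caller, not
 * part of this file), assuming the switchdev prepare/commit model:
 *
 *	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
 *	if (!entry)
 *		return -ENOMEM;		(can only fail in the prepare phase)
 *	...
 *	ofdpa_kfree(trans, entry);	(no-op during prepare)
 *
 * Because the prepare and commit code paths are identical, the memory
 * kzalloc'd and enqueued during prepare is simply dequeued again during
 * commit, so the commit phase itself cannot fail with -ENOMEM.
 */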

/*************************************************************
 * Flow, group, FDB, internal VLAN and neigh command prepares
 *************************************************************/

static int
ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
			       const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			    const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				     const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}

static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
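
/* All command descriptors built here share the same shape: a top-level
 * ROCKER_TLV_CMD_TYPE (flow add/mod/del), then a nested ROCKER_TLV_CMD_INFO
 * container carrying table id, priority, hard-timeout, cookie and the
 * per-table match/action TLVs emitted by the helpers above.  Any
 * rocker_tlv_put*() failure is reported as -EMSGSIZE, meaning the prepared
 * descriptor buffer cannot hold the message.
 */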

static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int
ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
				     struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
			       ROCKER_GROUP_PORT_GET(entry->group_id)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
			      entry->l2_interface.pop_vlan))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}

static int
ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				  const struct ofdpa_group_tbl_entry *entry)
{
	int i;
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}

static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}

static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

/***************************************************
 * Flow, group, FDB, internal VLAN and neigh tables
 ***************************************************/

static struct ofdpa_flow_tbl_entry *
ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
		    const struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(ofdpa->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}
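
/* The flow table is a hashtable keyed on a CRC-32 of the key struct:
 * callers fill in match->key (and optionally a shorter match->key_len),
 * the add/del paths below compute key_crc32 = crc32(~0, &match->key,
 * key_len), and lookups walk the hash bucket comparing the raw key bytes
 * to resolve CRC collisions.
 */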

static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		match->cookie = found->cookie;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		ofdpa_kfree(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(ofdpa_port->rocker_port,
				       ofdpa_flags_nowait(flags),
				       ofdpa_cmd_flow_tbl_add,
				       found, NULL, NULL);
	return 0;
}

static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	ofdpa_kfree(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(ofdpa_port->rocker_port,
					      ofdpa_flags_nowait(flags),
					      ofdpa_cmd_flow_tbl_del,
					      found, NULL, NULL);
		ofdpa_kfree(trans, found);
	}

	return err;
}

static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port,
			     struct switchdev_trans *trans, int flags,
			     struct ofdpa_flow_tbl_entry *entry)
{
	if (flags & OFDPA_OP_FLAG_REMOVE)
		return ofdpa_flow_tbl_del(ofdpa_port, trans, flags, entry);
	else
		return ofdpa_flow_tbl_add(ofdpa_port, trans, flags, entry);
}

static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port,
				  struct switchdev_trans *trans, int flags,
				  u32 in_pport, u32 in_pport_mask,
				  enum rocker_of_dpa_table_id goto_tbl)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_IG_PORT;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
	entry->key.ig_port.in_pport = in_pport;
	entry->key.ig_port.in_pport_mask = in_pport_mask;
	entry->key.ig_port.goto_tbl = goto_tbl;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}

static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       u32 in_pport, __be16 vlan_id,
			       __be16 vlan_id_mask,
			       enum rocker_of_dpa_table_id goto_tbl,
			       bool untagged, __be16 new_vlan_id)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
	entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;
	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}

static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
				   struct switchdev_trans *trans,
				   u32 in_pport, u32 in_pport_mask,
				   __be16 eth_type, const u8 *eth_dst,
				   const u8 *eth_dst_mask, __be16 vlan_id,
				   __be16 vlan_id_mask, bool copy_to_cpu,
				   int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}

static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans, int flags,
				 const u8 *eth_dst, const u8 *eth_dst_mask,
				 __be16 vlan_id, u32 tunnel_id,
				 enum rocker_of_dpa_table_id goto_tbl,
				 u32 group_id, bool copy_to_cpu)
{
	struct ofdpa_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
	bool wild = false;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = OFDPA_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
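
/* Priority selection above, from highest to lowest:
 * - eth_dst given with no mask (exact MAC, e.g. a learned address):
 *   OFDPA_PRIORITY_BRIDGING_VLAN / _TENANT (3)
 * - eth_dst with a mask other than ff:ff:ff:ff:ff:ff ("wild" default):
 *   *_DFLT_WILD (2)
 * - no eth_dst at all, or an all-ones mask: *_DFLT_EXACT (1)
 * The VLAN vs. TENANT variant is chosen by whether vlan_id is non-zero;
 * the tenant case is the one keyed on tunnel_id rather than a VLAN.
 */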

static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
					 struct switchdev_trans *trans,
					 __be16 eth_type, __be32 dst,
					 __be32 dst_mask, u32 priority,
					 enum rocker_of_dpa_table_id goto_tbl,
					 u32 group_id, struct fib_info *fi,
					 int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
				  ucast_routing.group_id);
	entry->fi = fi;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}
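
/* Note the shortened key_len: only the bytes up to (not including)
 * ucast_routing.group_id are hashed and compared by ofdpa_flow_tbl_add()
 * and _del(), so two entries for the same prefix always match each other,
 * and a nexthop (group_id) change is pushed down as a FLOW_MOD of the
 * existing cookie rather than installed as a duplicate flow.
 */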

static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      u32 in_pport, u32 in_pport_mask,
			      const u8 *eth_src, const u8 *eth_src_mask,
			      const u8 *eth_dst, const u8 *eth_dst_mask,
			      __be16 eth_type, __be16 vlan_id,
			      __be16 vlan_id_mask, u8 ip_proto,
			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			      u32 group_id)
{
	u32 priority;
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	priority = OFDPA_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = OFDPA_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = OFDPA_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}

static struct ofdpa_group_tbl_entry *
ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
		     const struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa_group_tbl_entry *found;

	hash_for_each_possible(ofdpa->group_tbl, found,
			       entry, match->group_id) {
		if (found->group_id == match->group_id)
			return found;
	}

	return NULL;
}

static void ofdpa_group_tbl_entry_free(struct switchdev_trans *trans,
				       struct ofdpa_group_tbl_entry *entry)
{
	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		ofdpa_kfree(trans, entry->group_ids);
		break;
	default:
		break;
	}
	ofdpa_kfree(trans, entry);
}

static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(ofdpa_port->rocker_port,
				       ofdpa_flags_nowait(flags),
				       ofdpa_cmd_group_tbl_add,
				       found, NULL, NULL);
	return 0;
}

static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	ofdpa_group_tbl_entry_free(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(ofdpa_port->rocker_port,
					      ofdpa_flags_nowait(flags),
					      ofdpa_cmd_group_tbl_del,
					      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(trans, found);
	}

	return err;
}

static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      struct ofdpa_group_tbl_entry *entry)
{
	if (flags & OFDPA_OP_FLAG_REMOVE)
		return ofdpa_group_tbl_del(ofdpa_port, trans, flags, entry);
	else
		return ofdpa_group_tbl_add(ofdpa_port, trans, flags, entry);
}

static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
				    struct switchdev_trans *trans, int flags,
				    __be16 vlan_id, u32 out_pport,
				    int pop_vlan)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
}

static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
				  struct switchdev_trans *trans,
				  int flags, u8 group_count,
				  const u32 *group_ids, u32 group_id)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = group_id;
	entry->group_count = group_count;

	entry->group_ids = ofdpa_kcalloc(trans, flags,
					 group_count, sizeof(u32));
	if (!entry->group_ids) {
		ofdpa_kfree(trans, entry);
		return -ENOMEM;
	}
	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));

	return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
}

static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
				struct switchdev_trans *trans, int flags,
				__be16 vlan_id, u8 group_count,
				const u32 *group_ids, u32 group_id)
{
	return ofdpa_group_l2_fan_out(ofdpa_port, trans, flags,
				      group_count, group_ids,
				      group_id);
}

static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port,
				  struct switchdev_trans *trans, int flags,
				  u32 index, const u8 *src_mac, const u8 *dst_mac,
				  __be16 vlan_id, bool ttl_check, u32 pport)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	if (src_mac)
		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
	if (dst_mac)
		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
	entry->l3_unicast.vlan_id = vlan_id;
	entry->l3_unicast.ttl_check = ttl_check;
	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

	return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
}
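
/* Group IDs encode their type (and, for L2 interface groups, the VLAN and
 * physical port) via the ROCKER_GROUP_* macros, and groups chain together
 * OF-DPA style: the L3 unicast group built above rewrites source/dest MAC
 * and VLAN, then points through ROCKER_TLV_OF_DPA_GROUP_ID_LOWER at the
 * L2 interface group for (vlan_id, pport), which performs the actual
 * egress and optional VLAN pop.
 */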

static struct ofdpa_neigh_tbl_entry *
ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
{
	struct ofdpa_neigh_tbl_entry *found;

	hash_for_each_possible(ofdpa->neigh_tbl, found,
			       entry, be32_to_cpu(ip_addr))
		if (found->ip_addr == ip_addr)
			return found;

	return NULL;
}

static void ofdpa_neigh_add(struct ofdpa *ofdpa,
			    struct switchdev_trans *trans,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	if (!switchdev_trans_ph_commit(trans))
		entry->index = ofdpa->neigh_tbl_next_index++;
	if (switchdev_trans_ph_prepare(trans))
		return;
	entry->ref_count++;
	hash_add(ofdpa->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}

static void ofdpa_neigh_del(struct switchdev_trans *trans,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	if (switchdev_trans_ph_prepare(trans))
		return;
	if (--entry->ref_count == 0) {
		hash_del(&entry->entry);
		ofdpa_kfree(trans, entry);
	}
}

static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
			       struct switchdev_trans *trans,
			       const u8 *eth_dst, bool ttl_check)
{
	if (eth_dst) {
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = ttl_check;
	} else if (!switchdev_trans_ph_prepare(trans)) {
		entry->ref_count++;
	}
}

static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, trans, entry);
	} else if (removing) {
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(trans, found);
	} else if (updating) {
		ofdpa_neigh_update(found, trans, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, trans, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, NULL, flags);
		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	if (!adding)
		ofdpa_kfree(trans, entry);

	return err;
}
  1254. static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
  1255. struct switchdev_trans *trans,
  1256. __be32 ip_addr)
  1257. {
  1258. struct net_device *dev = ofdpa_port->dev;
  1259. struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
  1260. int err = 0;
  1261. if (!n) {
  1262. n = neigh_create(&arp_tbl, &ip_addr, dev);
  1263. if (IS_ERR(n))
  1264. return PTR_ERR(n);
  1265. }
  1266. /* If the neigh is already resolved, then go ahead and
  1267. * install the entry, otherwise start the ARP process to
  1268. * resolve the neigh.
  1269. */
  1270. if (n->nud_state & NUD_VALID)
  1271. err = ofdpa_port_ipv4_neigh(ofdpa_port, trans, 0,
  1272. ip_addr, n->ha);
  1273. else
  1274. neigh_event_send(n, NULL);
  1275. neigh_release(n);
  1276. return err;
  1277. }
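/* Take (or drop) a nexthop reference on the neighbor entry for ip_addr
 * and return its group index via *index. If the neighbor's MAC is not
 * yet known, ofdpa_port_ipv4_resolve() is used to kick off resolution;
 * the L3 unicast group is then presumably completed later from the
 * neighbor-update path (ofdpa_port_neigh_update).
 */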
  1278. static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
  1279. struct switchdev_trans *trans, int flags,
  1280. __be32 ip_addr, u32 *index)
  1281. {
  1282. struct ofdpa *ofdpa = ofdpa_port->ofdpa;
  1283. struct ofdpa_neigh_tbl_entry *entry;
  1284. struct ofdpa_neigh_tbl_entry *found;
  1285. unsigned long lock_flags;
  1286. bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
  1287. bool updating;
  1288. bool removing;
  1289. bool resolved = true;
  1290. int err = 0;
  1291. entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
  1292. if (!entry)
  1293. return -ENOMEM;
  1294. spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);
  1295. found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
  1296. updating = found && adding;
  1297. removing = found && !adding;
  1298. adding = !found && adding;
  1299. if (adding) {
  1300. entry->ip_addr = ip_addr;
  1301. entry->dev = ofdpa_port->dev;
  1302. ofdpa_neigh_add(ofdpa, trans, entry);
  1303. *index = entry->index;
  1304. resolved = false;
  1305. } else if (removing) {
  1306. *index = found->index;
  1307. ofdpa_neigh_del(trans, found);
  1308. } else if (updating) {
  1309. ofdpa_neigh_update(found, trans, NULL, false);
  1310. resolved = !is_zero_ether_addr(found->eth_dst);
  1311. *index = found->index;
  1312. } else {
  1313. err = -ENOENT;
  1314. }
  1315. spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);
  1316. if (!adding)
  1317. ofdpa_kfree(trans, entry);
  1318. if (err)
  1319. return err;
  1320. /* Resolved means neigh ip_addr is resolved to neigh mac. */
  1321. if (!resolved)
  1322. err = ofdpa_port_ipv4_resolve(ofdpa_port, trans, ip_addr);
  1323. return err;
  1324. }
  1325. static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
  1326. int port_index)
  1327. {
  1328. struct rocker_port *rocker_port;
  1329. rocker_port = ofdpa->rocker->ports[port_index];
  1330. return rocker_port ? rocker_port->wpriv : NULL;
  1331. }
  1332. static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
  1333. struct switchdev_trans *trans,
  1334. int flags, __be16 vlan_id)
  1335. {
  1336. struct ofdpa_port *p;
  1337. const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
  1338. unsigned int port_count = ofdpa->rocker->port_count;
  1339. u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
  1340. u32 *group_ids;
  1341. u8 group_count = 0;
  1342. int err = 0;
  1343. int i;
  1344. group_ids = ofdpa_kcalloc(trans, flags, port_count, sizeof(u32));
  1345. if (!group_ids)
  1346. return -ENOMEM;
  1347. /* Adjust the flood group for this VLAN. The flood group
  1348. * references an L2 interface group for each port in this
  1349. * VLAN.
  1350. */
  1351. for (i = 0; i < port_count; i++) {
  1352. p = ofdpa_port_get(ofdpa, i);
  1353. if (!p)
  1354. continue;
  1355. if (!ofdpa_port_is_bridged(p))
  1356. continue;
  1357. if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
  1358. group_ids[group_count++] =
  1359. ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
  1360. }
  1361. }
  1362. /* If there are no bridged ports in this VLAN, we're done */
  1363. if (group_count == 0)
  1364. goto no_ports_in_vlan;
  1365. err = ofdpa_group_l2_flood(ofdpa_port, trans, flags, vlan_id,
  1366. group_count, group_ids, group_id);
  1367. if (err)
  1368. netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
  1369. no_ports_in_vlan:
  1370. ofdpa_kfree(trans, group_ids);
  1371. return err;
  1372. }
  1373. static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port,
  1374. struct switchdev_trans *trans, int flags,
  1375. __be16 vlan_id, bool pop_vlan)
  1376. {
  1377. const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
  1378. unsigned int port_count = ofdpa->rocker->port_count;
  1379. struct ofdpa_port *p;
  1380. bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
  1381. u32 out_pport;
  1382. int ref = 0;
  1383. int err;
  1384. int i;
  1385. /* An L2 interface group for this port in this VLAN, but
  1386. * only when port STP state is LEARNING|FORWARDING.
  1387. */
  1388. if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
  1389. ofdpa_port->stp_state == BR_STATE_FORWARDING) {
  1390. out_pport = ofdpa_port->pport;
  1391. err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
  1392. vlan_id, out_pport, pop_vlan);
  1393. if (err) {
  1394. netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
  1395. err, out_pport);
  1396. return err;
  1397. }
  1398. }
  1399. /* An L2 interface group for this VLAN to CPU port.
  1400. * Add when first port joins this VLAN and destroy when
  1401. * last port leaves this VLAN.
  1402. */
  1403. for (i = 0; i < port_count; i++) {
  1404. p = ofdpa_port_get(ofdpa, i);
  1405. if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
  1406. ref++;
  1407. }
  1408. if ((!adding || ref != 1) && (adding || ref != 0))
  1409. return 0;
  1410. out_pport = 0;
  1411. err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
  1412. vlan_id, out_pport, pop_vlan);
  1413. if (err) {
  1414. netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
  1415. return err;
  1416. }
  1417. return 0;
  1418. }
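/* Control-plane traffic classes that need per-port, per-VLAN flow
 * entries. Each class is installed through exactly one of three paths,
 * selected in ofdpa_port_ctrl_vlan(): .acl entries go to the ACL policy
 * table, .bridge entries to the bridging table (pointing at the VLAN's
 * flood group), and .term entries to the termination-MAC table.
 */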
  1419. static struct ofdpa_ctrl {
  1420. const u8 *eth_dst;
  1421. const u8 *eth_dst_mask;
  1422. __be16 eth_type;
  1423. bool acl;
  1424. bool bridge;
  1425. bool term;
  1426. bool copy_to_cpu;
  1427. } ofdpa_ctrls[] = {
  1428. [OFDPA_CTRL_LINK_LOCAL_MCAST] = {
  1429. /* pass link local multicast pkts up to CPU for filtering */
  1430. .eth_dst = ll_mac,
  1431. .eth_dst_mask = ll_mask,
  1432. .acl = true,
  1433. },
  1434. [OFDPA_CTRL_LOCAL_ARP] = {
  1435. /* pass local ARP pkts up to CPU */
  1436. .eth_dst = zero_mac,
  1437. .eth_dst_mask = zero_mac,
  1438. .eth_type = htons(ETH_P_ARP),
  1439. .acl = true,
  1440. },
  1441. [OFDPA_CTRL_IPV4_MCAST] = {
  1442. /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
  1443. .eth_dst = ipv4_mcast,
  1444. .eth_dst_mask = ipv4_mask,
  1445. .eth_type = htons(ETH_P_IP),
  1446. .term = true,
  1447. .copy_to_cpu = true,
  1448. },
  1449. [OFDPA_CTRL_IPV6_MCAST] = {
  1450. /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
  1451. .eth_dst = ipv6_mcast,
  1452. .eth_dst_mask = ipv6_mask,
  1453. .eth_type = htons(ETH_P_IPV6),
  1454. .term = true,
  1455. .copy_to_cpu = true,
  1456. },
  1457. [OFDPA_CTRL_DFLT_BRIDGING] = {
  1458. /* flood any pkts on vlan */
  1459. .bridge = true,
  1460. .copy_to_cpu = true,
  1461. },
  1462. [OFDPA_CTRL_DFLT_OVS] = {
  1463. /* pass all pkts up to CPU */
  1464. .eth_dst = zero_mac,
  1465. .eth_dst_mask = zero_mac,
  1466. .acl = true,
  1467. },
  1468. };
  1469. static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port,
  1470. struct switchdev_trans *trans, int flags,
  1471. const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
  1472. {
  1473. u32 in_pport = ofdpa_port->pport;
  1474. u32 in_pport_mask = 0xffffffff;
  1475. u32 out_pport = 0;
  1476. const u8 *eth_src = NULL;
  1477. const u8 *eth_src_mask = NULL;
  1478. __be16 vlan_id_mask = htons(0xffff);
  1479. u8 ip_proto = 0;
  1480. u8 ip_proto_mask = 0;
  1481. u8 ip_tos = 0;
  1482. u8 ip_tos_mask = 0;
  1483. u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
  1484. int err;
  1485. err = ofdpa_flow_tbl_acl(ofdpa_port, trans, flags,
  1486. in_pport, in_pport_mask,
  1487. eth_src, eth_src_mask,
  1488. ctrl->eth_dst, ctrl->eth_dst_mask,
  1489. ctrl->eth_type,
  1490. vlan_id, vlan_id_mask,
  1491. ip_proto, ip_proto_mask,
  1492. ip_tos, ip_tos_mask,
  1493. group_id);
  1494. if (err)
  1495. netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);
  1496. return err;
  1497. }
  1498. static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
  1499. struct switchdev_trans *trans,
  1500. int flags,
  1501. const struct ofdpa_ctrl *ctrl,
  1502. __be16 vlan_id)
  1503. {
  1504. enum rocker_of_dpa_table_id goto_tbl =
  1505. ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
  1506. u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
  1507. u32 tunnel_id = 0;
  1508. int err;
  1509. if (!ofdpa_port_is_bridged(ofdpa_port))
  1510. return 0;
  1511. err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags,
  1512. ctrl->eth_dst, ctrl->eth_dst_mask,
  1513. vlan_id, tunnel_id,
  1514. goto_tbl, group_id, ctrl->copy_to_cpu);
  1515. if (err)
  1516. netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);
  1517. return err;
  1518. }
  1519. static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port,
  1520. struct switchdev_trans *trans, int flags,
  1521. const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
  1522. {
  1523. u32 in_pport_mask = 0xffffffff;
  1524. __be16 vlan_id_mask = htons(0xffff);
  1525. int err;
  1526. if (ntohs(vlan_id) == 0)
  1527. vlan_id = ofdpa_port->internal_vlan_id;
  1528. err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
  1529. ofdpa_port->pport, in_pport_mask,
  1530. ctrl->eth_type, ctrl->eth_dst,
  1531. ctrl->eth_dst_mask, vlan_id,
  1532. vlan_id_mask, ctrl->copy_to_cpu,
  1533. flags);
  1534. if (err)
  1535. netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);
  1536. return err;
  1537. }
  1538. static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port,
  1539. struct switchdev_trans *trans, int flags,
  1540. const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
  1541. {
  1542. if (ctrl->acl)
  1543. return ofdpa_port_ctrl_vlan_acl(ofdpa_port, trans, flags,
  1544. ctrl, vlan_id);
  1545. if (ctrl->bridge)
  1546. return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, trans, flags,
  1547. ctrl, vlan_id);
  1548. if (ctrl->term)
  1549. return ofdpa_port_ctrl_vlan_term(ofdpa_port, trans, flags,
  1550. ctrl, vlan_id);
  1551. return -EOPNOTSUPP;
  1552. }
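/* Two iteration helpers: ofdpa_port_ctrl_vlan_add() installs every
 * enabled control class for a single VLAN, while ofdpa_port_ctrl()
 * installs a single control class for every VLAN currently set in the
 * port's vlan_bitmap.
 */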
  1553. static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
  1554. struct switchdev_trans *trans, int flags,
  1555. __be16 vlan_id)
  1556. {
  1557. int err = 0;
  1558. int i;
  1559. for (i = 0; i < OFDPA_CTRL_MAX; i++) {
  1560. if (ofdpa_port->ctrls[i]) {
  1561. err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
  1562. &ofdpa_ctrls[i], vlan_id);
  1563. if (err)
  1564. return err;
  1565. }
  1566. }
  1567. return err;
  1568. }
  1569. static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
  1570. struct switchdev_trans *trans, int flags,
  1571. const struct ofdpa_ctrl *ctrl)
  1572. {
  1573. u16 vid;
  1574. int err = 0;
  1575. for (vid = 1; vid < VLAN_N_VID; vid++) {
  1576. if (!test_bit(vid, ofdpa_port->vlan_bitmap))
  1577. continue;
  1578. err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
  1579. ctrl, htons(vid));
  1580. if (err)
  1581. break;
  1582. }
  1583. return err;
  1584. }
  1585. static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port,
  1586. struct switchdev_trans *trans, int flags, u16 vid)
  1587. {
  1588. enum rocker_of_dpa_table_id goto_tbl =
  1589. ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
  1590. u32 in_pport = ofdpa_port->pport;
  1591. __be16 vlan_id = htons(vid);
  1592. __be16 vlan_id_mask = htons(0xffff);
  1593. __be16 internal_vlan_id;
  1594. bool untagged;
  1595. bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
  1596. int err;
  1597. internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);
  1598. if (adding &&
  1599. test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
  1600. return 0; /* already added */
  1601. else if (!adding &&
  1602. !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
  1603. return 0; /* already removed */
  1604. change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
  1605. if (adding) {
  1606. err = ofdpa_port_ctrl_vlan_add(ofdpa_port, trans, flags,
  1607. internal_vlan_id);
  1608. if (err) {
  1609. netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
  1610. goto err_out;
  1611. }
  1612. }
  1613. err = ofdpa_port_vlan_l2_groups(ofdpa_port, trans, flags,
  1614. internal_vlan_id, untagged);
  1615. if (err) {
  1616. netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
  1617. goto err_out;
  1618. }
  1619. err = ofdpa_port_vlan_flood_group(ofdpa_port, trans, flags,
  1620. internal_vlan_id);
  1621. if (err) {
  1622. netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
  1623. goto err_out;
  1624. }
  1625. err = ofdpa_flow_tbl_vlan(ofdpa_port, trans, flags,
  1626. in_pport, vlan_id, vlan_id_mask,
  1627. goto_tbl, untagged, internal_vlan_id);
  1628. if (err)
  1629. netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);
  1630. err_out:
  1631. if (switchdev_trans_ph_prepare(trans))
  1632. change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
  1633. return err;
  1634. }
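/* Default ingress port table entry, installed once per port from
 * ofdpa_port_init(). It is the entry point of the OF-DPA pipeline and
 * simply steers frames from the physical ports into the VLAN table.
 */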
  1635. static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port,
  1636. struct switchdev_trans *trans, int flags)
  1637. {
  1638. enum rocker_of_dpa_table_id goto_tbl;
  1639. u32 in_pport;
  1640. u32 in_pport_mask;
  1641. int err;
  1642. /* Normal Ethernet Frames. Matches pkts from any local physical
  1643. * ports. Goto VLAN tbl.
  1644. */
  1645. in_pport = 0;
  1646. in_pport_mask = 0xffff0000;
  1647. goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
  1648. err = ofdpa_flow_tbl_ig_port(ofdpa_port, trans, flags,
  1649. in_pport, in_pport_mask,
  1650. goto_tbl);
  1651. if (err)
  1652. netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);
  1653. return err;
  1654. }
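/* FDB learning. Learned and forgotten addresses are reported to the
 * bridge layer with SWITCHDEV_FDB_ADD/DEL notifiers. The notification is
 * raised from a work item that takes rtnl_lock, rather than directly
 * from the (possibly atomic) caller context.
 */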
  1655. struct ofdpa_fdb_learn_work {
  1656. struct work_struct work;
  1657. struct ofdpa_port *ofdpa_port;
  1658. struct switchdev_trans *trans;
  1659. int flags;
  1660. u8 addr[ETH_ALEN];
  1661. u16 vid;
  1662. };
  1663. static void ofdpa_port_fdb_learn_work(struct work_struct *work)
  1664. {
  1665. const struct ofdpa_fdb_learn_work *lw =
  1666. container_of(work, struct ofdpa_fdb_learn_work, work);
  1667. bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
  1668. bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
  1669. struct switchdev_notifier_fdb_info info;
  1670. info.addr = lw->addr;
  1671. info.vid = lw->vid;
  1672. rtnl_lock();
  1673. if (learned && removing)
  1674. call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
  1675. lw->ofdpa_port->dev, &info.info);
  1676. else if (learned && !removing)
  1677. call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
  1678. lw->ofdpa_port->dev, &info.info);
  1679. rtnl_unlock();
  1680. ofdpa_kfree(lw->trans, work);
  1681. }
  1682. static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
  1683. struct switchdev_trans *trans, int flags,
  1684. const u8 *addr, __be16 vlan_id)
  1685. {
  1686. struct ofdpa_fdb_learn_work *lw;
  1687. enum rocker_of_dpa_table_id goto_tbl =
  1688. ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
  1689. u32 out_pport = ofdpa_port->pport;
  1690. u32 tunnel_id = 0;
  1691. u32 group_id = ROCKER_GROUP_NONE;
  1692. bool syncing = !!(ofdpa_port->brport_flags & BR_LEARNING_SYNC);
  1693. bool copy_to_cpu = false;
  1694. int err;
  1695. if (ofdpa_port_is_bridged(ofdpa_port))
  1696. group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
  1697. if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
  1698. err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, addr,
  1699. NULL, vlan_id, tunnel_id, goto_tbl,
  1700. group_id, copy_to_cpu);
  1701. if (err)
  1702. return err;
  1703. }
  1704. if (!syncing)
  1705. return 0;
  1706. if (!ofdpa_port_is_bridged(ofdpa_port))
  1707. return 0;
  1708. lw = ofdpa_kzalloc(trans, flags, sizeof(*lw));
  1709. if (!lw)
  1710. return -ENOMEM;
  1711. INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);
  1712. lw->ofdpa_port = ofdpa_port;
  1713. lw->trans = trans;
  1714. lw->flags = flags;
  1715. ether_addr_copy(lw->addr, addr);
  1716. lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);
  1717. if (switchdev_trans_ph_prepare(trans))
  1718. ofdpa_kfree(trans, lw);
  1719. else
  1720. schedule_work(&lw->work);
  1721. return 0;
  1722. }
  1723. static struct ofdpa_fdb_tbl_entry *
  1724. ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
  1725. const struct ofdpa_fdb_tbl_entry *match)
  1726. {
  1727. struct ofdpa_fdb_tbl_entry *found;
  1728. hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
  1729. if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
  1730. return found;
  1731. return NULL;
  1732. }
  1733. static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
  1734. struct switchdev_trans *trans,
  1735. const unsigned char *addr,
  1736. __be16 vlan_id, int flags)
  1737. {
  1738. struct ofdpa *ofdpa = ofdpa_port->ofdpa;
  1739. struct ofdpa_fdb_tbl_entry *fdb;
  1740. struct ofdpa_fdb_tbl_entry *found;
  1741. bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
  1742. unsigned long lock_flags;
  1743. fdb = ofdpa_kzalloc(trans, flags, sizeof(*fdb));
  1744. if (!fdb)
  1745. return -ENOMEM;
  1746. fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
  1747. fdb->touched = jiffies;
  1748. fdb->key.ofdpa_port = ofdpa_port;
  1749. ether_addr_copy(fdb->key.addr, addr);
  1750. fdb->key.vlan_id = vlan_id;
  1751. fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
  1752. spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
  1753. found = ofdpa_fdb_tbl_find(ofdpa, fdb);
  1754. if (found) {
  1755. found->touched = jiffies;
  1756. if (removing) {
  1757. ofdpa_kfree(trans, fdb);
  1758. if (!switchdev_trans_ph_prepare(trans))
  1759. hash_del(&found->entry);
  1760. }
  1761. } else if (!removing) {
  1762. if (!switchdev_trans_ph_prepare(trans))
  1763. hash_add(ofdpa->fdb_tbl, &fdb->entry,
  1764. fdb->key_crc32);
  1765. }
  1766. spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
  1767. /* Check if adding and already exists, or removing and can't find */
  1768. if (!found != !removing) {
  1769. ofdpa_kfree(trans, fdb);
  1770. if (!found && removing)
  1771. return 0;
  1772. /* Refreshing existing to update aging timers */
  1773. flags |= OFDPA_OP_FLAG_REFRESH;
  1774. }
  1775. return ofdpa_port_fdb_learn(ofdpa_port, trans, flags, addr, vlan_id);
  1776. }
  1777. static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port,
  1778. struct switchdev_trans *trans, int flags)
  1779. {
  1780. struct ofdpa *ofdpa = ofdpa_port->ofdpa;
  1781. struct ofdpa_fdb_tbl_entry *found;
  1782. unsigned long lock_flags;
  1783. struct hlist_node *tmp;
  1784. int bkt;
  1785. int err = 0;
  1786. if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
  1787. ofdpa_port->stp_state == BR_STATE_FORWARDING)
  1788. return 0;
  1789. flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;
  1790. spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
  1791. hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
  1792. if (found->key.ofdpa_port != ofdpa_port)
  1793. continue;
  1794. if (!found->learned)
  1795. continue;
  1796. err = ofdpa_port_fdb_learn(ofdpa_port, trans, flags,
  1797. found->key.addr,
  1798. found->key.vlan_id);
  1799. if (err)
  1800. goto err_out;
  1801. if (!switchdev_trans_ph_prepare(trans))
  1802. hash_del(&found->entry);
  1803. }
  1804. err_out:
  1805. spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
  1806. return err;
  1807. }
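/* FDB ageing timer. Walks the FDB, removes learned entries whose last
 * touch is older than the owning port's ageing_time, and re-arms itself
 * for the earliest remaining expiry (or one full ageing interval if
 * nothing is close to expiring).
 */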
  1808. static void ofdpa_fdb_cleanup(unsigned long data)
  1809. {
  1810. struct ofdpa *ofdpa = (struct ofdpa *)data;
  1811. struct ofdpa_port *ofdpa_port;
  1812. struct ofdpa_fdb_tbl_entry *entry;
  1813. struct hlist_node *tmp;
  1814. unsigned long next_timer = jiffies + ofdpa->ageing_time;
  1815. unsigned long expires;
  1816. unsigned long lock_flags;
  1817. int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
  1818. OFDPA_OP_FLAG_LEARNED;
  1819. int bkt;
  1820. spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
  1821. hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
  1822. if (!entry->learned)
  1823. continue;
  1824. ofdpa_port = entry->key.ofdpa_port;
  1825. expires = entry->touched + ofdpa_port->ageing_time;
  1826. if (time_before_eq(expires, jiffies)) {
  1827. ofdpa_port_fdb_learn(ofdpa_port, NULL,
  1828. flags, entry->key.addr,
  1829. entry->key.vlan_id);
  1830. hash_del(&entry->entry);
  1831. } else if (time_before(expires, next_timer)) {
  1832. next_timer = expires;
  1833. }
  1834. }
  1835. spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
  1836. mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
  1837. }
  1838. static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
  1839. struct switchdev_trans *trans, int flags,
  1840. __be16 vlan_id)
  1841. {
  1842. u32 in_pport_mask = 0xffffffff;
  1843. __be16 eth_type;
  1844. const u8 *dst_mac_mask = ff_mac;
  1845. __be16 vlan_id_mask = htons(0xffff);
  1846. bool copy_to_cpu = false;
  1847. int err;
  1848. if (ntohs(vlan_id) == 0)
  1849. vlan_id = ofdpa_port->internal_vlan_id;
  1850. eth_type = htons(ETH_P_IP);
  1851. err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
  1852. ofdpa_port->pport, in_pport_mask,
  1853. eth_type, ofdpa_port->dev->dev_addr,
  1854. dst_mac_mask, vlan_id, vlan_id_mask,
  1855. copy_to_cpu, flags);
  1856. if (err)
  1857. return err;
  1858. eth_type = htons(ETH_P_IPV6);
  1859. err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
  1860. ofdpa_port->pport, in_pport_mask,
  1861. eth_type, ofdpa_port->dev->dev_addr,
  1862. dst_mac_mask, vlan_id, vlan_id_mask,
  1863. copy_to_cpu, flags);
  1864. return err;
  1865. }
  1866. static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port,
  1867. struct switchdev_trans *trans, int flags)
  1868. {
  1869. bool pop_vlan;
  1870. u32 out_pport;
  1871. __be16 vlan_id;
  1872. u16 vid;
  1873. int err;
  1874. /* Port will be forwarding-enabled if its STP state is LEARNING
  1875. * or FORWARDING. Traffic from CPU can still egress, regardless of
  1876. * port STP state. Use L2 interface group on port VLANs as a way
  1877. * to toggle port forwarding: if forwarding is disabled, L2
  1878. * interface group will not exist.
  1879. */
  1880. if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
  1881. ofdpa_port->stp_state != BR_STATE_FORWARDING)
  1882. flags |= OFDPA_OP_FLAG_REMOVE;
  1883. out_pport = ofdpa_port->pport;
  1884. for (vid = 1; vid < VLAN_N_VID; vid++) {
  1885. if (!test_bit(vid, ofdpa_port->vlan_bitmap))
  1886. continue;
  1887. vlan_id = htons(vid);
  1888. pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
  1889. err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
  1890. vlan_id, out_pport, pop_vlan);
  1891. if (err) {
  1892. netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
  1893. err, out_pport);
  1894. return err;
  1895. }
  1896. }
  1897. return 0;
  1898. }
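/* React to an STP state change: choose the set of control traffic
 * classes appropriate for the new state, add or remove the corresponding
 * flow entries, flush learned FDB entries once the port stops
 * forwarding, and toggle the per-VLAN L2 interface groups through
 * ofdpa_port_fwding(). During the switchdev prepare phase the previous
 * ctrls and STP state are restored on exit, so the change only sticks at
 * commit time.
 */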
  1899. static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
  1900. struct switchdev_trans *trans,
  1901. int flags, u8 state)
  1902. {
  1903. bool want[OFDPA_CTRL_MAX] = { 0, };
  1904. bool prev_ctrls[OFDPA_CTRL_MAX];
  1905. u8 uninitialized_var(prev_state);
  1906. int err;
  1907. int i;
  1908. if (switchdev_trans_ph_prepare(trans)) {
  1909. memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
  1910. prev_state = ofdpa_port->stp_state;
  1911. }
  1912. if (ofdpa_port->stp_state == state)
  1913. return 0;
  1914. ofdpa_port->stp_state = state;
  1915. switch (state) {
  1916. case BR_STATE_DISABLED:
  1917. /* port is completely disabled */
  1918. break;
  1919. case BR_STATE_LISTENING:
  1920. case BR_STATE_BLOCKING:
  1921. want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
  1922. break;
  1923. case BR_STATE_LEARNING:
  1924. case BR_STATE_FORWARDING:
  1925. if (!ofdpa_port_is_ovsed(ofdpa_port))
  1926. want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
  1927. want[OFDPA_CTRL_IPV4_MCAST] = true;
  1928. want[OFDPA_CTRL_IPV6_MCAST] = true;
  1929. if (ofdpa_port_is_bridged(ofdpa_port))
  1930. want[OFDPA_CTRL_DFLT_BRIDGING] = true;
  1931. else if (ofdpa_port_is_ovsed(ofdpa_port))
  1932. want[OFDPA_CTRL_DFLT_OVS] = true;
  1933. else
  1934. want[OFDPA_CTRL_LOCAL_ARP] = true;
  1935. break;
  1936. }
  1937. for (i = 0; i < OFDPA_CTRL_MAX; i++) {
  1938. if (want[i] != ofdpa_port->ctrls[i]) {
  1939. int ctrl_flags = flags |
  1940. (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
  1941. err = ofdpa_port_ctrl(ofdpa_port, trans, ctrl_flags,
  1942. &ofdpa_ctrls[i]);
  1943. if (err)
  1944. goto err_out;
  1945. ofdpa_port->ctrls[i] = want[i];
  1946. }
  1947. }
  1948. err = ofdpa_port_fdb_flush(ofdpa_port, trans, flags);
  1949. if (err)
  1950. goto err_out;
  1951. err = ofdpa_port_fwding(ofdpa_port, trans, flags);
  1952. err_out:
  1953. if (switchdev_trans_ph_prepare(trans)) {
  1954. memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
  1955. ofdpa_port->stp_state = prev_state;
  1956. }
  1957. return err;
  1958. }
  1959. static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
  1960. {
  1961. if (ofdpa_port_is_bridged(ofdpa_port))
  1962. /* bridge STP will enable port */
  1963. return 0;
  1964. /* port is not bridged, so simulate going to FORWARDING state */
  1965. return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
  1966. BR_STATE_FORWARDING);
  1967. }
  1968. static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
  1969. {
  1970. if (ofdpa_port_is_bridged(ofdpa_port))
  1971. /* bridge STP will disable port */
  1972. return 0;
  1973. /* port is not bridged, so simulate going to DISABLED state */
  1974. return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
  1975. BR_STATE_DISABLED);
  1976. }
  1977. static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
  1978. struct switchdev_trans *trans,
  1979. u16 vid, u16 flags)
  1980. {
  1981. int err;
  1982. /* XXX deal with flags for PVID and untagged */
  1983. err = ofdpa_port_vlan(ofdpa_port, trans, 0, vid);
  1984. if (err)
  1985. return err;
  1986. err = ofdpa_port_router_mac(ofdpa_port, trans, 0, htons(vid));
  1987. if (err)
  1988. ofdpa_port_vlan(ofdpa_port, trans,
  1989. OFDPA_OP_FLAG_REMOVE, vid);
  1990. return err;
  1991. }
  1992. static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
  1993. u16 vid, u16 flags)
  1994. {
  1995. int err;
  1996. err = ofdpa_port_router_mac(ofdpa_port, NULL,
  1997. OFDPA_OP_FLAG_REMOVE, htons(vid));
  1998. if (err)
  1999. return err;
  2000. return ofdpa_port_vlan(ofdpa_port, NULL,
  2001. OFDPA_OP_FLAG_REMOVE, vid);
  2002. }
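/* Internal VLAN management. Each ifindex (a port, or the bridge a port
 * joins) gets an "internal" VLAN ID allocated from a small bitmap,
 * hashed by ifindex and reference-counted, so that ports sharing a
 * bridge share the same internal VLAN; it is used for untagged traffic.
 */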
  2003. static struct ofdpa_internal_vlan_tbl_entry *
  2004. ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
  2005. {
  2006. struct ofdpa_internal_vlan_tbl_entry *found;
  2007. hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
  2008. entry, ifindex) {
  2009. if (found->ifindex == ifindex)
  2010. return found;
  2011. }
  2012. return NULL;
  2013. }
  2014. static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
  2015. int ifindex)
  2016. {
  2017. struct ofdpa *ofdpa = ofdpa_port->ofdpa;
  2018. struct ofdpa_internal_vlan_tbl_entry *entry;
  2019. struct ofdpa_internal_vlan_tbl_entry *found;
  2020. unsigned long lock_flags;
  2021. int i;
  2022. entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  2023. if (!entry)
  2024. return 0;
  2025. entry->ifindex = ifindex;
  2026. spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);
  2027. found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
  2028. if (found) {
  2029. kfree(entry);
  2030. goto found;
  2031. }
  2032. found = entry;
  2033. hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);
  2034. for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
  2035. if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
  2036. continue;
  2037. found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
  2038. goto found;
  2039. }
  2040. netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");
  2041. found:
  2042. found->ref_count++;
  2043. spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
  2044. return found->vlan_id;
  2045. }
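/* Install or remove an IPv4 route. If the (single) nexthop has a
 * gateway reachable via this port, the route points at that nexthop's
 * L3 unicast group; otherwise matching packets are sent to the CPU via
 * the L2 interface group of the port's internal VLAN. Only the first
 * nexthop is used (no ECMP yet, see the XXX below).
 */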
  2046. static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
  2047. struct switchdev_trans *trans, __be32 dst,
  2048. int dst_len, struct fib_info *fi,
  2049. u32 tb_id, int flags)
  2050. {
  2051. const struct fib_nh *nh;
  2052. __be16 eth_type = htons(ETH_P_IP);
  2053. __be32 dst_mask = inet_make_mask(dst_len);
  2054. __be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
  2055. u32 priority = fi->fib_priority;
  2056. enum rocker_of_dpa_table_id goto_tbl =
  2057. ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
  2058. u32 group_id;
  2059. bool nh_on_port;
  2060. bool has_gw;
  2061. u32 index;
  2062. int err;
  2063. /* XXX support ECMP */
  2064. nh = fi->fib_nh;
  2065. nh_on_port = (fi->fib_dev == ofdpa_port->dev);
  2066. has_gw = !!nh->nh_gw;
  2067. if (has_gw && nh_on_port) {
  2068. err = ofdpa_port_ipv4_nh(ofdpa_port, trans, flags,
  2069. nh->nh_gw, &index);
  2070. if (err)
  2071. return err;
  2072. group_id = ROCKER_GROUP_L3_UNICAST(index);
  2073. } else {
  2074. /* Send to CPU for processing */
  2075. group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
  2076. }
  2077. err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst,
  2078. dst_mask, priority, goto_tbl,
  2079. group_id, fi, flags);
  2080. if (err)
  2081. netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
  2082. err, &dst);
  2083. return err;
  2084. }
  2085. static void
  2086. ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
  2087. int ifindex)
  2088. {
  2089. struct ofdpa *ofdpa = ofdpa_port->ofdpa;
  2090. struct ofdpa_internal_vlan_tbl_entry *found;
  2091. unsigned long lock_flags;
  2092. unsigned long bit;
  2093. spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);
  2094. found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
  2095. if (!found) {
  2096. netdev_err(ofdpa_port->dev,
  2097. "ifindex (%d) not found in internal VLAN tbl\n",
  2098. ifindex);
  2099. goto not_found;
  2100. }
  2101. if (--found->ref_count <= 0) {
  2102. bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
  2103. clear_bit(bit, ofdpa->internal_vlan_bitmap);
  2104. hash_del(&found->entry);
  2105. kfree(found);
  2106. }
  2107. not_found:
  2108. spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
  2109. }
  2110. /**********************************
  2111. * Rocker world ops implementation
  2112. **********************************/
  2113. static int ofdpa_init(struct rocker *rocker)
  2114. {
  2115. struct ofdpa *ofdpa = rocker->wpriv;
  2116. ofdpa->rocker = rocker;
  2117. hash_init(ofdpa->flow_tbl);
  2118. spin_lock_init(&ofdpa->flow_tbl_lock);
  2119. hash_init(ofdpa->group_tbl);
  2120. spin_lock_init(&ofdpa->group_tbl_lock);
  2121. hash_init(ofdpa->fdb_tbl);
  2122. spin_lock_init(&ofdpa->fdb_tbl_lock);
  2123. hash_init(ofdpa->internal_vlan_tbl);
  2124. spin_lock_init(&ofdpa->internal_vlan_tbl_lock);
  2125. hash_init(ofdpa->neigh_tbl);
  2126. spin_lock_init(&ofdpa->neigh_tbl_lock);
  2127. setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup,
  2128. (unsigned long) ofdpa);
  2129. mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);
  2130. ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;
  2131. return 0;
  2132. }
  2133. static void ofdpa_fini(struct rocker *rocker)
  2134. {
  2135. struct ofdpa *ofdpa = rocker->wpriv;
  2136. unsigned long flags;
  2137. struct ofdpa_flow_tbl_entry *flow_entry;
  2138. struct ofdpa_group_tbl_entry *group_entry;
  2139. struct ofdpa_fdb_tbl_entry *fdb_entry;
  2140. struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
  2141. struct ofdpa_neigh_tbl_entry *neigh_entry;
  2142. struct hlist_node *tmp;
  2143. int bkt;
  2144. del_timer_sync(&ofdpa->fdb_cleanup_timer);
  2145. spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
  2146. hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
  2147. hash_del(&flow_entry->entry);
  2148. spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
  2149. spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
  2150. hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
  2151. hash_del(&group_entry->entry);
  2152. spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);
  2153. spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
  2154. hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
  2155. hash_del(&fdb_entry->entry);
  2156. spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);
  2157. spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
  2158. hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
  2159. tmp, internal_vlan_entry, entry)
  2160. hash_del(&internal_vlan_entry->entry);
  2161. spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);
  2162. spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
  2163. hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
  2164. hash_del(&neigh_entry->entry);
  2165. spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
  2166. }
  2167. static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
  2168. {
  2169. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2170. ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
  2171. ofdpa_port->rocker_port = rocker_port;
  2172. ofdpa_port->dev = rocker_port->dev;
  2173. ofdpa_port->pport = rocker_port->pport;
  2174. ofdpa_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
  2175. ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
  2176. return 0;
  2177. }
  2178. static int ofdpa_port_init(struct rocker_port *rocker_port)
  2179. {
  2180. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2181. int err;
  2182. rocker_port_set_learning(rocker_port,
  2183. !!(ofdpa_port->brport_flags & BR_LEARNING));
  2184. err = ofdpa_port_ig_tbl(ofdpa_port, NULL, 0);
  2185. if (err) {
  2186. netdev_err(ofdpa_port->dev, "install ig port table failed\n");
  2187. return err;
  2188. }
  2189. ofdpa_port->internal_vlan_id =
  2190. ofdpa_port_internal_vlan_id_get(ofdpa_port,
  2191. ofdpa_port->dev->ifindex);
  2192. err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
  2193. if (err) {
  2194. netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
  2195. goto err_untagged_vlan;
  2196. }
  2197. return 0;
  2198. err_untagged_vlan:
  2199. ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
  2200. return err;
  2201. }
  2202. static void ofdpa_port_fini(struct rocker_port *rocker_port)
  2203. {
  2204. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2205. ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
  2206. }
  2207. static int ofdpa_port_open(struct rocker_port *rocker_port)
  2208. {
  2209. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2210. return ofdpa_port_fwd_enable(ofdpa_port, 0);
  2211. }
  2212. static void ofdpa_port_stop(struct rocker_port *rocker_port)
  2213. {
  2214. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2215. ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
  2216. }
  2217. static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
  2218. u8 state,
  2219. struct switchdev_trans *trans)
  2220. {
  2221. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2222. return ofdpa_port_stp_update(ofdpa_port, trans, 0, state);
  2223. }
  2224. static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
  2225. unsigned long brport_flags,
  2226. struct switchdev_trans *trans)
  2227. {
  2228. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2229. unsigned long orig_flags;
  2230. int err = 0;
  2231. orig_flags = ofdpa_port->brport_flags;
  2232. ofdpa_port->brport_flags = brport_flags;
  2233. if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
  2234. !switchdev_trans_ph_prepare(trans))
  2235. err = rocker_port_set_learning(ofdpa_port->rocker_port,
  2236. !!(ofdpa_port->brport_flags & BR_LEARNING));
  2237. if (switchdev_trans_ph_prepare(trans))
  2238. ofdpa_port->brport_flags = orig_flags;
  2239. return err;
  2240. }
  2241. static int
  2242. ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
  2243. unsigned long *p_brport_flags)
  2244. {
  2245. const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2246. *p_brport_flags = ofdpa_port->brport_flags;
  2247. return 0;
  2248. }
  2249. static int
  2250. ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
  2251. u32 ageing_time,
  2252. struct switchdev_trans *trans)
  2253. {
  2254. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2255. struct ofdpa *ofdpa = ofdpa_port->ofdpa;
  2256. if (!switchdev_trans_ph_prepare(trans)) {
  2257. ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
  2258. if (ofdpa_port->ageing_time < ofdpa->ageing_time)
  2259. ofdpa->ageing_time = ofdpa_port->ageing_time;
  2260. mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
  2261. }
  2262. return 0;
  2263. }
  2264. static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
  2265. const struct switchdev_obj_port_vlan *vlan,
  2266. struct switchdev_trans *trans)
  2267. {
  2268. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2269. u16 vid;
  2270. int err;
  2271. for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
  2272. err = ofdpa_port_vlan_add(ofdpa_port, trans, vid, vlan->flags);
  2273. if (err)
  2274. return err;
  2275. }
  2276. return 0;
  2277. }
  2278. static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
  2279. const struct switchdev_obj_port_vlan *vlan)
  2280. {
  2281. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2282. u16 vid;
  2283. int err;
  2284. for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
  2285. err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
  2286. if (err)
  2287. return err;
  2288. }
  2289. return 0;
  2290. }
  2291. static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port,
  2292. struct switchdev_obj_port_vlan *vlan,
  2293. switchdev_obj_dump_cb_t *cb)
  2294. {
  2295. const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2296. u16 vid;
  2297. int err = 0;
  2298. for (vid = 1; vid < VLAN_N_VID; vid++) {
  2299. if (!test_bit(vid, ofdpa_port->vlan_bitmap))
  2300. continue;
  2301. vlan->flags = 0;
  2302. if (ofdpa_vlan_id_is_internal(htons(vid)))
  2303. vlan->flags |= BRIDGE_VLAN_INFO_PVID;
  2304. vlan->vid_begin = vlan->vid_end = vid;
  2305. err = cb(&vlan->obj);
  2306. if (err)
  2307. break;
  2308. }
  2309. return err;
  2310. }
  2311. static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
  2312. const struct switchdev_obj_port_fdb *fdb,
  2313. struct switchdev_trans *trans)
  2314. {
  2315. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2316. __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
  2317. if (!ofdpa_port_is_bridged(ofdpa_port))
  2318. return -EINVAL;
  2319. return ofdpa_port_fdb(ofdpa_port, trans, fdb->addr, vlan_id, 0);
  2320. }
  2321. static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
  2322. const struct switchdev_obj_port_fdb *fdb)
  2323. {
  2324. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2325. __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
  2326. int flags = OFDPA_OP_FLAG_REMOVE;
  2327. if (!ofdpa_port_is_bridged(ofdpa_port))
  2328. return -EINVAL;
  2329. return ofdpa_port_fdb(ofdpa_port, NULL, fdb->addr, vlan_id, flags);
  2330. }
  2331. static int ofdpa_port_obj_fdb_dump(const struct rocker_port *rocker_port,
  2332. struct switchdev_obj_port_fdb *fdb,
  2333. switchdev_obj_dump_cb_t *cb)
  2334. {
  2335. const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2336. struct ofdpa *ofdpa = ofdpa_port->ofdpa;
  2337. struct ofdpa_fdb_tbl_entry *found;
  2338. struct hlist_node *tmp;
  2339. unsigned long lock_flags;
  2340. int bkt;
  2341. int err = 0;
  2342. spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
  2343. hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
  2344. if (found->key.ofdpa_port != ofdpa_port)
  2345. continue;
  2346. ether_addr_copy(fdb->addr, found->key.addr);
  2347. fdb->ndm_state = NUD_REACHABLE;
  2348. fdb->vid = ofdpa_port_vlan_to_vid(ofdpa_port,
  2349. found->key.vlan_id);
  2350. err = cb(&fdb->obj);
  2351. if (err)
  2352. break;
  2353. }
  2354. spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
  2355. return err;
  2356. }
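/* Bridge join/leave. Joining a bridge switches the port's internal VLAN
 * from a per-port ID to the bridge's ID (and back again on leave), so
 * the untagged VLAN is removed before the switch and re-added after it.
 */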
  2357. static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
  2358. struct net_device *bridge)
  2359. {
  2360. int err;
  2361. /* Port is joining bridge, so the internal VLAN for the
  2362. * port is going to change to the bridge internal VLAN.
  2363. * Let's remove untagged VLAN (vid=0) from port and
  2364. * re-add once internal VLAN has changed.
  2365. */
  2366. err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
  2367. if (err)
  2368. return err;
  2369. ofdpa_port_internal_vlan_id_put(ofdpa_port,
  2370. ofdpa_port->dev->ifindex);
  2371. ofdpa_port->internal_vlan_id =
  2372. ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);
  2373. ofdpa_port->bridge_dev = bridge;
  2374. return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
  2375. }
  2376. static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
  2377. {
  2378. int err;
  2379. err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
  2380. if (err)
  2381. return err;
  2382. ofdpa_port_internal_vlan_id_put(ofdpa_port,
  2383. ofdpa_port->bridge_dev->ifindex);
  2384. ofdpa_port->internal_vlan_id =
  2385. ofdpa_port_internal_vlan_id_get(ofdpa_port,
  2386. ofdpa_port->dev->ifindex);
  2387. ofdpa_port->bridge_dev = NULL;
  2388. err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
  2389. if (err)
  2390. return err;
  2391. if (ofdpa_port->dev->flags & IFF_UP)
  2392. err = ofdpa_port_fwd_enable(ofdpa_port, 0);
  2393. return err;
  2394. }
  2395. static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
  2396. struct net_device *master)
  2397. {
  2398. int err;
  2399. ofdpa_port->bridge_dev = master;
  2400. err = ofdpa_port_fwd_disable(ofdpa_port, 0);
  2401. if (err)
  2402. return err;
  2403. err = ofdpa_port_fwd_enable(ofdpa_port, 0);
  2404. return err;
  2405. }
  2406. static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
  2407. struct net_device *master)
  2408. {
  2409. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2410. int err = 0;
  2411. if (netif_is_bridge_master(master))
  2412. err = ofdpa_port_bridge_join(ofdpa_port, master);
  2413. else if (netif_is_ovs_master(master))
  2414. err = ofdpa_port_ovs_changed(ofdpa_port, master);
  2415. return err;
  2416. }
  2417. static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
  2418. struct net_device *master)
  2419. {
  2420. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2421. int err = 0;
  2422. if (ofdpa_port_is_bridged(ofdpa_port))
  2423. err = ofdpa_port_bridge_leave(ofdpa_port);
  2424. else if (ofdpa_port_is_ovsed(ofdpa_port))
  2425. err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
  2426. return err;
  2427. }
  2428. static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
  2429. struct neighbour *n)
  2430. {
  2431. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2432. int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
  2433. OFDPA_OP_FLAG_NOWAIT;
  2434. __be32 ip_addr = *(__be32 *) n->primary_key;
  2435. return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
  2436. }
  2437. static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
  2438. struct neighbour *n)
  2439. {
  2440. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2441. int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
  2442. __be32 ip_addr = *(__be32 *) n->primary_key;
  2443. return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
  2444. }
  2445. static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
  2446. const unsigned char *addr,
  2447. __be16 vlan_id)
  2448. {
  2449. struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
  2450. int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;
  2451. if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
  2452. ofdpa_port->stp_state != BR_STATE_FORWARDING)
  2453. return 0;
  2454. return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags);
  2455. }
  2456. static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
  2457. struct rocker *rocker)
  2458. {
  2459. struct rocker_port *rocker_port;
  2460. rocker_port = rocker_port_dev_lower_find(dev, rocker);
  2461. return rocker_port ? rocker_port->wpriv : NULL;
  2462. }
  2463. static int ofdpa_fib4_add(struct rocker *rocker,
  2464. const struct fib_entry_notifier_info *fen_info)
  2465. {
  2466. struct ofdpa *ofdpa = rocker->wpriv;
  2467. struct ofdpa_port *ofdpa_port;
  2468. int err;
  2469. if (ofdpa->fib_aborted)
  2470. return 0;
  2471. ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
  2472. if (!ofdpa_port)
  2473. return 0;
  2474. err = ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
  2475. fen_info->dst_len, fen_info->fi,
  2476. fen_info->tb_id, 0);
  2477. if (err)
  2478. return err;
  2479. fib_info_offload_inc(fen_info->fi);
  2480. return 0;
  2481. }
  2482. static int ofdpa_fib4_del(struct rocker *rocker,
  2483. const struct fib_entry_notifier_info *fen_info)
  2484. {
  2485. struct ofdpa *ofdpa = rocker->wpriv;
  2486. struct ofdpa_port *ofdpa_port;
  2487. if (ofdpa->fib_aborted)
  2488. return 0;
  2489. ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
  2490. if (!ofdpa_port)
  2491. return 0;
  2492. fib_info_offload_dec(fen_info->fi);
  2493. return ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst),
  2494. fen_info->dst_len, fen_info->fi,
  2495. fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
  2496. }
  2497. static void ofdpa_fib4_abort(struct rocker *rocker)
  2498. {
  2499. struct ofdpa *ofdpa = rocker->wpriv;
  2500. struct ofdpa_port *ofdpa_port;
  2501. struct ofdpa_flow_tbl_entry *flow_entry;
  2502. struct hlist_node *tmp;
  2503. unsigned long flags;
  2504. int bkt;
  2505. if (ofdpa->fib_aborted)
  2506. return;
  2507. spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
  2508. hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
  2509. if (flow_entry->key.tbl_id !=
  2510. ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
  2511. continue;
  2512. ofdpa_port = ofdpa_port_dev_lower_find(flow_entry->fi->fib_dev,
  2513. rocker);
  2514. if (!ofdpa_port)
  2515. continue;
  2516. fib_info_offload_dec(flow_entry->fi);
  2517. ofdpa_flow_tbl_del(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE,
  2518. flow_entry);
  2519. }
  2520. spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
  2521. ofdpa->fib_aborted = true;
  2522. }
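/* World registration. The rocker core presumably dispatches device and
 * port lifecycle, switchdev attr/object, neighbor and FIB events through
 * this ops table, keeping the OF-DPA specifics above out of the common
 * rocker code; how a world is selected from .kind / .mode lives outside
 * this file.
 */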
  2523. struct rocker_world_ops rocker_ofdpa_ops = {
  2524. .kind = "ofdpa",
  2525. .priv_size = sizeof(struct ofdpa),
  2526. .port_priv_size = sizeof(struct ofdpa_port),
  2527. .mode = ROCKER_PORT_MODE_OF_DPA,
  2528. .init = ofdpa_init,
  2529. .fini = ofdpa_fini,
  2530. .port_pre_init = ofdpa_port_pre_init,
  2531. .port_init = ofdpa_port_init,
  2532. .port_fini = ofdpa_port_fini,
  2533. .port_open = ofdpa_port_open,
  2534. .port_stop = ofdpa_port_stop,
  2535. .port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
  2536. .port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
  2537. .port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
  2538. .port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
  2539. .port_obj_vlan_add = ofdpa_port_obj_vlan_add,
  2540. .port_obj_vlan_del = ofdpa_port_obj_vlan_del,
  2541. .port_obj_vlan_dump = ofdpa_port_obj_vlan_dump,
  2542. .port_obj_fdb_add = ofdpa_port_obj_fdb_add,
  2543. .port_obj_fdb_del = ofdpa_port_obj_fdb_del,
  2544. .port_obj_fdb_dump = ofdpa_port_obj_fdb_dump,
  2545. .port_master_linked = ofdpa_port_master_linked,
  2546. .port_master_unlinked = ofdpa_port_master_unlinked,
  2547. .port_neigh_update = ofdpa_port_neigh_update,
  2548. .port_neigh_destroy = ofdpa_port_neigh_destroy,
  2549. .port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
  2550. .fib4_add = ofdpa_fib4_add,
  2551. .fib4_del = ofdpa_fib4_del,
  2552. .fib4_abort = ofdpa_fib4_abort,
  2553. };