spectrum_span.c

// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */

#include <linux/if_bridge.h>
#include <linux/list.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>

#include "spectrum.h"
#include "spectrum_ipip.h"
#include "spectrum_span.h"
#include "spectrum_switchdev.h"
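
/* Allocate the per-ASIC table of SPAN (mirroring) agents. The number of
 * agents is a device resource (MAX_SPAN); each entry starts out unused,
 * with an empty list of bound ports and a fixed analyzer ID.
 */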
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		INIT_LIST_HEAD(&curr->bound_ports_list);
		curr->id = i;
	}

	return 0;
}

void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}
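
/* The simplest mirror target: another physical port on the same ASIC.
 * The only parameter to resolve is the destination port itself.
 */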
static int
mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}

static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.can_handle = mlxsw_sp_port_dev_check,
	.parms = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};
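
/* Resolve the destination MAC of a mirror nexthop: look up (or create)
 * the neighbour entry for the gateway, kick off resolution, and copy out
 * the hardware address if it is currently valid.
 */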
static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}
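
/* Mark the request as valid but not offloadable. A NULL dest_port makes
 * mlxsw_sp_span_entry_configure() skip the hardware programming, so the
 * entry exists in software only until it can be re-resolved.
 */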
static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}
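
/* When the mirror traffic egresses through a bridge, consult the bridge
 * FDB to find the member port that would transmit toward the nexthop. The
 * 802.1Q variant also resolves the egress VLAN and whether it is tagged;
 * the resolved port must additionally be in STP forwarding state.
 */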
static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}

static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}
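
/* Helpers for peeling one layer off the egress stack: a VLAN device
 * yields its real device and VLAN ID; a LAG yields the first lower device
 * that has carrier, is txable by the LAG policy and is an mlxsw port.
 */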
static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}

static struct net_device *
mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, dev, iter)
		if (netif_carrier_ok(dev) &&
		    net_lag_port_dev_txable(dev) &&
		    mlxsw_sp_port_dev_check(dev))
			return dev;

	return NULL;
}
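
/* Common parameter resolution for encapsulated mirror targets: resolve
 * the nexthop MAC, then unwind the egress stack (VLAN, bridge, VLAN
 * again, LAG) until a physical mlxsw port is reached. Any failure along
 * the way downgrades the entry to unoffloadable instead of erroring out.
 */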
static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

#if IS_ENABLED(CONFIG_NET_IPGRE)
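/* Ask the IPv4 FIB which device and gateway the tunnel's flow would use,
 * and report the source address the route resolved to.
 */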
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	*daddrp = rt->rt_gateway;

out:
	ip_rt_put(rt);
	return dev;
}
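
/* Check whether this gretap device is offloadable at all. Note that for
 * IPv4 tunnels a TOS with bit 0 set (tos 1) is the kernel's "inherit"
 * encoding, which is the only TOS mode the offload supports.
 */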
static int
mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = is_gretap_dev,
	.parms = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
#endif /* CONFIG_NET_IPGRE */

#if IS_ENABLED(CONFIG_IPV6_GRE)
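/* IPv6 counterpart of mlxsw_sp_span_gretap4_route(): resolve the
 * ip6gretap underlay through the IPv6 FIB and report the egress device,
 * source address and gateway.
 */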
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = is_ip6gretap_dev,
	.parms = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
#endif /* CONFIG_IPV6_GRE */
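
/* Mirroring to a VLAN device on top of an mlxsw port: program a
 * REMOTE_ETH (VLAN-tagged, RSPAN-like) analyzer entry toward the real
 * device.
 */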
static bool
mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
}

static int
mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}

static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};
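
/* All recognized mirror target types, in the order in which they are
 * probed by mlxsw_sp_span_entry_ops(). The first ops whose can_handle()
 * matches the target netdevice wins.
 */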
static const
struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};
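
/* Fallback ops installed by mlxsw_sp_span_entry_invalidate(). The entry
 * stays allocated and referenced, but always resolves as unoffloadable
 * and touches no hardware.
 */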
static int
mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};
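
/* Try to program the entry into the hardware. If the destination port
 * belongs to a different mlxsw instance, or the type-specific configure
 * fails, record the entry as unoffloaded (dest_port == NULL) rather than
 * failing the caller.
 */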
static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	if (sparms.dest_port) {
		if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
			netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		} else if (span_entry->ops->configure(span_entry, sparms)) {
			netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		}
	}

	span_entry->parms = sparms;
}

static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].ref_count) {
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	span_entry->ops = ops;
	span_entry->ref_count = 1;
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *to_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->ref_count && curr->to_dev == to_dev)
			return curr;
	}
	return NULL;
}
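
/* Tear down the hardware state of an entry whose target can no longer be
 * mirrored to, and pin it to the nop ops so that subsequent respins keep
 * it unoffloaded until the last reference is dropped.
 */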
void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->ref_count && curr->id == span_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(span_entry);
	return 0;
}

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}
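
/* Size the dedicated egress-mirroring (SBIB) buffer for a given MTU. The
 * 2.5x factor plus one cell leaves slack above a single maximum-sized
 * packet; the exact ratio is presumably dictated by the device, not
 * derived here.
 */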
static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}

int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type,
				    struct mlxsw_sp_port *port,
				    bool bind)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (type == p->type &&
		    port->local_port == p->local_port &&
		    bind == p->bound)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type,
				  bool bind)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	int pa_id = span_entry->id;

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}
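
/* Attach a source port to a SPAN entry: check for a conflicting binding,
 * set up the egress buffer if needed, program the per-port MPAR binding
 * when requested, and record the port on the entry's bound-ports list.
 */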
static int
mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int i;
	int err;

	/* A given (source port, direction) can only be bound to one analyzer,
	 * so if a binding is requested, check for conflicts.
	 */
	if (bind)
		for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
			struct mlxsw_sp_span_entry *curr =
				&mlxsw_sp->span.entries[i];

			if (mlxsw_sp_span_entry_bound_port_find(curr, type,
								port, bind))
				return -EEXIST;
		}

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	if (bind) {
		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
							true);
		if (err)
			goto err_port_bind;
	}

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}

	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	inspected_port->bound = bind;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_inspected_port_alloc:
	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
err_port_bind:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}
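
/* Reverse of mlxsw_sp_span_inspected_port_add(): unbind the port, free
 * the egress buffer, drop the entry reference and remove the record.
 */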
static void
mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
							     port, bind);
	if (!inspected_port)
		return;

	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
		if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
			return mlxsw_sp_span_entry_types[i];

	return NULL;
}
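
/* Set up a mirror session from a port to a target netdevice: pick the
 * matching ops, resolve the parameters, get or create a SPAN entry and
 * bind the source port to it.
 */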
int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
			     const struct net_device *to_dev,
			     enum mlxsw_sp_span_type type, bool bind,
			     int *p_span_id)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_parms sparms = {NULL};
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
		return -EOPNOTSUPP;
	}

	err = ops->parms(to_dev, &sparms);
	if (err)
		return err;

	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
	if (err)
		goto err_port_bind;

	*p_span_id = span_entry->id;
	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
			      enum mlxsw_sp_span_type type, bool bind)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}
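
/* Re-resolve the parameters of every active SPAN entry, e.g. after a
 * routing or neighbour change, and reprogram the hardware for entries
 * whose resolved parameters differ from the current ones.
 */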
void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	ASSERT_RTNL();
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!curr->ref_count)
			continue;

		err = curr->ops->parms(curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
}