/*
 * br_vlan.c - bridge VLAN (802.1Q/802.1ad) handling
 */
  1. #include <linux/kernel.h>
  2. #include <linux/netdevice.h>
  3. #include <linux/rtnetlink.h>
  4. #include <linux/slab.h>
  5. #include <net/switchdev.h>
  6. #include "br_private.h"
  7. static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
  8. const void *ptr)
  9. {
  10. const struct net_bridge_vlan *vle = ptr;
  11. u16 vid = *(u16 *)arg->key;
  12. return vle->vid != vid;
  13. }
/* Hash table parameters shared by the per-bridge and per-port vlan tables.
 * Entries are keyed by the 16-bit vid field embedded in net_bridge_vlan.
 */
static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.locks_mul = 1,
	.max_size = VLAN_N_VID,	/* never grow past the 4096-entry vid space */
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};
  24. static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
  25. {
  26. return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
  27. }
/* Make @vid the group's pvid (the vlan untagged ingress traffic maps to).
 * The write barrier orders prior vlan-entry setup before publishing the
 * new pvid to lockless readers — presumably paired with a read barrier on
 * the br_get_pvid() side; confirm against br_private.h.
 */
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return;

	smp_wmb();
	vg->pvid = vid;
}
/* Clear the group's pvid, but only if it is currently @vid (a no-op when
 * another vlan has since become the pvid).
 */
static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}
  42. static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
  43. {
  44. struct net_bridge_vlan_group *vg;
  45. if (br_vlan_is_master(v))
  46. vg = br_vlan_group(v->br);
  47. else
  48. vg = nbp_vlan_group(v->port);
  49. if (flags & BRIDGE_VLAN_INFO_PVID)
  50. __vlan_add_pvid(vg, v->vid);
  51. else
  52. __vlan_delete_pvid(vg, v->vid);
  53. if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
  54. v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  55. else
  56. v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
  57. }
  58. static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
  59. u16 vid, u16 flags)
  60. {
  61. struct switchdev_obj_port_vlan v = {
  62. .obj.orig_dev = dev,
  63. .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
  64. .flags = flags,
  65. .vid_begin = vid,
  66. .vid_end = vid,
  67. };
  68. int err;
  69. /* Try switchdev op first. In case it is not supported, fallback to
  70. * 8021q add.
  71. */
  72. err = switchdev_port_obj_add(dev, &v.obj);
  73. if (err == -EOPNOTSUPP)
  74. return vlan_vid_add(dev, br->vlan_proto, vid);
  75. return err;
  76. }
  77. static void __vlan_add_list(struct net_bridge_vlan *v)
  78. {
  79. struct net_bridge_vlan_group *vg;
  80. struct list_head *headp, *hpos;
  81. struct net_bridge_vlan *vent;
  82. if (br_vlan_is_master(v))
  83. vg = br_vlan_group(v->br);
  84. else
  85. vg = nbp_vlan_group(v->port);
  86. headp = &vg->vlan_list;
  87. list_for_each_prev(hpos, headp) {
  88. vent = list_entry(hpos, struct net_bridge_vlan, vlist);
  89. if (v->vid < vent->vid)
  90. continue;
  91. else
  92. break;
  93. }
  94. list_add_rcu(&v->vlist, hpos);
  95. }
/* Unlink @v from its group's sorted vlan_list, RCU-reader safe. */
static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}
  100. static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
  101. u16 vid)
  102. {
  103. struct switchdev_obj_port_vlan v = {
  104. .obj.orig_dev = dev,
  105. .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
  106. .vid_begin = vid,
  107. .vid_end = vid,
  108. };
  109. int err;
  110. /* Try switchdev op first. In case it is not supported, fallback to
  111. * 8021q del.
  112. */
  113. err = switchdev_port_obj_del(dev, &v.obj);
  114. if (err == -EOPNOTSUPP) {
  115. vlan_vid_del(dev, br->vlan_proto, vid);
  116. return 0;
  117. }
  118. return err;
  119. }
/* Returns the bridge-global (master) vlan for @vid, creating it if it
 * didn't exist.  In all cases a reference is taken on the master vlan
 * before returning; NULL is returned on allocation/insert failure.
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0))
			return NULL;
		/* we just added it under RTNL, so it must be findable */
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
	}
	atomic_inc(&masterv->refcnt);

	return masterv;
}
/* RCU callback: free a master vlan entry once readers are done.
 * The per-cpu stats are owned by the master entry (port vlans share its
 * stats pointer, see __vlan_add()), so they are released here.
 */
static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}
/* Drop a reference on a master vlan.  On the last put the entry is
 * removed from the bridge's hash and list and freed after an RCU grace
 * period.  A no-op for non-master entries.
 */
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (atomic_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}
  162. /* This is the shared VLAN add function which works for both ports and bridge
  163. * devices. There are four possible calls to this function in terms of the
  164. * vlan entry type:
  165. * 1. vlan is being added on a port (no master flags, global entry exists)
  166. * 2. vlan is being added on a bridge (both master and brentry flags)
  167. * 3. vlan is being added on a port, but a global entry didn't exist which
  168. * is being created right now (master flag set, brentry flag unset), the
  169. * global entry is used for global per-vlan features, but not for filtering
  170. * 4. same as 3 but with both master and brentry flags set so the entry
  171. * will be used for filtering in both the port and the bridge
  172. */
  173. static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
  174. {
  175. struct net_bridge_vlan *masterv = NULL;
  176. struct net_bridge_port *p = NULL;
  177. struct net_bridge_vlan_group *vg;
  178. struct net_device *dev;
  179. struct net_bridge *br;
  180. int err;
  181. if (br_vlan_is_master(v)) {
  182. br = v->br;
  183. dev = br->dev;
  184. vg = br_vlan_group(br);
  185. } else {
  186. p = v->port;
  187. br = p->br;
  188. dev = p->dev;
  189. vg = nbp_vlan_group(p);
  190. }
  191. if (p) {
  192. /* Add VLAN to the device filter if it is supported.
  193. * This ensures tagged traffic enters the bridge when
  194. * promiscuous mode is disabled by br_manage_promisc().
  195. */
  196. err = __vlan_vid_add(dev, br, v->vid, flags);
  197. if (err)
  198. goto out;
  199. /* need to work on the master vlan too */
  200. if (flags & BRIDGE_VLAN_INFO_MASTER) {
  201. err = br_vlan_add(br, v->vid, flags |
  202. BRIDGE_VLAN_INFO_BRENTRY);
  203. if (err)
  204. goto out_filt;
  205. }
  206. masterv = br_vlan_get_master(br, v->vid);
  207. if (!masterv)
  208. goto out_filt;
  209. v->brvlan = masterv;
  210. v->stats = masterv->stats;
  211. }
  212. /* Add the dev mac and count the vlan only if it's usable */
  213. if (br_vlan_should_use(v)) {
  214. err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
  215. if (err) {
  216. br_err(br, "failed insert local address into bridge forwarding table\n");
  217. goto out_filt;
  218. }
  219. vg->num_vlans++;
  220. }
  221. err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
  222. br_vlan_rht_params);
  223. if (err)
  224. goto out_fdb_insert;
  225. __vlan_add_list(v);
  226. __vlan_add_flags(v, flags);
  227. out:
  228. return err;
  229. out_fdb_insert:
  230. if (br_vlan_should_use(v)) {
  231. br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
  232. vg->num_vlans--;
  233. }
  234. out_filt:
  235. if (p) {
  236. __vlan_vid_del(dev, br, v->vid);
  237. if (masterv) {
  238. br_vlan_put_master(masterv);
  239. v->brvlan = NULL;
  240. }
  241. }
  242. goto out;
  243. }
/* Tear down a vlan entry (port vlan or bridge master entry).
 * For a port vlan this also removes the vid from the underlying device's
 * filter and drops the reference held on the bridge-global master entry.
 */
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	/* stop mapping untagged traffic to this vlan first */
	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	}

	/* only entries usable for filtering are counted in num_vlans */
	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	/* Master entries are unhashed and freed by br_vlan_put_master()
	 * when the last reference goes away; only non-master entries are
	 * removed from the hash/list here.
	 */
	if (masterv != v) {
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		kfree_rcu(v, rcu);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}
/* Final release of a vlan group.  Callers must have deleted all entries
 * and waited out an RCU grace period after unpublishing the group pointer
 * (see br_vlan_flush()/nbp_vlan_flush()).
 */
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	kfree(vg);
}
/* Delete every vlan entry in the group.  The pvid is cleared up-front so
 * untagged traffic can no longer resolve to a vlan being torn down.
 */
static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}
/* Apply egress vlan policy to @skb: strip the tag for vlans flagged
 * untagged, account tx stats, and drop frames whose vlan is not usable
 * on the egress side.  Returns the skb (possibly modified in place) or
 * NULL if the skb was consumed/dropped.
 */
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point.  The
	 * only exception is the bridge is set in promisc mode and the
	 * packet is destined for the bridge device.  In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br->vlan_stats_enabled) {
		/* per-cpu counters; syncp guards 64-bit reads on 32-bit */
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	/* egress untagged: clear the accel tag so no vlan header is built */
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		skb->vlan_tci = 0;
out:
	return skb;
}
/* Called under RCU */
/* Validate an ingress frame's vlan against @vg.  May untag or retag the
 * skb in place; on return *vid holds the vlan the frame belongs to.
 * Returns true to accept (accounting rx stats if enabled), false when
 * the frame was dropped (skb freed).
 */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			/* treat the foreign-proto tag as payload; frame is
			 * now untagged from the bridge's point of view
			 */
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, We know that skb->vlan_tci had
			 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if stats are disabled we can avoid the lookup */
		if (!br->vlan_stats_enabled)
			return true;
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (br->vlan_stats_enabled) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	return true;

drop:
	kfree_skb(skb);
	return false;
}
  411. bool br_allowed_ingress(const struct net_bridge *br,
  412. struct net_bridge_vlan_group *vg, struct sk_buff *skb,
  413. u16 *vid)
  414. {
  415. /* If VLAN filtering is disabled on the bridge, all packets are
  416. * permitted.
  417. */
  418. if (!br->vlan_enabled) {
  419. BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
  420. return true;
  421. }
  422. return __allowed_ingress(br, vg, skb, vid);
  423. }
  424. /* Called under RCU. */
  425. bool br_allowed_egress(struct net_bridge_vlan_group *vg,
  426. const struct sk_buff *skb)
  427. {
  428. const struct net_bridge_vlan *v;
  429. u16 vid;
  430. /* If this packet was not filtered at input, let it pass */
  431. if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
  432. return true;
  433. br_vlan_get_tag(skb, &vid);
  434. v = br_vlan_find(vg, vid);
  435. if (v && br_vlan_should_use(v))
  436. return true;
  437. return false;
  438. }
  439. /* Called under RCU */
  440. bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
  441. {
  442. struct net_bridge_vlan_group *vg;
  443. struct net_bridge *br = p->br;
  444. /* If filtering was disabled at input, let it pass. */
  445. if (!br->vlan_enabled)
  446. return true;
  447. vg = nbp_vlan_group_rcu(p);
  448. if (!vg || !vg->num_vlans)
  449. return false;
  450. if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
  451. *vid = 0;
  452. if (!*vid) {
  453. *vid = br_get_pvid(vg);
  454. if (!*vid)
  455. return false;
  456. return true;
  457. }
  458. if (br_vlan_find(vg, *vid))
  459. return true;
  460. return false;
  461. }
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 *
 * Create the bridge-global entry for @vid, or update an existing one.
 * An entry kept only as context for port vlans is promoted to a real
 * bridge entry ("brentry") when BRIDGE_VLAN_INFO_BRENTRY is requested.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan) {
		if (!br_vlan_is_brentry(vlan)) {
			/* Trying to change flags of non-existent bridge vlan */
			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
				return -EINVAL;
			/* It was only kept for port vlans, now make it real */
			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
					    vlan->vid);
			if (ret) {
				br_err(br, "failed insert local address into bridge forwarding table\n");
				return ret;
			}
			/* the brentry itself holds a reference */
			atomic_inc(&vlan->refcnt);
			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			vg->num_vlans++;
		}
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	/* the pvid flag is applied by __vlan_add_flags() inside __vlan_add() */
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		atomic_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	}

	return ret;
}
  513. /* Must be protected by RTNL.
  514. * Must be called with vid in range from 1 to 4094 inclusive.
  515. */
  516. int br_vlan_delete(struct net_bridge *br, u16 vid)
  517. {
  518. struct net_bridge_vlan_group *vg;
  519. struct net_bridge_vlan *v;
  520. ASSERT_RTNL();
  521. vg = br_vlan_group(br);
  522. v = br_vlan_find(vg, vid);
  523. if (!v || !br_vlan_is_brentry(v))
  524. return -ENOENT;
  525. br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
  526. br_fdb_delete_by_port(br, NULL, vid, 0);
  527. return __vlan_del(v);
  528. }
/* Destroy the bridge's vlan group: delete all entries, unpublish the
 * group pointer, wait for RCU readers, then free the group.
 */
void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
  539. struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
  540. {
  541. if (!vg)
  542. return NULL;
  543. return br_vlan_lookup(&vg->vlan_hash, vid);
  544. }
  545. /* Must be protected by RTNL. */
  546. static void recalculate_group_addr(struct net_bridge *br)
  547. {
  548. if (br->group_addr_set)
  549. return;
  550. spin_lock_bh(&br->lock);
  551. if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
  552. /* Bridge Group Address */
  553. br->group_addr[5] = 0x00;
  554. } else { /* vlan_enabled && ETH_P_8021AD */
  555. /* Provider Bridge Group Address */
  556. br->group_addr[5] = 0x08;
  557. }
  558. spin_unlock_bh(&br->lock);
  559. }
  560. /* Must be protected by RTNL. */
  561. void br_recalculate_fwd_mask(struct net_bridge *br)
  562. {
  563. if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
  564. br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
  565. else /* vlan_enabled && ETH_P_8021AD */
  566. br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
  567. ~(1u << br->group_addr[5]);
  568. }
/* Enable/disable vlan filtering on the bridge and propagate the setting
 * to offloading hardware via switchdev; a driver returning -EOPNOTSUPP
 * is tolerated.  Promiscuity, group address and forward mask all depend
 * on the filtering state and are recomputed on change.
 */
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br->vlan_enabled == val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br->vlan_enabled = val;
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}
/* Public wrapper around __br_vlan_filter_toggle(). */
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	return __br_vlan_filter_toggle(br, val);
}
/* Switch the bridge vlan protocol (802.1Q <-> 802.1ad).
 * All port vlans are first registered in the device filters under the
 * new proto; only then is the bridge switched over and the old-proto
 * filters removed.  A mid-way failure rolls back everything added.
 */
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	/* undo the vlans added on the failing port so far... */
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	/* ...then on every port that was fully processed before it */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}
  632. int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
  633. {
  634. if (val != ETH_P_8021Q && val != ETH_P_8021AD)
  635. return -EPROTONOSUPPORT;
  636. return __br_vlan_set_proto(br, htons(val));
  637. }
  638. int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
  639. {
  640. switch (val) {
  641. case 0:
  642. case 1:
  643. br->vlan_stats_enabled = val;
  644. break;
  645. default:
  646. return -EINVAL;
  647. }
  648. return 0;
  649. }
  650. static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  651. {
  652. struct net_bridge_vlan *v;
  653. if (vid != vg->pvid)
  654. return false;
  655. v = br_vlan_lookup(&vg->vlan_hash, vid);
  656. if (v && br_vlan_should_use(v) &&
  657. (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
  658. return true;
  659. return false;
  660. }
  661. static void br_vlan_disable_default_pvid(struct net_bridge *br)
  662. {
  663. struct net_bridge_port *p;
  664. u16 pvid = br->default_pvid;
  665. /* Disable default_pvid on all ports where it is still
  666. * configured.
  667. */
  668. if (vlan_default_pvid(br_vlan_group(br), pvid))
  669. br_vlan_delete(br, pvid);
  670. list_for_each_entry(p, &br->port_list, list) {
  671. if (vlan_default_pvid(nbp_vlan_group(p), pvid))
  672. nbp_vlan_delete(p, pvid);
  673. }
  674. br->default_pvid = 0;
  675. }
/* Change br->default_pvid to @pvid, moving the auto-installed pvid vlan
 * on the bridge and every port where the user has not overridden it.
 * The @changed bitmap (bit 0 = the bridge itself, then port numbers)
 * records what was updated so a mid-way failure can be rolled back.
 */
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	u16 old_pvid;
	int err = 0;
	unsigned long *changed;

	if (!pvid) {
		/* pvid 0 means "disable the default pvid" */
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	/* restore the old pvid on every port we already switched over */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED);
		nbp_vlan_delete(p, pvid);
	}

	/* and on the bridge itself, if it was changed */
	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY);
		br_vlan_delete(br, pvid);
	}
	goto out;
}
  750. int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
  751. {
  752. u16 pvid = val;
  753. int err = 0;
  754. if (val >= VLAN_VID_MASK)
  755. return -EINVAL;
  756. if (pvid == br->default_pvid)
  757. goto out;
  758. /* Only allow default pvid change when filtering is disabled */
  759. if (br->vlan_enabled) {
  760. pr_info_once("Please disable vlan filtering to change default_pvid\n");
  761. err = -EPERM;
  762. goto out;
  763. }
  764. err = __br_vlan_set_default_pvid(br, pvid);
  765. out:
  766. return err;
  767. }
  768. int br_vlan_init(struct net_bridge *br)
  769. {
  770. struct net_bridge_vlan_group *vg;
  771. int ret = -ENOMEM;
  772. vg = kzalloc(sizeof(*vg), GFP_KERNEL);
  773. if (!vg)
  774. goto out;
  775. ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
  776. if (ret)
  777. goto err_rhtbl;
  778. INIT_LIST_HEAD(&vg->vlan_list);
  779. br->vlan_proto = htons(ETH_P_8021Q);
  780. br->default_pvid = 1;
  781. rcu_assign_pointer(br->vlgrp, vg);
  782. ret = br_vlan_add(br, 1,
  783. BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
  784. BRIDGE_VLAN_INFO_BRENTRY);
  785. if (ret)
  786. goto err_vlan_add;
  787. out:
  788. return ret;
  789. err_vlan_add:
  790. rhashtable_destroy(&vg->vlan_hash);
  791. err_rhtbl:
  792. kfree(vg);
  793. goto out;
  794. }
/* Set up the per-port vlan group, sync the current filtering setting to
 * the port's driver via switchdev, and install the bridge's default pvid
 * (if any) on the port as an untagged pvid vlan.
 */
int nbp_vlan_init(struct net_bridge_port *p)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = p->br->vlan_enabled,
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	/* drivers without the attr return -EOPNOTSUPP, which is fine */
	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	/* vg was published above: unpublish and wait before destroying */
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	rhashtable_destroy(&vg->vlan_hash);
err_vlan_enabled:
err_rhtbl:
	kfree(vg);

	goto out;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 *
 * Add @vid to @port, or just refresh its flags if the vlan already
 * exists; either way the configuration is also offered to switchdev
 * hardware.
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = port->dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid_begin = vid,
		.vid_end = vid,
	};
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* Pass the flags to the hardware bridge */
		ret = switchdev_port_obj_add(port->dev, &v.obj);
		if (ret && ret != -EOPNOTSUPP)
			return ret;
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	/* __vlan_add() handles the device filter, fdb and hash insertion */
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}
  868. /* Must be protected by RTNL.
  869. * Must be called with vid in range from 1 to 4094 inclusive.
  870. */
  871. int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
  872. {
  873. struct net_bridge_vlan *v;
  874. ASSERT_RTNL();
  875. v = br_vlan_find(nbp_vlan_group(port), vid);
  876. if (!v)
  877. return -ENOENT;
  878. br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
  879. br_fdb_delete_by_port(port->br, port, vid, 0);
  880. return __vlan_del(v);
  881. }
/* Destroy the port's vlan group: delete all entries, unpublish the group
 * pointer, wait for RCU readers, then free the group.
 */
void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
  892. void br_vlan_get_stats(const struct net_bridge_vlan *v,
  893. struct br_vlan_stats *stats)
  894. {
  895. int i;
  896. memset(stats, 0, sizeof(*stats));
  897. for_each_possible_cpu(i) {
  898. u64 rxpackets, rxbytes, txpackets, txbytes;
  899. struct br_vlan_stats *cpu_stats;
  900. unsigned int start;
  901. cpu_stats = per_cpu_ptr(v->stats, i);
  902. do {
  903. start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
  904. rxpackets = cpu_stats->rx_packets;
  905. rxbytes = cpu_stats->rx_bytes;
  906. txbytes = cpu_stats->tx_bytes;
  907. txpackets = cpu_stats->tx_packets;
  908. } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
  909. stats->rx_packets += rxpackets;
  910. stats->rx_bytes += rxbytes;
  911. stats->tx_bytes += txbytes;
  912. stats->tx_packets += txpackets;
  913. }
  914. }