// SPDX-License-Identifier: GPL-2.0
/*
 * Bridge per-VLAN handling: VLAN group management, ingress/egress
 * filtering and default PVID configuration.
 */
  1. #include <linux/kernel.h>
  2. #include <linux/netdevice.h>
  3. #include <linux/rtnetlink.h>
  4. #include <linux/slab.h>
  5. #include <net/switchdev.h>
  6. #include "br_private.h"
  7. #include "br_private_tunnel.h"
  8. static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
  9. const void *ptr)
  10. {
  11. const struct net_bridge_vlan *vle = ptr;
  12. u16 vid = *(u16 *)arg->key;
  13. return vle->vid != vid;
  14. }
/* Hash table parameters for the per-bridge/per-port VLAN tables, keyed by
 * the 16-bit VID embedded in struct net_bridge_vlan.
 */
static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.locks_mul = 1,
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};
  25. static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
  26. {
  27. return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
  28. }
  29. static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  30. {
  31. if (vg->pvid == vid)
  32. return false;
  33. smp_wmb();
  34. vg->pvid = vid;
  35. return true;
  36. }
  37. static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  38. {
  39. if (vg->pvid != vid)
  40. return false;
  41. smp_wmb();
  42. vg->pvid = 0;
  43. return true;
  44. }
  45. /* return true if anything changed, false otherwise */
  46. static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
  47. {
  48. struct net_bridge_vlan_group *vg;
  49. u16 old_flags = v->flags;
  50. bool ret;
  51. if (br_vlan_is_master(v))
  52. vg = br_vlan_group(v->br);
  53. else
  54. vg = nbp_vlan_group(v->port);
  55. if (flags & BRIDGE_VLAN_INFO_PVID)
  56. ret = __vlan_add_pvid(vg, v->vid);
  57. else
  58. ret = __vlan_delete_pvid(vg, v->vid);
  59. if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
  60. v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  61. else
  62. v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
  63. return ret || !!(old_flags ^ v->flags);
  64. }
  65. static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
  66. u16 vid, u16 flags)
  67. {
  68. int err;
  69. /* Try switchdev op first. In case it is not supported, fallback to
  70. * 8021q add.
  71. */
  72. err = br_switchdev_port_vlan_add(dev, vid, flags);
  73. if (err == -EOPNOTSUPP)
  74. return vlan_vid_add(dev, br->vlan_proto, vid);
  75. return err;
  76. }
/* Insert @v into its group's vlan_list, keeping the list sorted by VID in
 * ascending order. The list is walked backwards so the common case of
 * appending the highest VID terminates immediately.
 */
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	/* find the last entry with a smaller (or equal) VID */
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;
	}
	/* RCU-safe insert after hpos (readers may walk concurrently) */
	list_add_rcu(&v->vlist, hpos);
}
/* Unlink @v from its group's sorted vlan_list; RCU readers may still see
 * the entry until a grace period elapses.
 */
static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}
  100. static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
  101. u16 vid)
  102. {
  103. int err;
  104. /* Try switchdev op first. In case it is not supported, fallback to
  105. * 8021q del.
  106. */
  107. err = br_switchdev_port_vlan_del(dev, vid);
  108. if (err == -EOPNOTSUPP) {
  109. vlan_vid_del(dev, br->vlan_proto, vid);
  110. return 0;
  111. }
  112. return err;
  113. }
/* Returns a master vlan, if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 * Returns NULL if the global context could not be created.
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		bool changed;

		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0, &changed))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
		/* first port reference on a freshly created master entry */
		refcount_set(&masterv->refcnt, 1);
		return masterv;
	}
	refcount_inc(&masterv->refcnt);
	return masterv;
}
/* RCU callback freeing a master (bridge-level) vlan entry together with its
 * per-cpu stats, after the last reference was dropped.
 */
static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	/* only the master owns the stats; port vlans merely point at them */
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}
/* Drop a reference on @masterv; on the last put, remove it from the bridge
 * vlan group and free it after an RCU grace period. No-op for port vlans.
 */
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		/* stats are shared with port vlans, so free via RCU */
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}
  159. /* This is the shared VLAN add function which works for both ports and bridge
  160. * devices. There are four possible calls to this function in terms of the
  161. * vlan entry type:
  162. * 1. vlan is being added on a port (no master flags, global entry exists)
  163. * 2. vlan is being added on a bridge (both master and brentry flags)
  164. * 3. vlan is being added on a port, but a global entry didn't exist which
  165. * is being created right now (master flag set, brentry flag unset), the
  166. * global entry is used for global per-vlan features, but not for filtering
  167. * 4. same as 3 but with both master and brentry flags set so the entry
  168. * will be used for filtering in both the port and the bridge
  169. */
  170. static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
  171. {
  172. struct net_bridge_vlan *masterv = NULL;
  173. struct net_bridge_port *p = NULL;
  174. struct net_bridge_vlan_group *vg;
  175. struct net_device *dev;
  176. struct net_bridge *br;
  177. int err;
  178. if (br_vlan_is_master(v)) {
  179. br = v->br;
  180. dev = br->dev;
  181. vg = br_vlan_group(br);
  182. } else {
  183. p = v->port;
  184. br = p->br;
  185. dev = p->dev;
  186. vg = nbp_vlan_group(p);
  187. }
  188. if (p) {
  189. /* Add VLAN to the device filter if it is supported.
  190. * This ensures tagged traffic enters the bridge when
  191. * promiscuous mode is disabled by br_manage_promisc().
  192. */
  193. err = __vlan_vid_add(dev, br, v->vid, flags);
  194. if (err)
  195. goto out;
  196. /* need to work on the master vlan too */
  197. if (flags & BRIDGE_VLAN_INFO_MASTER) {
  198. bool changed;
  199. err = br_vlan_add(br, v->vid,
  200. flags | BRIDGE_VLAN_INFO_BRENTRY,
  201. &changed);
  202. if (err)
  203. goto out_filt;
  204. }
  205. masterv = br_vlan_get_master(br, v->vid);
  206. if (!masterv)
  207. goto out_filt;
  208. v->brvlan = masterv;
  209. v->stats = masterv->stats;
  210. } else {
  211. err = br_switchdev_port_vlan_add(dev, v->vid, flags);
  212. if (err && err != -EOPNOTSUPP)
  213. goto out;
  214. }
  215. /* Add the dev mac and count the vlan only if it's usable */
  216. if (br_vlan_should_use(v)) {
  217. err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
  218. if (err) {
  219. br_err(br, "failed insert local address into bridge forwarding table\n");
  220. goto out_filt;
  221. }
  222. vg->num_vlans++;
  223. }
  224. err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
  225. br_vlan_rht_params);
  226. if (err)
  227. goto out_fdb_insert;
  228. __vlan_add_list(v);
  229. __vlan_add_flags(v, flags);
  230. out:
  231. return err;
  232. out_fdb_insert:
  233. if (br_vlan_should_use(v)) {
  234. br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
  235. vg->num_vlans--;
  236. }
  237. out_filt:
  238. if (p) {
  239. __vlan_vid_del(dev, br, v->vid);
  240. if (masterv) {
  241. br_vlan_put_master(masterv);
  242. v->brvlan = NULL;
  243. }
  244. } else {
  245. br_switchdev_port_vlan_del(dev, v->vid);
  246. }
  247. goto out;
  248. }
/* Shared VLAN delete for both port and bridge entries. Clears the PVID if
 * it pointed at @v, removes the device filter / switchdev entry, and frees
 * the entry itself only if it is a port vlan (masterv != v); the master is
 * released via its refcount.
 */
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	} else {
		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
		if (err && err != -EOPNOTSUPP)
			goto out;
		/* -EOPNOTSUPP just means no offload; not an error */
		err = 0;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	/* free only port vlans here; the master is freed on last put below */
	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		kfree_rcu(v, rcu);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}
/* Final teardown of a vlan group; must only run once all entries are gone
 * and no RCU readers can reference the group any longer.
 */
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}
/* Delete every vlan entry in @vg; clears the PVID first so no entry is
 * deleted while still being the group's PVID.
 */
static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}
/* Egress VLAN handling: untag the frame if the vlan is configured untagged,
 * update tx stats, and apply tunnel encapsulation for BR_VLAN_TUNNEL ports.
 * Returns the (possibly modified) skb, or NULL if it was dropped/consumed.
 */
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id.  If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point.  The
	 * only exception is the bridge is set in promisc mode and the
	 * packet is destined for the bridge device.  In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br->vlan_stats_enabled) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	/* strip the hw-accel tag for untagged-egress vlans */
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		skb->vlan_tci = 0;

	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}
/* Called under RCU */
/* Ingress VLAN filtering: normalizes the frame's tag state, assigns the
 * PVID to untagged/priority-tagged frames, verifies the vlan is configured
 * for filtering and accounts rx stats. Returns false (and frees the skb)
 * when the frame must be dropped; *vid holds the resolved VID on success.
 */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		/* NOTE(review): skb_vlan_untag() may return a different skb;
		 * the caller's pointer is not updated - confirm callers never
		 * touch the skb after a false return.
		 */
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, We know that skb->vlan_tci had
			 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if stats are disabled we can avoid the lookup */
		if (!br->vlan_stats_enabled)
			return true;
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (br->vlan_stats_enabled) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	return true;

drop:
	kfree_skb(skb);
	return false;
}
  429. bool br_allowed_ingress(const struct net_bridge *br,
  430. struct net_bridge_vlan_group *vg, struct sk_buff *skb,
  431. u16 *vid)
  432. {
  433. /* If VLAN filtering is disabled on the bridge, all packets are
  434. * permitted.
  435. */
  436. if (!br->vlan_enabled) {
  437. BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
  438. return true;
  439. }
  440. return __allowed_ingress(br, vg, skb, vid);
  441. }
  442. /* Called under RCU. */
  443. bool br_allowed_egress(struct net_bridge_vlan_group *vg,
  444. const struct sk_buff *skb)
  445. {
  446. const struct net_bridge_vlan *v;
  447. u16 vid;
  448. /* If this packet was not filtered at input, let it pass */
  449. if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
  450. return true;
  451. br_vlan_get_tag(skb, &vid);
  452. v = br_vlan_find(vg, vid);
  453. if (v && br_vlan_should_use(v))
  454. return true;
  455. return false;
  456. }
/* Called under RCU */
/* Decide whether a source address on @p should be learned for the frame's
 * vlan; resolves *vid like ingress does (PVID for untagged/mismatched-proto
 * frames). Returns false when the port has no vlans or the vid is unknown.
 */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;

	/* If filtering was disabled at input, let it pass. */
	if (!br->vlan_enabled)
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	/* a tag of a foreign protocol counts as untagged here */
	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid)
			return false;

		return true;
	}

	if (br_vlan_find(vg, *vid))
		return true;

	return false;
}
/* Update an already-existing bridge vlan entry: push the new flags to the
 * hardware and, if the entry only existed as a port-vlan context, promote
 * it to a real bridge entry (BRENTRY). Sets *changed when anything was
 * created or modified.
 */
static int br_vlan_add_existing(struct net_bridge *br,
				struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *vlan,
				u16 flags, bool *changed)
{
	int err;

	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags);
	if (err && err != -EOPNOTSUPP)
		return err;

	if (!br_vlan_is_brentry(vlan)) {
		/* Trying to change flags of non-existent bridge vlan */
		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
			err = -EINVAL;
			goto err_flags;
		}
		/* It was only kept for port vlans, now make it real */
		err = br_fdb_insert(br, NULL, br->dev->dev_addr,
				    vlan->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto err_fdb_insert;
		}

		refcount_inc(&vlan->refcnt);
		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans++;
		*changed = true;
	}

	if (__vlan_add_flags(vlan, flags))
		*changed = true;

	return 0;

err_fdb_insert:
err_flags:
	/* roll back the switchdev add from above */
	br_switchdev_port_vlan_del(br->dev, vlan->vid);
	return err;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan)
		return br_vlan_add_existing(br, vg, vlan, flags, changed);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	/* bridge-level entries are always masters; PVID state is managed
	 * separately via __vlan_add_flags()
	 */
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	} else {
		*changed = true;
	}

	return ret;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Deletes the bridge-level (brentry) vlan together with its local fdb
 * entries; returns -ENOENT if no such brentry exists.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	/* the master entry is not freed in __vlan_del(), so drop its tunnel
	 * info here
	 */
	vlan_tunnel_info_del(vg, v);

	return __vlan_del(v);
}
/* Tear down the bridge's entire vlan group on bridge destruction; waits for
 * an RCU grace period before freeing the group itself.
 */
void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	/* delete auto-added default pvid local fdb before flushing vlans
	 * otherwise it will be leaked on bridge device init failure
	 */
	br_fdb_delete_by_port(br, NULL, 0, 1);

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
  584. struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
  585. {
  586. if (!vg)
  587. return NULL;
  588. return br_vlan_lookup(&vg->vlan_hash, vid);
  589. }
  590. /* Must be protected by RTNL. */
  591. static void recalculate_group_addr(struct net_bridge *br)
  592. {
  593. if (br->group_addr_set)
  594. return;
  595. spin_lock_bh(&br->lock);
  596. if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
  597. /* Bridge Group Address */
  598. br->group_addr[5] = 0x00;
  599. } else { /* vlan_enabled && ETH_P_8021AD */
  600. /* Provider Bridge Group Address */
  601. br->group_addr[5] = 0x08;
  602. }
  603. spin_unlock_bh(&br->lock);
  604. }
  605. /* Must be protected by RTNL. */
  606. void br_recalculate_fwd_mask(struct net_bridge *br)
  607. {
  608. if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
  609. br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
  610. else /* vlan_enabled && ETH_P_8021AD */
  611. br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
  612. ~(1u << br->group_addr[5]);
  613. }
/* Enable/disable VLAN filtering on the bridge. Propagates the setting to
 * offloading hardware (ignoring -EOPNOTSUPP) and refreshes dependent state:
 * port promiscuity, group address and the forward mask.
 */
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br->vlan_enabled == val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br->vlan_enabled = val;
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}
/* Netlink/sysfs entry point for toggling VLAN filtering. */
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	return __br_vlan_filter_toggle(br, val);
}
  638. bool br_vlan_enabled(const struct net_device *dev)
  639. {
  640. struct net_bridge *br = netdev_priv(dev);
  641. return !!br->vlan_enabled;
  642. }
  643. EXPORT_SYMBOL_GPL(br_vlan_enabled);
/* Switch the bridge vlan protocol (802.1Q <-> 802.1ad). All configured VIDs
 * are first added to every port's device filter under the new protocol;
 * only when that fully succeeds is the protocol switched and the old
 * entries removed. On failure everything added so far is rolled back.
 */
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	/* undo the partial port first, then all fully-done ports before it */
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}
  683. int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
  684. {
  685. if (val != ETH_P_8021Q && val != ETH_P_8021AD)
  686. return -EPROTONOSUPPORT;
  687. return __br_vlan_set_proto(br, htons(val));
  688. }
  689. int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
  690. {
  691. switch (val) {
  692. case 0:
  693. case 1:
  694. br->vlan_stats_enabled = val;
  695. break;
  696. default:
  697. return -EINVAL;
  698. }
  699. return 0;
  700. }
  701. static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  702. {
  703. struct net_bridge_vlan *v;
  704. if (vid != vg->pvid)
  705. return false;
  706. v = br_vlan_lookup(&vg->vlan_hash, vid);
  707. if (v && br_vlan_should_use(v) &&
  708. (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
  709. return true;
  710. return false;
  711. }
/* Turn the default_pvid feature off: remove the auto-added pvid vlan from
 * the bridge and from every port that still carries the untouched default
 * configuration, then record default_pvid = 0.
 */
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}
/* Change the bridge's default PVID to @pvid. The new vlan is installed on
 * the bridge and on each port whose old configuration was still the
 * untouched default; per-port success is tracked in a bitmap (bit 0 = the
 * bridge itself) so a mid-list failure can be rolled back precisely.
 * @pvid == 0 disables the feature entirely.
 */
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	unsigned long *changed;
	bool vlchange;
	u16 old_pvid;
	int err = 0;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY,
				  &vlchange);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &vlchange);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	/* restore the old pvid on every port we already switched over */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED,
				     &vlchange);
		nbp_vlan_delete(p, pvid);
	}

	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY,
				    &vlchange);
		br_vlan_delete(br, pvid);
	}

	goto out;
}
  806. int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
  807. {
  808. u16 pvid = val;
  809. int err = 0;
  810. if (val >= VLAN_VID_MASK)
  811. return -EINVAL;
  812. if (pvid == br->default_pvid)
  813. goto out;
  814. /* Only allow default pvid change when filtering is disabled */
  815. if (br->vlan_enabled) {
  816. pr_info_once("Please disable vlan filtering to change default_pvid\n");
  817. err = -EPERM;
  818. goto out;
  819. }
  820. err = __br_vlan_set_default_pvid(br, pvid);
  821. out:
  822. return err;
  823. }
/* Allocate and initialize the bridge's vlan group and install VLAN 1 as the
 * default untagged PVID brentry. On any failure all partially-initialized
 * state is torn down and an errno is returned.
 */
int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;
	bool changed;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	/* publish the group before adding the default vlan */
	rcu_assign_pointer(br->vlgrp, vg);
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY, &changed);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}
/* Initialize per-port vlan state when a port joins a bridge: propagate the
 * bridge's filtering setting to the port's hardware, allocate the vlan
 * group and add the bridge's default PVID on the port. Unwinds fully on
 * failure.
 */
int nbp_vlan_init(struct net_bridge_port *p)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = p->br->vlan_enabled,
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	/* publish the group before adding the default pvid vlan */
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		bool changed;

		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &changed);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	RCU_INIT_POINTER(p->vlgrp, NULL);
	/* readers may already have seen the group; wait before freeing */
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}
  903. /* Must be protected by RTNL.
  904. * Must be called with vid in range from 1 to 4094 inclusive.
  905. * changed must be true only if the vlan was created or updated
  906. */
  907. int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
  908. bool *changed)
  909. {
  910. struct net_bridge_vlan *vlan;
  911. int ret;
  912. ASSERT_RTNL();
  913. *changed = false;
  914. vlan = br_vlan_find(nbp_vlan_group(port), vid);
  915. if (vlan) {
  916. /* Pass the flags to the hardware bridge */
  917. ret = br_switchdev_port_vlan_add(port->dev, vid, flags);
  918. if (ret && ret != -EOPNOTSUPP)
  919. return ret;
  920. *changed = __vlan_add_flags(vlan, flags);
  921. return 0;
  922. }
  923. vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
  924. if (!vlan)
  925. return -ENOMEM;
  926. vlan->vid = vid;
  927. vlan->port = port;
  928. ret = __vlan_add(vlan, flags);
  929. if (ret)
  930. kfree(vlan);
  931. else
  932. *changed = true;
  933. return ret;
  934. }
  935. /* Must be protected by RTNL.
  936. * Must be called with vid in range from 1 to 4094 inclusive.
  937. */
  938. int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
  939. {
  940. struct net_bridge_vlan *v;
  941. ASSERT_RTNL();
  942. v = br_vlan_find(nbp_vlan_group(port), vid);
  943. if (!v)
  944. return -ENOENT;
  945. br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
  946. br_fdb_delete_by_port(port->br, port, vid, 0);
  947. return __vlan_del(v);
  948. }
  949. void nbp_vlan_flush(struct net_bridge_port *port)
  950. {
  951. struct net_bridge_vlan_group *vg;
  952. ASSERT_RTNL();
  953. vg = nbp_vlan_group(port);
  954. __vlan_flush(vg);
  955. RCU_INIT_POINTER(port->vlgrp, NULL);
  956. synchronize_rcu();
  957. __vlan_group_free(vg);
  958. }
  959. void br_vlan_get_stats(const struct net_bridge_vlan *v,
  960. struct br_vlan_stats *stats)
  961. {
  962. int i;
  963. memset(stats, 0, sizeof(*stats));
  964. for_each_possible_cpu(i) {
  965. u64 rxpackets, rxbytes, txpackets, txbytes;
  966. struct br_vlan_stats *cpu_stats;
  967. unsigned int start;
  968. cpu_stats = per_cpu_ptr(v->stats, i);
  969. do {
  970. start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
  971. rxpackets = cpu_stats->rx_packets;
  972. rxbytes = cpu_stats->rx_bytes;
  973. txbytes = cpu_stats->tx_bytes;
  974. txpackets = cpu_stats->tx_packets;
  975. } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
  976. stats->rx_packets += rxpackets;
  977. stats->rx_bytes += rxbytes;
  978. stats->tx_bytes += txbytes;
  979. stats->tx_packets += txpackets;
  980. }
  981. }
  982. int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
  983. {
  984. struct net_bridge_vlan_group *vg;
  985. ASSERT_RTNL();
  986. if (netif_is_bridge_master(dev))
  987. vg = br_vlan_group(netdev_priv(dev));
  988. else
  989. return -EINVAL;
  990. *p_pvid = br_get_pvid(vg);
  991. return 0;
  992. }
  993. EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
  994. int br_vlan_get_info(const struct net_device *dev, u16 vid,
  995. struct bridge_vlan_info *p_vinfo)
  996. {
  997. struct net_bridge_vlan_group *vg;
  998. struct net_bridge_vlan *v;
  999. struct net_bridge_port *p;
  1000. ASSERT_RTNL();
  1001. p = br_port_get_check_rtnl(dev);
  1002. if (p)
  1003. vg = nbp_vlan_group(p);
  1004. else if (netif_is_bridge_master(dev))
  1005. vg = br_vlan_group(netdev_priv(dev));
  1006. else
  1007. return -EINVAL;
  1008. v = br_vlan_find(vg, vid);
  1009. if (!v)
  1010. return -ENOENT;
  1011. p_vinfo->vid = vid;
  1012. p_vinfo->flags = v->flags;
  1013. return 0;
  1014. }
  1015. EXPORT_SYMBOL_GPL(br_vlan_get_info);