/* net/bridge/br_if.c - bridge port (interface) management */
/*
 *	Userspace interface
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
  13. #include <linux/kernel.h>
  14. #include <linux/netdevice.h>
  15. #include <linux/etherdevice.h>
  16. #include <linux/netpoll.h>
  17. #include <linux/ethtool.h>
  18. #include <linux/if_arp.h>
  19. #include <linux/module.h>
  20. #include <linux/init.h>
  21. #include <linux/rtnetlink.h>
  22. #include <linux/if_ether.h>
  23. #include <linux/slab.h>
  24. #include <net/dsa.h>
  25. #include <net/sock.h>
  26. #include <linux/if_vlan.h>
  27. #include <net/switchdev.h>
  28. #include <net/net_namespace.h>
  29. #include "br_private.h"
  30. /*
  31. * Determine initial path cost based on speed.
  32. * using recommendations from 802.1d standard
  33. *
  34. * Since driver might sleep need to not be holding any locks.
  35. */
  36. static int port_cost(struct net_device *dev)
  37. {
  38. struct ethtool_link_ksettings ecmd;
  39. if (!__ethtool_get_link_ksettings(dev, &ecmd)) {
  40. switch (ecmd.base.speed) {
  41. case SPEED_10000:
  42. return 2;
  43. case SPEED_1000:
  44. return 4;
  45. case SPEED_100:
  46. return 19;
  47. case SPEED_10:
  48. return 100;
  49. }
  50. }
  51. /* Old silly heuristics based on name */
  52. if (!strncmp(dev->name, "lec", 3))
  53. return 7;
  54. if (!strncmp(dev->name, "plip", 4))
  55. return 2500;
  56. return 100; /* assume old 10Mbps */
  57. }
/* Check for port carrier transitions.
 * Refreshes the auto path cost and enables/disables the STP port state
 * to follow the device's carrier; *notified is set to true when a state
 * transition actually happened.
 */
void br_port_carrier_check(struct net_bridge_port *p, bool *notified)
{
	struct net_device *dev = p->dev;
	struct net_bridge *br = p->br;

	/* Re-derive the path cost from link speed unless the admin
	 * pinned it (BR_ADMIN_COST).
	 */
	if (!(p->flags & BR_ADMIN_COST) &&
	    netif_running(dev) && netif_oper_up(dev))
		p->path_cost = port_cost(dev);

	*notified = false;
	if (!netif_running(br->dev))
		return;

	/* state changes are done under the bridge lock */
	spin_lock_bh(&br->lock);
	if (netif_running(dev) && netif_oper_up(dev)) {
		if (p->state == BR_STATE_DISABLED) {
			br_stp_enable_port(p);
			*notified = true;
		}
	} else {
		if (p->state != BR_STATE_DISABLED) {
			br_stp_disable_port(p);
			*notified = true;
		}
	}
	spin_unlock_bh(&br->lock);
}
  83. static void br_port_set_promisc(struct net_bridge_port *p)
  84. {
  85. int err = 0;
  86. if (br_promisc_port(p))
  87. return;
  88. err = dev_set_promiscuity(p->dev, 1);
  89. if (err)
  90. return;
  91. br_fdb_unsync_static(p->br, p);
  92. p->flags |= BR_PROMISC;
  93. }
  94. static void br_port_clear_promisc(struct net_bridge_port *p)
  95. {
  96. int err;
  97. /* Check if the port is already non-promisc or if it doesn't
  98. * support UNICAST filtering. Without unicast filtering support
  99. * we'll end up re-enabling promisc mode anyway, so just check for
  100. * it here.
  101. */
  102. if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
  103. return;
  104. /* Since we'll be clearing the promisc mode, program the port
  105. * first so that we don't have interruption in traffic.
  106. */
  107. err = br_fdb_sync_static(p->br, p);
  108. if (err)
  109. return;
  110. dev_set_promiscuity(p->dev, -1);
  111. p->flags &= ~BR_PROMISC;
  112. }
  113. /* When a port is added or removed or when certain port flags
  114. * change, this function is called to automatically manage
  115. * promiscuity setting of all the bridge ports. We are always called
  116. * under RTNL so can skip using rcu primitives.
  117. */
  118. void br_manage_promisc(struct net_bridge *br)
  119. {
  120. struct net_bridge_port *p;
  121. bool set_all = false;
  122. /* If vlan filtering is disabled or bridge interface is placed
  123. * into promiscuous mode, place all ports in promiscuous mode.
  124. */
  125. if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
  126. set_all = true;
  127. list_for_each_entry(p, &br->port_list, list) {
  128. if (set_all) {
  129. br_port_set_promisc(p);
  130. } else {
  131. /* If the number of auto-ports is <= 1, then all other
  132. * ports will have their output configuration
  133. * statically specified through fdbs. Since ingress
  134. * on the auto-port becomes forwarding/egress to other
  135. * ports and egress configuration is statically known,
  136. * we can say that ingress configuration of the
  137. * auto-port is also statically known.
  138. * This lets us disable promiscuous mode and write
  139. * this config to hw.
  140. */
  141. if (br->auto_cnt == 0 ||
  142. (br->auto_cnt == 1 && br_auto_port(p)))
  143. br_port_clear_promisc(p);
  144. else
  145. br_port_set_promisc(p);
  146. }
  147. }
  148. }
/* Set (or clear, when backup_dev is NULL) the backup port that p's
 * traffic is redirected to when p's carrier is down.  Returns 0 or a
 * negative errno.  Must be called under RTNL.
 */
int nbp_backup_change(struct net_bridge_port *p,
		      struct net_device *backup_dev)
{
	struct net_bridge_port *old_backup = rtnl_dereference(p->backup_port);
	struct net_bridge_port *backup_p = NULL;

	ASSERT_RTNL();

	if (backup_dev) {
		if (!br_port_exists(backup_dev))
			return -ENOENT;

		/* the backup device must be a port of the same bridge */
		backup_p = br_port_get_rtnl(backup_dev);
		if (backup_p->br != p->br)
			return -EINVAL;
	}

	/* a port cannot back itself up */
	if (p == backup_p)
		return -EINVAL;

	/* no change requested */
	if (old_backup == backup_p)
		return 0;

	/* if the backup link is already set, clear it */
	if (old_backup)
		old_backup->backup_redirected_cnt--;

	/* account the new backup before publishing the pointer */
	if (backup_p)
		backup_p->backup_redirected_cnt++;

	rcu_assign_pointer(p->backup_port, backup_p);

	return 0;
}
/* Remove all backup relations involving port p: clear p's own backup
 * link, and clear p as the backup of every port that pointed at it.
 * Called under RTNL while the port is being deleted.
 */
static void nbp_backup_clear(struct net_bridge_port *p)
{
	nbp_backup_change(p, NULL);
	if (p->backup_redirected_cnt) {
		struct net_bridge_port *cur_p;

		/* walk the bridge to find ports that use p as backup */
		list_for_each_entry(cur_p, &p->br->port_list, list) {
			struct net_bridge_port *backup_p;

			backup_p = rtnl_dereference(cur_p->backup_port);
			if (backup_p == p)
				nbp_backup_change(cur_p, NULL);
		}
	}

	/* both directions must be fully torn down by now */
	WARN_ON(rcu_access_pointer(p->backup_port) || p->backup_redirected_cnt);
}
  188. static void nbp_update_port_count(struct net_bridge *br)
  189. {
  190. struct net_bridge_port *p;
  191. u32 cnt = 0;
  192. list_for_each_entry(p, &br->port_list, list) {
  193. if (br_auto_port(p))
  194. cnt++;
  195. }
  196. if (br->auto_cnt != cnt) {
  197. br->auto_cnt = cnt;
  198. br_manage_promisc(br);
  199. }
  200. }
  201. static void nbp_delete_promisc(struct net_bridge_port *p)
  202. {
  203. /* If port is currently promiscuous, unset promiscuity.
  204. * Otherwise, it is a static port so remove all addresses
  205. * from it.
  206. */
  207. dev_set_allmulti(p->dev, -1);
  208. if (br_promisc_port(p))
  209. dev_set_promiscuity(p->dev, -1);
  210. else
  211. br_fdb_unsync_static(p->br, p);
  212. }
  213. static void release_nbp(struct kobject *kobj)
  214. {
  215. struct net_bridge_port *p
  216. = container_of(kobj, struct net_bridge_port, kobj);
  217. kfree(p);
  218. }
  219. static void brport_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
  220. {
  221. struct net_bridge_port *p = kobj_to_brport(kobj);
  222. net_ns_get_ownership(dev_net(p->dev), uid, gid);
  223. }
/* kobject type for bridge ports: wires up the sysfs attribute ops,
 * the final free (release_nbp) and namespace-aware sysfs ownership.
 */
static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
	.sysfs_ops = &brport_sysfs_ops,
#endif
	.release = release_nbp,
	.get_ownership = brport_get_ownership,
};
/* Final teardown of a port after the RCU grace period: drop the device
 * reference taken in new_nbp() and the last kobject reference, which
 * frees p via release_nbp().
 */
static void destroy_nbp(struct net_bridge_port *p)
{
	struct net_device *dev = p->dev;

	p->br = NULL;
	p->dev = NULL;
	dev_put(dev);

	kobject_put(&p->kobj);
}
/* RCU callback scheduled by del_nbp(); runs once all CPUs have left the
 * read-side sections that might still see the port.
 */
static void destroy_nbp_rcu(struct rcu_head *head)
{
	struct net_bridge_port *p =
			container_of(head, struct net_bridge_port, rcu);
	destroy_nbp(p);
}
  245. static unsigned get_max_headroom(struct net_bridge *br)
  246. {
  247. unsigned max_headroom = 0;
  248. struct net_bridge_port *p;
  249. list_for_each_entry(p, &br->port_list, list) {
  250. unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);
  251. if (dev_headroom > max_headroom)
  252. max_headroom = dev_headroom;
  253. }
  254. return max_headroom;
  255. }
  256. static void update_headroom(struct net_bridge *br, int new_hr)
  257. {
  258. struct net_bridge_port *p;
  259. list_for_each_entry(p, &br->port_list, list)
  260. netdev_set_rx_headroom(p->dev, new_hr);
  261. br->dev->needed_headroom = new_hr;
  262. }
/* Delete port(interface) from bridge is done in two steps.
 * via RCU. First step, marks device as down. That deletes
 * all the timers and stops new packets from flowing through.
 *
 * Final cleanup doesn't occur until after all CPU's finished
 * processing packets.
 *
 * Protected from multiple admin operations by RTNL mutex
 */
static void del_nbp(struct net_bridge_port *p)
{
	struct net_bridge *br = p->br;
	struct net_device *dev = p->dev;

	sysfs_remove_link(br->ifobj, p->dev->name);

	/* drop promisc/allmulti refs (or unsync static addresses) */
	nbp_delete_promisc(p);

	spin_lock_bh(&br->lock);
	br_stp_disable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_DELLINK, NULL, p);

	/* unhook from the port list; RCU readers may still see the port
	 * until the grace period completes below
	 */
	list_del_rcu(&p->list);

	/* if this port dictated the bridge headroom, recompute it */
	if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
		update_headroom(br, get_max_headroom(br));
	netdev_reset_rx_headroom(dev);

	nbp_vlan_flush(p);
	br_fdb_delete_by_port(br, p, 0, 1);
	switchdev_deferred_process();
	nbp_backup_clear(p);
	nbp_update_port_count(br);

	netdev_upper_dev_unlink(dev, br->dev);

	dev->priv_flags &= ~IFF_BRIDGE_PORT;

	/* stops packets from entering br_handle_frame() */
	netdev_rx_handler_unregister(dev);

	br_multicast_del_port(p);

	kobject_uevent(&p->kobj, KOBJ_REMOVE);
	kobject_del(&p->kobj);

	br_netpoll_disable(p);

	/* free only after all CPUs are done with the packet path */
	call_rcu(&p->rcu, destroy_nbp_rcu);
}
/* Delete bridge device.  Detaches every port, flushes the fdb, stops
 * the garbage-collection work and queues the netdev for unregistration.
 */
void br_dev_delete(struct net_device *dev, struct list_head *head)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p, *n;

	/* _safe variant: del_nbp() removes p from the list */
	list_for_each_entry_safe(p, n, &br->port_list, list) {
		del_nbp(p);
	}

	br_recalculate_neigh_suppress_enabled(br);

	/* NULL port: delete the bridge's own local fdb entries */
	br_fdb_delete_by_port(br, NULL, 0, 1);

	cancel_delayed_work_sync(&br->gc_work);

	br_sysfs_delbr(br->dev);
	unregister_netdevice_queue(br->dev, head);
}
  314. /* find an available port number */
  315. static int find_portno(struct net_bridge *br)
  316. {
  317. int index;
  318. struct net_bridge_port *p;
  319. unsigned long *inuse;
  320. inuse = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
  321. GFP_KERNEL);
  322. if (!inuse)
  323. return -ENOMEM;
  324. set_bit(0, inuse); /* zero is reserved */
  325. list_for_each_entry(p, &br->port_list, list) {
  326. set_bit(p->port_no, inuse);
  327. }
  328. index = find_first_zero_bit(inuse, BR_MAX_PORTS);
  329. kfree(inuse);
  330. return (index >= BR_MAX_PORTS) ? -EXFULL : index;
  331. }
/* Allocate and initialize a new bridge port for @dev on @br; returns
 * the port or an ERR_PTR.  called with RTNL but without bridge lock
 */
static struct net_bridge_port *new_nbp(struct net_bridge *br,
				       struct net_device *dev)
{
	struct net_bridge_port *p;
	int index, err;

	/* pick a free port number (0 is reserved) */
	index = find_portno(br);
	if (index < 0)
		return ERR_PTR(index);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return ERR_PTR(-ENOMEM);

	p->br = br;
	dev_hold(dev);		/* released in destroy_nbp() */
	p->dev = dev;
	p->path_cost = port_cost(dev);
	p->priority = 0x8000 >> BR_PORT_BITS;
	p->port_no = index;
	/* default port policy: learn, flood unknown unicast/mcast/bcast */
	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	br_init_port(p);
	br_set_state(p, BR_STATE_DISABLED);
	br_stp_port_timer_init(p);
	err = br_multicast_add_port(p);
	if (err) {
		/* undo the allocation and the dev_hold() above */
		dev_put(dev);
		kfree(p);
		p = ERR_PTR(err);
	}

	return p;
}
  362. int br_add_bridge(struct net *net, const char *name)
  363. {
  364. struct net_device *dev;
  365. int res;
  366. dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
  367. br_dev_setup);
  368. if (!dev)
  369. return -ENOMEM;
  370. dev_net_set(dev, net);
  371. dev->rtnl_link_ops = &br_link_ops;
  372. res = register_netdev(dev);
  373. if (res)
  374. free_netdev(dev);
  375. return res;
  376. }
  377. int br_del_bridge(struct net *net, const char *name)
  378. {
  379. struct net_device *dev;
  380. int ret = 0;
  381. rtnl_lock();
  382. dev = __dev_get_by_name(net, name);
  383. if (dev == NULL)
  384. ret = -ENXIO; /* Could not find device */
  385. else if (!(dev->priv_flags & IFF_EBRIDGE)) {
  386. /* Attempt to delete non bridge device! */
  387. ret = -EPERM;
  388. }
  389. else if (dev->flags & IFF_UP) {
  390. /* Not shutdown yet. */
  391. ret = -EBUSY;
  392. }
  393. else
  394. br_dev_delete(dev, NULL);
  395. rtnl_unlock();
  396. return ret;
  397. }
  398. /* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */
  399. static int br_mtu_min(const struct net_bridge *br)
  400. {
  401. const struct net_bridge_port *p;
  402. int ret_mtu = 0;
  403. list_for_each_entry(p, &br->port_list, list)
  404. if (!ret_mtu || ret_mtu > p->dev->mtu)
  405. ret_mtu = p->dev->mtu;
  406. return ret_mtu ? ret_mtu : ETH_DATA_LEN;
  407. }
/* Track the minimum port MTU on the bridge device unless the user has
 * configured the bridge MTU explicitly.  Must run under RTNL.
 */
void br_mtu_auto_adjust(struct net_bridge *br)
{
	ASSERT_RTNL();

	/* if the bridge MTU was manually configured don't mess with it */
	if (br->mtu_set_by_user)
		return;

	/* change to the minimum MTU and clear the flag which was set by
	 * the bridge ndo_change_mtu callback
	 */
	dev_set_mtu(br->dev, br_mtu_min(br));
	br->mtu_set_by_user = false;
}
  420. static void br_set_gso_limits(struct net_bridge *br)
  421. {
  422. unsigned int gso_max_size = GSO_MAX_SIZE;
  423. u16 gso_max_segs = GSO_MAX_SEGS;
  424. const struct net_bridge_port *p;
  425. list_for_each_entry(p, &br->port_list, list) {
  426. gso_max_size = min(gso_max_size, p->dev->gso_max_size);
  427. gso_max_segs = min(gso_max_segs, p->dev->gso_max_segs);
  428. }
  429. br->dev->gso_max_size = gso_max_size;
  430. br->dev->gso_max_segs = gso_max_segs;
  431. }
/*
 * Recomputes features using slave's features.
 * With no ports the requested features pass through unchanged.
 */
netdev_features_t br_features_recompute(struct net_bridge *br,
					netdev_features_t features)
{
	struct net_bridge_port *p;
	netdev_features_t mask;

	if (list_empty(&br->port_list))
		return features;

	/* start from "all off" for one-for-all bits, then re-add the
	 * intersection contributed by each port within the mask
	 */
	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;

	list_for_each_entry(p, &br->port_list, list) {
		features = netdev_increment_features(features,
						     p->dev->features, mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}
/* Enslave @dev as a port of bridge @br.  Returns 0 or a negative errno;
 * on failure everything acquired so far is unwound via the err labels in
 * reverse order of acquisition.  called with RTNL
 */
int br_add_if(struct net_bridge *br, struct net_device *dev,
	      struct netlink_ext_ack *extack)
{
	struct net_bridge_port *p;
	int err = 0;
	unsigned br_hr, dev_hr;
	bool changed_addr;

	/* Don't allow bridging non-ethernet like devices, or DSA-enabled
	 * master network devices since the bridge layer rx_handler prevents
	 * the DSA fake ethertype handler to be invoked, so we do not strip off
	 * the DSA switch tag protocol header and the bridge layer just return
	 * RX_HANDLER_CONSUMED, stopping RX processing for these frames.
	 */
	if ((dev->flags & IFF_LOOPBACK) ||
	    dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
	    !is_valid_ether_addr(dev->dev_addr) ||
	    netdev_uses_dsa(dev))
		return -EINVAL;

	/* No bridging of bridges */
	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave a bridge to a bridge");
		return -ELOOP;
	}

	/* Device has master upper dev */
	if (netdev_master_upper_dev_get(dev))
		return -EBUSY;

	/* No bridging devices that dislike that (e.g. wireless) */
	if (dev->priv_flags & IFF_DONT_BRIDGE) {
		NL_SET_ERR_MSG(extack,
			       "Device does not allow enslaving to a bridge");
		return -EOPNOTSUPP;
	}

	/* allocates the port and takes a reference on dev */
	p = new_nbp(br, dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	call_netdevice_notifiers(NETDEV_JOIN, dev);

	err = dev_set_allmulti(dev, 1);
	if (err) {
		kfree(p);	/* kobject not yet init'd, manually free */
		goto err1;
	}

	err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
				   SYSFS_BRIDGE_PORT_ATTR);
	if (err)
		goto err2;

	err = br_sysfs_addif(p);
	if (err)
		goto err2;

	err = br_netpoll_enable(p);
	if (err)
		goto err3;

	/* from here on, traffic on dev enters br_handle_frame() */
	err = netdev_rx_handler_register(dev, br_handle_frame, p);
	if (err)
		goto err4;

	dev->priv_flags |= IFF_BRIDGE_PORT;

	err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, extack);
	if (err)
		goto err5;

	err = nbp_switchdev_mark_set(p);
	if (err)
		goto err6;

	dev_disable_lro(dev);

	/* publish the port to the RCU-protected port list */
	list_add_rcu(&p->list, &br->port_list);

	nbp_update_port_count(br);

	netdev_update_features(br->dev);

	/* grow the bridge headroom if this port needs more, otherwise
	 * tell the port how much the bridge already reserves
	 */
	br_hr = br->dev->needed_headroom;
	dev_hr = netdev_get_fwd_headroom(dev);
	if (br_hr < dev_hr)
		update_headroom(br, dev_hr);
	else
		netdev_set_rx_headroom(dev, br_hr);

	/* best effort: a failure here is logged but not fatal */
	if (br_fdb_insert(br, p, dev->dev_addr, 0))
		netdev_err(dev, "failed insert local address bridge forwarding table\n");

	err = nbp_vlan_init(p);
	if (err) {
		netdev_err(dev, "failed to initialize vlan filtering on this port\n");
		goto err7;
	}

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);

	if (netif_running(dev) && netif_oper_up(dev) &&
	    (br->dev->flags & IFF_UP))
		br_stp_enable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	br_mtu_auto_adjust(br);
	br_set_gso_limits(br);

	kobject_uevent(&p->kobj, KOBJ_ADD);

	return 0;

	/* error unwinding, reverse order of the setup above */
err7:
	list_del_rcu(&p->list);
	br_fdb_delete_by_port(br, p, 0, 1);
	nbp_update_port_count(br);
err6:
	netdev_upper_dev_unlink(dev, br->dev);
err5:
	dev->priv_flags &= ~IFF_BRIDGE_PORT;
	netdev_rx_handler_unregister(dev);
err4:
	br_netpoll_disable(p);
err3:
	sysfs_remove_link(br->ifobj, p->dev->name);
err2:
	kobject_put(&p->kobj);	/* frees p via release_nbp() */
	dev_set_allmulti(dev, -1);
err1:
	dev_put(dev);
	return err;
}
/* Remove @dev from bridge @br; -EINVAL when @dev is not a port of this
 * bridge.  called with RTNL
 */
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
	struct net_bridge_port *p;
	bool changed_addr;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br)
		return -EINVAL;

	/* Since more than one interface can be attached to a bridge,
	 * there still maybe an alternate path for netconsole to use;
	 * therefore there is no reason for a NETDEV_RELEASE event.
	 */
	del_nbp(p);

	/* re-derive bridge-wide limits now that a port is gone */
	br_mtu_auto_adjust(br);
	br_set_gso_limits(br);

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);
	spin_unlock_bh(&br->lock);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	netdev_update_features(br->dev);

	return 0;
}
/* React to a change of port flags named in @mask: recount auto-managed
 * ports (which may toggle promiscuity) and refresh the bridge-wide
 * neigh-suppress state when the relevant bits changed.
 */
void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
{
	struct net_bridge *br = p->br;

	if (mask & BR_AUTO_MASK)
		nbp_update_port_count(br);

	if (mask & BR_NEIGH_SUPPRESS)
		br_recalculate_neigh_suppress_enabled(br);
}