/*
 * Bridge netlink control interface
 *
 * Authors:
 * Stephen Hemminger		<shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_tunnel.h"
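
/* Count the bridge_vlan_info attributes needed for a compressed VLAN dump;
 * consecutive VIDs that share the same flags are collapsed into a
 * RANGE_BEGIN/RANGE_END pair and counted as two entries.
 */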
static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int num_vlans = 0;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	/* Count number of vlan infos */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			if ((vid_range_end - vid_range_start) > 0)
				num_vlans += 2;
			else
				num_vlans += 1;
		}
initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		if ((vid_range_end - vid_range_start) > 0)
			num_vlans += 2;
		else
			num_vlans += 1;
	}

	return num_vlans;
}

static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int num_vlans;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	num_vlans = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return num_vlans;
}
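
/* Size of the per-VLAN IFLA_AF_SPEC payload (VLAN info and, for
 * tunnel-enabled ports, VLAN tunnel info) a dump of this device would need,
 * honouring the requested filter mask.
 */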
static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;
	int num_vlan_infos;
	size_t vinfo_sz = 0;

	rcu_read_lock();
	if (br_port_exists(dev)) {
		p = br_port_get_rcu(dev);
		vg = nbp_vlan_group_rcu(p);
	} else if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	if (p && (p->flags & BR_VLAN_TUNNEL))
		vinfo_sz += br_get_vlan_tunnel_info_size(vg);

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));

	return vinfo_sz;
}

static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_BCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
		+ nla_total_size(1)	/* IFLA_BRPORT_NEIGH_SUPPRESS */
		+ nla_total_size(1)	/* IFLA_BRPORT_ISOLATED */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_GROUP_FWD_MASK */
		+ 0;
}

static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
		+ nla_total_size(4) /* IFLA_MASTER */
		+ nla_total_size(4) /* IFLA_MTU */
		+ nla_total_size(4) /* IFLA_LINK */
		+ nla_total_size(1) /* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
		+ nla_total_size(br_get_link_af_size_filtered(dev,
				 filter_mask)) /* IFLA_AF_SPEC */
		+ nla_total_size(4); /* IFLA_BRPORT_BACKUP_PORT */
}
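
/* Fill all IFLA_BRPORT_* attributes for one bridge port into the message. */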
static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	struct net_bridge_port *backup_p;
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
		       !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
		       !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
		       !!(p->flags & BR_MCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
		       !!(p->flags & BR_BCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
							BR_VLAN_TUNNEL)) ||
	    nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
	    nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
		       !!(p->flags & BR_NEIGH_SUPPRESS)) ||
	    nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)))
		return -EMSGSIZE;

	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_router))
		return -EMSGSIZE;
#endif

	/* we might be called only with br->lock */
	rcu_read_lock();
	backup_p = rcu_dereference(p->backup_port);
	if (backup_p)
		nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
			    backup_p->dev->ifindex);
	rcu_read_unlock();

	return 0;
}
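
/* Emit one bridge_vlan_info attribute, or a RANGE_BEGIN/RANGE_END pair when
 * vid_start and vid_end describe more than a single VLAN.
 */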
static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct bridge_vlan_info vinfo;

	if ((vid_end - vid_start) > 0) {
		/* add range to skb */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;

		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	} else {
		vinfo.vid = vid_start;
		vinfo.flags = flags;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
					 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int err = 0;

	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
	 * and mark vlan info with begin and end flags
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
						       vid_range_end,
						       vid_range_flags);
			if (err)
				return err;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		/* Call it once more to send any left over vlans */
		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
					       vid_range_end,
					       vid_range_flags);
		if (err)
			return err;
	}

	return 0;
}

static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct bridge_vlan_info vinfo;
	struct net_bridge_vlan *v;
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  const struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev)
{
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest
			= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);

		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		struct nlattr *af;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);

		if (port && (port->flags & BR_VLAN_TUNNEL))
			err = br_fill_vlan_tunnel_info(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
		nla_nest_end(skb, af);
	}

done:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* Notify listeners of a change in bridge or port information */
void br_ifinfo_notify(int event, const struct net_bridge *br,
		      const struct net_bridge_port *port)
{
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
	struct net_device *dev;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 port_no = 0;

	if (WARN_ON(!port && !br))
		return;

	if (port) {
		dev = port->dev;
		br = port->br;
		port_no = port->port_no;
	} else {
		dev = br->dev;
	}

	net = dev_net(dev);
	br_debug(br, "port %u(%s) event %d\n", port_no, dev->name, event);

	skb = nlmsg_new(br_nlmsg_size(dev, filter), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}

/*
 * Dump information about all ports, in response to GETLINK
 */
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
	       struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct net_bridge_port *port = br_port_get_rtnl(dev);

	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
			      filter_mask, dev);
}
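
/* Apply a single bridge_vlan_info entry: add the VLAN on RTM_SETLINK and
 * delete it on RTM_DELLINK; *changed is set when anything was actually
 * modified.
 */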
static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			int cmd, struct bridge_vlan_info *vinfo, bool *changed)
{
	bool curr_change;
	int err = 0;

	switch (cmd) {
	case RTM_SETLINK:
		if (p) {
			/* if the MASTER flag is set this will act on the global
			 * per-VLAN entry as well
			 */
			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
					   &curr_change);
		} else {
			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, vinfo->vid, vinfo->flags,
					  &curr_change);
		}
		if (curr_change)
			*changed = true;
		break;

	case RTM_DELLINK:
		if (p) {
			if (!nbp_vlan_delete(p, vinfo->vid))
				*changed = true;

			if ((vinfo->flags & BRIDGE_VLAN_INFO_MASTER) &&
			    !br_vlan_delete(p->br, vinfo->vid))
				*changed = true;
		} else if (!br_vlan_delete(br, vinfo->vid)) {
			*changed = true;
		}
		break;
	}

	return err;
}
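
/* Handle one IFLA_BRIDGE_VLAN_INFO attribute; single entries go straight to
 * br_vlan_info(), while RANGE_BEGIN/RANGE_END pairs are expanded into
 * per-VID calls.
 */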
static int br_process_vlan_info(struct net_bridge *br,
				struct net_bridge_port *p, int cmd,
				struct bridge_vlan_info *vinfo_curr,
				struct bridge_vlan_info **vinfo_last,
				bool *changed)
{
	if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK)
		return -EINVAL;

	if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
		/* check if we are already processing a range */
		if (*vinfo_last)
			return -EINVAL;
		*vinfo_last = vinfo_curr;
		/* don't allow range of pvids */
		if ((*vinfo_last)->flags & BRIDGE_VLAN_INFO_PVID)
			return -EINVAL;
		return 0;
	}

	if (*vinfo_last) {
		struct bridge_vlan_info tmp_vinfo;
		int v, err;

		if (!(vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END))
			return -EINVAL;

		if (vinfo_curr->vid <= (*vinfo_last)->vid)
			return -EINVAL;

		memcpy(&tmp_vinfo, *vinfo_last,
		       sizeof(struct bridge_vlan_info));
		for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
			tmp_vinfo.vid = v;
			err = br_vlan_info(br, p, cmd, &tmp_vinfo, changed);
			if (err)
				break;
		}
		*vinfo_last = NULL;

		return err;
	}

	return br_vlan_info(br, p, cmd, vinfo_curr, changed);
}
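
/* Walk the nested IFLA_AF_SPEC attributes (VLAN info and, where the port
 * allows it, VLAN tunnel info) and apply them for RTM_SETLINK/RTM_DELLINK;
 * this is the path taken by e.g. iproute2's "bridge vlan add/del".
 */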
static int br_afspec(struct net_bridge *br,
		     struct net_bridge_port *p,
		     struct nlattr *af_spec,
		     int cmd, bool *changed)
{
	struct bridge_vlan_info *vinfo_curr = NULL;
	struct bridge_vlan_info *vinfo_last = NULL;
	struct nlattr *attr;
	struct vtunnel_info tinfo_last = {};
	struct vtunnel_info tinfo_curr = {};
	int err = 0, rem;

	nla_for_each_nested(attr, af_spec, rem) {
		err = 0;
		switch (nla_type(attr)) {
		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
			if (!p || !(p->flags & BR_VLAN_TUNNEL))
				return -EINVAL;
			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
			if (err)
				return err;
			err = br_process_vlan_tunnel_info(br, p, cmd,
							  &tinfo_curr,
							  &tinfo_last,
							  changed);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_VLAN_INFO:
			if (nla_len(attr) != sizeof(struct bridge_vlan_info))
				return -EINVAL;
			vinfo_curr = nla_data(attr);
			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
						   &vinfo_last, changed);
			if (err)
				return err;
			break;
		}
	}

	return err;
}

static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]	= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]	= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_VLAN_TUNNEL] = { .type = NLA_U8 },
	[IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
	[IFLA_BRPORT_ISOLATED]	= { .type = NLA_U8 },
	[IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
};

/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
	if (state > BR_STATE_BLOCKING)
		return -EINVAL;

	/* if kernel STP is running, don't allow changes */
	if (p->br->stp_enabled == BR_KERNEL_STP)
		return -EBUSY;

	/* if device is not up, change is not allowed
	 * if link is not present, only allowable state is disabled
	 */
	if (!netif_running(p->dev) ||
	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
		return -ENETDOWN;

	br_set_state(p, state);
	br_port_state_selection(p->br);
	return 0;
}

/* Set or clear a port flag based on the corresponding netlink attribute */
static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			    int attrtype, unsigned long mask)
{
	unsigned long flags;
	int err;

	if (!tb[attrtype])
		return 0;

	if (nla_get_u8(tb[attrtype]))
		flags = p->flags | mask;
	else
		flags = p->flags & ~mask;

	err = br_switchdev_set_port_flag(p, flags, mask);
	if (err)
		return err;

	p->flags = flags;
	return 0;
}

/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
{
	unsigned long old_flags = p->flags;
	bool br_vlan_tunnel_old = false;
	int err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
	if (err)
		return err;

	br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
	err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
	if (err)
		return err;

	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
		nbp_vlan_tunnel_info_flush(p);

	if (tb[IFLA_BRPORT_COST]) {
		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_PRIORITY]) {
		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_STATE]) {
		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_FLUSH])
		br_fdb_delete_by_port(p->br, p, 0, 0);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);

		err = br_multicast_set_port_router(p, mcast_router);
		if (err)
			return err;
	}
#endif

	if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_MACPAUSE)
			return -EINVAL;
		p->group_fwd_mask = fwd_mask;
	}

	err = br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS,
			       BR_NEIGH_SUPPRESS);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
	if (err)
		return err;

	if (tb[IFLA_BRPORT_BACKUP_PORT]) {
		struct net_device *backup_dev = NULL;
		u32 backup_ifindex;

		backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]);
		if (backup_ifindex) {
			backup_dev = __dev_get_by_index(dev_net(p->dev),
							backup_ifindex);
			if (!backup_dev)
				return -ENOENT;
		}

		err = nbp_backup_change(p, backup_dev);
		if (err)
			return err;
	}

	br_port_flags_change(p, old_flags ^ p->flags);
	return 0;
}

/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
	struct net_bridge_port *p;
	struct nlattr *protinfo;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!protinfo && !afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself if the AF_SPEC
	 * is set to see if someone is setting vlan info on the bridge
	 */
	if (!p && !afspec)
		return -EINVAL;

	if (p && protinfo) {
		if (protinfo->nla_type & NLA_F_NESTED) {
			err = nla_parse_nested(tb, IFLA_BRPORT_MAX, protinfo,
					       br_port_policy, NULL);
			if (err)
				return err;

			spin_lock_bh(&p->br->lock);
			err = br_setport(p, tb);
			spin_unlock_bh(&p->br->lock);
		} else {
			/* Binary compatibility with old RSTP */
			if (nla_len(protinfo) < sizeof(u8))
				return -EINVAL;

			spin_lock_bh(&p->br->lock);
			err = br_set_port_state(p, nla_get_u8(protinfo));
			spin_unlock_bh(&p->br->lock);
		}
		if (err)
			goto out;

		changed = true;
	}

	if (afspec)
		err = br_afspec(br, p, afspec, RTM_SETLINK, &changed);

	if (changed)
		br_ifinfo_notify(RTM_NEWLINK, br, p);
out:
	return err;
}

/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *afspec;
	bool changed = false;
	int err = 0;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself as well */
	if (!p && !(dev->priv_flags & IFF_EBRIDGE))
		return -EINVAL;

	err = br_afspec(br, p, afspec, RTM_DELLINK, &changed);
	if (changed)
		/* Send RTM_NEWLINK because userspace
		 * expects RTM_NEWLINK for vlan dels
		 */
		br_ifinfo_notify(RTM_NEWLINK, br, p);

	return err;
}
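
/* Validate the netlink attributes before a bridge device is created or
 * reconfigured.
 */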
static int br_validate(struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
		case htons(ETH_P_8021Q):
		case htons(ETH_P_8021AD):
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		if (defpvid >= VLAN_VID_MASK)
			return -EINVAL;
	}
#endif

	return 0;
}

static int br_port_slave_changelink(struct net_device *brdev,
				    struct net_device *dev,
				    struct nlattr *tb[],
				    struct nlattr *data[],
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int ret;

	if (!data)
		return 0;

	spin_lock_bh(&br->lock);
	ret = br_setport(br_port_get_rtnl(dev), data);
	spin_unlock_bh(&br->lock);

	return ret;
}

static int br_port_fill_slave_info(struct sk_buff *skb,
				   const struct net_device *brdev,
				   const struct net_device *dev)
{
	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}

static size_t br_port_get_slave_size(const struct net_device *brdev,
				     const struct net_device *dev)
{
	return br_port_info_size();
}

static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
	[IFLA_BR_FORWARD_DELAY]	= { .type = NLA_U32 },
	[IFLA_BR_HELLO_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_MAX_AGE]	= { .type = NLA_U32 },
	[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
	[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
				 .len  = ETH_ALEN },
	[IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
};
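
/* Apply IFLA_BR_* attributes to a bridge device; also called from
 * br_dev_newlink() once the new device has been registered.
 */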
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
			 struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BR_FORWARD_DELAY]) {
		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_HELLO_TIME]) {
		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_MAX_AGE]) {
		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_AGEING_TIME]) {
		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_STP_STATE]) {
		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);

		br_stp_set_enabled(br, stp_enabled);
	}

	if (data[IFLA_BR_PRIORITY]) {
		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);

		br_stp_set_bridge_priority(br, priority);
	}

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);

		err = __br_vlan_filter_toggle(br, vlan_filter);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);

		err = __br_vlan_set_proto(br, vlan_proto);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		err = __br_vlan_set_default_pvid(br, defpvid);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);

		err = br_vlan_set_stats(br, vlan_stats);
		if (err)
			return err;
	}
#endif

	if (data[IFLA_BR_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
			return -EINVAL;
		br->group_fwd_mask = fwd_mask;
	}

	if (data[IFLA_BR_GROUP_ADDR]) {
		u8 new_addr[ETH_ALEN];

		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
			return -EINVAL;
		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
		if (!is_link_local_ether_addr(new_addr))
			return -EINVAL;

		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
		    new_addr[5] == 3)		/* 802.1X PAE address */
			return -EINVAL;
		spin_lock_bh(&br->lock);
		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
		spin_unlock_bh(&br->lock);
		br->group_addr_set = true;
		br_recalculate_fwd_mask(br);
	}

	if (data[IFLA_BR_FDB_FLUSH])
		br_fdb_flush(br);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (data[IFLA_BR_MCAST_ROUTER]) {
		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);

		err = br_multicast_set_router(br, multicast_router);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_SNOOPING]) {
		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);

		err = br_multicast_toggle(br, mcast_snooping);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
		u8 val;

		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
		br->multicast_query_use_ifaddr = !!val;
	}

	if (data[IFLA_BR_MCAST_QUERIER]) {
		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);

		err = br_multicast_set_querier(br, mcast_querier);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_HASH_ELASTICITY]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]);

		br->hash_elasticity = val;
	}

	if (data[IFLA_BR_MCAST_HASH_MAX]) {
		u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);

		err = br_multicast_set_hash_max(br, hash_max);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);

		br->multicast_last_member_count = val;
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);

		br->multicast_startup_query_count = val;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);

		br->multicast_last_member_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);

		br->multicast_membership_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);

		br->multicast_querier_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);

		br->multicast_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);

		br->multicast_query_response_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);

		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
		__u8 mcast_stats;

		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
		br->multicast_stats_enabled = !!mcast_stats;
	}

	if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
		__u8 igmp_version;

		igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(br, igmp_version);
		if (err)
			return err;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_BR_MCAST_MLD_VERSION]) {
		__u8 mld_version;

		mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(br, mld_version);
		if (err)
			return err;
	}
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);

		br->nf_call_iptables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);

		br->nf_call_ip6tables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);

		br->nf_call_arptables = val ? true : false;
	}
#endif

	return 0;
}
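
/* Create a bridge device from RTM_NEWLINK: register the netdevice, then
 * apply any supplied attributes; on failure the device is deleted again.
 */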
static int br_dev_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	err = register_netdevice(dev);
	if (err)
		return err;

	if (tb[IFLA_ADDRESS]) {
		spin_lock_bh(&br->lock);
		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
		spin_unlock_bh(&br->lock);
	}

	err = br_changelink(dev, tb, data, extack);
	if (err)
		br_dev_delete(dev, NULL);

	return err;
}

static size_t br_get_size(const struct net_device *brdev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_AGEING_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_STP_STATE */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_PRIORITY */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_VLAN_DEFAULT_PVID */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_ENABLED */
#endif
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_GROUP_FWD_MASK */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_ROOT_ID */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_BRIDGE_ID */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_ROOT_PORT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_ROOT_PATH_COST */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
	       nla_total_size(ETH_ALEN) +	/* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_ROUTER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_SNOOPING */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERY_USE_IFADDR */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERIER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_STATS_ENABLED */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_ELASTICITY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_MAX */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_LAST_MEMBER_CNT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_IGMP_VERSION */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_MLD_VERSION */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IPTABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IP6TABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_ARPTABLES */
#endif
	       0;
}

static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
	struct net_bridge *br = netdev_priv(brdev);
	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
	u32 hello_time = jiffies_to_clock_t(br->hello_time);
	u32 age_time = jiffies_to_clock_t(br->max_age);
	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
	u32 stp_enabled = br->stp_enabled;
	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
	u8 vlan_enabled = br_vlan_enabled(br->dev);
	u64 clockval;

	clockval = br_timer_value(&br->hello_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->tcn_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->topology_change_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->gc_work.timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &br->bridge_id) ||
	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &br->designated_root) ||
	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
		       br->topology_change_detected) ||
	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, br->vlan_stats_enabled))
		return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
		       br->multicast_query_use_ifaddr) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
		       br->multicast_stats_enabled) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
			br->hash_elasticity) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
			br->multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
			br->multicast_startup_query_count) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
		       br->multicast_igmp_version))
		return -EMSGSIZE;
#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
		       br->multicast_mld_version))
		return -EMSGSIZE;
#endif
	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
		       br->nf_call_iptables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
		       br->nf_call_ip6tables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
		       br->nf_call_arptables ? 1 : 0))
		return -EMSGSIZE;
#endif

	return 0;
}
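
/* Worst-case size of the LINK_XSTATS_TYPE_BRIDGE dump for this device:
 * one bridge_vlan_xstats entry per VLAN plus the multicast statistics.
 */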
static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int numvls = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return 0;
	}

	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size(sizeof(struct br_mcast_stats)) +
	       nla_total_size(0);
}
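
/* Fill per-VLAN and multicast extended statistics for the bridge or one of
 * its ports; *prividx lets an interrupted dump resume from the last VLAN.
 */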
static int br_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx, int attr)
{
	struct nlattr *nla __maybe_unused;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	struct nlattr *nest;
	int vl_idx = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return -EINVAL;
	}

	nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;

	if (vg) {
		u16 pvid;

		pvid = br_get_pvid(vg);
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct br_vlan_stats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
			vxi.flags = v->flags;
			if (v->vid == pvid)
				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = stats.rx_bytes;
			vxi.rx_packets = stats.rx_packets;
			vxi.tx_bytes = stats.tx_bytes;
			vxi.tx_packets = stats.tx_packets;

			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;
		br_multicast_get_stats(br, p, nla_data(nla));
	}
#endif
	nla_nest_end(skb, nest);
	*prividx = 0;

	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}

static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family			= AF_BRIDGE,
	.get_link_af_size	= br_get_link_af_size_filtered,
};

struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind			= "bridge",
	.priv_size		= sizeof(struct net_bridge),
	.setup			= br_dev_setup,
	.maxtype		= IFLA_BR_MAX,
	.policy			= br_policy,
	.validate		= br_validate,
	.newlink		= br_dev_newlink,
	.changelink		= br_changelink,
	.dellink		= br_dev_delete,
	.get_size		= br_get_size,
	.fill_info		= br_fill_info,
	.fill_linkxstats	= br_fill_linkxstats,
	.get_linkxstats_size	= br_get_linkxstats_size,

	.slave_maxtype		= IFLA_BRPORT_MAX,
	.slave_policy		= br_port_policy,
	.slave_changelink	= br_port_slave_changelink,
	.get_slave_size		= br_port_get_slave_size,
	.fill_slave_info	= br_port_fill_slave_info,
};

int __init br_netlink_init(void)
{
	int err;

	br_mdb_init();
	rtnl_af_register(&br_af_ops);

	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
	br_mdb_uninit();
	return err;
}

void br_netlink_fini(void)
{
	br_mdb_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}