  1. /*
  2. * net/tipc/bcast.c: TIPC broadcast code
  3. *
  4. * Copyright (c) 2004-2006, 2014-2017, Ericsson AB
  5. * Copyright (c) 2004, Intel Corporation.
  6. * Copyright (c) 2005, 2010-2011, Wind River Systems
  7. * All rights reserved.
  8. *
  9. * Redistribution and use in source and binary forms, with or without
  10. * modification, are permitted provided that the following conditions are met:
  11. *
  12. * 1. Redistributions of source code must retain the above copyright
  13. * notice, this list of conditions and the following disclaimer.
  14. * 2. Redistributions in binary form must reproduce the above copyright
  15. * notice, this list of conditions and the following disclaimer in the
  16. * documentation and/or other materials provided with the distribution.
  17. * 3. Neither the names of the copyright holders nor the names of its
  18. * contributors may be used to endorse or promote products derived from
  19. * this software without specific prior written permission.
  20. *
  21. * Alternatively, this software may be distributed under the terms of the
  22. * GNU General Public License ("GPL") version 2 as published by the Free
  23. * Software Foundation.
  24. *
  25. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  26. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  27. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  28. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  29. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  30. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  31. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  32. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  33. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  34. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  35. * POSSIBILITY OF SUCH DAMAGE.
  36. */
  37. #include <linux/tipc_config.h>
  38. #include "socket.h"
  39. #include "msg.h"
  40. #include "bcast.h"
  41. #include "link.h"
  42. #include "name_table.h"
#define BCLINK_WIN_DEFAULT 50 /* bcast link window size (default) */
#define BCLINK_WIN_MIN 32 /* bcast minimum link window size */

/* Public name of the broadcast link; exported for use in logs and
 * management/netlink replies elsewhere in the stack.
 */
const char tipc_bclink_name[] = "broadcast-link";
/**
 * struct tipc_bc_base - base structure for keeping broadcast send state
 * @link: broadcast send link structure
 * @inputq: data input queue; will only carry SOCK_WAKEUP messages
 * @dests: array keeping number of reachable destinations per bearer
 * @primary_bearer: a bearer having links to all broadcast destinations, if any
 * @bcast_support: indicates if primary bearer, if any, supports broadcast
 * @rcast_support: indicates if all peer nodes support replicast
 * @rc_ratio: dest count as percentage of cluster size where send method changes
 * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
 *
 * NOTE(review): all mutators in this file take tipc_bcast_lock() before
 * touching these fields, so they appear to be protected by the per-namespace
 * broadcast lock — confirm against users outside this file.
 */
struct tipc_bc_base {
	struct tipc_link *link;
	struct sk_buff_head inputq;
	int dests[MAX_BEARERS];
	int primary_bearer;
	bool bcast_support;
	bool rcast_support;
	int rc_ratio;
	int bc_threshold;
};
  67. static struct tipc_bc_base *tipc_bc_base(struct net *net)
  68. {
  69. return tipc_net(net)->bcbase;
  70. }
  71. /* tipc_bcast_get_mtu(): -get the MTU currently used by broadcast link
  72. * Note: the MTU is decremented to give room for a tunnel header, in
  73. * case the message needs to be sent as replicast
  74. */
  75. int tipc_bcast_get_mtu(struct net *net)
  76. {
  77. return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE;
  78. }
  79. void tipc_bcast_disable_rcast(struct net *net)
  80. {
  81. tipc_bc_base(net)->rcast_support = false;
  82. }
  83. static void tipc_bcbase_calc_bc_threshold(struct net *net)
  84. {
  85. struct tipc_bc_base *bb = tipc_bc_base(net);
  86. int cluster_size = tipc_link_bc_peers(tipc_bc_sndlink(net));
  87. bb->bc_threshold = 1 + (cluster_size * bb->rc_ratio / 100);
  88. }
/* tipc_bcbase_select_primary(): find a bearer with links to all destinations,
 * if any, and make it primary bearer
 *
 * Also recomputes bcast_support and keeps the broadcast link MTU at the
 * minimum over all bearers in use. Callers in this file invoke this while
 * holding tipc_bcast_lock().
 */
static void tipc_bcbase_select_primary(struct net *net)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);
	int all_dests = tipc_link_bc_peers(bb->link);
	int i, mtu, prim;

	/* Assume no primary and full bcast support until proven otherwise */
	bb->primary_bearer = INVALID_BEARER_ID;
	bb->bcast_support = true;

	if (!all_dests)
		return;

	for (i = 0; i < MAX_BEARERS; i++) {
		if (!bb->dests[i])
			continue;

		/* Shrink bcast link MTU to the smallest bearer MTU in use */
		mtu = tipc_bearer_mtu(net, i);
		if (mtu < tipc_link_mtu(bb->link))
			tipc_link_set_mtu(bb->link, mtu);

		/* One non-bcast-capable bearer disables bcast overall */
		bb->bcast_support &= tipc_bearer_bcast_support(net, i);

		/* Only a bearer reaching ALL destinations can be primary */
		if (bb->dests[i] < all_dests)
			continue;

		bb->primary_bearer = i;

		/* Reduce risk that all nodes select same primary */
		if ((i ^ tipc_own_addr(net)) & 1)
			break;
	}

	/* bcast_support reflects the chosen primary only, if one exists */
	prim = bb->primary_bearer;
	if (prim != INVALID_BEARER_ID)
		bb->bcast_support = tipc_bearer_bcast_support(net, prim);
}
  119. void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id)
  120. {
  121. struct tipc_bc_base *bb = tipc_bc_base(net);
  122. tipc_bcast_lock(net);
  123. bb->dests[bearer_id]++;
  124. tipc_bcbase_select_primary(net);
  125. tipc_bcast_unlock(net);
  126. }
  127. void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id)
  128. {
  129. struct tipc_bc_base *bb = tipc_bc_base(net);
  130. tipc_bcast_lock(net);
  131. bb->dests[bearer_id]--;
  132. tipc_bcbase_select_primary(net);
  133. tipc_bcast_unlock(net);
  134. }
/* tipc_bcbase_xmit - broadcast a packet queue across one or more bearers
 *
 * Note that number of reachable destinations, as indicated in the dests[]
 * array, may transitionally differ from the number of destinations indicated
 * in each sent buffer. We can sustain this. Excess destination nodes will
 * drop and never acknowledge the unexpected packets, and missing destinations
 * will either require retransmission (if they are just about to be added to
 * the bearer), or be removed from the buffer's 'ackers' counter (if they
 * just went down)
 *
 * Consumes the buffer chain in @xmitq in all cases.
 */
static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	int bearer_id;
	struct tipc_bc_base *bb = tipc_bc_base(net);
	struct sk_buff *skb, *_skb;
	struct sk_buff_head _xmitq;

	if (skb_queue_empty(xmitq))
		return;

	/* The typical case: at least one bearer has links to all nodes */
	bearer_id = bb->primary_bearer;
	if (bearer_id >= 0) {
		tipc_bearer_bc_xmit(net, bearer_id, xmitq);
		return;
	}

	/* We have to transmit across all bearers */
	skb_queue_head_init(&_xmitq);
	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
		if (!bb->dests[bearer_id])
			continue;

		/* Copy the queue for this bearer; on allocation failure,
		 * send the partial copy made so far (best effort).
		 */
		skb_queue_walk(xmitq, skb) {
			_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
			if (!_skb)
				break;
			__skb_queue_tail(&_xmitq, _skb);
		}
		tipc_bearer_bc_xmit(net, bearer_id, &_xmitq);
	}
	/* Release originals, plus any copies left behind by the bearer */
	__skb_queue_purge(xmitq);
	__skb_queue_purge(&_xmitq);
}
/* tipc_bcast_select_xmit_method - decide, per message flow, whether to use
 * true broadcast or replicast, based on capability and destination count
 * @dests: number of remote destinations for this send
 * @method: per-flow method state; rcast/expires/mandatory updated in place
 *
 * The expiry timestamp gives the chosen method some stickiness, so the
 * method does not flip on every send while destinations fluctuate.
 */
static void tipc_bcast_select_xmit_method(struct net *net, int dests,
					  struct tipc_mc_method *method)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);
	unsigned long exp = method->expires;

	/* Broadcast supported by used bearer/bearers? */
	if (!bb->bcast_support) {
		method->rcast = true;
		return;
	}
	/* Any destinations which don't support replicast ? */
	if (!bb->rcast_support) {
		method->rcast = false;
		return;
	}
	/* Can current method be changed ? */
	method->expires = jiffies + TIPC_METHOD_EXPIRE;
	if (method->mandatory || time_before(jiffies, exp))
		return;
	/* Determine method to use now */
	method->rcast = dests <= bb->bc_threshold;
}
/* tipc_bcast_xmit - broadcast the buffer chain to all external nodes
 * @net: the applicable net namespace
 * @pkts: chain of buffers containing message
 * @cong_link_cnt: set to 1 if broadcast link is congested, otherwise 0
 * Consumes the buffer chain.
 * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE
 */
static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
			   u16 *cong_link_cnt)
{
	struct tipc_link *l = tipc_bc_sndlink(net);
	struct sk_buff_head xmitq;
	int rc = 0;

	skb_queue_head_init(&xmitq);
	tipc_bcast_lock(net);
	/* Only hand packets to the link if there is at least one peer */
	if (tipc_link_bc_peers(l))
		rc = tipc_link_xmit(l, pkts, &xmitq);
	tipc_bcast_unlock(net);
	/* Actual bearer transmission happens outside the broadcast lock */
	tipc_bcbase_xmit(net, &xmitq);
	__skb_queue_purge(pkts);
	/* Link congestion is reported via the counter, not as an error */
	if (rc == -ELINKCONG) {
		*cong_link_cnt = 1;
		rc = 0;
	}
	return rc;
}
/* tipc_rcast_xmit - replicate and send a message to given destination nodes
 * @net: the applicable net namespace
 * @pkts: chain of buffers containing message
 * @dests: list of destination nodes
 * @cong_link_cnt: returns number of congested links
 * Returns 0 if success, otherwise errno
 *
 * @pkts itself is not consumed here; each destination gets its own copy.
 * On -ENOMEM the caller (tipc_mcast_xmit) purges the remaining chain.
 */
static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
			   struct tipc_nlist *dests, u16 *cong_link_cnt)
{
	struct tipc_dest *dst, *tmp;
	struct sk_buff_head _pkts;
	u32 dnode, selector;

	/* Same selector for every copy, so all take the same unicast link */
	selector = msg_link_selector(buf_msg(skb_peek(pkts)));
	skb_queue_head_init(&_pkts);

	list_for_each_entry_safe(dst, tmp, &dests->list, list) {
		dnode = dst->node;
		if (!tipc_msg_pskb_copy(dnode, pkts, &_pkts))
			return -ENOMEM;

		/* Any other return value than -ELINKCONG is ignored */
		if (tipc_node_xmit(net, &_pkts, dnode, selector) == -ELINKCONG)
			(*cong_link_cnt)++;
	}
	return 0;
}
/* tipc_mcast_xmit - deliver message to indicated destination nodes
 * and to identified node local sockets
 * @net: the applicable net namespace
 * @pkts: chain of buffers containing message
 * @method: send method to be used
 * @dests: destination nodes for message.
 * @cong_link_cnt: returns number of encountered congested destination links
 * Consumes buffer chain.
 * Returns 0 if success, otherwise errno
 */
int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
		    struct tipc_mc_method *method, struct tipc_nlist *dests,
		    u16 *cong_link_cnt)
{
	struct sk_buff_head inputq, localq;
	int rc = 0;

	skb_queue_head_init(&inputq);
	skb_queue_head_init(&localq);

	/* Clone packets before they are consumed by next call */
	if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {
		rc = -ENOMEM;
		goto exit;
	}

	/* Send according to determined transmit method */
	if (dests->remote) {
		tipc_bcast_select_xmit_method(net, dests->remote, method);
		if (method->rcast)
			rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt);
		else
			rc = tipc_bcast_xmit(net, pkts, cong_link_cnt);
	}

	/* Local delivery happens even if the remote send failed */
	if (dests->local)
		tipc_sk_mcast_rcv(net, &localq, &inputq);
exit:
	/* This queue should normally be empty by now */
	__skb_queue_purge(pkts);
	return rc;
}
/* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
 *
 * RCU is locked, no other locks set
 *
 * Consumes @skb on every path: dropped when netid mismatches or the link
 * is down, otherwise handed to the link layer.
 */
int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;
	int rc;

	__skb_queue_head_init(&xmitq);

	/* Drop packets from other clusters or while the link is down */
	if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
		kfree_skb(skb);
		return 0;
	}

	tipc_bcast_lock(net);
	if (msg_user(hdr) == BCAST_PROTOCOL)
		rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
	else
		rc = tipc_link_rcv(l, skb, NULL);
	tipc_bcast_unlock(net);

	/* Send any (re)transmissions generated above, outside the lock */
	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
	return rc;
}
/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledge
 *
 * RCU is locked, no other locks set
 */
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
			struct tipc_msg *hdr)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	u16 acked = msg_bcast_ack(hdr);
	struct sk_buff_head xmitq;

	/* Ignore bc acks sent by peer before bcast synch point was received */
	if (msg_bc_ack_invalid(hdr))
		return;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	tipc_link_bc_ack_rcv(l, acked, &xmitq);
	tipc_bcast_unlock(net);

	/* Retransmissions triggered by the ack go out without the lock */
	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}
/* tipc_bcast_sync_rcv - check and update rcv link with peer's send state
 *
 * RCU is locked, no other locks set
 *
 * A non-STATE message initializes the receive link from the peer's header;
 * a STATE message with a valid ack updates ack and send state.
 */
int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
			struct tipc_msg *hdr)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;
	int rc = 0;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	if (msg_type(hdr) != STATE_MSG) {
		tipc_link_bc_init_rcv(l, hdr);
	} else if (!msg_bc_ack_invalid(hdr)) {
		tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
		rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq);
	}
	tipc_bcast_unlock(net);

	/* Transmit outside the broadcast lock */
	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
	return rc;
}
/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
 * @uc_l: the peer's unicast link, used to register the new bc receiver
 * @xmitq: queue receiving any messages generated by the link layer
 *
 * RCU is locked, node lock is set
 */
void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l,
			 struct sk_buff_head *xmitq)
{
	struct tipc_link *snd_l = tipc_bc_sndlink(net);

	tipc_bcast_lock(net);
	tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
	/* Peer set changed: re-pick primary bearer and bcast threshold */
	tipc_bcbase_select_primary(net);
	tipc_bcbase_calc_bc_threshold(net);
	tipc_bcast_unlock(net);
}
/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
 *
 * RCU is locked, node lock is set
 */
void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
{
	struct tipc_link *snd_l = tipc_bc_sndlink(net);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
	/* Peer set changed: re-pick primary bearer and bcast threshold */
	tipc_bcbase_select_primary(net);
	tipc_bcbase_calc_bc_threshold(net);
	tipc_bcast_unlock(net);

	/* Removing a peer may unblock queued traffic; send it now */
	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}
  395. int tipc_bclink_reset_stats(struct net *net)
  396. {
  397. struct tipc_link *l = tipc_bc_sndlink(net);
  398. if (!l)
  399. return -ENOPROTOOPT;
  400. tipc_bcast_lock(net);
  401. tipc_link_reset_stats(l);
  402. tipc_bcast_unlock(net);
  403. return 0;
  404. }
  405. static int tipc_bc_link_set_queue_limits(struct net *net, u32 limit)
  406. {
  407. struct tipc_link *l = tipc_bc_sndlink(net);
  408. if (!l)
  409. return -ENOPROTOOPT;
  410. if (limit < BCLINK_WIN_MIN)
  411. limit = BCLINK_WIN_MIN;
  412. if (limit > TIPC_MAX_LINK_WIN)
  413. return -EINVAL;
  414. tipc_bcast_lock(net);
  415. tipc_link_set_queue_limits(l, limit);
  416. tipc_bcast_unlock(net);
  417. return 0;
  418. }
  419. int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
  420. {
  421. int err;
  422. u32 win;
  423. struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
  424. if (!attrs[TIPC_NLA_LINK_PROP])
  425. return -EINVAL;
  426. err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
  427. if (err)
  428. return err;
  429. if (!props[TIPC_NLA_PROP_WIN])
  430. return -EOPNOTSUPP;
  431. win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
  432. return tipc_bc_link_set_queue_limits(net, win);
  433. }
  434. int tipc_bcast_init(struct net *net)
  435. {
  436. struct tipc_net *tn = tipc_net(net);
  437. struct tipc_bc_base *bb = NULL;
  438. struct tipc_link *l = NULL;
  439. bb = kzalloc(sizeof(*bb), GFP_KERNEL);
  440. if (!bb)
  441. goto enomem;
  442. tn->bcbase = bb;
  443. spin_lock_init(&tipc_net(net)->bclock);
  444. if (!tipc_link_bc_create(net, 0, 0,
  445. FB_MTU,
  446. BCLINK_WIN_DEFAULT,
  447. 0,
  448. &bb->inputq,
  449. NULL,
  450. NULL,
  451. &l))
  452. goto enomem;
  453. bb->link = l;
  454. tn->bcl = l;
  455. bb->rc_ratio = 25;
  456. bb->rcast_support = true;
  457. return 0;
  458. enomem:
  459. kfree(bb);
  460. kfree(l);
  461. return -ENOMEM;
  462. }
  463. void tipc_bcast_stop(struct net *net)
  464. {
  465. struct tipc_net *tn = net_generic(net, tipc_net_id);
  466. synchronize_net();
  467. kfree(tn->bcbase);
  468. kfree(tn->bcl);
  469. }
/* tipc_nlist_init - initialize a destination node list
 * @nl: list to initialize; fully zeroed before use
 * @self: own node address, used to distinguish local from remote adds
 */
void tipc_nlist_init(struct tipc_nlist *nl, u32 self)
{
	memset(nl, 0, sizeof(*nl));
	INIT_LIST_HEAD(&nl->list);
	nl->self = self;
}
  476. void tipc_nlist_add(struct tipc_nlist *nl, u32 node)
  477. {
  478. if (node == nl->self)
  479. nl->local = true;
  480. else if (tipc_dest_push(&nl->list, node, 0))
  481. nl->remote++;
  482. }
  483. void tipc_nlist_del(struct tipc_nlist *nl, u32 node)
  484. {
  485. if (node == nl->self)
  486. nl->local = false;
  487. else if (tipc_dest_del(&nl->list, node, 0))
  488. nl->remote--;
  489. }
  490. void tipc_nlist_purge(struct tipc_nlist *nl)
  491. {
  492. tipc_dest_list_purge(&nl->list);
  493. nl->remote = 0;
  494. nl->local = false;
  495. }