  1. /*
  2. * net/tipc/monitor.c
  3. *
  4. * Copyright (c) 2016, Ericsson AB
  5. * All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without
  8. * modification, are permitted provided that the following conditions are met:
  9. *
  10. * 1. Redistributions of source code must retain the above copyright
  11. * notice, this list of conditions and the following disclaimer.
  12. * 2. Redistributions in binary form must reproduce the above copyright
  13. * notice, this list of conditions and the following disclaimer in the
  14. * documentation and/or other materials provided with the distribution.
  15. * 3. Neither the names of the copyright holders nor the names of its
  16. * contributors may be used to endorse or promote products derived from
  17. * this software without specific prior written permission.
  18. *
  19. * Alternatively, this software may be distributed under the terms of the
  20. * GNU General Public License ("GPL") version 2 as published by the Free
  21. * Software Foundation.
  22. *
  23. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  24. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  25. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  26. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  27. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  28. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  29. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  30. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  31. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  32. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  33. * POSSIBILITY OF SUCH DAMAGE.
  34. */
  35. #include <net/genetlink.h>
  36. #include "core.h"
  37. #include "addr.h"
  38. #include "monitor.h"
  39. #include "bearer.h"
  40. #define MAX_MON_DOMAIN 64
  41. #define MON_TIMEOUT 120000
  42. #define MAX_PEER_DOWN_EVENTS 4
/* struct tipc_mon_domain: domain record to be transferred between peers
 * @len: actual size of domain record
 * @gen: current generation of sender's domain
 * @ack_gen: most recent generation of self's domain acked by peer
 * @member_cnt: number of domain member nodes described in this record
 * @up_map: bit map indicating which of the members the sender considers up
 * @members: identity of the domain members
 *
 * This layout goes on the wire; the pre-built copy in tipc_monitor::cache
 * keeps the fields in network byte order (see mon_update_local_domain()).
 */
struct tipc_mon_domain {
	u16 len;
	u16 gen;
	u16 ack_gen;
	u16 member_cnt;
	u64 up_map;
	u32 members[MAX_MON_DOMAIN];
};
/* struct tipc_peer: state of a peer node and its domain
 * @addr: tipc node identity of peer
 * @domain: most recent domain record from peer (freed when peer goes down)
 * @hash: position in hashed lookup list
 * @list: position in linked list, in circular ascending order by 'addr'
 * @applied: number of reported domain members applied on this monitor list
 * @down_cnt: number of other peers which have reported this peer lost
 * @is_up: peer is up as seen from this node
 * @is_head: peer is assigned domain head as seen from this node
 * @is_local: peer is in local domain and should be continuously monitored
 */
struct tipc_peer {
	u32 addr;
	struct tipc_mon_domain *domain;
	struct hlist_node hash;
	struct list_head list;
	u8 applied;
	u8 down_cnt;
	bool is_up;
	bool is_head;
	bool is_local;
};
/* struct tipc_monitor: per-bearer link monitor state
 * @peers: hash table over all known peers on this bearer
 * @peer_cnt: number of peers, including the own node (starts at 1)
 * @self: peer record representing the own node
 * @lock: protects the peer list/hash and all peer state
 * @cache: pre-built own domain record, kept in network byte order
 * @list_gen: generation of the monitor list, bumped by mon_assign_roles()
 * @dom_gen: generation of the own domain record
 * @net: owning network namespace
 * @timer: periodic re-evaluation of the local domain (mon_timeout())
 * @timer_intv: timer interval; MON_TIMEOUT plus a per-node random offset
 */
struct tipc_monitor {
	struct hlist_head peers[NODE_HTABLE_SIZE];
	int peer_cnt;
	struct tipc_peer *self;
	rwlock_t lock;
	struct tipc_mon_domain cache;
	u16 list_gen;
	u16 dom_gen;
	struct net *net;
	struct timer_list timer;
	unsigned long timer_intv;
};
  94. static struct tipc_monitor *tipc_monitor(struct net *net, int bearer_id)
  95. {
  96. return tipc_net(net)->monitors[bearer_id];
  97. }
/* Upper bound for a domain record on the wire, exported for sanity checks */
const int tipc_max_domain_size = sizeof(struct tipc_mon_domain);

/* dom_rec_len(): actual length of domain record for transport,
 * i.e. the fixed header up to 'members' plus @mcnt member identities
 */
static int dom_rec_len(struct tipc_mon_domain *dom, u16 mcnt)
{
	return ((void *)&dom->members - (void *)dom) + (mcnt * sizeof(u32));
}
  105. /* dom_size() : calculate size of own domain based on number of peers
  106. */
  107. static int dom_size(int peers)
  108. {
  109. int i = 0;
  110. while ((i * i) < peers)
  111. i++;
  112. return i < MAX_MON_DOMAIN ? i : MAX_MON_DOMAIN;
  113. }
  114. static void map_set(u64 *up_map, int i, unsigned int v)
  115. {
  116. *up_map &= ~(1ULL << i);
  117. *up_map |= ((u64)v << i);
  118. }
  119. static int map_get(u64 up_map, int i)
  120. {
  121. return (up_map & (1 << i)) >> i;
  122. }
/* peer_prev() : circular predecessor of @peer on the monitor list */
static struct tipc_peer *peer_prev(struct tipc_peer *peer)
{
	return list_last_entry(&peer->list, struct tipc_peer, list);
}
/* peer_nxt() : circular successor of @peer on the monitor list */
static struct tipc_peer *peer_nxt(struct tipc_peer *peer)
{
	return list_first_entry(&peer->list, struct tipc_peer, list);
}
/* peer_head() : walk backwards to the domain head responsible for @peer.
 * Terminates because the own node always has is_head set (tipc_mon_create())
 * and never loses it while present on the list.
 */
static struct tipc_peer *peer_head(struct tipc_peer *peer)
{
	while (!peer->is_head)
		peer = peer_prev(peer);
	return peer;
}
  137. static struct tipc_peer *get_peer(struct tipc_monitor *mon, u32 addr)
  138. {
  139. struct tipc_peer *peer;
  140. unsigned int thash = tipc_hashfn(addr);
  141. hlist_for_each_entry(peer, &mon->peers[thash], hash) {
  142. if (peer->addr == addr)
  143. return peer;
  144. }
  145. return NULL;
  146. }
  147. static struct tipc_peer *get_self(struct net *net, int bearer_id)
  148. {
  149. struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
  150. return mon->self;
  151. }
  152. static inline bool tipc_mon_is_active(struct net *net, struct tipc_monitor *mon)
  153. {
  154. struct tipc_net *tn = tipc_net(net);
  155. return mon->peer_cnt > tn->mon_threshold;
  156. }
/* mon_identify_lost_members() : - identify and mark potentially lost members
 * @peer: domain head whose domain record just changed or was invalidated
 * @dom_bef: the domain record @peer reported before the change
 * @applied_bef: how many of those members were applied on our monitor list
 *
 * Bumps down_cnt on members that @peer monitored for us and no longer
 * reports as up; a non-zero down_cnt triggers active probing of that
 * member (see tipc_mon_get_state()).
 */
static void mon_identify_lost_members(struct tipc_peer *peer,
				      struct tipc_mon_domain *dom_bef,
				      int applied_bef)
{
	struct tipc_peer *member = peer;
	struct tipc_mon_domain *dom_aft = peer->domain;
	int applied_aft = peer->applied;
	int i;

	/* Members sit consecutively after @peer on the monitor list */
	for (i = 0; i < applied_bef; i++) {
		member = peer_nxt(member);

		/* Do nothing if self or peer already see member as down */
		if (!member->is_up || !map_get(dom_bef->up_map, i))
			continue;

		/* Loss of local node must be detected by active probing */
		if (member->is_local)
			continue;

		/* Start probing if member was removed from applied domain */
		if (!applied_aft || (applied_aft < i)) {
			member->down_cnt = 1;
			continue;
		}

		/* Member loss is confirmed if it is still in applied domain */
		if (!map_get(dom_aft->up_map, i))
			member->down_cnt++;
	}
}
/* mon_apply_domain() : match a peer's domain record against monitor list
 *
 * peer->applied ends up as the count of leading entries in the peer's
 * domain record that exactly match @peer's consecutive successors on our
 * own monitor list; matching stops at the first mismatch.
 */
static void mon_apply_domain(struct tipc_monitor *mon,
			     struct tipc_peer *peer)
{
	struct tipc_mon_domain *dom = peer->domain;
	struct tipc_peer *member;
	u32 addr;
	int i;

	/* Nothing to apply for a down peer or one without a record */
	if (!dom || !peer->is_up)
		return;

	/* Scan across domain members and match against monitor list */
	peer->applied = 0;
	member = peer_nxt(peer);
	for (i = 0; i < dom->member_cnt; i++) {
		addr = dom->members[i];
		if (addr != member->addr)
			return;
		peer->applied++;
		member = peer_nxt(member);
	}
}
/* mon_update_local_domain() : update after peer addition/removal/up/down
 *
 * Rebuilds the own domain record (the dom_size() - 1 peers following self
 * on the monitor list) and, only if anything actually changed, bumps
 * dom_gen and refreshes the network-byte-order cache used by
 * tipc_mon_prep().
 */
static void mon_update_local_domain(struct tipc_monitor *mon)
{
	struct tipc_peer *self = mon->self;
	struct tipc_mon_domain *cache = &mon->cache;
	struct tipc_mon_domain *dom = self->domain;
	struct tipc_peer *peer = self;
	u64 prev_up_map = dom->up_map;
	u16 member_cnt, i;
	bool diff;

	/* Update local domain size based on current size of cluster */
	member_cnt = dom_size(mon->peer_cnt) - 1;
	self->applied = member_cnt;

	/* Update native and cached outgoing local domain records */
	dom->len = dom_rec_len(dom, member_cnt);
	diff = dom->member_cnt != member_cnt;
	dom->member_cnt = member_cnt;
	for (i = 0; i < member_cnt; i++) {
		peer = peer_nxt(peer);
		diff |= dom->members[i] != peer->addr;
		dom->members[i] = peer->addr;
		map_set(&dom->up_map, i, peer->is_up);
		cache->members[i] = htonl(peer->addr);
	}
	diff |= dom->up_map != prev_up_map;
	if (!diff)
		return;
	dom->gen = ++mon->dom_gen;
	cache->len = htons(dom->len);
	cache->gen = htons(dom->gen);
	cache->member_cnt = htons(member_cnt);
	cache->up_map = cpu_to_be64(dom->up_map);
	mon_apply_domain(mon, self);
}
  242. /* mon_update_neighbors() : update preceding neighbors of added/removed peer
  243. */
  244. static void mon_update_neighbors(struct tipc_monitor *mon,
  245. struct tipc_peer *peer)
  246. {
  247. int dz, i;
  248. dz = dom_size(mon->peer_cnt);
  249. for (i = 0; i < dz; i++) {
  250. mon_apply_domain(mon, peer);
  251. peer = peer_prev(peer);
  252. }
  253. }
/* mon_assign_roles() : reassign peer roles after a network change
 * The monitor list is consistent at this stage; i.e., each peer is monitoring
 * a set of domain members as matched between domain record and the monitor list
 *
 * Walks the circle from @head back around to self: peers covered by the
 * current head's applied domain lose head status (and become 'local' when
 * self is the head); the first 'up' peer beyond a domain becomes the next
 * head. Stops early at a peer that already is a head. Finally bumps
 * list_gen so cached link states are refreshed (tipc_mon_get_state()).
 */
static void mon_assign_roles(struct tipc_monitor *mon, struct tipc_peer *head)
{
	struct tipc_peer *peer = peer_nxt(head);
	struct tipc_peer *self = mon->self;
	int i = 0;

	for (; peer != self; peer = peer_nxt(peer)) {
		peer->is_local = false;

		/* Update domain member */
		if (i++ < head->applied) {
			peer->is_head = false;
			if (head == self)
				peer->is_local = true;
			continue;
		}
		/* Assign next domain head */
		if (!peer->is_up)
			continue;
		if (peer->is_head)
			break;
		head = peer;
		head->is_head = true;
		i = 0;
	}
	mon->list_gen++;
}
/* tipc_mon_remove_peer() : delete the peer record for @addr and let its
 * former neighbors take over its monitoring duties
 */
void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *self = get_self(net, bearer_id);
	struct tipc_peer *peer, *prev, *head;

	write_lock_bh(&mon->lock);
	peer = get_peer(mon, addr);
	if (!peer)
		goto exit;
	prev = peer_prev(peer);
	list_del(&peer->list);
	hlist_del(&peer->hash);
	kfree(peer->domain);
	kfree(peer);
	mon->peer_cnt--;

	/* The predecessor's domain (possibly our own) must be re-applied */
	head = peer_head(prev);
	if (head == self)
		mon_update_local_domain(mon);
	mon_update_neighbors(mon, prev);

	/* Revert to full-mesh monitoring if we reach threshold */
	if (!tipc_mon_is_active(net, mon)) {
		list_for_each_entry(peer, &self->list, list) {
			kfree(peer->domain);
			peer->domain = NULL;
			peer->applied = 0;
		}
	}
	mon_assign_roles(mon, head);
exit:
	write_unlock_bh(&mon->lock);
}
/* tipc_mon_add_peer() : allocate a record for peer @addr and link it into
 * both the hash table and the circular, address-ordered monitor list.
 * Returns false (with *peer set to NULL) on allocation failure.
 */
static bool tipc_mon_add_peer(struct tipc_monitor *mon, u32 addr,
			      struct tipc_peer **peer)
{
	struct tipc_peer *self = mon->self;
	struct tipc_peer *cur, *prev, *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	*peer = p;
	if (!p)
		return false;
	p->addr = addr;

	/* Add new peer to lookup list */
	INIT_LIST_HEAD(&p->list);
	hlist_add_head(&p->hash, &mon->peers[tipc_hashfn(addr)]);

	/* Sort new peer into iterator list, in ascending circular order */
	prev = self;
	list_for_each_entry(cur, &self->list, list) {
		/* Plain insertion point between two ascending neighbors */
		if ((addr > prev->addr) && (addr < cur->addr))
			break;
		/* Insertion point at the wrap-around of the circle */
		if (((addr < cur->addr) || (addr > prev->addr)) &&
		    (prev->addr > cur->addr))
			break;
		prev = cur;
	}
	list_add_tail(&p->list, &cur->list);
	mon->peer_cnt++;
	mon_update_neighbors(mon, p);
	return true;
}
/* tipc_mon_peer_up() : mark peer @addr as up, creating its record on first
 * contact, and reassign monitoring roles around it
 */
void tipc_mon_peer_up(struct net *net, u32 addr, int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *self = get_self(net, bearer_id);
	struct tipc_peer *peer, *head;

	write_lock_bh(&mon->lock);
	peer = get_peer(mon, addr);
	if (!peer && !tipc_mon_add_peer(mon, addr, &peer))
		goto exit;
	peer->is_up = true;
	head = peer_head(peer);
	if (head == self)
		mon_update_local_domain(mon);
	mon_assign_roles(mon, head);
exit:
	write_unlock_bh(&mon->lock);
}
/* tipc_mon_peer_down() : mark peer @addr as down, invalidate its domain
 * record, and start probing members it was monitoring on our behalf
 */
void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *self = get_self(net, bearer_id);
	struct tipc_peer *peer, *head;
	struct tipc_mon_domain *dom;
	int applied;

	write_lock_bh(&mon->lock);
	peer = get_peer(mon, addr);
	if (!peer) {
		pr_warn("Mon: unknown link %x/%u DOWN\n", addr, bearer_id);
		goto exit;
	}
	/* Snapshot, then drop, the peer's applied domain state */
	applied = peer->applied;
	peer->applied = 0;
	dom = peer->domain;
	peer->domain = NULL;

	/* Only a head was monitoring anybody; check for lost members */
	if (peer->is_head)
		mon_identify_lost_members(peer, dom, applied);
	kfree(dom);
	peer->is_up = false;
	peer->is_head = false;
	peer->is_local = false;
	peer->down_cnt = 0;
	head = peer_head(peer);
	if (head == self)
		mon_update_local_domain(mon);
	mon_assign_roles(mon, head);
exit:
	write_unlock_bh(&mon->lock);
}
  390. /* tipc_mon_rcv - process monitor domain event message
  391. */
  392. void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
  393. struct tipc_mon_state *state, int bearer_id)
  394. {
  395. struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
  396. struct tipc_mon_domain *arrv_dom = data;
  397. struct tipc_mon_domain dom_bef;
  398. struct tipc_mon_domain *dom;
  399. struct tipc_peer *peer;
  400. u16 new_member_cnt = ntohs(arrv_dom->member_cnt);
  401. int new_dlen = dom_rec_len(arrv_dom, new_member_cnt);
  402. u16 new_gen = ntohs(arrv_dom->gen);
  403. u16 acked_gen = ntohs(arrv_dom->ack_gen);
  404. bool probing = state->probing;
  405. int i, applied_bef;
  406. state->probing = false;
  407. /* Sanity check received domain record */
  408. if (dlen < dom_rec_len(arrv_dom, 0))
  409. return;
  410. if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
  411. return;
  412. if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen)
  413. return;
  414. /* Synch generation numbers with peer if link just came up */
  415. if (!state->synched) {
  416. state->peer_gen = new_gen - 1;
  417. state->acked_gen = acked_gen;
  418. state->synched = true;
  419. }
  420. if (more(acked_gen, state->acked_gen))
  421. state->acked_gen = acked_gen;
  422. /* Drop duplicate unless we are waiting for a probe response */
  423. if (!more(new_gen, state->peer_gen) && !probing)
  424. return;
  425. write_lock_bh(&mon->lock);
  426. peer = get_peer(mon, addr);
  427. if (!peer || !peer->is_up)
  428. goto exit;
  429. /* Peer is confirmed, stop any ongoing probing */
  430. peer->down_cnt = 0;
  431. /* Task is done for duplicate record */
  432. if (!more(new_gen, state->peer_gen))
  433. goto exit;
  434. state->peer_gen = new_gen;
  435. /* Cache current domain record for later use */
  436. dom_bef.member_cnt = 0;
  437. dom = peer->domain;
  438. if (dom)
  439. memcpy(&dom_bef, dom, dom->len);
  440. /* Transform and store received domain record */
  441. if (!dom || (dom->len < new_dlen)) {
  442. kfree(dom);
  443. dom = kmalloc(new_dlen, GFP_ATOMIC);
  444. peer->domain = dom;
  445. if (!dom)
  446. goto exit;
  447. }
  448. dom->len = new_dlen;
  449. dom->gen = new_gen;
  450. dom->member_cnt = new_member_cnt;
  451. dom->up_map = be64_to_cpu(arrv_dom->up_map);
  452. for (i = 0; i < new_member_cnt; i++)
  453. dom->members[i] = ntohl(arrv_dom->members[i]);
  454. /* Update peers affected by this domain record */
  455. applied_bef = peer->applied;
  456. mon_apply_domain(mon, peer);
  457. mon_identify_lost_members(peer, &dom_bef, applied_bef);
  458. mon_assign_roles(mon, peer_head(peer));
  459. exit:
  460. write_unlock_bh(&mon->lock);
  461. }
/* tipc_mon_prep() : prepare the outgoing domain record for a state message
 * to the peer tracked by @state; *dlen is set to the number of bytes used.
 *
 * NOTE(review): in the inactive branch only dom->len is zeroed and *dlen
 * is left untouched — presumably the caller treats len == 0 as "no
 * record"; verify against the caller in link.c.
 */
void tipc_mon_prep(struct net *net, void *data, int *dlen,
		   struct tipc_mon_state *state, int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_mon_domain *dom = data;
	u16 gen = mon->dom_gen;
	u16 len;

	/* Send invalid record if not active */
	if (!tipc_mon_is_active(net, mon)) {
		dom->len = 0;
		return;
	}

	/* Send only a dummy record with ack if peer has acked our last sent */
	if (likely(state->acked_gen == gen)) {
		len = dom_rec_len(dom, 0);
		*dlen = len;
		dom->len = htons(len);
		dom->gen = htons(gen);
		dom->ack_gen = htons(state->peer_gen);
		dom->member_cnt = 0;
		return;
	}
	/* Send the full record */
	read_lock_bh(&mon->lock);
	len = ntohs(mon->cache.len);
	*dlen = len;
	memcpy(data, &mon->cache, len);
	read_unlock_bh(&mon->lock);
	dom->ack_gen = htons(state->peer_gen);
}
/* tipc_mon_get_state() : fill @state with the current monitoring view of
 * peer @addr: whether the link must actively probe it (probing), monitor
 * it continuously (monitoring), or be reset (reset)
 */
void tipc_mon_get_state(struct net *net, u32 addr,
			struct tipc_mon_state *state,
			int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *peer;

	/* Full-mesh mode: every link monitors its own peer directly */
	if (!tipc_mon_is_active(net, mon)) {
		state->probing = false;
		state->monitoring = true;
		return;
	}

	/* Used cached state if table has not changed */
	if (!state->probing &&
	    (state->list_gen == mon->list_gen) &&
	    (state->acked_gen == mon->dom_gen))
		return;

	read_lock_bh(&mon->lock);
	peer = get_peer(mon, addr);
	if (peer) {
		/* Probe while our latest domain record is unacked, or while
		 * other peers have reported this one lost
		 */
		state->probing = state->acked_gen != mon->dom_gen;
		state->probing |= peer->down_cnt;
		state->reset |= peer->down_cnt >= MAX_PEER_DOWN_EVENTS;
		state->monitoring = peer->is_local;
		state->monitoring |= peer->is_head;
		state->list_gen = mon->list_gen;
	}
	read_unlock_bh(&mon->lock);
}
/* mon_timeout() : periodically re-evaluate the size of the own domain,
 * which depends on peer_cnt and so drifts as peers come and go.
 *
 * NOTE(review): peer_cnt is sampled before taking mon->lock; the value is
 * only used to decide whether an update is needed, and a stale read just
 * defers the update to a later tick — confirm this is intentional.
 */
static void mon_timeout(struct timer_list *t)
{
	struct tipc_monitor *mon = from_timer(mon, t, timer);
	struct tipc_peer *self;
	int best_member_cnt = dom_size(mon->peer_cnt) - 1;

	write_lock_bh(&mon->lock);
	self = mon->self;
	if (self && (best_member_cnt != self->applied)) {
		mon_update_local_domain(mon);
		mon_assign_roles(mon, self);
	}
	write_unlock_bh(&mon->lock);
	mod_timer(&mon->timer, jiffies + mon->timer_intv);
}
/* tipc_mon_create() : create and start the monitor instance for @bearer_id.
 * Idempotent: returns 0 immediately if one already exists.
 * Returns -ENOMEM if any of the three allocations fails.
 */
int tipc_mon_create(struct net *net, int bearer_id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_monitor *mon;
	struct tipc_peer *self;
	struct tipc_mon_domain *dom;

	if (tn->monitors[bearer_id])
		return 0;

	mon = kzalloc(sizeof(*mon), GFP_ATOMIC);
	self = kzalloc(sizeof(*self), GFP_ATOMIC);
	dom = kzalloc(sizeof(*dom), GFP_ATOMIC);
	if (!mon || !self || !dom) {
		/* kfree(NULL) is a no-op, so partial failures are safe */
		kfree(mon);
		kfree(self);
		kfree(dom);
		return -ENOMEM;
	}
	tn->monitors[bearer_id] = mon;
	rwlock_init(&mon->lock);
	mon->net = net;
	mon->peer_cnt = 1;
	mon->self = self;
	self->domain = dom;
	self->addr = tipc_own_addr(net);
	self->is_up = true;
	self->is_head = true;
	INIT_LIST_HEAD(&self->list);
	timer_setup(&mon->timer, mon_timeout, 0);
	/* Randomize the interval so the cluster's timers do not align */
	mon->timer_intv = msecs_to_jiffies(MON_TIMEOUT + (tn->random & 0xffff));
	mod_timer(&mon->timer, jiffies + mon->timer_intv);
	return 0;
}
/* tipc_mon_delete() : stop and free the monitor instance for @bearer_id,
 * including all peer records and their domain records
 */
void tipc_mon_delete(struct net *net, int bearer_id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *self;
	struct tipc_peer *peer, *tmp;

	if (!mon)
		return;

	self = get_self(net, bearer_id);
	write_lock_bh(&mon->lock);
	/* Unpublish first so no new user can find the instance */
	tn->monitors[bearer_id] = NULL;
	list_for_each_entry_safe(peer, tmp, &self->list, list) {
		list_del(&peer->list);
		hlist_del(&peer->hash);
		kfree(peer->domain);
		kfree(peer);
	}
	mon->self = NULL;
	write_unlock_bh(&mon->lock);
	/* Wait out a possibly running mon_timeout() before freeing */
	del_timer_sync(&mon->timer);
	kfree(self->domain);
	kfree(self);
	kfree(mon);
}
  590. void tipc_mon_reinit_self(struct net *net)
  591. {
  592. struct tipc_monitor *mon;
  593. int bearer_id;
  594. for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
  595. mon = tipc_monitor(net, bearer_id);
  596. if (!mon)
  597. continue;
  598. write_lock_bh(&mon->lock);
  599. mon->self->addr = tipc_own_addr(net);
  600. write_unlock_bh(&mon->lock);
  601. }
  602. }
  603. int tipc_nl_monitor_set_threshold(struct net *net, u32 cluster_size)
  604. {
  605. struct tipc_net *tn = tipc_net(net);
  606. if (cluster_size > TIPC_CLUSTER_SIZE)
  607. return -EINVAL;
  608. tn->mon_threshold = cluster_size;
  609. return 0;
  610. }
  611. int tipc_nl_monitor_get_threshold(struct net *net)
  612. {
  613. struct tipc_net *tn = tipc_net(net);
  614. return tn->mon_threshold;
  615. }
/* __tipc_nl_add_monitor_peer() : append one peer's monitor attributes to a
 * netlink dump message. Returns 0 on success, -EMSGSIZE when the skb is
 * full (with all partial output cancelled).
 */
static int __tipc_nl_add_monitor_peer(struct tipc_peer *peer,
				      struct tipc_nl_msg *msg)
{
	struct tipc_mon_domain *dom = peer->domain;
	struct nlattr *attrs;
	void *hdr;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_MON_PEER_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_MON_PEER);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_ADDR, peer->addr))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_APPLIED, peer->applied))
		goto attr_msg_full;

	if (peer->is_up)
		if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_UP))
			goto attr_msg_full;
	if (peer->is_local)
		if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_LOCAL))
			goto attr_msg_full;
	if (peer->is_head)
		if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_HEAD))
			goto attr_msg_full;

	/* Domain details only exist for peers that reported a record */
	if (dom) {
		if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_DOMGEN, dom->gen))
			goto attr_msg_full;
		if (nla_put_u64_64bit(msg->skb, TIPC_NLA_MON_PEER_UPMAP,
				      dom->up_map, TIPC_NLA_MON_PEER_PAD))
			goto attr_msg_full;
		if (nla_put(msg->skb, TIPC_NLA_MON_PEER_MEMBERS,
			    dom->member_cnt * sizeof(u32), &dom->members))
			goto attr_msg_full;
	}

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);
	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
/* tipc_nl_add_monitor_peer() : dump all peers of one bearer's monitor.
 * @prev_node: resume cookie; non-zero means skip until that address is
 * found, zero on return means the dump completed. Returns -EMSGSIZE when
 * the skb fills up, with *prev_node set so the dump can be resumed.
 */
int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
			     u32 bearer_id, u32 *prev_node)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *peer;

	if (!mon)
		return -EINVAL;

	read_lock_bh(&mon->lock);
	peer = mon->self;
	do {
		if (*prev_node) {
			if (peer->addr == *prev_node)
				*prev_node = 0;
			else
				continue;
		}
		if (__tipc_nl_add_monitor_peer(peer, msg)) {
			*prev_node = peer->addr;
			read_unlock_bh(&mon->lock);
			return -EMSGSIZE;
		}
	} while ((peer = peer_nxt(peer)) != mon->self);
	read_unlock_bh(&mon->lock);

	return 0;
}
/* __tipc_nl_add_monitor() : append one bearer's monitor summary (activity,
 * bearer name, peer count, list generation) to a netlink message.
 * Returns 0 on success or when the bearer/monitor does not exist,
 * -EMSGSIZE when the skb is full.
 */
int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
			  u32 bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	char bearer_name[TIPC_MAX_BEARER_NAME];
	struct nlattr *attrs;
	void *hdr;
	int ret;

	ret = tipc_bearer_get_name(net, bearer_name, bearer_id);
	if (ret || !mon)
		return 0;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_MON_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_MON);
	if (!attrs)
		goto msg_full;

	read_lock_bh(&mon->lock);
	if (nla_put_u32(msg->skb, TIPC_NLA_MON_REF, bearer_id))
		goto attr_msg_full;
	if (tipc_mon_is_active(net, mon))
		if (nla_put_flag(msg->skb, TIPC_NLA_MON_ACTIVE))
			goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_MON_BEARER_NAME, bearer_name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEERCNT, mon->peer_cnt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_MON_LISTGEN, mon->list_gen))
		goto attr_msg_full;

	read_unlock_bh(&mon->lock);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	read_unlock_bh(&mon->lock);
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}