/*
 * net/tipc/monitor.c
 *
 * Copyright (c) 2016, Ericsson AB
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/genetlink.h>
#include "core.h"
#include "addr.h"
#include "monitor.h"
#include "bearer.h"

#define MAX_MON_DOMAIN		64
#define MON_TIMEOUT		120000
#define MAX_PEER_DOWN_EVENTS	4

/* struct tipc_mon_domain: domain record to be transferred between peers
 * @len: actual size of domain record
 * @gen: current generation of sender's domain
 * @ack_gen: most recent generation of self's domain acked by peer
 * @member_cnt: number of domain member nodes described in this record
 * @up_map: bit map indicating which of the members the sender considers up
 * @members: identity of the domain members
 */
struct tipc_mon_domain {
	u16 len;
	u16 gen;
	u16 ack_gen;
	u16 member_cnt;
	u64 up_map;
	u32 members[MAX_MON_DOMAIN];
};

/* struct tipc_peer: state of a peer node and its domain
 * @addr: tipc node identity of peer
 * @head_map: shows which other nodes currently consider peer 'up'
 * @domain: most recent domain record from peer
 * @hash: position in hashed lookup list
 * @list: position in linked list, in circular ascending order by 'addr'
 * @applied: number of reported domain members applied on this monitor list
 * @is_up: peer is up as seen from this node
 * @is_head: peer is assigned domain head as seen from this node
 * @is_local: peer is in local domain and should be continuously monitored
 * @down_cnt: number of other peers which have reported this peer lost
 */
struct tipc_peer {
	u32 addr;
	struct tipc_mon_domain *domain;
	struct hlist_node hash;
	struct list_head list;
	u8 applied;
	u8 down_cnt;
	bool is_up;
	bool is_head;
	bool is_local;
};

struct tipc_monitor {
	struct hlist_head peers[NODE_HTABLE_SIZE];
	int peer_cnt;
	struct tipc_peer *self;
	rwlock_t lock;
	struct tipc_mon_domain cache;
	u16 list_gen;
	u16 dom_gen;
	struct net *net;
	struct timer_list timer;
	unsigned long timer_intv;
};

static struct tipc_monitor *tipc_monitor(struct net *net, int bearer_id)
{
	return tipc_net(net)->monitors[bearer_id];
}

const int tipc_max_domain_size = sizeof(struct tipc_mon_domain);

/* dom_rec_len(): actual length of domain record for transport
 */
static int dom_rec_len(struct tipc_mon_domain *dom, u16 mcnt)
{
	return ((void *)&dom->members - (void *)dom) + (mcnt * sizeof(u32));
}

/* dom_size() : calculate size of own domain based on number of peers
 */
static int dom_size(int peers)
{
	int i = 0;

	while ((i * i) < peers)
		i++;
	return i < MAX_MON_DOMAIN ? i : MAX_MON_DOMAIN;
}
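
/* map_set() / map_get() : access one bit in a domain's 64-bit 'up' map.
 * The masks must be 64 bits wide, since up to MAX_MON_DOMAIN (64) members
 * may be tracked per domain.
 */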
static void map_set(u64 *up_map, int i, unsigned int v)
{
	*up_map &= ~(1ULL << i);
	*up_map |= ((u64)v << i);
}

static int map_get(u64 up_map, int i)
{
	return (up_map & (1ULL << i)) >> i;
}
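
/* peer_prev() / peer_nxt() / peer_head() : navigate the circular,
 * address-ordered monitor list; peer_head() backs up to the nearest
 * preceding peer that is a domain head.
 */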
static struct tipc_peer *peer_prev(struct tipc_peer *peer)
{
	return list_last_entry(&peer->list, struct tipc_peer, list);
}

static struct tipc_peer *peer_nxt(struct tipc_peer *peer)
{
	return list_first_entry(&peer->list, struct tipc_peer, list);
}

static struct tipc_peer *peer_head(struct tipc_peer *peer)
{
	while (!peer->is_head)
		peer = peer_prev(peer);
	return peer;
}
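
/* get_peer() / get_self() : look up a peer by node address in the hash
 * table, resp. return the entry representing this node itself.
 */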
static struct tipc_peer *get_peer(struct tipc_monitor *mon, u32 addr)
{
	struct tipc_peer *peer;
	unsigned int thash = tipc_hashfn(addr);

	hlist_for_each_entry(peer, &mon->peers[thash], hash) {
		if (peer->addr == addr)
			return peer;
	}
	return NULL;
}

static struct tipc_peer *get_self(struct net *net, int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);

	return mon->self;
}
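
/* tipc_mon_is_active() : overlapping-ring (domain) monitoring is used only
 * when the number of cluster peers exceeds the configured threshold;
 * below that, full-mesh monitoring is used.
 */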
static inline bool tipc_mon_is_active(struct net *net, struct tipc_monitor *mon)
{
	struct tipc_net *tn = tipc_net(net);

	return mon->peer_cnt > tn->mon_threshold;
}

/* mon_identify_lost_members() : identify and mark potentially lost members
 */
static void mon_identify_lost_members(struct tipc_peer *peer,
				      struct tipc_mon_domain *dom_bef,
				      int applied_bef)
{
	struct tipc_peer *member = peer;
	struct tipc_mon_domain *dom_aft = peer->domain;
	int applied_aft = peer->applied;
	int i;

	for (i = 0; i < applied_bef; i++) {
		member = peer_nxt(member);

		/* Do nothing if self or peer already see member as down */
		if (!member->is_up || !map_get(dom_bef->up_map, i))
			continue;

		/* Loss of local node must be detected by active probing */
		if (member->is_local)
			continue;

		/* Start probing if member was removed from applied domain */
		if (!applied_aft || (applied_aft < i)) {
			member->down_cnt = 1;
			continue;
		}

		/* Member loss is confirmed if it is still in applied domain */
		if (!map_get(dom_aft->up_map, i))
			member->down_cnt++;
	}
}

/* mon_apply_domain() : match a peer's domain record against monitor list
 */
static void mon_apply_domain(struct tipc_monitor *mon,
			     struct tipc_peer *peer)
{
	struct tipc_mon_domain *dom = peer->domain;
	struct tipc_peer *member;
	u32 addr;
	int i;

	if (!dom || !peer->is_up)
		return;

	/* Scan across domain members and match against monitor list */
	peer->applied = 0;
	member = peer_nxt(peer);
	for (i = 0; i < dom->member_cnt; i++) {
		addr = dom->members[i];
		if (addr != member->addr)
			return;
		peer->applied++;
		member = peer_nxt(member);
	}
}

/* mon_update_local_domain() : update after peer addition/removal/up/down
 */
static void mon_update_local_domain(struct tipc_monitor *mon)
{
	struct tipc_peer *self = mon->self;
	struct tipc_mon_domain *cache = &mon->cache;
	struct tipc_mon_domain *dom = self->domain;
	struct tipc_peer *peer = self;
	u64 prev_up_map = dom->up_map;
	u16 member_cnt, i;
	bool diff;

	/* Update local domain size based on current size of cluster */
	member_cnt = dom_size(mon->peer_cnt) - 1;
	self->applied = member_cnt;

	/* Update native and cached outgoing local domain records */
	dom->len = dom_rec_len(dom, member_cnt);
	diff = dom->member_cnt != member_cnt;
	dom->member_cnt = member_cnt;
	for (i = 0; i < member_cnt; i++) {
		peer = peer_nxt(peer);
		diff |= dom->members[i] != peer->addr;
		dom->members[i] = peer->addr;
		map_set(&dom->up_map, i, peer->is_up);
		cache->members[i] = htonl(peer->addr);
	}
	diff |= dom->up_map != prev_up_map;
	if (!diff)
		return;
	dom->gen = ++mon->dom_gen;
	cache->len = htons(dom->len);
	cache->gen = htons(dom->gen);
	cache->member_cnt = htons(member_cnt);
	cache->up_map = cpu_to_be64(dom->up_map);
	mon_apply_domain(mon, self);
}

/* mon_update_neighbors() : update preceding neighbors of added/removed peer
 */
static void mon_update_neighbors(struct tipc_monitor *mon,
				 struct tipc_peer *peer)
{
	int dz, i;

	dz = dom_size(mon->peer_cnt);
	for (i = 0; i < dz; i++) {
		mon_apply_domain(mon, peer);
		peer = peer_prev(peer);
	}
}

/* mon_assign_roles() : reassign peer roles after a network change
 * The monitor list is consistent at this stage; i.e., each peer is monitoring
 * a set of domain members as matched between domain record and the monitor list
 */
static void mon_assign_roles(struct tipc_monitor *mon, struct tipc_peer *head)
{
	struct tipc_peer *peer = peer_nxt(head);
	struct tipc_peer *self = mon->self;
	int i = 0;

	for (; peer != self; peer = peer_nxt(peer)) {
		peer->is_local = false;

		/* Update domain member */
		if (i++ < head->applied) {
			peer->is_head = false;
			if (head == self)
				peer->is_local = true;
			continue;
		}
		/* Assign next domain head */
		if (!peer->is_up)
			continue;
		if (peer->is_head)
			break;
		head = peer;
		head->is_head = true;
		i = 0;
	}
	mon->list_gen++;
}
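
/* tipc_mon_remove_peer() : delete a peer from the monitor list and rebuild
 * the affected domains and head/local roles.
 */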
void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *self = get_self(net, bearer_id);
	struct tipc_peer *peer, *prev, *head;

	write_lock_bh(&mon->lock);
	peer = get_peer(mon, addr);
	if (!peer)
		goto exit;
	prev = peer_prev(peer);
	list_del(&peer->list);
	hlist_del(&peer->hash);
	kfree(peer->domain);
	kfree(peer);
	mon->peer_cnt--;
	head = peer_head(prev);
	if (head == self)
		mon_update_local_domain(mon);
	mon_update_neighbors(mon, prev);

	/* Revert to full-mesh monitoring if we reach threshold */
	if (!tipc_mon_is_active(net, mon)) {
		list_for_each_entry(peer, &self->list, list) {
			kfree(peer->domain);
			peer->domain = NULL;
			peer->applied = 0;
		}
	}
	mon_assign_roles(mon, head);
exit:
	write_unlock_bh(&mon->lock);
}
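
/* tipc_mon_add_peer() : allocate a new peer entry and insert it into the
 * hash table and into the circular monitor list, in ascending address order.
 */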
static bool tipc_mon_add_peer(struct tipc_monitor *mon, u32 addr,
			      struct tipc_peer **peer)
{
	struct tipc_peer *self = mon->self;
	struct tipc_peer *cur, *prev, *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	*peer = p;
	if (!p)
		return false;
	p->addr = addr;

	/* Add new peer to lookup list */
	INIT_LIST_HEAD(&p->list);
	hlist_add_head(&p->hash, &mon->peers[tipc_hashfn(addr)]);

	/* Sort new peer into iterator list, in ascending circular order */
	prev = self;
	list_for_each_entry(cur, &self->list, list) {
		if ((addr > prev->addr) && (addr < cur->addr))
			break;
		if (((addr < cur->addr) || (addr > prev->addr)) &&
		    (prev->addr > cur->addr))
			break;
		prev = cur;
	}
	list_add_tail(&p->list, &cur->list);
	mon->peer_cnt++;
	mon_update_neighbors(mon, p);
	return true;
}
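
/* tipc_mon_peer_up() : a link to the given peer has come up; add the peer
 * if needed, then update the local domain and reassign roles.
 */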
void tipc_mon_peer_up(struct net *net, u32 addr, int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *self = get_self(net, bearer_id);
	struct tipc_peer *peer, *head;

	write_lock_bh(&mon->lock);
	peer = get_peer(mon, addr);
	if (!peer && !tipc_mon_add_peer(mon, addr, &peer))
		goto exit;
	peer->is_up = true;
	head = peer_head(peer);
	if (head == self)
		mon_update_local_domain(mon);
	mon_assign_roles(mon, head);
exit:
	write_unlock_bh(&mon->lock);
}
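
/* tipc_mon_peer_down() : a link to the given peer has gone down; mark the
 * members it was monitoring as potentially lost, then reassign roles.
 */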
void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *self = get_self(net, bearer_id);
	struct tipc_peer *peer, *head;
	struct tipc_mon_domain *dom;
	int applied;

	write_lock_bh(&mon->lock);
	peer = get_peer(mon, addr);
	if (!peer) {
		pr_warn("Mon: unknown link %x/%u DOWN\n", addr, bearer_id);
		goto exit;
	}
	applied = peer->applied;
	peer->applied = 0;
	dom = peer->domain;
	peer->domain = NULL;
	if (peer->is_head)
		mon_identify_lost_members(peer, dom, applied);
	kfree(dom);
	peer->is_up = false;
	peer->is_head = false;
	peer->is_local = false;
	peer->down_cnt = 0;
	head = peer_head(peer);
	if (head == self)
		mon_update_local_domain(mon);
	mon_assign_roles(mon, head);
exit:
	write_unlock_bh(&mon->lock);
}

/* tipc_mon_rcv - process monitor domain event message
 */
void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
		  struct tipc_mon_state *state, int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_mon_domain *arrv_dom = data;
	struct tipc_mon_domain dom_bef;
	struct tipc_mon_domain *dom;
	struct tipc_peer *peer;
	u16 new_member_cnt = ntohs(arrv_dom->member_cnt);
	int new_dlen = dom_rec_len(arrv_dom, new_member_cnt);
	u16 new_gen = ntohs(arrv_dom->gen);
	u16 acked_gen = ntohs(arrv_dom->ack_gen);
	bool probing = state->probing;
	int i, applied_bef;

	state->probing = false;

	/* Sanity check received domain record */
	if (dlen < dom_rec_len(arrv_dom, 0))
		return;
	if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
		return;
	if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen)
		return;

	/* Synch generation numbers with peer if link just came up */
	if (!state->synched) {
		state->peer_gen = new_gen - 1;
		state->acked_gen = acked_gen;
		state->synched = true;
	}

	if (more(acked_gen, state->acked_gen))
		state->acked_gen = acked_gen;

	/* Drop duplicate unless we are waiting for a probe response */
	if (!more(new_gen, state->peer_gen) && !probing)
		return;

	write_lock_bh(&mon->lock);
	peer = get_peer(mon, addr);
	if (!peer || !peer->is_up)
		goto exit;

	/* Peer is confirmed, stop any ongoing probing */
	peer->down_cnt = 0;

	/* Task is done for duplicate record */
	if (!more(new_gen, state->peer_gen))
		goto exit;
	state->peer_gen = new_gen;

	/* Cache current domain record for later use */
	dom_bef.member_cnt = 0;
	dom = peer->domain;
	if (dom)
		memcpy(&dom_bef, dom, dom->len);

	/* Transform and store received domain record */
	if (!dom || (dom->len < new_dlen)) {
		kfree(dom);
		dom = kmalloc(new_dlen, GFP_ATOMIC);
		peer->domain = dom;
		if (!dom)
			goto exit;
	}
	dom->len = new_dlen;
	dom->gen = new_gen;
	dom->member_cnt = new_member_cnt;
	dom->up_map = be64_to_cpu(arrv_dom->up_map);
	for (i = 0; i < new_member_cnt; i++)
		dom->members[i] = ntohl(arrv_dom->members[i]);

	/* Update peers affected by this domain record */
	applied_bef = peer->applied;
	mon_apply_domain(mon, peer);
	mon_identify_lost_members(peer, &dom_bef, applied_bef);
	mon_assign_roles(mon, peer_head(peer));
exit:
	write_unlock_bh(&mon->lock);
}
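
/* tipc_mon_prep() : prepare the domain record to be carried in an outgoing
 * link state message; only a dummy record with ack is sent if the peer has
 * already acked our latest domain generation.
 */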
void tipc_mon_prep(struct net *net, void *data, int *dlen,
		   struct tipc_mon_state *state, int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_mon_domain *dom = data;
	u16 gen = mon->dom_gen;
	u16 len;

	if (!tipc_mon_is_active(net, mon))
		return;

	/* Send only a dummy record with ack if peer has acked our last sent */
	if (likely(state->acked_gen == gen)) {
		len = dom_rec_len(dom, 0);
		*dlen = len;
		dom->len = htons(len);
		dom->gen = htons(gen);
		dom->ack_gen = htons(state->peer_gen);
		dom->member_cnt = 0;
		return;
	}
	/* Send the full record */
	read_lock_bh(&mon->lock);
	len = ntohs(mon->cache.len);
	*dlen = len;
	memcpy(data, &mon->cache, len);
	read_unlock_bh(&mon->lock);
	dom->ack_gen = htons(state->peer_gen);
}
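
/* tipc_mon_get_state() : refresh the caller's monitor state: whether the
 * peer needs probing, whether the link should be reset after repeated loss
 * reports, and whether this node must actively monitor the peer.
 */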
void tipc_mon_get_state(struct net *net, u32 addr,
			struct tipc_mon_state *state,
			int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *peer;

	/* Use cached state if table has not changed */
	if (!state->probing &&
	    (state->list_gen == mon->list_gen) &&
	    (state->acked_gen == mon->dom_gen))
		return;

	read_lock_bh(&mon->lock);
	peer = get_peer(mon, addr);
	if (peer) {
		state->probing = state->acked_gen != mon->dom_gen;
		state->probing |= peer->down_cnt;
		state->reset |= peer->down_cnt >= MAX_PEER_DOWN_EVENTS;
		state->monitoring = peer->is_local;
		state->monitoring |= peer->is_head;
		state->list_gen = mon->list_gen;
	}
	read_unlock_bh(&mon->lock);
}
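
/* mon_timeout() : periodically verify that the local domain size still
 * matches the cluster size, and rebuild the domain and roles if not.
 */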
static void mon_timeout(unsigned long m)
{
	struct tipc_monitor *mon = (void *)m;
	struct tipc_peer *self;
	int best_member_cnt = dom_size(mon->peer_cnt) - 1;

	write_lock_bh(&mon->lock);
	self = mon->self;
	if (self && (best_member_cnt != self->applied)) {
		mon_update_local_domain(mon);
		mon_assign_roles(mon, self);
	}
	write_unlock_bh(&mon->lock);
	mod_timer(&mon->timer, jiffies + mon->timer_intv);
}
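
/* tipc_mon_create() : create and start a monitor instance for one bearer
 */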
int tipc_mon_create(struct net *net, int bearer_id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_monitor *mon;
	struct tipc_peer *self;
	struct tipc_mon_domain *dom;

	if (tn->monitors[bearer_id])
		return 0;

	mon = kzalloc(sizeof(*mon), GFP_ATOMIC);
	self = kzalloc(sizeof(*self), GFP_ATOMIC);
	dom = kzalloc(sizeof(*dom), GFP_ATOMIC);
	if (!mon || !self || !dom) {
		kfree(mon);
		kfree(self);
		kfree(dom);
		return -ENOMEM;
	}
	tn->monitors[bearer_id] = mon;
	rwlock_init(&mon->lock);
	mon->net = net;
	mon->peer_cnt = 1;
	mon->self = self;
	self->domain = dom;
	self->addr = tipc_own_addr(net);
	self->is_up = true;
	self->is_head = true;
	INIT_LIST_HEAD(&self->list);
	setup_timer(&mon->timer, mon_timeout, (unsigned long)mon);
	mon->timer_intv = msecs_to_jiffies(MON_TIMEOUT + (tn->random & 0xffff));
	mod_timer(&mon->timer, jiffies + mon->timer_intv);
	return 0;
}

void tipc_mon_delete(struct net *net, int bearer_id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *self;
	struct tipc_peer *peer, *tmp;

	if (!mon)
		return;

	self = get_self(net, bearer_id);
	write_lock_bh(&mon->lock);
	tn->monitors[bearer_id] = NULL;
	list_for_each_entry_safe(peer, tmp, &self->list, list) {
		list_del(&peer->list);
		hlist_del(&peer->hash);
		kfree(peer->domain);
		kfree(peer);
	}
	mon->self = NULL;
	write_unlock_bh(&mon->lock);
	del_timer_sync(&mon->timer);
	kfree(self->domain);
	kfree(self);
	kfree(mon);
}

int tipc_nl_monitor_set_threshold(struct net *net, u32 cluster_size)
{
	struct tipc_net *tn = tipc_net(net);

	if (cluster_size > TIPC_CLUSTER_SIZE)
		return -EINVAL;

	tn->mon_threshold = cluster_size;
	return 0;
}

int tipc_nl_monitor_get_threshold(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);

	return tn->mon_threshold;
}

int __tipc_nl_add_monitor_peer(struct tipc_peer *peer, struct tipc_nl_msg *msg)
{
	struct tipc_mon_domain *dom = peer->domain;
	struct nlattr *attrs;
	void *hdr;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_MON_PEER_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_MON_PEER);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_ADDR, peer->addr))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_APPLIED, peer->applied))
		goto attr_msg_full;

	if (peer->is_up)
		if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_UP))
			goto attr_msg_full;
	if (peer->is_local)
		if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_LOCAL))
			goto attr_msg_full;
	if (peer->is_head)
		if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_HEAD))
			goto attr_msg_full;

	if (dom) {
		if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_DOMGEN, dom->gen))
			goto attr_msg_full;
		if (nla_put_u64_64bit(msg->skb, TIPC_NLA_MON_PEER_UPMAP,
				      dom->up_map, TIPC_NLA_MON_PEER_PAD))
			goto attr_msg_full;
		if (nla_put(msg->skb, TIPC_NLA_MON_PEER_MEMBERS,
			    dom->member_cnt * sizeof(u32), &dom->members))
			goto attr_msg_full;
	}

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);
	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
			     u32 bearer_id, u32 *prev_node)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *peer;

	if (!mon)
		return -EINVAL;

	read_lock_bh(&mon->lock);
	peer = mon->self;
	do {
		if (*prev_node) {
			if (peer->addr == *prev_node)
				*prev_node = 0;
			else
				continue;
		}
		if (__tipc_nl_add_monitor_peer(peer, msg)) {
			*prev_node = peer->addr;
			read_unlock_bh(&mon->lock);
			return -EMSGSIZE;
		}
	} while ((peer = peer_nxt(peer)) != mon->self);
	read_unlock_bh(&mon->lock);

	return 0;
}

int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
			  u32 bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	char bearer_name[TIPC_MAX_BEARER_NAME];
	struct nlattr *attrs;
	void *hdr;
	int ret;

	ret = tipc_bearer_get_name(net, bearer_name, bearer_id);
	if (ret || !mon)
		return -EINVAL;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_MON_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_MON);
	if (!attrs)
		goto msg_full;

	read_lock_bh(&mon->lock);
	if (nla_put_u32(msg->skb, TIPC_NLA_MON_REF, bearer_id))
		goto attr_msg_full;
	if (tipc_mon_is_active(net, mon))
		if (nla_put_flag(msg->skb, TIPC_NLA_MON_ACTIVE))
			goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_MON_BEARER_NAME, bearer_name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEERCNT, mon->peer_cnt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_MON_LISTGEN, mon->list_gen))
		goto attr_msg_full;

	read_unlock_bh(&mon->lock);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	read_unlock_bh(&mon->lock);
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}