
/*
 * net/tipc/name_table.c: TIPC name table code
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "netlink.h"
#include "name_table.h"
#include "name_distr.h"
#include "subscr.h"
#include "bcast.h"
#include "addr.h"
#include "node.h"
#include <net/genetlink.h>

#define TIPC_NAMETBL_SIZE 1024		/* must be a power of 2 */

/**
 * struct name_info - name sequence publication info
 * @node_list: circular list of publications made by own node
 * @cluster_list: circular list of publications made by own cluster
 * @zone_list: circular list of publications made by own zone
 * @node_list_size: number of entries in "node_list"
 * @cluster_list_size: number of entries in "cluster_list"
 * @zone_list_size: number of entries in "zone_list"
 *
 * Note: The zone list always contains at least one entry, since all
 *       publications of the associated name sequence belong to it.
 *       (The cluster and node lists may be empty.)
 */
struct name_info {
	struct list_head node_list;
	struct list_head cluster_list;
	struct list_head zone_list;
	u32 node_list_size;
	u32 cluster_list_size;
	u32 zone_list_size;
};

/**
 * struct sub_seq - container for all published instances of a name sequence
 * @lower: name sequence lower bound
 * @upper: name sequence upper bound
 * @info: pointer to name sequence publication info
 */
struct sub_seq {
	u32 lower;
	u32 upper;
	struct name_info *info;
};

/**
 * struct name_seq - container for all published instances of a name type
 * @type: 32 bit 'type' value for name sequence
 * @sseqs: pointer to dynamically-sized array of sub-sequences of this 'type';
 *         sub-sequences are sorted in ascending order
 * @alloc: number of sub-sequences currently in array
 * @first_free: array index of first unused sub-sequence entry
 * @ns_list: links to adjacent name sequences in hash chain
 * @subscriptions: list of subscriptions for this 'type'
 * @lock: spinlock controlling access to publication lists of all sub-sequences
 * @rcu: RCU callback head used for deferred freeing
 */
struct name_seq {
	u32 type;
	struct sub_seq *sseqs;
	u32 alloc;
	u32 first_free;
	struct hlist_node ns_list;
	struct list_head subscriptions;
	spinlock_t lock;
	struct rcu_head rcu;
};
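
/*
 * hash - fold a name type into a bucket index of the name table
 *
 * TIPC_NAMETBL_SIZE is a power of 2, so masking with (size - 1) is
 * equivalent to taking the type modulo the table size.
 */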
static int hash(int x)
{
	return x & (TIPC_NAMETBL_SIZE - 1);
}

/**
 * publ_create - create a publication structure
 */
static struct publication *publ_create(u32 type, u32 lower, u32 upper,
				       u32 scope, u32 node, u32 port_ref,
				       u32 key)
{
	struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);

	if (publ == NULL) {
		pr_warn("Publication creation failure, no memory\n");
		return NULL;
	}

	publ->type = type;
	publ->lower = lower;
	publ->upper = upper;
	publ->scope = scope;
	publ->node = node;
	publ->ref = port_ref;
	publ->key = key;
	INIT_LIST_HEAD(&publ->pport_list);
	return publ;
}

/**
 * tipc_subseq_alloc - allocate a specified number of sub-sequence structures
 */
static struct sub_seq *tipc_subseq_alloc(u32 cnt)
{
	return kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
}

/**
 * tipc_nameseq_create - create a name sequence structure for the specified 'type'
 *
 * Allocates a single sub-sequence structure and sets it to all 0's.
 */
static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
{
	struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
	struct sub_seq *sseq = tipc_subseq_alloc(1);

	if (!nseq || !sseq) {
		pr_warn("Name sequence creation failed, no memory\n");
		kfree(nseq);
		kfree(sseq);
		return NULL;
	}

	spin_lock_init(&nseq->lock);
	nseq->type = type;
	nseq->sseqs = sseq;
	nseq->alloc = 1;
	INIT_HLIST_NODE(&nseq->ns_list);
	INIT_LIST_HEAD(&nseq->subscriptions);
	hlist_add_head_rcu(&nseq->ns_list, seq_head);
	return nseq;
}

/**
 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
 *
 * Very time-critical, so binary searches through sub-sequence array.
 */
static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
					   u32 instance)
{
	struct sub_seq *sseqs = nseq->sseqs;
	int low = 0;
	int high = nseq->first_free - 1;
	int mid;

	while (low <= high) {
		mid = (low + high) / 2;
		if (instance < sseqs[mid].lower)
			high = mid - 1;
		else if (instance > sseqs[mid].upper)
			low = mid + 1;
		else
			return &sseqs[mid];
	}
	return NULL;
}

/**
 * nameseq_locate_subseq - determine position of name instance in sub-sequence
 *
 * Returns index in sub-sequence array of the entry that contains the specified
 * instance value; if no entry contains that value, returns the position
 * where a new entry for it would be inserted in the array.
 *
 * Note: Similar to binary search code for locating a sub-sequence.
 */
static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
{
	struct sub_seq *sseqs = nseq->sseqs;
	int low = 0;
	int high = nseq->first_free - 1;
	int mid;

	while (low <= high) {
		mid = (low + high) / 2;
		if (instance < sseqs[mid].lower)
			high = mid - 1;
		else if (instance > sseqs[mid].upper)
			low = mid + 1;
		else
			return mid;
	}
	return low;
}
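
/*
 * Illustrative example (not from the original source): with sub-sequences
 * {10-19} and {30-39} in the array, nameseq_find_subseq(nseq, 25) returns
 * NULL because no range contains 25, while nameseq_locate_subseq(nseq, 25)
 * returns 1, the index where a new sub-sequence for 25 would be inserted.
 */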

/**
 * tipc_nameseq_insert_publ - insert a new publication into a name sequence
 */
static struct publication *tipc_nameseq_insert_publ(struct net *net,
						    struct name_seq *nseq,
						    u32 type, u32 lower,
						    u32 upper, u32 scope,
						    u32 node, u32 port, u32 key)
{
	struct tipc_subscription *s;
	struct tipc_subscription *st;
	struct publication *publ;
	struct sub_seq *sseq;
	struct name_info *info;
	int created_subseq = 0;

	sseq = nameseq_find_subseq(nseq, lower);
	if (sseq) {

		/* Lower end overlaps existing entry => need an exact match */
		if ((sseq->lower != lower) || (sseq->upper != upper)) {
			return NULL;
		}

		info = sseq->info;

		/* Check if an identical publication already exists */
		list_for_each_entry(publ, &info->zone_list, zone_list) {
			if ((publ->ref == port) && (publ->key == key) &&
			    (!publ->node || (publ->node == node)))
				return NULL;
		}
	} else {
		u32 inspos;
		struct sub_seq *freesseq;

		/* Find where lower end should be inserted */
		inspos = nameseq_locate_subseq(nseq, lower);

		/* Fail if upper end overlaps into an existing entry */
		if ((inspos < nseq->first_free) &&
		    (upper >= nseq->sseqs[inspos].lower)) {
			return NULL;
		}

		/* Ensure there is space for new sub-sequence */
		if (nseq->first_free == nseq->alloc) {
			struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);

			if (!sseqs) {
				pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
					type, lower, upper);
				return NULL;
			}
			memcpy(sseqs, nseq->sseqs,
			       nseq->alloc * sizeof(struct sub_seq));
			kfree(nseq->sseqs);
			nseq->sseqs = sseqs;
			nseq->alloc *= 2;
		}

		info = kzalloc(sizeof(*info), GFP_ATOMIC);
		if (!info) {
			pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
				type, lower, upper);
			return NULL;
		}

		INIT_LIST_HEAD(&info->node_list);
		INIT_LIST_HEAD(&info->cluster_list);
		INIT_LIST_HEAD(&info->zone_list);

		/* Insert new sub-sequence */
		sseq = &nseq->sseqs[inspos];
		freesseq = &nseq->sseqs[nseq->first_free];
		memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq));
		memset(sseq, 0, sizeof(*sseq));
		nseq->first_free++;
		sseq->lower = lower;
		sseq->upper = upper;
		sseq->info = info;
		created_subseq = 1;
	}

	/* Insert a publication */
	publ = publ_create(type, lower, upper, scope, node, port, key);
	if (!publ)
		return NULL;

	list_add(&publ->zone_list, &info->zone_list);
	info->zone_list_size++;

	if (in_own_cluster(net, node)) {
		list_add(&publ->cluster_list, &info->cluster_list);
		info->cluster_list_size++;
	}

	if (in_own_node(net, node)) {
		list_add(&publ->node_list, &info->node_list);
		info->node_list_size++;
	}

	/* Any subscriptions waiting for notification? */
	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
		tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
					    TIPC_PUBLISHED, publ->ref,
					    publ->node, created_subseq);
	}
	return publ;
}

/**
 * tipc_nameseq_remove_publ - remove a publication from a name sequence
 *
 * NOTE: There may be cases where TIPC is asked to remove a publication
 * that is not in the name table.  For example, if another node issues a
 * publication for a name sequence that overlaps an existing name sequence
 * the publication will not be recorded, which means the publication won't
 * be found when the name sequence is later withdrawn by that node.
 * A failed withdraw request simply returns a failure indication and lets the
 * caller issue any error or warning messages associated with such a problem.
 */
static struct publication *tipc_nameseq_remove_publ(struct net *net,
						    struct name_seq *nseq,
						    u32 inst, u32 node,
						    u32 ref, u32 key)
{
	struct publication *publ;
	struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
	struct name_info *info;
	struct sub_seq *free;
	struct tipc_subscription *s, *st;
	int removed_subseq = 0;

	if (!sseq)
		return NULL;

	info = sseq->info;

	/* Locate publication, if it exists */
	list_for_each_entry(publ, &info->zone_list, zone_list) {
		if ((publ->key == key) && (publ->ref == ref) &&
		    (!publ->node || (publ->node == node)))
			goto found;
	}
	return NULL;

found:
	/* Remove publication from zone scope list */
	list_del(&publ->zone_list);
	info->zone_list_size--;

	/* Remove publication from cluster scope list, if present */
	if (in_own_cluster(net, node)) {
		list_del(&publ->cluster_list);
		info->cluster_list_size--;
	}

	/* Remove publication from node scope list, if present */
	if (in_own_node(net, node)) {
		list_del(&publ->node_list);
		info->node_list_size--;
	}

	/* Contract subseq list if no more publications for that subseq */
	if (list_empty(&info->zone_list)) {
		kfree(info);
		free = &nseq->sseqs[nseq->first_free--];
		memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof(*sseq));
		removed_subseq = 1;
	}

	/* Notify any waiting subscriptions */
	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
		tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
					    TIPC_WITHDRAWN, publ->ref,
					    publ->node, removed_subseq);
	}
	return publ;
}

/**
 * tipc_nameseq_subscribe - attach a subscription, and issue the prescribed
 * number of events if there is any sub-sequence overlapping with the
 * requested sequence
 */
static void tipc_nameseq_subscribe(struct name_seq *nseq,
				   struct tipc_subscription *s)
{
	struct sub_seq *sseq = nseq->sseqs;
	struct tipc_name_seq ns;

	tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);

	list_add(&s->nameseq_list, &nseq->subscriptions);

	if (!sseq)
		return;

	while (sseq != &nseq->sseqs[nseq->first_free]) {
		if (tipc_subscrp_check_overlap(&ns, sseq->lower, sseq->upper)) {
			struct publication *crs;
			struct name_info *info = sseq->info;
			int must_report = 1;

			list_for_each_entry(crs, &info->zone_list, zone_list) {
				tipc_subscrp_report_overlap(s, sseq->lower,
							    sseq->upper,
							    TIPC_PUBLISHED,
							    crs->ref, crs->node,
							    must_report);
				must_report = 0;
			}
		}
		sseq++;
	}
}

static struct name_seq *nametbl_find_seq(struct net *net, u32 type)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct hlist_head *seq_head;
	struct name_seq *ns;

	seq_head = &tn->nametbl->seq_hlist[hash(type)];
	hlist_for_each_entry_rcu(ns, seq_head, ns_list) {
		if (ns->type == type)
			return ns;
	}

	return NULL;
}

struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
					     u32 lower, u32 upper, u32 scope,
					     u32 node, u32 port, u32 key)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct publication *publ;
	struct name_seq *seq = nametbl_find_seq(net, type);
	int index = hash(type);

	if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
	    (lower > upper)) {
		pr_debug("Failed to publish illegal {%u,%u,%u} with scope %u\n",
			 type, lower, upper, scope);
		return NULL;
	}

	if (!seq)
		seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
	if (!seq)
		return NULL;

	spin_lock_bh(&seq->lock);
	publ = tipc_nameseq_insert_publ(net, seq, type, lower, upper,
					scope, node, port, key);
	spin_unlock_bh(&seq->lock);
	return publ;
}

struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
					     u32 lower, u32 node, u32 ref,
					     u32 key)
{
	struct publication *publ;
	struct name_seq *seq = nametbl_find_seq(net, type);

	if (!seq)
		return NULL;

	spin_lock_bh(&seq->lock);
	publ = tipc_nameseq_remove_publ(net, seq, lower, node, ref, key);
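	/* If this was the last publication of the type and there are no
	 * remaining subscriptions, unlink the sequence from the hash chain
	 * and free it after an RCU grace period.
	 */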
	if (!seq->first_free && list_empty(&seq->subscriptions)) {
		hlist_del_init_rcu(&seq->ns_list);
		kfree(seq->sseqs);
		spin_unlock_bh(&seq->lock);
		kfree_rcu(seq, rcu);
		return publ;
	}
	spin_unlock_bh(&seq->lock);
	return publ;
}

/**
 * tipc_nametbl_translate - perform name translation
 *
 * On entry, 'destnode' is the search domain used during translation.
 *
 * On exit:
 * - if name translation is deferred to another node/cluster/zone,
 *   leaves 'destnode' unchanged (will be non-zero) and returns 0
 * - if name translation is attempted and succeeds, sets 'destnode'
 *   to publishing node and returns port reference (will be non-zero)
 * - if name translation is attempted and fails, sets 'destnode' to 0
 *   and returns 0
 */
u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance,
			   u32 *destnode)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sub_seq *sseq;
	struct name_info *info;
	struct publication *publ;
	struct name_seq *seq;
	u32 ref = 0;
	u32 node = 0;

	if (!tipc_in_scope(*destnode, tn->own_addr))
		return 0;

	rcu_read_lock();
	seq = nametbl_find_seq(net, type);
	if (unlikely(!seq))
		goto not_found;
	spin_lock_bh(&seq->lock);
	sseq = nameseq_find_subseq(seq, instance);
	if (unlikely(!sseq))
		goto no_match;
	info = sseq->info;
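
	/* Each branch below takes the first publication on the most local
	 * non-empty list and rotates it to the tail with list_move_tail(),
	 * so repeated lookups are spread round-robin over equally close
	 * publications.
	 */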
	/* Closest-First Algorithm */
	if (likely(!*destnode)) {
		if (!list_empty(&info->node_list)) {
			publ = list_first_entry(&info->node_list,
						struct publication,
						node_list);
			list_move_tail(&publ->node_list,
				       &info->node_list);
		} else if (!list_empty(&info->cluster_list)) {
			publ = list_first_entry(&info->cluster_list,
						struct publication,
						cluster_list);
			list_move_tail(&publ->cluster_list,
				       &info->cluster_list);
		} else {
			publ = list_first_entry(&info->zone_list,
						struct publication,
						zone_list);
			list_move_tail(&publ->zone_list,
				       &info->zone_list);
		}
	}

	/* Round-Robin Algorithm */
	else if (*destnode == tn->own_addr) {
		if (list_empty(&info->node_list))
			goto no_match;
		publ = list_first_entry(&info->node_list, struct publication,
					node_list);
		list_move_tail(&publ->node_list, &info->node_list);
	} else if (in_own_cluster_exact(net, *destnode)) {
		if (list_empty(&info->cluster_list))
			goto no_match;
		publ = list_first_entry(&info->cluster_list, struct publication,
					cluster_list);
		list_move_tail(&publ->cluster_list, &info->cluster_list);
	} else {
		publ = list_first_entry(&info->zone_list, struct publication,
					zone_list);
		list_move_tail(&publ->zone_list, &info->zone_list);
	}

	ref = publ->ref;
	node = publ->node;
no_match:
	spin_unlock_bh(&seq->lock);
not_found:
	rcu_read_unlock();
	*destnode = node;
	return ref;
}

/**
 * tipc_nametbl_mc_translate - find multicast destinations
 *
 * Creates list of all local ports that overlap the given multicast address;
 * also determines if any off-node ports overlap.
 *
 * Note: Publications with a scope narrower than 'limit' are ignored.
 * (i.e. local node-scope publications mustn't receive messages arriving
 * from another node, even if the multicast link brought them here)
 *
 * Returns non-zero if any off-node ports overlap
 */
int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
			      u32 limit, struct tipc_plist *dports)
{
	struct name_seq *seq;
	struct sub_seq *sseq;
	struct sub_seq *sseq_stop;
	struct name_info *info;
	int res = 0;

	rcu_read_lock();
	seq = nametbl_find_seq(net, type);
	if (!seq)
		goto exit;

	spin_lock_bh(&seq->lock);
	sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
	sseq_stop = seq->sseqs + seq->first_free;
	for (; sseq != sseq_stop; sseq++) {
		struct publication *publ;

		if (sseq->lower > upper)
			break;

		info = sseq->info;
		list_for_each_entry(publ, &info->node_list, node_list) {
			if (publ->scope <= limit)
				tipc_plist_push(dports, publ->ref);
		}

		if (info->cluster_list_size != info->node_list_size)
			res = 1;
	}
	spin_unlock_bh(&seq->lock);
exit:
	rcu_read_unlock();
	return res;
}

/*
 * tipc_nametbl_publish - add name publication to network name tables
 */
struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
					 u32 upper, u32 scope, u32 port_ref,
					 u32 key)
{
	struct publication *publ;
	struct sk_buff *buf = NULL;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->nametbl_lock);
	if (tn->nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
		pr_warn("Publication failed, local publication limit reached (%u)\n",
			TIPC_MAX_PUBLICATIONS);
		spin_unlock_bh(&tn->nametbl_lock);
		return NULL;
	}

	publ = tipc_nametbl_insert_publ(net, type, lower, upper, scope,
					tn->own_addr, port_ref, key);
	if (likely(publ)) {
		tn->nametbl->local_publ_count++;
		buf = tipc_named_publish(net, publ);
		/* Any pending external events? */
		tipc_named_process_backlog(net);
	}
	spin_unlock_bh(&tn->nametbl_lock);
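
	/* Distribute the new publication to the other nodes outside the
	 * name table lock.
	 */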
	if (buf)
		tipc_node_broadcast(net, buf);
	return publ;
}

/**
 * tipc_nametbl_withdraw - withdraw name publication from network name tables
 */
int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
			  u32 key)
{
	struct publication *publ;
	struct sk_buff *skb = NULL;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->nametbl_lock);
	publ = tipc_nametbl_remove_publ(net, type, lower, tn->own_addr,
					ref, key);
	if (likely(publ)) {
		tn->nametbl->local_publ_count--;
		skb = tipc_named_withdraw(net, publ);
		/* Any pending external events? */
		tipc_named_process_backlog(net);
		list_del_init(&publ->pport_list);
		kfree_rcu(publ, rcu);
	} else {
		pr_err("Unable to remove local publication\n"
		       "(type=%u, lower=%u, ref=%u, key=%u)\n",
		       type, lower, ref, key);
	}
	spin_unlock_bh(&tn->nametbl_lock);

	if (skb) {
		tipc_node_broadcast(net, skb);
		return 1;
	}
	return 0;
}

/**
 * tipc_nametbl_subscribe - add a subscription object to the name table
 */
void tipc_nametbl_subscribe(struct tipc_subscription *s)
{
	struct tipc_net *tn = net_generic(s->net, tipc_net_id);
	u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap);
	int index = hash(type);
	struct name_seq *seq;
	struct tipc_name_seq ns;

	spin_lock_bh(&tn->nametbl_lock);
	seq = nametbl_find_seq(s->net, type);
	if (!seq)
		seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
	if (seq) {
		spin_lock_bh(&seq->lock);
		tipc_nameseq_subscribe(seq, s);
		spin_unlock_bh(&seq->lock);
	} else {
		tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);
		pr_warn("Failed to create subscription for {%u,%u,%u}\n",
			ns.type, ns.lower, ns.upper);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_nametbl_unsubscribe - remove a subscription object from name table
 */
void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
{
	struct tipc_net *tn = net_generic(s->net, tipc_net_id);
	struct name_seq *seq;
	u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap);

	spin_lock_bh(&tn->nametbl_lock);
	seq = nametbl_find_seq(s->net, type);
	if (seq != NULL) {
		spin_lock_bh(&seq->lock);
		list_del_init(&s->nameseq_list);
		if (!seq->first_free && list_empty(&seq->subscriptions)) {
			hlist_del_init_rcu(&seq->ns_list);
			kfree(seq->sseqs);
			spin_unlock_bh(&seq->lock);
			kfree_rcu(seq, rcu);
		} else {
			spin_unlock_bh(&seq->lock);
		}
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

int tipc_nametbl_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct name_table *tipc_nametbl;
	int i;

	tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC);
	if (!tipc_nametbl)
		return -ENOMEM;

	for (i = 0; i < TIPC_NAMETBL_SIZE; i++)
		INIT_HLIST_HEAD(&tipc_nametbl->seq_hlist[i]);

	INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
	INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
	INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]);
	tn->nametbl = tipc_nametbl;
	spin_lock_init(&tn->nametbl_lock);
	return 0;
}

/**
 * tipc_purge_publications - remove all publications for a given type
 *
 * tipc_nametbl_lock must be held when calling this function
 */
static void tipc_purge_publications(struct net *net, struct name_seq *seq)
{
	struct publication *publ, *safe;
	struct sub_seq *sseq;
	struct name_info *info;

	spin_lock_bh(&seq->lock);
	sseq = seq->sseqs;
	info = sseq->info;
	list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
		tipc_nameseq_remove_publ(net, seq, publ->lower, publ->node,
					 publ->ref, publ->key);
		kfree_rcu(publ, rcu);
	}
	hlist_del_init_rcu(&seq->ns_list);
	kfree(seq->sseqs);
	spin_unlock_bh(&seq->lock);

	kfree_rcu(seq, rcu);
}

void tipc_nametbl_stop(struct net *net)
{
	u32 i;
	struct name_seq *seq;
	struct hlist_head *seq_head;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct name_table *tipc_nametbl = tn->nametbl;

	/* Verify name table is empty and purge any lingering
	 * publications, then release the name table
	 */
	spin_lock_bh(&tn->nametbl_lock);
	for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
		if (hlist_empty(&tipc_nametbl->seq_hlist[i]))
			continue;
		seq_head = &tipc_nametbl->seq_hlist[i];
		hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
			tipc_purge_publications(net, seq);
		}
	}
	spin_unlock_bh(&tn->nametbl_lock);
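
	/* Wait for any outstanding RCU read-side critical sections that may
	 * still be referencing the table before freeing it.
	 */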
	synchronize_net();
	kfree(tipc_nametbl);
}

static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
					struct name_seq *seq,
					struct sub_seq *sseq, u32 *last_publ)
{
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *publ;
	struct publication *p;

	if (*last_publ) {
		list_for_each_entry(p, &sseq->info->zone_list, zone_list)
			if (p->key == *last_publ)
				break;
		if (p->key != *last_publ)
			return -EPIPE;
	} else {
		p = list_first_entry(&sseq->info->zone_list, struct publication,
				     zone_list);
	}

	list_for_each_entry_from(p, &sseq->info->zone_list, zone_list) {
		*last_publ = p->key;

		hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
				  &tipc_genl_family, NLM_F_MULTI,
				  TIPC_NL_NAME_TABLE_GET);
		if (!hdr)
			return -EMSGSIZE;

		attrs = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE);
		if (!attrs)
			goto msg_full;

		publ = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE_PUBL);
		if (!publ)
			goto attr_msg_full;

		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, seq->type))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sseq->lower))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sseq->upper))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->node))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->ref))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key))
			goto publ_msg_full;

		nla_nest_end(msg->skb, publ);
		nla_nest_end(msg->skb, attrs);

		genlmsg_end(msg->skb, hdr);
	}
	*last_publ = 0;

	return 0;

publ_msg_full:
	nla_nest_cancel(msg->skb, publ);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

static int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq,
				 u32 *last_lower, u32 *last_publ)
{
	struct sub_seq *sseq;
	struct sub_seq *sseq_start;
	int err;

	if (*last_lower) {
		sseq_start = nameseq_find_subseq(seq, *last_lower);
		if (!sseq_start)
			return -EPIPE;
	} else {
		sseq_start = seq->sseqs;
	}

	for (sseq = sseq_start; sseq != &seq->sseqs[seq->first_free]; sseq++) {
		err = __tipc_nl_add_nametable_publ(msg, seq, sseq, last_publ);
		if (err) {
			*last_lower = sseq->lower;
			return err;
		}
	}
	*last_lower = 0;

	return 0;
}

static int tipc_nl_seq_list(struct net *net, struct tipc_nl_msg *msg,
			    u32 *last_type, u32 *last_lower, u32 *last_publ)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct hlist_head *seq_head;
	struct name_seq *seq = NULL;
	int err;
	int i;

	if (*last_type)
		i = hash(*last_type);
	else
		i = 0;

	for (; i < TIPC_NAMETBL_SIZE; i++) {
		seq_head = &tn->nametbl->seq_hlist[i];

		if (*last_type) {
			seq = nametbl_find_seq(net, *last_type);
			if (!seq)
				return -EPIPE;
		} else {
			hlist_for_each_entry_rcu(seq, seq_head, ns_list)
				break;
			if (!seq)
				continue;
		}

		hlist_for_each_entry_from_rcu(seq, ns_list) {
			spin_lock_bh(&seq->lock);
			err = __tipc_nl_subseq_list(msg, seq, last_lower,
						    last_publ);

			if (err) {
				*last_type = seq->type;
				spin_unlock_bh(&seq->lock);
				return err;
			}
			spin_unlock_bh(&seq->lock);
		}
		*last_type = 0;
	}
	return 0;
}
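
/* The dump below is resumable: the position reached in the previous pass is
 * carried in cb->args[] as last_type/last_lower/last_publ, and the walk
 * restarts from that name sequence and publication on the next invocation.
 */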
int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	int done = cb->args[3];
	u32 last_type = cb->args[0];
	u32 last_lower = cb->args[1];
	u32 last_publ = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	err = tipc_nl_seq_list(net, &msg, &last_type, &last_lower, &last_publ);
	if (!err) {
		done = 1;
	} else if (err != -EMSGSIZE) {
		/* We never set seq or call nl_dump_check_consistent(), which
		 * means that setting prev_seq here will cause the consistency
		 * check to fail in the netlink callback handler, resulting in
		 * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set
		 * if we got an error.
		 */
		cb->prev_seq = 1;
	}
	rcu_read_unlock();

	cb->args[0] = last_type;
	cb->args[1] = last_lower;
	cb->args[2] = last_publ;
	cb->args[3] = done;

	return skb->len;
}
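
/* tipc_plist stores its first port directly in the list head, so the common
 * single-destination case needs no extra allocation; additional ports are
 * kept in dynamically allocated list entries.
 */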
void tipc_plist_push(struct tipc_plist *pl, u32 port)
{
	struct tipc_plist *nl;

	if (likely(!pl->port)) {
		pl->port = port;
		return;
	}
	if (pl->port == port)
		return;
	list_for_each_entry(nl, &pl->list, list) {
		if (nl->port == port)
			return;
	}
	nl = kmalloc(sizeof(*nl), GFP_ATOMIC);
	if (nl) {
		nl->port = port;
		list_add(&nl->list, &pl->list);
	}
}

u32 tipc_plist_pop(struct tipc_plist *pl)
{
	struct tipc_plist *nl;
	u32 port = 0;

	if (likely(list_empty(&pl->list))) {
		port = pl->port;
		pl->port = 0;
		return port;
	}
	nl = list_first_entry(&pl->list, typeof(*nl), list);
	port = nl->port;
	list_del(&nl->list);
	kfree(nl);
	return port;
}