/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>

#include <linux/uaccess.h>

#define FL_MIN_LINGER	6	/* Minimal linger. It is set to the 6 seconds
				 * specified in an old IPv6 RFC; a reasonable
				 * value.
				 */
#define FL_MAX_LINGER	150	/* Maximal linger timeout */

/* FL hash table */

#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l)&FL_HASH_MASK)

static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(struct timer_list *unused);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc);

/* FL hash table lock: it protects only the GC */

static DEFINE_SPINLOCK(ip6_fl_lock);

/* Lock protecting the per-socket flowlabel lists */

static DEFINE_SPINLOCK(ip6_sk_fl_lock);

#define for_each_fl_rcu(hash, fl)				\
	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)				\
	for (fl = rcu_dereference_bh(fl->next);			\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)				\
	for (sfl = rcu_dereference_bh(np->ipv6_fl_list);	\
	     sfl != NULL;					\
	     sfl = rcu_dereference_bh(sfl->next))
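
/* Find a label in the global hash for @net; caller must hold the RCU
 * read lock (BH) or ip6_fl_lock.  No reference is taken.
 */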
static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for_each_fl_rcu(FL_HASH(label), fl) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}
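
/* As __fl_lookup(), but take a reference on the entry.  Returns NULL if
 * the label is unknown or the entry is already dying (users hit zero).
 */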
static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}
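
/* Freeing is deferred by an RCU grace period so that lockless readers
 * still traversing the hash chains stay safe; process-shared labels also
 * drop their pid reference here.
 */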
static void fl_free_rcu(struct rcu_head *head)
{
	struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);

	if (fl->share == IPV6_FL_S_PROCESS)
		put_pid(fl->owner.pid);
	kfree(fl->opt);
	kfree(fl);
}

static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl)
		call_rcu(&fl->rcu, fl_free_rcu);
}
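
/* Drop one reference.  The last put does not free the label: it lingers
 * in the hash until fl->linger/fl->expires pass, and the GC timer is
 * pulled forward if it would otherwise fire too late.
 */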
static void fl_release(struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;

		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;

			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	spin_unlock_bh(&ip6_fl_lock);
}
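
/* GC timer: reap unreferenced labels whose expiry has passed, and re-arm
 * the timer for the earliest time-to-die still left in the table.
 */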
static void ip6_fl_gc(struct timer_list *unused)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	spin_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;

				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched) {
		mod_timer(&ip6_fl_gc_timer, sched);
	}
	spin_unlock(&ip6_fl_lock);
}
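
/* On netns teardown, unhash every unreferenced label belonging to the
 * dying namespace.
 */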
static void __net_exit ip6_fl_purge(struct net *net)
{
	int i;

	spin_lock_bh(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (net_eq(fl->fl_net, net) &&
			    atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	spin_unlock_bh(&ip6_fl_lock);
}
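
/* Insert a freshly created label into the global hash.  label == 0 asks
 * for a random unused value.  If a caller-chosen label raced into the
 * table meanwhile, take a reference on the existing entry and hand it
 * back so the caller can recheck sharing permissions.
 */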
static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	spin_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (!lfl)
					break;
			}
		}
	} else {
		/*
		 * we dropped the ip6_fl_lock, so this entry could reappear
		 * and we need to recheck with it.
		 *
		 * OTOH no need to search the active socket first, like it is
		 * done in ipv6_flowlabel_opt - sock is locked, so new entry
		 * with the same label can only appear on another sock
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl) {
			atomic_inc(&lfl->users);
			spin_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
	atomic_inc(&fl_size);
	spin_unlock_bh(&ip6_fl_lock);
	return NULL;
}

/* Socket flowlabel lists */
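
/* Look up a label on the socket's own list; on success take a reference
 * and refresh its lastuse stamp.
 */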
struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl) {
		struct ip6_flowlabel *fl = sfl->fl;

		if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
			fl->lastuse = jiffies;
			rcu_read_unlock_bh();
			return fl;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);
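
/* Called on socket destruction: unlink and release every label on the
 * socket's list.  ip6_sk_fl_lock is dropped around fl_release(), which
 * takes ip6_fl_lock itself.
 */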
void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (!rcu_access_pointer(np->ipv6_fl_list))
		return;

	spin_lock_bh(&ip6_sk_fl_lock);
	while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
						lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
		np->ipv6_fl_list = sfl->next;
		spin_unlock_bh(&ip6_sk_fl_lock);

		fl_release(sfl->fl);
		kfree_rcu(sfl, rcu);

		spin_lock_bh(&ip6_sk_fl_lock);
	}
	spin_unlock_bh(&ip6_sk_fl_lock);
}

/* Service routines */

/*
 * It is the only difficult place.  A flowlabel enforces equal headers
 * up to and including the routing header; the user may, however, supply
 * options following the rthdr.
 */
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (!fopt || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	opt_space->tot_len = fopt->tot_len;
	return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);
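
/* Clamp a requested lifetime (seconds) to policy: at least FL_MIN_LINGER,
 * and above FL_MAX_LINGER only with CAP_NET_ADMIN.  Returns the value in
 * jiffies, or 0 to signal "not permitted".
 */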
static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER*HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl*HZ;
}
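
/* Extend a label's linger and expiry times; both may only ever grow, so
 * a renewal can never shorten an existing lifetime.
 */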
static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;

	spin_lock_bh(&ip6_fl_lock);
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	spin_unlock_bh(&ip6_fl_lock);

	return 0;
}
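
/* Build a new label from an IPV6_FL_A_GET request.  Ancillary data
 * following the request in optval is parsed with ip6_datagram_send_ctl();
 * only hop-by-hop, destination-0 and routing-header options may be
 * attached (opt_flen must end up zero), matching the constraint described
 * above fl6_merge_options().
 */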
static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
	  char __user *optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		struct ipcm6_cookie ipc6;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (!fl->opt)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt+1);
		memset(&flowi6, 0, sizeof(flowi6));

		ipc6.opt = fl->opt;
		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = net;
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}
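
/* Admission control for new labels: without CAP_NET_ADMIN a socket may
 * hold at most FL_MAX_PER_SOCK labels, and allocation is throttled well
 * before the global table reaches FL_MAX_SIZE entries.
 */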
static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl)
		count++;
	rcu_read_unlock_bh();

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
	     !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}
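
/* Link an (sfl, fl) pair onto the head of the socket's RCU list. */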
static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
			   struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	rcu_assign_pointer(np->ipv6_fl_list, sfl);
	spin_unlock_bh(&ip6_sk_fl_lock);
}
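
/* getsockopt(IPV6_FLOWLABEL_MGR): report the peer's label
 * (IPV6_FL_F_REMOTE), the reflected label, or the attributes of the
 * label currently selected on this socket.
 */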
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
			   int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (flags & IPV6_FL_F_REMOTE) {
		freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
		return 0;
	}

	if (np->repflow) {
		freq->flr_label = np->flow_label;
		return 0;
	}

	rcu_read_lock_bh();

	for_each_sk_fl_rcu(np, sfl) {
		if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
			spin_lock_bh(&ip6_fl_lock);
			freq->flr_label = sfl->fl->label;
			freq->flr_dst = sfl->fl->dst;
			freq->flr_share = sfl->fl->share;
			freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
			freq->flr_linger = sfl->fl->linger / HZ;
			spin_unlock_bh(&ip6_fl_lock);
			rcu_read_unlock_bh();
			return 0;
		}
	}
	rcu_read_unlock_bh();

	return -ENOENT;
}
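
/* setsockopt(IPV6_FLOWLABEL_MGR) entry point: IPV6_FL_A_GET attaches a
 * (possibly newly created) label to the socket, IPV6_FL_A_PUT detaches
 * one and IPV6_FL_A_RENEW extends its lifetime.
 *
 * A minimal userspace sketch of the GET path (illustrative only; error
 * handling and the later IPV6_FLOWINFO_SEND plumbing are omitted, and
 * "dst" stands for the peer's struct in6_addr):
 *
 *	struct in6_flowlabel_req freq = {
 *		.flr_dst    = dst,
 *		.flr_label  = 0,	// 0: let the kernel pick a label
 *		.flr_action = IPV6_FL_A_GET,
 *		.flr_share  = IPV6_FL_S_EXCL,
 *		.flr_flags  = IPV6_FL_F_CREATE,
 *	};
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR, &freq, sizeof(freq));
 *	// on success freq.flr_label holds the kernel-chosen label
 */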
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl;
	struct ipv6_fl_socklist __rcu **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;

	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;
			if (!np->repflow)
				return -ESRCH;
			np->flow_label = 0;
			np->repflow = 0;
			return 0;
		}
		spin_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list;
		     (sfl = rcu_dereference_protected(*sflp,
						      lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
		     sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = sfl->next;
				spin_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree_rcu(sfl, rcu);
				return 0;
			}
		}
		spin_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;

	case IPV6_FL_A_RENEW:
		rcu_read_lock_bh();
		for_each_sk_fl_rcu(np, sfl) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				rcu_read_unlock_bh();
				return err;
			}
		}
		rcu_read_unlock_bh();

		if (freq.flr_share == IPV6_FL_S_NONE &&
		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;

	case IPV6_FL_A_GET:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			struct net *net = sock_net(sk);

			if (net->ipv6.sysctl.flowlabel_consistency) {
				net_info_ratelimited("Can not set IPV6_FL_F_REFLECT if flowlabel_consistency sysctl is enabled\n");
				return -EPERM;
			}

			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;

			np->repflow = 1;
			return 0;
		}

		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;

		if (net->ipv6.sysctl.flowlabel_state_ranges &&
		    (freq.flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
			return -ERANGE;

		fl = fl_create(net, sk, &freq, optval, optlen, &err);
		if (!fl)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

		if (freq.flr_label) {
			err = -EEXIST;
			rcu_read_lock_bh();
			for_each_sk_fl_rcu(np, sfl) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags&IPV6_FL_F_EXCL) {
						rcu_read_unlock_bh();
						goto done;
					}
					fl1 = sfl->fl;
					if (!atomic_inc_not_zero(&fl1->users))
						fl1 = NULL;
					break;
				}
			}
			rcu_read_unlock_bh();

			if (!fl1)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags&IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    ((fl1->share == IPV6_FL_S_PROCESS) &&
				     (fl1->owner.pid != fl->owner.pid)) ||
				    ((fl1->share == IPV6_FL_S_USER) &&
				     !uid_eq(fl1->owner.uid, fl->owner.uid)))
					goto release;

				err = -ENOMEM;
				if (!sfl1)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags&IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (!sfl1)
			goto done;

		err = mem_check(sk);
		if (err != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}
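
/* /proc/net/ip6_flowlabel: dump every label visible in this netns, one
 * line per entry: label, share mode, owner (pid or uid), reference
 * count, linger and expiry (seconds), destination and option length.
 */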
#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	struct pid_namespace *pid_ns;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)

static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
	}
	fl = NULL;
out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_fl_continue_rcu(fl) {
		if (net_eq(fl->fl_net, net))
			goto out;
	}

try_again:
	if (++state->bucket <= FL_HASH_MASK) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
		goto try_again;
	}
	fl = NULL;

out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);

	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			--pos;
	return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);

	state->pid_ns = proc_pid_ns(file_inode(seq->file));

	rcu_read_lock_bh();
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock_bh();
}

static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Label S Owner  Users  Linger Expires  Dst                              Opt\n");
	} else {
		struct ip6_flowlabel *fl = v;
		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned int)ntohl(fl->label),
			   fl->share,
			   ((fl->share == IPV6_FL_S_PROCESS) ?
			    pid_nr_ns(fl->owner.pid, state->pid_ns) :
			    ((fl->share == IPV6_FL_S_USER) ?
			     from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
			     0)),
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}

static const struct seq_operations ip6fl_seq_ops = {
	.start	=	ip6fl_seq_start,
	.next	=	ip6fl_seq_next,
	.stop	=	ip6fl_seq_stop,
	.show	=	ip6fl_seq_show,
};

static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_create_net("ip6_flowlabel", 0444, net->proc_net,
			     &ip6fl_seq_ops, sizeof(struct ip6fl_iter_state)))
		return -ENOMEM;
	return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
	remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif

static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};
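
/* Module-level init/exit, called from the IPv6 stack setup and teardown
 * paths (af_inet6.c).
 */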
int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}