/*
 * File: socket.c
 *
 * Phonet sockets
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
 *          Rémi Denis-Courmont
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/phonet.h>
#include <linux/export.h>
#include <net/phonet/phonet.h>
#include <net/phonet/pep.h>
#include <net/phonet/pn_dev.h>

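/* Detach the sock from its struct socket and let the protocol close it. */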
static int pn_socket_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		sock->sk = NULL;
		sk->sk_prot->close(sk, 0);
	}
	return 0;
}

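/*
 * Bound Phonet sockets are kept in a small hash table, indexed by the
 * low bits of the bound Phonet object.  Writers take pnsocks.lock;
 * lookups on the RX path walk the chains under RCU.
 */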
#define PN_HASHSIZE	16
#define PN_HASHMASK	(PN_HASHSIZE-1)

static struct {
	struct hlist_head hlist[PN_HASHSIZE];
	struct mutex lock;
} pnsocks;

void __init pn_sock_init(void)
{
	unsigned int i;

	for (i = 0; i < PN_HASHSIZE; i++)
		INIT_HLIST_HEAD(pnsocks.hlist + i);
	mutex_init(&pnsocks.lock);
}

static struct hlist_head *pn_hash_list(u16 obj)
{
	return pnsocks.hlist + (obj & PN_HASHMASK);
}

/*
 * Find address based on socket address, match only certain fields.
 * Also grab sock if it was found. Remember to sock_put it later.
 */
struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
{
	struct sock *sknode;
	struct sock *rval = NULL;
	u16 obj = pn_sockaddr_get_object(spn);
	u8 res = spn->spn_resource;
	struct hlist_head *hlist = pn_hash_list(obj);

	rcu_read_lock();
	sk_for_each_rcu(sknode, hlist) {
		struct pn_sock *pn = pn_sk(sknode);
		BUG_ON(!pn->sobject); /* unbound socket */

		if (!net_eq(sock_net(sknode), net))
			continue;
		if (pn_port(obj)) {
			/* Look up socket by port */
			if (pn_port(pn->sobject) != pn_port(obj))
				continue;
		} else {
			/* If port is zero, look up by resource */
			if (pn->resource != res)
				continue;
		}
		if (pn_addr(pn->sobject) &&
		    pn_addr(pn->sobject) != pn_addr(obj))
			continue;

		rval = sknode;
		sock_hold(sknode);
		break;
	}
	rcu_read_unlock();

	return rval;
}

/* Deliver a broadcast packet (only in bottom-half) */
void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
{
	struct hlist_head *hlist = pnsocks.hlist;
	unsigned int h;

	rcu_read_lock();
	for (h = 0; h < PN_HASHSIZE; h++) {
		struct sock *sknode;

		sk_for_each(sknode, hlist) {
			struct sk_buff *clone;

			if (!net_eq(sock_net(sknode), net))
				continue;
			if (!sock_flag(sknode, SOCK_BROADCAST))
				continue;

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone) {
				sock_hold(sknode);
				sk_receive_skb(sknode, clone, 0);
			}
		}
		hlist++;
	}
	rcu_read_unlock();
}

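/*
 * Add or remove a bound socket in the RX hash table.  Insertion and
 * removal are serialized by pnsocks.lock; pn_sock_unhash() also drops
 * any resource bindings and waits for concurrent RCU lookups to finish.
 */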
int pn_sock_hash(struct sock *sk)
{
	struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);

	mutex_lock(&pnsocks.lock);
	sk_add_node_rcu(sk, hlist);
	mutex_unlock(&pnsocks.lock);

	return 0;
}
EXPORT_SYMBOL(pn_sock_hash);

void pn_sock_unhash(struct sock *sk)
{
	mutex_lock(&pnsocks.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&pnsocks.lock);
	pn_sock_unbind_all_res(sk);
	synchronize_rcu();
}
EXPORT_SYMBOL(pn_sock_unhash);

static DEFINE_MUTEX(port_mutex);

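/*
 * Bind a socket to a Phonet address (device address + port).  A zero
 * port lets get_port() pick a free one; a non-zero device address must
 * belong to a local Phonet device.
 */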
static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	int err;
	u16 handle;
	u8 saddr;

	if (sk->sk_prot->bind)
		return sk->sk_prot->bind(sk, addr, len);

	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
	saddr = pn_addr(handle);
	if (saddr && phonet_address_lookup(sock_net(sk), saddr))
		return -EADDRNOTAVAIL;

	lock_sock(sk);
	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
		err = -EINVAL; /* attempt to rebind */
		goto out;
	}
	WARN_ON(sk_hashed(sk));
	mutex_lock(&port_mutex);
	err = sk->sk_prot->get_port(sk, pn_port(handle));
	if (err)
		goto out_port;

	/* get_port() sets the port, bind() sets the address if applicable */
	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
	pn->resource = spn->spn_resource;

	/* Enable RX on the socket */
	err = sk->sk_prot->hash(sk);
out_port:
	mutex_unlock(&port_mutex);
out:
	release_sock(sk);
	return err;
}

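/* Bind to any local port if the socket is not yet bound. */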
static int pn_socket_autobind(struct socket *sock)
{
	struct sockaddr_pn sa;
	int err;

	memset(&sa, 0, sizeof(sa));
	sa.spn_family = AF_PHONET;
	err = pn_socket_bind(sock, (struct sockaddr *)&sa,
			     sizeof(struct sockaddr_pn));
	if (err != -EINVAL)
		return err;
	BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
	return 0; /* socket was already bound */
}

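/*
 * Connect the socket: autobind if needed, hand over to the protocol's
 * connect(), then wait for the handshake to leave TCP_SYN_SENT unless
 * the socket is non-blocking.
 */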
static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
		int len, int flags)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	struct task_struct *tsk = current;
	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	int err;

	if (pn_socket_autobind(sock))
		return -ENOBUFS;
	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	lock_sock(sk);

	switch (sock->state) {
	case SS_UNCONNECTED:
		if (sk->sk_state != TCP_CLOSE) {
			err = -EISCONN;
			goto out;
		}
		break;
	case SS_CONNECTING:
		err = -EALREADY;
		goto out;
	default:
		err = -EISCONN;
		goto out;
	}

	pn->dobject = pn_sockaddr_get_object(spn);
	pn->resource = pn_sockaddr_get_resource(spn);
	sock->state = SS_CONNECTING;

	err = sk->sk_prot->connect(sk, addr, len);
	if (err) {
		sock->state = SS_UNCONNECTED;
		pn->dobject = 0;
		goto out;
	}

	while (sk->sk_state == TCP_SYN_SENT) {
		DEFINE_WAIT(wait);

		if (!timeo) {
			err = -EINPROGRESS;
			goto out;
		}
		if (signal_pending(tsk)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		finish_wait(sk_sleep(sk), &wait);
	}

	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
		err = 0;
	else if (sk->sk_state == TCP_CLOSE_WAIT)
		err = -ECONNRESET;
	else
		err = -ECONNREFUSED;
	sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
out:
	release_sock(sk);
	return err;
}

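/* Accept a pending connection on a listening socket. */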
static int pn_socket_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	struct sock *sk = sock->sk;
	struct sock *newsk;
	int err;

	if (unlikely(sk->sk_state != TCP_LISTEN))
		return -EINVAL;

	newsk = sk->sk_prot->accept(sk, flags, &err);
	if (!newsk)
		return err;

	lock_sock(newsk);
	sock_graft(newsk, newsock);
	newsock->state = SS_CONNECTED;
	release_sock(newsk);
	return 0;
}

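/* Report the local Phonet address; the peer address is never filled in. */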
static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
			     int *sockaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);

	memset(addr, 0, sizeof(struct sockaddr_pn));
	addr->sa_family = AF_PHONET;
	if (!peer) /* Race with bind() here is userland's problem. */
		pn_sockaddr_set_object((struct sockaddr_pn *)addr,
				       pn->sobject);

	*sockaddr_len = sizeof(struct sockaddr_pn);
	return 0;
}

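/*
 * Poll for pipe sockets: POLLIN on queued data, POLLPRI on pending
 * control requests, and POLLOUT only while the pipe has TX credits.
 */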
static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
				   poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct pep_sock *pn = pep_sk(sk);
	unsigned int mask = 0;

	poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == TCP_CLOSE)
		return POLLERR;
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;
	if (!skb_queue_empty(&pn->ctrlreq_queue))
		mask |= POLLPRI;
	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
		return POLLHUP;

	if (sk->sk_state == TCP_ESTABLISHED &&
	    atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
	    atomic_read(&pn->tx_credits))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

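/*
 * SIOCPNGETOBJECT is handled here: combine a local device address with
 * the socket's bound port.  Everything else goes to the protocol.
 */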
static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
			   unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);

	if (cmd == SIOCPNGETOBJECT) {
		struct net_device *dev;
		u16 handle;
		u8 saddr;

		if (get_user(handle, (__u16 __user *)arg))
			return -EFAULT;

		lock_sock(sk);
		if (sk->sk_bound_dev_if)
			dev = dev_get_by_index(sock_net(sk),
					       sk->sk_bound_dev_if);
		else
			dev = phonet_device_get(sock_net(sk));
		if (dev && (dev->flags & IFF_UP))
			saddr = phonet_address_get(dev, pn_addr(handle));
		else
			saddr = PN_NO_ADDR;
		release_sock(sk);

		if (dev)
			dev_put(dev);
		if (saddr == PN_NO_ADDR)
			return -EHOSTUNREACH;

		handle = pn_object(saddr, pn_port(pn->sobject));
		return put_user(handle, (__u16 __user *)arg);
	}

	return sk->sk_prot->ioctl(sk, cmd, arg);
}

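/* Put an autobound, unconnected socket into the listening state. */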
static int pn_socket_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (pn_socket_autobind(sock))
		return -ENOBUFS;

	lock_sock(sk);
	if (sock->state != SS_UNCONNECTED) {
		err = -EINVAL;
		goto out;
	}

	if (sk->sk_state != TCP_LISTEN) {
		sk->sk_state = TCP_LISTEN;
		sk->sk_ack_backlog = 0;
	}
	sk->sk_max_ack_backlog = backlog;
out:
	release_sock(sk);
	return err;
}

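/* Autobind if necessary, then let the protocol send the message. */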
static int pn_socket_sendmsg(struct socket *sock, struct msghdr *m,
			     size_t total_len)
{
	struct sock *sk = sock->sk;

	if (pn_socket_autobind(sock))
		return -EAGAIN;

	return sk->sk_prot->sendmsg(sk, m, total_len);
}

const struct proto_ops phonet_dgram_ops = {
	.family		= AF_PHONET,
	.owner		= THIS_MODULE,
	.release	= pn_socket_release,
	.bind		= pn_socket_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= pn_socket_getname,
	.poll		= datagram_poll,
	.ioctl		= pn_socket_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = sock_no_setsockopt,
	.compat_getsockopt = sock_no_getsockopt,
#endif
	.sendmsg	= pn_socket_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

const struct proto_ops phonet_stream_ops = {
	.family		= AF_PHONET,
	.owner		= THIS_MODULE,
	.release	= pn_socket_release,
	.bind		= pn_socket_bind,
	.connect	= pn_socket_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= pn_socket_accept,
	.getname	= pn_socket_getname,
	.poll		= pn_socket_poll,
	.ioctl		= pn_socket_ioctl,
	.listen		= pn_socket_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
	.sendmsg	= pn_socket_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
EXPORT_SYMBOL(phonet_stream_ops);

/* allocate port for a socket */
int pn_sock_get_port(struct sock *sk, unsigned short sport)
{
	static int port_cur;
	struct net *net = sock_net(sk);
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn try_sa;
	struct sock *tmpsk;

	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
	try_sa.spn_family = AF_PHONET;
	WARN_ON(!mutex_is_locked(&port_mutex));
	if (!sport) {
		/* search free port */
		int port, pmin, pmax;

		phonet_get_local_port_range(&pmin, &pmax);
		for (port = pmin; port <= pmax; port++) {
			port_cur++;
			if (port_cur < pmin || port_cur > pmax)
				port_cur = pmin;

			pn_sockaddr_set_port(&try_sa, port_cur);
			tmpsk = pn_find_sock_by_sa(net, &try_sa);
			if (tmpsk == NULL) {
				sport = port_cur;
				goto found;
			} else
				sock_put(tmpsk);
		}
	} else {
		/* try to find specific port */
		pn_sockaddr_set_port(&try_sa, sport);
		tmpsk = pn_find_sock_by_sa(net, &try_sa);
		if (tmpsk == NULL)
			/* No sock there! We can use that port... */
			goto found;
		else
			sock_put(tmpsk);
	}
	/* the port must be in use already */
	return -EADDRINUSE;

found:
	pn->sobject = pn_object(pn_addr(pn->sobject), sport);
	return 0;
}
EXPORT_SYMBOL(pn_sock_get_port);

#ifdef CONFIG_PROC_FS
static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
{
	struct net *net = seq_file_net(seq);
	struct hlist_head *hlist = pnsocks.hlist;
	struct sock *sknode;
	unsigned int h;

	for (h = 0; h < PN_HASHSIZE; h++) {
		sk_for_each_rcu(sknode, hlist) {
			if (!net_eq(net, sock_net(sknode)))
				continue;
			if (!pos)
				return sknode;
			pos--;
		}
		hlist++;
	}
	return NULL;
}

static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
{
	struct net *net = seq_file_net(seq);

	do
		sk = sk_next(sk);
	while (sk && !net_eq(net, sock_net(sk)));

	return sk;
}

static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = pn_sock_get_idx(seq, 0);
	else
		sk = pn_sock_get_next(seq, v);
	(*pos)++;
	return sk;
}

static void pn_sock_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}

static int pn_sock_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "pt loc rem rs st tx_queue rx_queue "
			 " uid inode ref pointer drops");
	else {
		struct sock *sk = v;
		struct pn_sock *pn = pn_sk(sk);

		seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
			"%d %pK %d",
			sk->sk_protocol, pn->sobject, pn->dobject,
			pn->resource, sk->sk_state,
			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
			from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
			sock_i_ino(sk),
			atomic_read(&sk->sk_refcnt), sk,
			atomic_read(&sk->sk_drops));
	}
	seq_pad(seq, '\n');
	return 0;
}

static const struct seq_operations pn_sock_seq_ops = {
	.start = pn_sock_seq_start,
	.next = pn_sock_seq_next,
	.stop = pn_sock_seq_stop,
	.show = pn_sock_seq_show,
};

static int pn_sock_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &pn_sock_seq_ops,
			    sizeof(struct seq_net_private));
}

const struct file_operations pn_sock_seq_fops = {
	.owner = THIS_MODULE,
	.open = pn_sock_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif

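/*
 * Mapping from Phonet resource number to the single socket bound to it
 * (init_net only).  Updates are serialized by resource_mutex; the RX
 * path reads the table under RCU.
 */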
static struct {
	struct sock *sk[256];
} pnres;

/*
 * Find and hold socket based on resource.
 */
struct sock *pn_find_sock_by_res(struct net *net, u8 res)
{
	struct sock *sk;

	if (!net_eq(net, &init_net))
		return NULL;

	rcu_read_lock();
	sk = rcu_dereference(pnres.sk[res]);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();
	return sk;
}

static DEFINE_MUTEX(resource_mutex);

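/* Bind a socket to a Phonet resource (requires CAP_SYS_ADMIN). */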
int pn_sock_bind_res(struct sock *sk, u8 res)
{
	int ret = -EADDRINUSE;

	if (!net_eq(sock_net(sk), &init_net))
		return -ENOIOCTLCMD;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (pn_socket_autobind(sk->sk_socket))
		return -EAGAIN;

	mutex_lock(&resource_mutex);
	if (pnres.sk[res] == NULL) {
		sock_hold(sk);
		rcu_assign_pointer(pnres.sk[res], sk);
		ret = 0;
	}
	mutex_unlock(&resource_mutex);
	return ret;
}

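/* Drop the resource binding and release the matching reference. */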
int pn_sock_unbind_res(struct sock *sk, u8 res)
{
	int ret = -ENOENT;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&resource_mutex);
	if (pnres.sk[res] == sk) {
		RCU_INIT_POINTER(pnres.sk[res], NULL);
		ret = 0;
	}
	mutex_unlock(&resource_mutex);

	if (ret == 0) {
		synchronize_rcu();
		sock_put(sk);
	}
	return ret;
}

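/*
 * Remove every resource binding of a socket, typically on unhash.
 * References are dropped with __sock_put(); the caller handles the RCU
 * grace period (see pn_sock_unhash()).
 */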
void pn_sock_unbind_all_res(struct sock *sk)
{
	unsigned int res, match = 0;

	mutex_lock(&resource_mutex);
	for (res = 0; res < 256; res++) {
		if (pnres.sk[res] == sk) {
			RCU_INIT_POINTER(pnres.sk[res], NULL);
			match++;
		}
	}
	mutex_unlock(&resource_mutex);

	while (match > 0) {
		__sock_put(sk);
		match--;
	}
	/* Caller is responsible for RCU sync before final sock_put() */
}

#ifdef CONFIG_PROC_FS
static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
{
	struct net *net = seq_file_net(seq);
	unsigned int i;

	if (!net_eq(net, &init_net))
		return NULL;

	for (i = 0; i < 256; i++) {
		if (pnres.sk[i] == NULL)
			continue;
		if (!pos)
			return pnres.sk + i;
		pos--;
	}
	return NULL;
}

static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
{
	struct net *net = seq_file_net(seq);
	unsigned int i;

	BUG_ON(!net_eq(net, &init_net));

	for (i = (sk - pnres.sk) + 1; i < 256; i++)
		if (pnres.sk[i])
			return pnres.sk + i;
	return NULL;
}

static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(resource_mutex)
{
	mutex_lock(&resource_mutex);
	return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock **sk;

	if (v == SEQ_START_TOKEN)
		sk = pn_res_get_idx(seq, 0);
	else
		sk = pn_res_get_next(seq, v);
	(*pos)++;
	return sk;
}

static void pn_res_seq_stop(struct seq_file *seq, void *v)
	__releases(resource_mutex)
{
	mutex_unlock(&resource_mutex);
}

static int pn_res_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 63);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "rs uid inode");
	else {
		struct sock **psk = v;
		struct sock *sk = *psk;

		seq_printf(seq, "%02X %5u %lu",
			   (int) (psk - pnres.sk),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
			   sock_i_ino(sk));
	}
	seq_pad(seq, '\n');
	return 0;
}

static const struct seq_operations pn_res_seq_ops = {
	.start = pn_res_seq_start,
	.next = pn_res_seq_next,
	.stop = pn_res_seq_stop,
	.show = pn_res_seq_show,
};

static int pn_res_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &pn_res_seq_ops,
			    sizeof(struct seq_net_private));
}

const struct file_operations pn_res_seq_fops = {
	.owner = THIS_MODULE,
	.open = pn_res_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif