/*
 * net/tipc/server.c: TIPC server infrastructure
 *
 * Copyright (c) 2012-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "server.h"
#include "core.h"
#include "socket.h"
#include <net/sock.h>
#include <linux/module.h>

/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT	25
#define MAX_RECV_MSG_COUNT	25
#define CF_CONNECTED		1
#define CF_SERVER		2

#define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data)

/**
 * struct tipc_conn - TIPC connection structure
 * @kref: reference counter to connection object
 * @conid: connection identifier
 * @sock: socket handler associated with connection
 * @flags: indicates connection state
 * @server: pointer to connected server
 * @rwork: receive work item
 * @usr_data: user-specified field
 * @rx_action: what to do when connection socket is active
 * @outqueue: pointer to first outbound message in queue
 * @outqueue_lock: control access to the outqueue
 * @swork: send work item
 */
struct tipc_conn {
	struct kref kref;
	int conid;
	struct socket *sock;
	unsigned long flags;
	struct tipc_server *server;
	struct work_struct rwork;
	int (*rx_action) (struct tipc_conn *con);
	void *usr_data;
	struct list_head outqueue;
	spinlock_t outqueue_lock;
	struct work_struct swork;
};

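/* Lifetime and locking, as used below: each connection is reference counted
 * through @kref; the socket callbacks take an extra reference before queueing
 * @rwork/@swork, and the work handlers drop it again once they have run.
 * @outqueue is protected by @outqueue_lock, and the socket's sk_user_data
 * (read back through sock2con()) is only touched under sk_callback_lock.
 */
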
/* An entry waiting to be sent */
struct outqueue_entry {
	struct list_head list;
	struct kvec iov;
	struct sockaddr_tipc dest;
};

static void tipc_recv_work(struct work_struct *work);
static void tipc_send_work(struct work_struct *work);
static void tipc_clean_outqueues(struct tipc_conn *con);

static void tipc_conn_kref_release(struct kref *kref)
{
	struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
	struct tipc_server *s = con->server;
	struct sockaddr_tipc *saddr = s->saddr;
	struct socket *sock = con->sock;
	struct sock *sk;

	if (sock) {
		sk = sock->sk;
		if (test_bit(CF_SERVER, &con->flags)) {
			__module_get(sock->ops->owner);
			__module_get(sk->sk_prot_creator->owner);
		}
		/* Rebinding with a negative scope withdraws the name the
		 * server published, before the socket is released.
		 */
		saddr->scope = -TIPC_NODE_SCOPE;
		kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
		sock_release(sock);
		con->sock = NULL;

		spin_lock_bh(&s->idr_lock);
		idr_remove(&s->conn_idr, con->conid);
		s->idr_in_use--;
		spin_unlock_bh(&s->idr_lock);
	}

	tipc_clean_outqueues(con);
	kfree(con);
}

static void conn_put(struct tipc_conn *con)
{
	kref_put(&con->kref, tipc_conn_kref_release);
}

static void conn_get(struct tipc_conn *con)
{
	kref_get(&con->kref);
}

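/* Look up a connection by id and take a reference on it. Only connections
 * still marked CF_CONNECTED are returned; callers balance with conn_put().
 */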
static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
{
	struct tipc_conn *con;

	spin_lock_bh(&s->idr_lock);
	con = idr_find(&s->conn_idr, conid);
	if (con && test_bit(CF_CONNECTED, &con->flags))
		conn_get(con);
	else
		con = NULL;
	spin_unlock_bh(&s->idr_lock);
	return con;
}

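/* Socket callbacks: data_ready kicks the receive workqueue and write_space
 * kicks the send workqueue. An extra reference is taken for the queued work
 * item and dropped again at once if the work was already pending.
 */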
static void sock_data_ready(struct sock *sk)
{
	struct tipc_conn *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con && test_bit(CF_CONNECTED, &con->flags)) {
		conn_get(con);
		if (!queue_work(con->server->rcv_wq, &con->rwork))
			conn_put(con);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void sock_write_space(struct sock *sk)
{
	struct tipc_conn *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con && test_bit(CF_CONNECTED, &con->flags)) {
		conn_get(con);
		if (!queue_work(con->server->send_wq, &con->swork))
			conn_put(con);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

static void tipc_register_callbacks(struct socket *sock, struct tipc_conn *con)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_data_ready = sock_data_ready;
	sk->sk_write_space = sock_write_space;
	sk->sk_user_data = con;
	con->sock = sock;
	write_unlock_bh(&sk->sk_callback_lock);
}

static void tipc_unregister_callbacks(struct tipc_conn *con)
{
	struct sock *sk = con->sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

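/* Take a connection out of service: detach it from its socket, notify the
 * server's release callback, shut the socket down and drop the reference
 * held since tipc_alloc_conn(). The actual unbind and socket release happen
 * in tipc_conn_kref_release() once all outstanding references are gone.
 */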
static void tipc_close_conn(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;

	if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
		tipc_unregister_callbacks(con);

		if (con->conid)
			s->tipc_conn_release(con->conid, con->usr_data);

		/* We must not flush the pending work items here, since we
		 * may be running from one of them. The races with pending
		 * rx/tx work structs are harmless for us, as this connection
		 * has already been deleted from the server's connection list.
		 */
		kernel_sock_shutdown(con->sock, SHUT_RDWR);
		conn_put(con);
	}
}

static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
{
	struct tipc_conn *con;
	int ret;

	con = kzalloc(sizeof(struct tipc_conn), GFP_ATOMIC);
	if (!con)
		return ERR_PTR(-ENOMEM);

	kref_init(&con->kref);
	INIT_LIST_HEAD(&con->outqueue);
	spin_lock_init(&con->outqueue_lock);
	INIT_WORK(&con->swork, tipc_send_work);
	INIT_WORK(&con->rwork, tipc_recv_work);

	spin_lock_bh(&s->idr_lock);
	ret = idr_alloc(&s->conn_idr, con, 0, 0, GFP_ATOMIC);
	if (ret < 0) {
		kfree(con);
		spin_unlock_bh(&s->idr_lock);
		return ERR_PTR(-ENOMEM);
	}
	con->conid = ret;
	s->idr_in_use++;
	spin_unlock_bh(&s->idr_lock);

	set_bit(CF_CONNECTED, &con->flags);
	con->server = s;

	return con;
}

static int tipc_receive_from_sock(struct tipc_conn *con)
{
	struct msghdr msg = {};
	struct tipc_server *s = con->server;
	struct sockaddr_tipc addr;
	struct kvec iov;
	void *buf;
	int ret;

	buf = kmem_cache_alloc(s->rcvbuf_cache, GFP_ATOMIC);
	if (!buf) {
		ret = -ENOMEM;
		goto out_close;
	}

	iov.iov_base = buf;
	iov.iov_len = s->max_rcvbuf_size;
	msg.msg_name = &addr;
	ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
			     MSG_DONTWAIT);
	if (ret <= 0) {
		kmem_cache_free(s->rcvbuf_cache, buf);
		goto out_close;
	}

	s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid, &addr,
			     con->usr_data, buf, ret);

	kmem_cache_free(s->rcvbuf_cache, buf);

	return 0;

out_close:
	if (ret != -EWOULDBLOCK)
		tipc_close_conn(con);
	else if (ret == 0)
		/* Don't return success if we really got EOF */
		ret = -EAGAIN;

	return ret;
}

static int tipc_accept_from_sock(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;
	struct socket *sock = con->sock;
	struct socket *newsock;
	struct tipc_conn *newcon;
	int ret;

	ret = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (ret < 0)
		return ret;

	newcon = tipc_alloc_conn(con->server);
	if (IS_ERR(newcon)) {
		ret = PTR_ERR(newcon);
		sock_release(newsock);
		return ret;
	}

	newcon->rx_action = tipc_receive_from_sock;
	tipc_register_callbacks(newsock, newcon);

	/* Notify that new connection is incoming */
	newcon->usr_data = s->tipc_conn_new(newcon->conid);
	if (!newcon->usr_data) {
		sock_release(newsock);
		conn_put(newcon);
		return -ENOMEM;
	}

	/* Wake up receive process in case of 'SYN+' message */
	newsock->sk->sk_data_ready(newsock->sk);
	return ret;
}

static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;
	struct socket *sock = NULL;
	int ret;

	ret = sock_create_kern(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock);
	if (ret < 0)
		return NULL;
	ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
				(char *)&s->imp, sizeof(s->imp));
	if (ret < 0)
		goto create_err;
	ret = kernel_bind(sock, (struct sockaddr *)s->saddr, sizeof(*s->saddr));
	if (ret < 0)
		goto create_err;

	switch (s->type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		con->rx_action = tipc_accept_from_sock;

		ret = kernel_listen(sock, 0);
		if (ret < 0)
			goto create_err;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		con->rx_action = tipc_receive_from_sock;
		break;
	default:
		pr_err("Unknown socket type %d\n", s->type);
		goto create_err;
	}
	/* The listening socket is owned and created by the TIPC module
	 * itself, so we have to drop the module reference count taken on
	 * its behalf; otherwise the count could never return to zero, and
	 * "rmmod" would be unable to remove the TIPC module once it had
	 * been inserted.
	 *
	 * sock_create_kern() takes the reference count twice: once for the
	 * owner of the TIPC socket's proto_ops struct and once for the
	 * owner of the TIPC proto struct. We therefore drop the module
	 * reference count twice here, and must bump it twice again before
	 * the socket is closed (see tipc_conn_kref_release()).
	 */
	module_put(sock->ops->owner);
	module_put(sock->sk->sk_prot_creator->owner);
	set_bit(CF_SERVER, &con->flags);
	return sock;

create_err:
	kernel_sock_shutdown(sock, SHUT_RDWR);
	sock_release(sock);
	return NULL;
}

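/* Note: the two module_put() calls above are balanced by the two
 * __module_get() calls in tipc_conn_kref_release(), which are made for
 * sockets marked CF_SERVER just before the listening socket is released.
 */
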
static int tipc_open_listening_sock(struct tipc_server *s)
{
	struct socket *sock;
	struct tipc_conn *con;

	con = tipc_alloc_conn(s);
	if (IS_ERR(con))
		return PTR_ERR(con);

	sock = tipc_create_listen_sock(con);
	if (!sock) {
		idr_remove(&s->conn_idr, con->conid);
		s->idr_in_use--;
		kfree(con);
		return -EINVAL;
	}

	tipc_register_callbacks(sock, con);
	return 0;
}

static struct outqueue_entry *tipc_alloc_entry(void *data, int len)
{
	struct outqueue_entry *entry;
	void *buf;

	entry = kmalloc(sizeof(struct outqueue_entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	buf = kmemdup(data, len, GFP_ATOMIC);
	if (!buf) {
		kfree(entry);
		return NULL;
	}

	entry->iov.iov_base = buf;
	entry->iov.iov_len = len;

	return entry;
}

static void tipc_free_entry(struct outqueue_entry *e)
{
	kfree(e->iov.iov_base);
	kfree(e);
}

static void tipc_clean_outqueues(struct tipc_conn *con)
{
	struct outqueue_entry *e, *safe;

	spin_lock_bh(&con->outqueue_lock);
	list_for_each_entry_safe(e, safe, &con->outqueue, list) {
		list_del(&e->list);
		tipc_free_entry(e);
	}
	spin_unlock_bh(&con->outqueue_lock);
}

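/* Usage sketch (illustrative, not part of this file): a service built on
 * struct tipc_server queues a reply to connection @conid with
 * tipc_conn_sendmsg(). For connection-oriented servers @addr may be NULL;
 * for SOCK_DGRAM/SOCK_RDM sockets it supplies the destination. The payload
 * is copied into the outqueue (see tipc_alloc_entry()), so a stack buffer
 * is fine:
 *
 *	struct my_event evt = { ... };	// hypothetical payload type
 *
 *	tipc_conn_sendmsg(srv, conid, NULL, &evt, sizeof(evt));
 */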
int tipc_conn_sendmsg(struct tipc_server *s, int conid,
		      struct sockaddr_tipc *addr, void *data, size_t len)
{
	struct outqueue_entry *e;
	struct tipc_conn *con;

	con = tipc_conn_lookup(s, conid);
	if (!con)
		return -EINVAL;

	if (!test_bit(CF_CONNECTED, &con->flags)) {
		conn_put(con);
		return 0;
	}

	e = tipc_alloc_entry(data, len);
	if (!e) {
		conn_put(con);
		return -ENOMEM;
	}

	if (addr)
		memcpy(&e->dest, addr, sizeof(struct sockaddr_tipc));

	spin_lock_bh(&con->outqueue_lock);
	list_add_tail(&e->list, &con->outqueue);
	spin_unlock_bh(&con->outqueue_lock);

	if (!queue_work(s->send_wq, &con->swork))
		conn_put(con);
	return 0;
}

void tipc_conn_terminate(struct tipc_server *s, int conid)
{
	struct tipc_conn *con;

	con = tipc_conn_lookup(s, conid);
	if (con) {
		tipc_close_conn(con);
		conn_put(con);
	}
}

static void tipc_send_to_sock(struct tipc_conn *con)
{
	int count = 0;
	struct tipc_server *s = con->server;
	struct outqueue_entry *e;
	struct msghdr msg;
	int ret;

	spin_lock_bh(&con->outqueue_lock);
	while (test_bit(CF_CONNECTED, &con->flags)) {
		e = list_entry(con->outqueue.next, struct outqueue_entry,
			       list);
		if ((struct list_head *) e == &con->outqueue)
			break;
		spin_unlock_bh(&con->outqueue_lock);

		memset(&msg, 0, sizeof(msg));
		msg.msg_flags = MSG_DONTWAIT;

		if (s->type == SOCK_DGRAM || s->type == SOCK_RDM) {
			msg.msg_name = &e->dest;
			msg.msg_namelen = sizeof(struct sockaddr_tipc);
		}
		ret = kernel_sendmsg(con->sock, &msg, &e->iov, 1,
				     e->iov.iov_len);
		if (ret == -EWOULDBLOCK || ret == 0) {
			cond_resched();
			goto out;
		} else if (ret < 0) {
			goto send_err;
		}

		/* Don't starve users filling buffers */
		if (++count >= MAX_SEND_MSG_COUNT) {
			cond_resched();
			count = 0;
		}

		spin_lock_bh(&con->outqueue_lock);
		list_del(&e->list);
		tipc_free_entry(e);
	}
	spin_unlock_bh(&con->outqueue_lock);
out:
	return;
send_err:
	tipc_close_conn(con);
}

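/* The two work handlers below run on the per-server ordered workqueues set
 * up in tipc_work_start(). Each one finishes by dropping the reference that
 * sock_data_ready()/sock_write_space() took when queueing the work.
 */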
static void tipc_recv_work(struct work_struct *work)
{
	struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
	int count = 0;

	while (test_bit(CF_CONNECTED, &con->flags)) {
		if (con->rx_action(con))
			break;

		/* Don't flood Rx machine */
		if (++count >= MAX_RECV_MSG_COUNT) {
			cond_resched();
			count = 0;
		}
	}
	conn_put(con);
}

static void tipc_send_work(struct work_struct *work)
{
	struct tipc_conn *con = container_of(work, struct tipc_conn, swork);

	if (test_bit(CF_CONNECTED, &con->flags))
		tipc_send_to_sock(con);

	conn_put(con);
}

static void tipc_work_stop(struct tipc_server *s)
{
	destroy_workqueue(s->rcv_wq);
	destroy_workqueue(s->send_wq);
}

static int tipc_work_start(struct tipc_server *s)
{
	s->rcv_wq = alloc_ordered_workqueue("tipc_rcv", 0);
	if (!s->rcv_wq) {
		pr_err("can't start tipc receive workqueue\n");
		return -ENOMEM;
	}

	s->send_wq = alloc_ordered_workqueue("tipc_send", 0);
	if (!s->send_wq) {
		pr_err("can't start tipc send workqueue\n");
		destroy_workqueue(s->rcv_wq);
		return -ENOMEM;
	}

	return 0;
}

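/* Usage sketch (illustrative): a caller fills in a struct tipc_server
 * (declared in server.h; the field names below match their use in this
 * file) with the listening address, socket type, receive-buffer size and
 * the three connection callbacks, then starts it. The my_* names are
 * hypothetical.
 *
 *	srv->net		= net;
 *	srv->saddr		= &listen_addr;	// struct sockaddr_tipc *
 *	srv->type		= SOCK_SEQPACKET;
 *	srv->imp		= TIPC_CRITICAL_IMPORTANCE;
 *	srv->max_rcvbuf_size	= MY_MAX_MSG_SIZE;
 *	srv->name		= "my_server";
 *	srv->tipc_conn_new	= my_conn_new;
 *	srv->tipc_conn_recvmsg	= my_conn_recvmsg;
 *	srv->tipc_conn_release	= my_conn_release;
 *
 *	err = tipc_server_start(srv);
 *	...
 *	tipc_server_stop(srv);
 */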
int tipc_server_start(struct tipc_server *s)
{
	int ret;

	spin_lock_init(&s->idr_lock);
	idr_init(&s->conn_idr);
	s->idr_in_use = 0;

	s->rcvbuf_cache = kmem_cache_create(s->name, s->max_rcvbuf_size,
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (!s->rcvbuf_cache)
		return -ENOMEM;

	ret = tipc_work_start(s);
	if (ret < 0) {
		kmem_cache_destroy(s->rcvbuf_cache);
		return ret;
	}
	ret = tipc_open_listening_sock(s);
	if (ret < 0) {
		tipc_work_stop(s);
		kmem_cache_destroy(s->rcvbuf_cache);
		return ret;
	}
	return ret;
}

void tipc_server_stop(struct tipc_server *s)
{
	struct tipc_conn *con;
	int id;

	spin_lock_bh(&s->idr_lock);
	for (id = 0; s->idr_in_use; id++) {
		con = idr_find(&s->conn_idr, id);
		if (con) {
			spin_unlock_bh(&s->idr_lock);
			tipc_close_conn(con);
			spin_lock_bh(&s->idr_lock);
		}
	}
	spin_unlock_bh(&s->idr_lock);

	tipc_work_stop(s);
	kmem_cache_destroy(s->rcvbuf_cache);
	idr_destroy(&s->conn_idr);
}