/* ar-connection.c */
  1. /* RxRPC virtual connection handler
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #include <linux/module.h>
  12. #include <linux/slab.h>
  13. #include <linux/net.h>
  14. #include <linux/skbuff.h>
  15. #include <linux/crypto.h>
  16. #include <net/sock.h>
  17. #include <net/af_rxrpc.h>
  18. #include "ar-internal.h"
/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

/* Global list of all extant connections and the rwlock guarding it. */
LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);

/* Deferred work item that reaps connections whose refcount dropped to zero. */
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
  27. /*
  28. * allocate a new client connection bundle
  29. */
  30. static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp)
  31. {
  32. struct rxrpc_conn_bundle *bundle;
  33. _enter("");
  34. bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp);
  35. if (bundle) {
  36. INIT_LIST_HEAD(&bundle->unused_conns);
  37. INIT_LIST_HEAD(&bundle->avail_conns);
  38. INIT_LIST_HEAD(&bundle->busy_conns);
  39. init_waitqueue_head(&bundle->chanwait);
  40. atomic_set(&bundle->usage, 1);
  41. }
  42. _leave(" = %p", bundle);
  43. return bundle;
  44. }
/*
 * compare bundle parameters with what we're looking for
 * - return -ve, 0 or +ve
 * - the service IDs are compared first (as raw network-order values), with
 *   ties broken by comparing the key pointers as plain addresses; only the
 *   zero/non-zero distinction need be meaningful for tree searching
 */
static inline
int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
		     struct key *key, __be16 service_id)
{
	return (bundle->service_id - service_id) ?:
		((unsigned long) bundle->key - (unsigned long) key);
}
  56. /*
  57. * get bundle of client connections that a client socket can make use of
  58. */
  59. struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
  60. struct rxrpc_transport *trans,
  61. struct key *key,
  62. __be16 service_id,
  63. gfp_t gfp)
  64. {
  65. struct rxrpc_conn_bundle *bundle, *candidate;
  66. struct rb_node *p, *parent, **pp;
  67. _enter("%p{%x},%x,%hx,",
  68. rx, key_serial(key), trans->debug_id, ntohs(service_id));
  69. if (rx->trans == trans && rx->bundle) {
  70. atomic_inc(&rx->bundle->usage);
  71. return rx->bundle;
  72. }
  73. /* search the extant bundles first for one that matches the specified
  74. * user ID */
  75. spin_lock(&trans->client_lock);
  76. p = trans->bundles.rb_node;
  77. while (p) {
  78. bundle = rb_entry(p, struct rxrpc_conn_bundle, node);
  79. if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
  80. p = p->rb_left;
  81. else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
  82. p = p->rb_right;
  83. else
  84. goto found_extant_bundle;
  85. }
  86. spin_unlock(&trans->client_lock);
  87. /* not yet present - create a candidate for a new record and then
  88. * redo the search */
  89. candidate = rxrpc_alloc_bundle(gfp);
  90. if (!candidate) {
  91. _leave(" = -ENOMEM");
  92. return ERR_PTR(-ENOMEM);
  93. }
  94. candidate->key = key_get(key);
  95. candidate->service_id = service_id;
  96. spin_lock(&trans->client_lock);
  97. pp = &trans->bundles.rb_node;
  98. parent = NULL;
  99. while (*pp) {
  100. parent = *pp;
  101. bundle = rb_entry(parent, struct rxrpc_conn_bundle, node);
  102. if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
  103. pp = &(*pp)->rb_left;
  104. else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
  105. pp = &(*pp)->rb_right;
  106. else
  107. goto found_extant_second;
  108. }
  109. /* second search also failed; add the new bundle */
  110. bundle = candidate;
  111. candidate = NULL;
  112. rb_link_node(&bundle->node, parent, pp);
  113. rb_insert_color(&bundle->node, &trans->bundles);
  114. spin_unlock(&trans->client_lock);
  115. _net("BUNDLE new on trans %d", trans->debug_id);
  116. if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
  117. atomic_inc(&bundle->usage);
  118. rx->bundle = bundle;
  119. }
  120. _leave(" = %p [new]", bundle);
  121. return bundle;
  122. /* we found the bundle in the list immediately */
  123. found_extant_bundle:
  124. atomic_inc(&bundle->usage);
  125. spin_unlock(&trans->client_lock);
  126. _net("BUNDLE old on trans %d", trans->debug_id);
  127. if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
  128. atomic_inc(&bundle->usage);
  129. rx->bundle = bundle;
  130. }
  131. _leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage));
  132. return bundle;
  133. /* we found the bundle on the second time through the list */
  134. found_extant_second:
  135. atomic_inc(&bundle->usage);
  136. spin_unlock(&trans->client_lock);
  137. kfree(candidate);
  138. _net("BUNDLE old2 on trans %d", trans->debug_id);
  139. if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
  140. atomic_inc(&bundle->usage);
  141. rx->bundle = bundle;
  142. }
  143. _leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage));
  144. return bundle;
  145. }
/*
 * release a bundle
 * - drops the caller's usage reference; on the last put the bundle is also
 *   removed from the transport's tree (under the client lock), its key
 *   reference dropped and the memory freed
 */
void rxrpc_put_bundle(struct rxrpc_transport *trans,
		      struct rxrpc_conn_bundle *bundle)
{
	_enter("%p,%p{%d}",trans, bundle, atomic_read(&bundle->usage));

	if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) {
		_debug("Destroy bundle");
		rb_erase(&bundle->node, &trans->bundles);
		spin_unlock(&trans->client_lock);
		/* a dying bundle must have no connections still attached */
		ASSERT(list_empty(&bundle->unused_conns));
		ASSERT(list_empty(&bundle->avail_conns));
		ASSERT(list_empty(&bundle->busy_conns));
		ASSERTCMP(bundle->num_conns, ==, 0);
		key_put(bundle->key);
		kfree(bundle);
	}

	_leave("");
}
  166. /*
  167. * allocate a new connection
  168. */
  169. static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
  170. {
  171. struct rxrpc_connection *conn;
  172. _enter("");
  173. conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
  174. if (conn) {
  175. INIT_WORK(&conn->processor, &rxrpc_process_connection);
  176. INIT_LIST_HEAD(&conn->bundle_link);
  177. conn->calls = RB_ROOT;
  178. skb_queue_head_init(&conn->rx_queue);
  179. rwlock_init(&conn->lock);
  180. spin_lock_init(&conn->state_lock);
  181. atomic_set(&conn->usage, 1);
  182. conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
  183. conn->avail_calls = RXRPC_MAXCALLS;
  184. conn->size_align = 4;
  185. conn->header_size = sizeof(struct rxrpc_header);
  186. }
  187. _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
  188. return conn;
  189. }
/*
 * assign a connection ID to a connection and add it to the transport's
 * connection lookup tree
 * - called with transport client lock held
 * - IDs advance in steps of RXRPC_CID_INC and wrap back to RXRPC_CID_INC
 *   (zero is never handed out); the tree is ordered by (epoch, conn ID)
 */
static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *xconn;
	struct rb_node *parent, **p;
	__be32 epoch;
	u32 real_conn_id;

	_enter("");

	epoch = conn->epoch;

	write_lock_bh(&conn->trans->conn_lock);

	conn->trans->conn_idcounter += RXRPC_CID_INC;
	if (conn->trans->conn_idcounter < RXRPC_CID_INC)
		conn->trans->conn_idcounter = RXRPC_CID_INC;
	real_conn_id = conn->trans->conn_idcounter;

attempt_insertion:
	parent = NULL;
	p = &conn->trans->client_conns.rb_node;

	/* descend the tree looking for the insertion point for the proposed
	 * ID; colliding with an extant node means the ID is taken */
	while (*p) {
		parent = *p;
		xconn = rb_entry(parent, struct rxrpc_connection, node);

		if (epoch < xconn->epoch)
			p = &(*p)->rb_left;
		else if (epoch > xconn->epoch)
			p = &(*p)->rb_right;
		else if (real_conn_id < xconn->real_conn_id)
			p = &(*p)->rb_left;
		else if (real_conn_id > xconn->real_conn_id)
			p = &(*p)->rb_right;
		else
			goto id_exists;
	}

	/* we've found a suitable hole - arrange for this connection to occupy
	 * it */
	rb_link_node(&conn->node, parent, p);
	rb_insert_color(&conn->node, &conn->trans->client_conns);

	conn->real_conn_id = real_conn_id;
	conn->cid = htonl(real_conn_id);
	write_unlock_bh(&conn->trans->conn_lock);
	_leave(" [CONNID %x CID %x]", real_conn_id, ntohl(conn->cid));
	return;

	/* we found a connection with the proposed ID - walk the tree from that
	 * point looking for the next unused ID */
id_exists:
	for (;;) {
		real_conn_id += RXRPC_CID_INC;
		if (real_conn_id < RXRPC_CID_INC) {
			/* the ID space wrapped; restart the search from the
			 * bottom of the range and reset the counter */
			real_conn_id = RXRPC_CID_INC;
			conn->trans->conn_idcounter = real_conn_id;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		/* NOTE(review): this test fires on either a smaller epoch or
		 * a smaller conn ID - presumably all entries in the client
		 * tree share one epoch, else the gap check looks suspect;
		 * confirm against the transport's usage */
		xconn = rb_entry(parent, struct rxrpc_connection, node);
		if (epoch < xconn->epoch ||
		    real_conn_id < xconn->real_conn_id)
			goto attempt_insertion;
	}
}
  253. /*
  254. * add a call to a connection's call-by-ID tree
  255. */
  256. static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
  257. struct rxrpc_call *call)
  258. {
  259. struct rxrpc_call *xcall;
  260. struct rb_node *parent, **p;
  261. __be32 call_id;
  262. write_lock_bh(&conn->lock);
  263. call_id = call->call_id;
  264. p = &conn->calls.rb_node;
  265. parent = NULL;
  266. while (*p) {
  267. parent = *p;
  268. xcall = rb_entry(parent, struct rxrpc_call, conn_node);
  269. if (call_id < xcall->call_id)
  270. p = &(*p)->rb_left;
  271. else if (call_id > xcall->call_id)
  272. p = &(*p)->rb_right;
  273. else
  274. BUG();
  275. }
  276. rb_link_node(&call->conn_node, parent, p);
  277. rb_insert_color(&call->conn_node, &conn->calls);
  278. write_unlock_bh(&conn->lock);
  279. }
/*
 * connect a call on an exclusive connection
 * - an exclusive socket uses one private connection (rx->conn) rather than a
 *   shared bundle; it is created on first use here
 * - returns 0 on success, -ENOMEM on allocation failure, a security setup
 *   error, or -ENOSR if no call channel is free
 */
static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
				   struct rxrpc_transport *trans,
				   __be16 service_id,
				   struct rxrpc_call *call,
				   gfp_t gfp)
{
	struct rxrpc_connection *conn;
	int chan, ret;

	_enter("");

	conn = rx->conn;
	if (!conn) {
		/* not yet present - create a candidate for a new connection
		 * and then redo the check */
		conn = rxrpc_alloc_connection(gfp);
		if (!conn) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		conn->trans = trans;
		conn->bundle = NULL;
		conn->service_id = service_id;
		conn->epoch = rxrpc_epoch;
		conn->in_clientflag = 0;
		conn->out_clientflag = RXRPC_CLIENT_INITIATED;
		conn->cid = 0;
		conn->state = RXRPC_CONN_CLIENT;
		/* one channel is accounted to the call being attached below;
		 * NOTE(review): the reuse path does not decrement avail_calls
		 * again - confirm whether that is intended */
		conn->avail_calls = RXRPC_MAXCALLS - 1;
		conn->security_level = rx->min_sec_level;
		conn->key = key_get(rx->key);

		ret = rxrpc_init_client_conn_security(conn);
		if (ret < 0) {
			key_put(conn->key);
			kfree(conn);
			_leave(" = %d [key]", ret);
			return ret;
		}

		write_lock_bh(&rxrpc_connection_lock);
		list_add_tail(&conn->link, &rxrpc_connections);
		write_unlock_bh(&rxrpc_connection_lock);

		spin_lock(&trans->client_lock);
		atomic_inc(&trans->usage);

		_net("CONNECT EXCL new %d on TRANS %d",
		     conn->debug_id, conn->trans->debug_id);

		rxrpc_assign_connection_id(conn);
		rx->conn = conn;
	} else {
		spin_lock(&trans->client_lock);
	}

	/* we've got a connection with a free channel and we can now attach the
	 * call to it
	 * - we're holding the transport's client lock
	 * - we're holding a reference on the connection
	 */
	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan])
			goto found_channel;
	goto no_free_channels;

found_channel:
	atomic_inc(&conn->usage);
	conn->channels[chan] = call;
	call->conn = conn;
	call->channel = chan;
	call->cid = conn->cid | htonl(chan);
	call->call_id = htonl(++conn->call_counter);

	_net("CONNECT client on conn %d chan %d as call %x",
	     conn->debug_id, chan, ntohl(call->call_id));

	spin_unlock(&trans->client_lock);
	rxrpc_add_call_ID_to_conn(conn, call);
	_leave(" = 0");
	return 0;

no_free_channels:
	spin_unlock(&trans->client_lock);
	_leave(" = -ENOSR");
	return -ENOSR;
}
/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 * - picks a connection from the bundle with a free call channel, creating
 *   new connections (up to 20 per bundle) or sleeping interruptibly on the
 *   bundle's channel waitqueue when the bundle is saturated
 * - returns 0 with the call attached, or -EAGAIN / -ENOMEM / -ERESTARTSYS /
 *   a security setup error
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
		       struct rxrpc_transport *trans,
		       struct rxrpc_conn_bundle *bundle,
		       struct rxrpc_call *call,
		       gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate;
	int chan, ret;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%p,%lx,", rx, call->user_call_ID);

	if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags))
		return rxrpc_connect_exclusive(rx, trans, bundle->service_id,
					       call, gfp);

	spin_lock(&trans->client_lock);

	for (;;) {
		/* see if the bundle has a call slot available */
		if (!list_empty(&bundle->avail_conns)) {
			_debug("avail");
			conn = list_entry(bundle->avail_conns.next,
					  struct rxrpc_connection,
					  bundle_link);
			/* skip and unhook connections that died */
			if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
				list_del_init(&conn->bundle_link);
				bundle->num_conns--;
				continue;
			}
			/* taking the last free channel makes the conn busy */
			if (--conn->avail_calls == 0)
				list_move(&conn->bundle_link,
					  &bundle->busy_conns);
			ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			atomic_inc(&conn->usage);
			break;
		}

		if (!list_empty(&bundle->unused_conns)) {
			_debug("unused");
			conn = list_entry(bundle->unused_conns.next,
					  struct rxrpc_connection,
					  bundle_link);
			if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
				list_del_init(&conn->bundle_link);
				bundle->num_conns--;
				continue;
			}
			/* a fresh connection has every channel free; claim
			 * one and promote the conn to the available list */
			ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS);
			conn->avail_calls = RXRPC_MAXCALLS - 1;
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			atomic_inc(&conn->usage);
			list_move(&conn->bundle_link, &bundle->avail_conns);
			break;
		}

		/* need to allocate a new connection */
		_debug("get new conn [%d]", bundle->num_conns);

		spin_unlock(&trans->client_lock);

		if (signal_pending(current))
			goto interrupted;

		if (bundle->num_conns >= 20) {
			_debug("too many conns");

			/* can't sleep with a non-blocking allocation mode */
			if (!(gfp & __GFP_WAIT)) {
				_leave(" = -EAGAIN");
				return -EAGAIN;
			}

			/* sleep until a connection or channel comes free,
			 * then retake the lock and retry the whole scan */
			add_wait_queue(&bundle->chanwait, &myself);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (bundle->num_conns < 20 ||
				    !list_empty(&bundle->unused_conns) ||
				    !list_empty(&bundle->avail_conns))
					break;
				if (signal_pending(current))
					goto interrupted_dequeue;
				schedule();
			}
			remove_wait_queue(&bundle->chanwait, &myself);
			__set_current_state(TASK_RUNNING);
			spin_lock(&trans->client_lock);
			continue;
		}

		/* not yet present - create a candidate for a new connection and then
		 * redo the check */
		candidate = rxrpc_alloc_connection(gfp);
		if (!candidate) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		candidate->trans = trans;
		candidate->bundle = bundle;
		candidate->service_id = bundle->service_id;
		candidate->epoch = rxrpc_epoch;
		candidate->in_clientflag = 0;
		candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
		candidate->cid = 0;
		candidate->state = RXRPC_CONN_CLIENT;
		candidate->avail_calls = RXRPC_MAXCALLS;
		candidate->security_level = rx->min_sec_level;
		candidate->key = key_get(bundle->key);

		ret = rxrpc_init_client_conn_security(candidate);
		if (ret < 0) {
			key_put(candidate->key);
			kfree(candidate);
			_leave(" = %d [key]", ret);
			return ret;
		}

		write_lock_bh(&rxrpc_connection_lock);
		list_add_tail(&candidate->link, &rxrpc_connections);
		write_unlock_bh(&rxrpc_connection_lock);

		spin_lock(&trans->client_lock);

		list_add(&candidate->bundle_link, &bundle->unused_conns);
		bundle->num_conns++;
		atomic_inc(&bundle->usage);
		atomic_inc(&trans->usage);

		_net("CONNECT new %d on TRANS %d",
		     candidate->debug_id, candidate->trans->debug_id);

		rxrpc_assign_connection_id(candidate);
		if (candidate->security)
			candidate->security->prime_packet_security(candidate);

		/* leave the candidate lurking in zombie mode attached to the
		 * bundle until we're ready for it */
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	/* we've got a connection with a free channel and we can now attach the
	 * call to it
	 * - we're holding the transport's client lock
	 * - we're holding a reference on the connection
	 * - we're holding a reference on the bundle
	 */
	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan])
			goto found_channel;
	ASSERT(conn->channels[0] == NULL ||
	       conn->channels[1] == NULL ||
	       conn->channels[2] == NULL ||
	       conn->channels[3] == NULL);
	BUG();

found_channel:
	conn->channels[chan] = call;
	call->conn = conn;
	call->channel = chan;
	call->cid = conn->cid | htonl(chan);
	call->call_id = htonl(++conn->call_counter);

	_net("CONNECT client on conn %d chan %d as call %x",
	     conn->debug_id, chan, ntohl(call->call_id));

	ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);

	spin_unlock(&trans->client_lock);
	rxrpc_add_call_ID_to_conn(conn, call);

	_leave(" = 0");
	return 0;

interrupted_dequeue:
	remove_wait_queue(&bundle->chanwait, &myself);
	__set_current_state(TASK_RUNNING);
interrupted:
	_leave(" = -ERESTARTSYS");
	return -ERESTARTSYS;
}
/*
 * get a record of an incoming connection
 * - looks the connection up by (epoch, connection ID) in the transport's
 *   server-connection tree, allocating and inserting a new record if none is
 *   found; a usage reference is passed back to the caller
 * - returns ERR_PTR(-ENOMEM) on allocation failure or
 *   ERR_PTR(-EKEYREJECTED) if an extant connection's security index doesn't
 *   match the packet header's
 */
struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *trans,
			  struct rxrpc_header *hdr,
			  gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rb_node *p, **pp;
	const char *new = "old";
	__be32 epoch;
	u32 conn_id;

	_enter("");

	ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED);

	epoch = hdr->epoch;
	conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;

	/* search the connection list first */
	read_lock_bh(&trans->conn_lock);

	p = trans->server_conns.rb_node;
	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, node);

		_debug("maybe %x", conn->real_conn_id);

		if (epoch < conn->epoch)
			p = p->rb_left;
		else if (epoch > conn->epoch)
			p = p->rb_right;
		else if (conn_id < conn->real_conn_id)
			p = p->rb_left;
		else if (conn_id > conn->real_conn_id)
			p = p->rb_right;
		else
			goto found_extant_connection;
	}
	read_unlock_bh(&trans->conn_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search (the lock was dropped, so the record may have been
	 * created concurrently) */
	candidate = rxrpc_alloc_connection(gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	candidate->trans = trans;
	candidate->epoch = hdr->epoch;
	candidate->cid = hdr->cid & cpu_to_be32(RXRPC_CIDMASK);
	candidate->service_id = hdr->serviceId;
	candidate->security_ix = hdr->securityIndex;
	candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
	candidate->out_clientflag = 0;
	candidate->real_conn_id = conn_id;
	candidate->state = RXRPC_CONN_SERVER;
	/* a non-zero service ID means security challenge/response has yet to
	 * happen */
	if (candidate->service_id)
		candidate->state = RXRPC_CONN_SERVER_UNSECURED;

	write_lock_bh(&trans->conn_lock);

	pp = &trans->server_conns.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		conn = rb_entry(p, struct rxrpc_connection, node);

		if (epoch < conn->epoch)
			pp = &(*pp)->rb_left;
		else if (epoch > conn->epoch)
			pp = &(*pp)->rb_right;
		else if (conn_id < conn->real_conn_id)
			pp = &(*pp)->rb_left;
		else if (conn_id > conn->real_conn_id)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	conn = candidate;
	candidate = NULL;
	rb_link_node(&conn->node, p, pp);
	rb_insert_color(&conn->node, &trans->server_conns);
	atomic_inc(&conn->trans->usage);

	write_unlock_bh(&trans->conn_lock);

	write_lock_bh(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock_bh(&rxrpc_connection_lock);

	new = "new";

success:
	_net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->real_conn_id);

	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return conn;

	/* we found the connection in the list immediately */
found_extant_connection:
	if (hdr->securityIndex != conn->security_ix) {
		read_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	read_unlock_bh(&trans->conn_lock);
	goto success;

	/* we found the connection on the second time through the list */
found_extant_second:
	if (hdr->securityIndex != conn->security_ix) {
		write_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	write_unlock_bh(&trans->conn_lock);
	kfree(candidate);
	goto success;

security_mismatch:
	/* candidate is NULL here on the first-search path; kfree(NULL) is a
	 * no-op */
	kfree(candidate);
	_leave(" = -EKEYREJECTED");
	return ERR_PTR(-EKEYREJECTED);
}
  632. /*
  633. * find a connection based on transport and RxRPC connection ID for an incoming
  634. * packet
  635. */
  636. struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
  637. struct rxrpc_header *hdr)
  638. {
  639. struct rxrpc_connection *conn;
  640. struct rb_node *p;
  641. __be32 epoch;
  642. u32 conn_id;
  643. _enter(",{%x,%x}", ntohl(hdr->cid), hdr->flags);
  644. read_lock_bh(&trans->conn_lock);
  645. conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
  646. epoch = hdr->epoch;
  647. if (hdr->flags & RXRPC_CLIENT_INITIATED)
  648. p = trans->server_conns.rb_node;
  649. else
  650. p = trans->client_conns.rb_node;
  651. while (p) {
  652. conn = rb_entry(p, struct rxrpc_connection, node);
  653. _debug("maybe %x", conn->real_conn_id);
  654. if (epoch < conn->epoch)
  655. p = p->rb_left;
  656. else if (epoch > conn->epoch)
  657. p = p->rb_right;
  658. else if (conn_id < conn->real_conn_id)
  659. p = p->rb_left;
  660. else if (conn_id > conn->real_conn_id)
  661. p = p->rb_right;
  662. else
  663. goto found;
  664. }
  665. read_unlock_bh(&trans->conn_lock);
  666. _leave(" = NULL");
  667. return NULL;
  668. found:
  669. atomic_inc(&conn->usage);
  670. read_unlock_bh(&trans->conn_lock);
  671. _leave(" = %p", conn);
  672. return conn;
  673. }
/*
 * release a virtual connection
 * - stamps the time of the put so the reaper can tell when the connection
 *   has been idle long enough, then kicks the reaper at once if this was
 *   the last reference
 * - NOTE(review): put_time is written before the decrement and on every put,
 *   not just the last; the reaper reads it without this path's ordering -
 *   confirm the benign-race assumption
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	_enter("%p{u=%d,d=%d}",
	       conn, atomic_read(&conn->usage), conn->debug_id);

	ASSERTCMP(atomic_read(&conn->usage), >, 0);

	conn->put_time = get_seconds();
	if (atomic_dec_and_test(&conn->usage)) {
		_debug("zombie");
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	}

	_leave("");
}
/*
 * destroy a virtual connection
 * - only valid once the usage count has reached zero (asserted); drops the
 *   references the connection held on its bundle and transport, purges any
 *   queued packets and frees the record
 */
static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
{
	_enter("%p{%d}", conn, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	if (conn->bundle)
		rxrpc_put_bundle(conn->trans, conn->bundle);

	/* every call must have been detached before the conn can die */
	ASSERT(RB_EMPTY_ROOT(&conn->calls));
	rxrpc_purge_queue(&conn->rx_queue);

	rxrpc_clear_conn_security(conn);
	rxrpc_put_transport(conn->trans);
	kfree(conn);
	_leave("");
}
/*
 * reap dead connections
 * - walks the global connection list and moves to a graveyard every
 *   connection whose refcount has been zero for at least
 *   rxrpc_connection_expiry seconds, then destroys the graveyard contents
 *   outside the global lock
 * - reschedules itself for the earliest not-yet-due expiry it saw
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = get_seconds();
	earliest = ULONG_MAX;

	write_lock_bh(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long) now - (long) conn->put_time);

		/* still in use - nothing to do */
		if (likely(atomic_read(&conn->usage) > 0))
			continue;

		spin_lock(&conn->trans->client_lock);
		write_lock(&conn->trans->conn_lock);
		reap_time = conn->put_time + rxrpc_connection_expiry;

		/* re-check the refcount under the transport locks in case the
		 * connection got resurrected in the meantime */
		if (atomic_read(&conn->usage) > 0) {
			;
		} else if (reap_time <= now) {
			/* expired: unhook from the transport's lookup tree
			 * and any bundle, and queue for destruction */
			list_move_tail(&conn->link, &graveyard);
			if (conn->out_clientflag)
				rb_erase(&conn->node,
					 &conn->trans->client_conns);
			else
				rb_erase(&conn->node,
					 &conn->trans->server_conns);
			if (conn->bundle) {
				list_del_init(&conn->bundle_link);
				conn->bundle->num_conns--;
			}

		} else if (reap_time < earliest) {
			earliest = reap_time;
		}

		write_unlock(&conn->trans->conn_lock);
		spin_unlock(&conn->trans->client_lock);
	}
	write_unlock_bh(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_destroy_connection(conn);
	}

	_leave("");
}
/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 * - module-unload path: zeroing the expiry makes every unreferenced
 *   connection immediately reapable, then the reaper is requeued to run
 *   right away
 */
void __exit rxrpc_destroy_all_connections(void)
{
	_enter("");

	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);

	_leave("");
}