/* conn_service.c */
  1. /* Service connection management
  2. *
  3. * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public Licence
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the Licence, or (at your option) any later version.
  10. */
  11. #include <linux/slab.h>
  12. #include "ar-internal.h"
/*
 * Find a service connection under RCU conditions.
 *
 * We could use a hash table, but that is subject to bucket stuffing by an
 * attacker as the client gets to pick the epoch and cid values and would know
 * the hash function.  So, instead, we use a hash table for the peer and from
 * that an rbtree to find the service connection.  Under ordinary circumstances
 * it might be slower than a large hash table, but it is at least limited in
 * depth.
 *
 * Returns the matching connection or NULL if none is published for the
 * (epoch, cid) pair carried in the packet header.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
						     struct sk_buff *skb)
{
	struct rxrpc_connection *conn = NULL;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	unsigned int seq = 0;

	/* Build the lookup key from the packet header; the call-number bits
	 * are masked off the cid so that all calls on a connection share one
	 * tree node.
	 */
	k.epoch = sp->hdr.epoch;
	k.cid = sp->hdr.cid & RXRPC_CIDMASK;

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		read_seqbegin_or_lock(&peer->service_conn_lock, &seq);

		p = rcu_dereference_raw(peer->service_conns.rb_node);
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			if (conn->proto.index_key < k.index_key)
				p = rcu_dereference_raw(p->rb_left);
			else if (conn->proto.index_key > k.index_key)
				p = rcu_dereference_raw(p->rb_right);
			else
				break;
			/* Not a match: clear conn so that falling off the
			 * bottom of the tree returns NULL rather than the
			 * last node visited.
			 */
			conn = NULL;
		}
	} while (need_seqretry(&peer->service_conn_lock, seq));

	done_seqretry(&peer->service_conn_lock, seq);
	_leave(" = %d", conn ? conn->debug_id : -1);
	return conn;
}
/*
 * Insert a service connection into a peer's tree, thereby making it a target
 * for incoming packets.
 */
static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
				       struct rxrpc_connection *conn)
{
	struct rxrpc_connection *cursor = NULL;
	struct rxrpc_conn_proto k = conn->proto;
	struct rb_node **pp, *parent;

	/* The seqlock write side forces retries in the RCU lookup
	 * (rxrpc_find_service_conn_rcu); _bh because lookups run in BH
	 * context.
	 */
	write_seqlock_bh(&peer->service_conn_lock);

	/* Standard rbtree descent to find the insertion point keyed on
	 * (epoch, cid).
	 */
	pp = &peer->service_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent,
				  struct rxrpc_connection, service_node);

		if (cursor->proto.index_key < k.index_key)
			pp = &(*pp)->rb_left;
		else if (cursor->proto.index_key > k.index_key)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_conn;
	}

	/* RCU-aware link so concurrent lockless readers see a consistent
	 * tree whilst it is being modified.
	 */
	rb_link_node_rcu(&conn->service_node, parent, pp);
	rb_insert_color(&conn->service_node, &peer->service_conns);

conn_published:
	set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
	write_sequnlock_bh(&peer->service_conn_lock);
	_leave(" = %d [new]", conn->debug_id);
	return;

found_extant_conn:
	/* A node with the same key already exists; it may only be replaced
	 * if it is dead (usage count has hit zero).
	 */
	if (atomic_read(&cursor->usage) == 0)
		goto replace_old_connection;
	write_sequnlock_bh(&peer->service_conn_lock);
	/* We should not be able to get here.  rxrpc_incoming_connection() is
	 * called in a non-reentrant context, so there can't be a race to
	 * insert a new connection.
	 */
	BUG();

replace_old_connection:
	/* The old connection is from an outdated epoch. */
	_debug("replace conn");
	rb_replace_node_rcu(&cursor->service_node,
			    &conn->service_node,
			    &peer->service_conns);
	clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
	goto conn_published;
}
  104. /*
  105. * Preallocate a service connection. The connection is placed on the proc and
  106. * reap lists so that we don't have to get the lock from BH context.
  107. */
  108. struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet,
  109. gfp_t gfp)
  110. {
  111. struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);
  112. if (conn) {
  113. /* We maintain an extra ref on the connection whilst it is on
  114. * the rxrpc_connections list.
  115. */
  116. conn->state = RXRPC_CONN_SERVICE_PREALLOC;
  117. atomic_set(&conn->usage, 2);
  118. atomic_inc(&rxnet->nr_conns);
  119. write_lock(&rxnet->conn_lock);
  120. list_add_tail(&conn->link, &rxnet->service_conns);
  121. list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
  122. write_unlock(&rxnet->conn_lock);
  123. trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
  124. atomic_read(&conn->usage),
  125. __builtin_return_address(0));
  126. }
  127. return conn;
  128. }
  129. /*
  130. * Set up an incoming connection. This is called in BH context with the RCU
  131. * read lock held.
  132. */
  133. void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
  134. struct rxrpc_connection *conn,
  135. struct sk_buff *skb)
  136. {
  137. struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
  138. _enter("");
  139. conn->proto.epoch = sp->hdr.epoch;
  140. conn->proto.cid = sp->hdr.cid & RXRPC_CIDMASK;
  141. conn->params.service_id = sp->hdr.serviceId;
  142. conn->service_id = sp->hdr.serviceId;
  143. conn->security_ix = sp->hdr.securityIndex;
  144. conn->out_clientflag = 0;
  145. if (conn->security_ix)
  146. conn->state = RXRPC_CONN_SERVICE_UNSECURED;
  147. else
  148. conn->state = RXRPC_CONN_SERVICE;
  149. /* See if we should upgrade the service. This can only happen on the
  150. * first packet on a new connection. Once done, it applies to all
  151. * subsequent calls on that connection.
  152. */
  153. if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE &&
  154. conn->service_id == rx->service_upgrade.from)
  155. conn->service_id = rx->service_upgrade.to;
  156. /* Make the connection a target for incoming packets. */
  157. rxrpc_publish_service_conn(conn->params.peer, conn);
  158. _net("CONNECTION new %d {%x}", conn->debug_id, conn->proto.cid);
  159. }
  160. /*
  161. * Remove the service connection from the peer's tree, thereby removing it as a
  162. * target for incoming packets.
  163. */
  164. void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
  165. {
  166. struct rxrpc_peer *peer = conn->params.peer;
  167. write_seqlock_bh(&peer->service_conn_lock);
  168. if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
  169. rb_erase(&conn->service_node, &peer->service_conns);
  170. write_sequnlock_bh(&peer->service_conn_lock);
  171. }