/* AF_RXRPC internal definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/atomic.h>
#include <linux/seqlock.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <rxrpc/packet.h>

#if 0
#define CHECK_SLAB_OKAY(X) \
	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
	       (POISON_FREE << 8 | POISON_FREE))
#else
#define CHECK_SLAB_OKAY(X) do {} while (0)
#endif

#define FCRYPT_BSIZE 8
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));

#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

struct rxrpc_connection;

/*
 * Mark applied to socket buffers.
 */
enum rxrpc_skb_mark {
	RXRPC_SKB_MARK_DATA,		/* data message */
	RXRPC_SKB_MARK_FINAL_ACK,	/* final ACK received message */
	RXRPC_SKB_MARK_BUSY,		/* server busy message */
	RXRPC_SKB_MARK_REMOTE_ABORT,	/* remote abort message */
	RXRPC_SKB_MARK_LOCAL_ABORT,	/* local abort message */
	RXRPC_SKB_MARK_NET_ERROR,	/* network error message */
	RXRPC_SKB_MARK_LOCAL_ERROR,	/* local error message */
	RXRPC_SKB_MARK_NEW_CALL,	/* new incoming call notification */
};

/*
 * sk_state for RxRPC sockets
 */
enum {
	RXRPC_UNBOUND = 0,
	RXRPC_CLIENT_UNBOUND,		/* Unbound socket used as client */
	RXRPC_CLIENT_BOUND,		/* client local address bound */
	RXRPC_SERVER_BOUND,		/* server local address bound */
	RXRPC_SERVER_LISTENING,		/* server listening for connections */
	RXRPC_CLOSE,			/* socket is being closed */
};

/*
 * Service backlog preallocation.
 *
 * This contains circular buffers of preallocated peers, connections and calls
 * for incoming service calls and their head and tail pointers. This allows
 * calls to be set up in the data_ready handler, thereby avoiding the need to
 * shuffle packets around so much.
 */
struct rxrpc_backlog {
	unsigned short		peer_backlog_head;
	unsigned short		peer_backlog_tail;
	unsigned short		conn_backlog_head;
	unsigned short		conn_backlog_tail;
	unsigned short		call_backlog_head;
	unsigned short		call_backlog_tail;
#define RXRPC_BACKLOG_MAX	32
	struct rxrpc_peer	*peer_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_connection	*conn_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_call	*call_backlog[RXRPC_BACKLOG_MAX];
};
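
/*
 * Editor's sketch (not in the original header), assuming the conventional
 * "head == tail means empty" ring discipline implied by the fields above:
 * each backlog array is indexed modulo RXRPC_BACKLOG_MAX, with the producer
 * advancing *_head and the consumer advancing *_tail, e.g. for calls:
 *
 *	unsigned short tail = b->call_backlog_tail;
 *	if (b->call_backlog_head != tail) {
 *		call = b->call_backlog[tail];
 *		b->call_backlog_tail = (tail + 1) & (RXRPC_BACKLOG_MAX - 1);
 *	}
 *
 * The real consumer in call_accept.c also needs the appropriate memory
 * barriers; this only illustrates the ring arithmetic.
 */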
/*
 * RxRPC socket definition
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock		sk;
	rxrpc_notify_new_call_t	notify_new_call; /* Func to notify of new call */
	rxrpc_discard_new_call_t discard_new_call; /* Func to discard a new call */
	struct rxrpc_local	*local;		/* local endpoint */
	struct rxrpc_backlog	*backlog;	/* Preallocation for services */
	spinlock_t		incoming_lock;	/* Incoming call vs service shutdown lock */
	struct list_head	sock_calls;	/* List of calls owned by this socket */
	struct list_head	to_be_accepted;	/* calls awaiting acceptance */
	struct list_head	recvmsg_q;	/* Calls awaiting recvmsg's attention */
	rwlock_t		recvmsg_lock;	/* Lock for recvmsg_q */
	struct key		*key;		/* security for this socket */
	struct key		*securities;	/* list of server security descriptors */
	struct rb_root		calls;		/* User ID -> call mapping */
	unsigned long		flags;
#define RXRPC_SOCK_CONNECTED	0		/* connect_srx is set */
	rwlock_t		call_lock;	/* lock for calls */
	u32			min_sec_level;	/* minimum security level */
#define RXRPC_SECURITY_MAX	RXRPC_SECURITY_ENCRYPT
	bool			exclusive;	/* Exclusive connection for a client socket */
	sa_family_t		family;		/* Protocol family created with */
	struct sockaddr_rxrpc	srx;		/* local address */
	struct sockaddr_rxrpc	connect_srx;	/* Default client address from connect() */
};

#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
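
/*
 * Usage sketch (editor's addition): rxrpc_sk() is the usual container_of()
 * downcast from the generic struct sock to the protocol-private socket, e.g.
 *
 *	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
 *
 * The WARNING above matters because the core socket code allocates and
 * addresses this object through its embedded struct sock, so sk must sit at
 * offset zero.
 */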
/*
 * CPU-byteorder normalised Rx packet header.
 */
struct rxrpc_host_header {
	u32		epoch;		/* client boot timestamp */
	u32		cid;		/* connection and channel ID */
	u32		callNumber;	/* call ID (0 for connection-level packets) */
	u32		seq;		/* sequence number of pkt in call stream */
	u32		serial;		/* serial number of pkt sent to network */
	u8		type;		/* packet type */
	u8		flags;		/* packet flags */
	u8		userStatus;	/* app-layer defined status */
	u8		securityIndex;	/* security protocol ID */
	union {
		u16	_rsvd;		/* reserved */
		u16	cksum;		/* kerberos security checksum */
	};
	u16		serviceId;	/* service ID */
} __packed;

/*
 * RxRPC socket buffer private variables
 * - max 48 bytes (struct sk_buff::cb)
 */
struct rxrpc_skb_priv {
	union {
		u8	nr_jumbo;	/* Number of jumbo subpackets */
	};
	union {
		int	remain;		/* amount of space remaining for next write */
	};
	struct rxrpc_host_header hdr;	/* RxRPC packet header from this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
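
/*
 * Typical access pattern (sketch, editor's addition): the per-skb private
 * area is reached through the accessor above, e.g.
 *
 *	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 *
 * after which sp->hdr holds the CPU-byteorder copy of the RxRPC header for
 * that packet.
 */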
/*
 * RxRPC security module interface
 */
struct rxrpc_security {
	const char		*name;		/* name of this service */
	u8			security_index;	/* security type provided */

	/* Initialise a security service */
	int (*init)(void);

	/* Clean up a security service */
	void (*exit)(void);

	/* initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *);

	/* prime a connection's packet security */
	int (*prime_packet_security)(struct rxrpc_connection *);

	/* impose security on a packet */
	int (*secure_packet)(struct rxrpc_call *,
			     struct sk_buff *,
			     size_t,
			     void *);

	/* verify the security on a received packet */
	int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
			     unsigned int, unsigned int, rxrpc_seq_t, u16);

	/* Locate the data in a received packet that has been verified. */
	void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
			    unsigned int *, unsigned int *);

	/* issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* clear connection security */
	void (*clear)(struct rxrpc_connection *);
};
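
/*
 * Two implementations of this ops table live in this directory: the no-op
 * rxrpc_no_security (insecure.c) and the kerberos-based rxkad (rxkad.c),
 * both declared further down in this header.
 */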
/*
 * RxRPC local transport endpoint description
 * - owned by a single AF_RXRPC socket
 * - pointed to by transport socket struct sk_user_data
 */
struct rxrpc_local {
	struct rcu_head		rcu;
	atomic_t		usage;
	struct list_head	link;
	struct socket		*socket;	/* my UDP socket */
	struct work_struct	processor;
	struct rxrpc_sock __rcu	*service;	/* Service(s) listening on this endpoint */
	struct rw_semaphore	defrag_sem;	/* control re-enablement of IP DF bit */
	struct sk_buff_head	reject_queue;	/* packets awaiting rejection */
	struct sk_buff_head	event_queue;	/* endpoint event packets awaiting processing */
	struct rb_root		client_conns;	/* Client connections by socket params */
	spinlock_t		client_conns_lock; /* Lock for client_conns */
	spinlock_t		lock;		/* access lock */
	rwlock_t		services_lock;	/* lock for services list */
	int			debug_id;	/* debug ID for printks */
	bool			dead;
	struct sockaddr_rxrpc	srx;		/* local address */
};

/*
 * RxRPC remote transport endpoint definition
 * - matched by local endpoint, remote port, address and protocol type
 */
struct rxrpc_peer {
	struct rcu_head		rcu;		/* This must be first */
	atomic_t		usage;
	unsigned long		hash_key;
	struct hlist_node	hash_link;
	struct rxrpc_local	*local;
	struct hlist_head	error_targets;	/* targets for net error distribution */
	struct work_struct	error_distributor;
	struct rb_root		service_conns;	/* Service connections */
	seqlock_t		service_conn_lock;
	spinlock_t		lock;		/* access lock */
	unsigned int		if_mtu;		/* interface MTU for this peer */
	unsigned int		mtu;		/* network MTU for this peer */
	unsigned int		maxdata;	/* data size (MTU - hdrsize) */
	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
	int			debug_id;	/* debug ID for printks */
	int			error_report;	/* Net (+0) or local (+1000000) to distribute */
#define RXRPC_LOCAL_ERROR_OFFSET 1000000
	struct sockaddr_rxrpc	srx;		/* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
	ktime_t			rtt_last_req;	/* Time of last RTT request */
	u64			rtt;		/* Current RTT estimate (in nS) */
	u64			rtt_sum;	/* Sum of cache contents */
	u64			rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
	u8			rtt_cursor;	/* next entry at which to insert */
	u8			rtt_usage;	/* amount of cache actually used */
};
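
/*
 * Editor's note: the fields above feed a simple windowed average - each new
 * sample replaces rtt_cache[rtt_cursor], rtt_sum is adjusted by the
 * difference, and rtt is recomputed as roughly rtt_sum / rtt_usage (see
 * rxrpc_peer_add_rtt() in peer_event.c for the authoritative version).
 */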
/*
 * Keys for matching a connection.
 */
struct rxrpc_conn_proto {
	union {
		struct {
			u32	epoch;		/* epoch of this connection */
			u32	cid;		/* connection ID */
		};
		u64		index_key;
	};
};

struct rxrpc_conn_parameters {
	struct rxrpc_local	*local;		/* Representation of local endpoint */
	struct rxrpc_peer	*peer;		/* Remote endpoint */
	struct key		*key;		/* Security details */
	bool			exclusive;	/* T if conn is exclusive */
	u16			service_id;	/* Service ID for this connection */
	u32			security_level;	/* Security level selected */
};

/*
 * Bits in the connection flags.
 */
enum rxrpc_conn_flag {
	RXRPC_CONN_HAS_IDR,		/* Has a client conn ID assigned */
	RXRPC_CONN_IN_SERVICE_CONNS,	/* Conn is in peer->service_conns */
	RXRPC_CONN_IN_CLIENT_CONNS,	/* Conn is in local->client_conns */
	RXRPC_CONN_EXPOSED,		/* Conn has extra ref for exposure */
	RXRPC_CONN_DONT_REUSE,		/* Don't reuse this connection */
	RXRPC_CONN_COUNTED,		/* Counted by rxrpc_nr_client_conns */
};

/*
 * Events that can be raised upon a connection.
 */
enum rxrpc_conn_event {
	RXRPC_CONN_EV_CHALLENGE,	/* Send challenge packet */
};

/*
 * The connection cache state.
 */
enum rxrpc_conn_cache_state {
	RXRPC_CONN_CLIENT_INACTIVE,	/* Conn is not yet listed */
	RXRPC_CONN_CLIENT_WAITING,	/* Conn is on wait list, waiting for capacity */
	RXRPC_CONN_CLIENT_ACTIVE,	/* Conn is on active list, doing calls */
	RXRPC_CONN_CLIENT_CULLED,	/* Conn is culled and delisted, doing calls */
	RXRPC_CONN_CLIENT_IDLE,		/* Conn is on idle list, doing mostly nothing */
	RXRPC_CONN__NR_CACHE_STATES
};

/*
 * The connection protocol state.
 */
enum rxrpc_conn_proto_state {
	RXRPC_CONN_UNUSED,		/* Connection not yet attempted */
	RXRPC_CONN_CLIENT,		/* Client connection */
	RXRPC_CONN_SERVICE_PREALLOC,	/* Service connection preallocation */
	RXRPC_CONN_SERVICE_UNSECURED,	/* Service unsecured connection */
	RXRPC_CONN_SERVICE_CHALLENGING,	/* Service challenging for security */
	RXRPC_CONN_SERVICE,		/* Service secured connection */
	RXRPC_CONN_REMOTELY_ABORTED,	/* Conn aborted by peer */
	RXRPC_CONN_LOCALLY_ABORTED,	/* Conn aborted locally */
	RXRPC_CONN__NR_STATES
};

/*
 * RxRPC connection definition
 * - matched by { local, peer, epoch, conn_id, direction }
 * - each connection can only handle four simultaneous calls
 */
struct rxrpc_connection {
	struct rxrpc_conn_proto	proto;
	struct rxrpc_conn_parameters params;

	atomic_t		usage;
	struct rcu_head		rcu;
	struct list_head	cache_link;

	spinlock_t		channel_lock;
	unsigned char		active_chans;	/* Mask of active channels */
#define RXRPC_ACTIVE_CHANS_MASK	((1 << RXRPC_MAXCALLS) - 1)
	struct list_head	waiting_calls;	/* Calls waiting for channels */
	struct rxrpc_channel {
		struct rxrpc_call __rcu	*call;		/* Active call */
		u32			call_id;	/* ID of current call */
		u32			call_counter;	/* Call ID counter */
		u32			last_call;	/* ID of last call */
		u8			last_type;	/* Type of last packet */
		u16			last_service_id;
		union {
			u32		last_seq;
			u32		last_abort;
		};
	} channels[RXRPC_MAXCALLS];

	struct work_struct	processor;	/* connection event processor */
	union {
		struct rb_node	client_node;	/* Node in local->client_conns */
		struct rb_node	service_node;	/* Node in peer->service_conns */
	};
	struct list_head	proc_link;	/* link in procfs list */
	struct list_head	link;		/* link in master connection list */
	struct sk_buff_head	rx_queue;	/* received conn-level packets */
	const struct rxrpc_security *security;	/* applied security module */
	struct key		*server_key;	/* security for this service */
	struct crypto_skcipher	*cipher;	/* encryption handle */
	struct rxrpc_crypt	csum_iv;	/* packet checksum base */
	unsigned long		flags;
	unsigned long		events;
	unsigned long		idle_timestamp;	/* Time at which last became idle */
	spinlock_t		state_lock;	/* state-change lock */
	enum rxrpc_conn_cache_state cache_state;
	enum rxrpc_conn_proto_state state;	/* current state of connection */
	u32			local_abort;	/* local abort code */
	u32			remote_abort;	/* remote abort code */
	int			debug_id;	/* debug ID for printks */
	atomic_t		serial;		/* packet serial number counter */
	unsigned int		hi_serial;	/* highest serial number received */
	u32			security_nonce;	/* response re-use preventer */
	u8			size_align;	/* data size alignment (for security) */
	u8			security_size;	/* security header size */
	u8			security_ix;	/* security type */
	u8			out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
};
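
/*
 * Illustrative sketch (editor's addition): a packet is steered to its channel
 * by the low bits of the connection+channel ID, which is what limits each
 * connection to RXRPC_MAXCALLS concurrent calls.  Assuming RXRPC_CHANNELMASK
 * from <rxrpc/packet.h>:
 *
 *	unsigned int chan = sp->hdr.cid & RXRPC_CHANNELMASK;
 *	struct rxrpc_channel *channel = &conn->channels[chan];
 */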
/*
 * Flags in call->flags.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED,		/* call has been released - no more message to userspace */
	RXRPC_CALL_HAS_USERID,		/* has a user ID attached */
	RXRPC_CALL_IS_SERVICE,		/* Call is service call */
	RXRPC_CALL_EXPOSED,		/* The call was exposed to the world */
	RXRPC_CALL_RX_LAST,		/* Received the last packet (at rxtx_top) */
	RXRPC_CALL_TX_LAST,		/* Last packet in Tx buffer (at rxtx_top) */
	RXRPC_CALL_SEND_PING,		/* A ping will need to be sent */
	RXRPC_CALL_PINGING,		/* Ping in process */
	RXRPC_CALL_RETRANS_TIMEOUT,	/* Retransmission due to timeout occurred */
};

/*
 * Events that can be raised on a call.
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_ACK,		/* need to generate ACK */
	RXRPC_CALL_EV_ABORT,		/* need to generate abort */
	RXRPC_CALL_EV_TIMER,		/* Timer expired */
	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
	RXRPC_CALL_EV_PING,		/* Ping send required */
};

/*
 * The states that a call can be in.
 */
enum rxrpc_call_state {
	RXRPC_CALL_UNINITIALISED,
	RXRPC_CALL_CLIENT_AWAIT_CONN,	/* - client waiting for connection to become available */
	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
	RXRPC_CALL_SERVER_PREALLOC,	/* - service preallocation */
	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
	RXRPC_CALL_SERVER_ACCEPTING,	/* - server accepting request */
	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
	RXRPC_CALL_SERVER_AWAIT_ACK,	/* - server awaiting final ACK */
	RXRPC_CALL_COMPLETE,		/* - call complete */
	NR__RXRPC_CALL_STATES
};

/*
 * Call completion condition (state == RXRPC_CALL_COMPLETE).
 */
enum rxrpc_call_completion {
	RXRPC_CALL_SUCCEEDED,		/* - Normal termination */
	RXRPC_CALL_REMOTELY_ABORTED,	/* - call aborted by peer */
	RXRPC_CALL_LOCALLY_ABORTED,	/* - call aborted locally on error or close */
	RXRPC_CALL_LOCAL_ERROR,		/* - call failed due to local error */
	RXRPC_CALL_NETWORK_ERROR,	/* - call terminated by network error */
	NR__RXRPC_CALL_COMPLETIONS
};

/*
 * Call Tx congestion management modes.
 */
enum rxrpc_congest_mode {
	RXRPC_CALL_SLOW_START,
	RXRPC_CALL_CONGEST_AVOIDANCE,
	RXRPC_CALL_PACKET_LOSS,
	RXRPC_CALL_FAST_RETRANSMIT,
	NR__RXRPC_CONGEST_MODES
};

/*
 * RxRPC call definition
 * - matched by { connection, call_id }
 */
struct rxrpc_call {
	struct rcu_head		rcu;
	struct rxrpc_connection	*conn;		/* connection carrying call */
	struct rxrpc_peer	*peer;		/* Peer record for remote address */
	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
	ktime_t			ack_at;		/* When deferred ACK needs to happen */
	ktime_t			resend_at;	/* When next resend needs to happen */
	ktime_t			ping_at;	/* When next to send a ping */
	ktime_t			expire_at;	/* When the call times out */
	struct timer_list	timer;		/* Combined event timer */
	struct work_struct	processor;	/* Event processor */
	rxrpc_notify_rx_t	notify_rx;	/* kernel service Rx notification function */
	struct list_head	link;		/* link in master call list */
	struct list_head	chan_wait_link;	/* Link in conn->waiting_calls */
	struct hlist_node	error_link;	/* link in error distribution list */
	struct list_head	accept_link;	/* Link in rx->acceptq */
	struct list_head	recvmsg_link;	/* Link in rx->recvmsg_q */
	struct list_head	sock_link;	/* Link in rx->sock_calls */
	struct rb_node		sock_node;	/* Node in rx->calls */
	struct sk_buff		*tx_pending;	/* Tx socket buffer being filled */
	wait_queue_head_t	waitq;		/* Wait queue for channel or Tx */
	__be32			crypto_buf[2];	/* Temporary packet crypto buffer */
	unsigned long		user_call_ID;	/* user-defined call ID */
	unsigned long		flags;
	unsigned long		events;
	spinlock_t		lock;
	rwlock_t		state_lock;	/* lock for state transition */
	u32			abort_code;	/* Local/remote abort code */
	int			error;		/* Local error incurred */
	enum rxrpc_call_state	state;		/* current state of call */
	enum rxrpc_call_completion completion;	/* Call completion condition */
	atomic_t		usage;
	u16			service_id;	/* service ID */
	u8			security_ix;	/* Security type */
	u32			call_id;	/* call ID on connection */
	u32			cid;		/* connection ID plus channel index */
	int			debug_id;	/* debug ID for printks */
	unsigned short		rx_pkt_offset;	/* Current recvmsg packet offset */
	unsigned short		rx_pkt_len;	/* Current recvmsg packet len */

	/* Rx/Tx circular buffer, depending on phase.
	 *
	 * In the Rx phase, packets are annotated with 0 or the number of the
	 * segment of a jumbo packet each buffer refers to.  There can be up to
	 * 47 segments in a maximum-size UDP packet.
	 *
	 * In the Tx phase, packets are annotated with which buffers have been
	 * acked.
	 */
#define RXRPC_RXTX_BUFF_SIZE	64
#define RXRPC_RXTX_BUFF_MASK	(RXRPC_RXTX_BUFF_SIZE - 1)
#define RXRPC_INIT_RX_WINDOW_SIZE 32
	struct sk_buff		**rxtx_buffer;
	u8			*rxtx_annotations;
#define RXRPC_TX_ANNO_ACK	0
#define RXRPC_TX_ANNO_UNACK	1
#define RXRPC_TX_ANNO_NAK	2
#define RXRPC_TX_ANNO_RETRANS	3
#define RXRPC_TX_ANNO_MASK	0x03
#define RXRPC_TX_ANNO_LAST	0x04
#define RXRPC_TX_ANNO_RESENT	0x08
#define RXRPC_RX_ANNO_JUMBO	0x3f	/* Jumbo subpacket number + 1 if not zero */
#define RXRPC_RX_ANNO_JLAST	0x40	/* Set if last element of a jumbo packet */
#define RXRPC_RX_ANNO_VERIFIED	0x80	/* Set if verified and decrypted */
	rxrpc_seq_t		tx_hard_ack;	/* Dead slot in buffer; the first transmitted but
						 * not hard-ACK'd packet follows this.
						 */
	rxrpc_seq_t		tx_top;		/* Highest Tx slot allocated. */

	/* TCP-style slow-start congestion control [RFC5681].  Since the SMSS
	 * is fixed, we keep these numbers in terms of segments (ie. DATA
	 * packets) rather than bytes.
	 */
#define RXRPC_TX_SMSS		RXRPC_JUMBO_DATALEN
	u8			cong_cwnd;	/* Congestion window size */
	u8			cong_extra;	/* Extra to send for congestion management */
	u8			cong_ssthresh;	/* Slow-start threshold */
	enum rxrpc_congest_mode	cong_mode:8;	/* Congestion management mode */
	u8			cong_dup_acks;	/* Count of ACKs showing missing packets */
	u8			cong_cumul_acks; /* Cumulative ACK count */
	ktime_t			cong_tstamp;	/* Last time cwnd was changed */

	rxrpc_seq_t		rx_hard_ack;	/* Dead slot in buffer; the first received but not
						 * consumed packet follows this.
						 */
	rxrpc_seq_t		rx_top;		/* Highest Rx slot allocated. */
	rxrpc_seq_t		rx_expect_next;	/* Expected next packet sequence number */
	u8			rx_winsize;	/* Size of Rx window */
	u8			tx_winsize;	/* Maximum size of Tx window */
	bool			tx_phase;	/* T if transmission phase, F if receive phase */
	u8			nr_jumbo_bad;	/* Number of jumbo dups/exceeds-windows */

	/* receive-phase ACK management */
	u8			ackr_reason;	/* reason to ACK */
	u16			ackr_skew;	/* skew on packet being ACK'd */
	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
	rxrpc_seq_t		ackr_consumed;	/* Highest packet shown consumed */
	rxrpc_seq_t		ackr_seen;	/* Highest packet shown seen */

	/* ping management */
	rxrpc_serial_t		ping_serial;	/* Last ping sent */
	ktime_t			ping_time;	/* Time last ping sent */

	/* transmission-phase ACK management */
	ktime_t			acks_latest_ts;	/* Timestamp of latest ACK received */
	rxrpc_serial_t		acks_latest;	/* serial number of latest ACK received */
	rxrpc_seq_t		acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
};
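
/*
 * Editor's sketch of the Rx/Tx ring indexing described above: a sequence
 * number maps onto a slot by masking, so both the skb and its annotation for
 * a given DATA packet would be located with something like
 *
 *	int ix = seq & RXRPC_RXTX_BUFF_MASK;
 *	struct sk_buff *skb = call->rxtx_buffer[ix];
 *	u8 annotation = call->rxtx_annotations[ix];
 *
 * which works because RXRPC_RXTX_BUFF_SIZE is a power of two.
 */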
/*
 * Summary of a new ACK and the changes it made to the Tx buffer packet states.
 */
struct rxrpc_ack_summary {
	u8			ack_reason;
	u8			nr_acks;	/* Number of ACKs in packet */
	u8			nr_nacks;	/* Number of NACKs in packet */
	u8			nr_new_acks;	/* Number of new ACKs in packet */
	u8			nr_new_nacks;	/* Number of new NACKs in packet */
	u8			nr_rot_new_acks; /* Number of rotated new ACKs */
	bool			new_low_nack;	/* T if new low NACK found */
	bool			retrans_timeo;	/* T if reTx due to timeout happened */
	u8			flight_size;	/* Number of unreceived transmissions */
	/* Place to stash values for tracing */
	enum rxrpc_congest_mode	mode:8;
	u8			cwnd;
	u8			ssthresh;
	u8			dup_acks;
	u8			cumulative_acks;
};

enum rxrpc_skb_trace {
	rxrpc_skb_rx_cleaned,
	rxrpc_skb_rx_freed,
	rxrpc_skb_rx_got,
	rxrpc_skb_rx_lost,
	rxrpc_skb_rx_received,
	rxrpc_skb_rx_rotated,
	rxrpc_skb_rx_purged,
	rxrpc_skb_rx_seen,
	rxrpc_skb_tx_cleaned,
	rxrpc_skb_tx_freed,
	rxrpc_skb_tx_got,
	rxrpc_skb_tx_new,
	rxrpc_skb_tx_rotated,
	rxrpc_skb_tx_seen,
	rxrpc_skb__nr_trace
};

extern const char rxrpc_skb_traces[rxrpc_skb__nr_trace][7];

enum rxrpc_conn_trace {
	rxrpc_conn_new_client,
	rxrpc_conn_new_service,
	rxrpc_conn_queued,
	rxrpc_conn_seen,
	rxrpc_conn_got,
	rxrpc_conn_put_client,
	rxrpc_conn_put_service,
	rxrpc_conn__nr_trace
};

extern const char rxrpc_conn_traces[rxrpc_conn__nr_trace][4];

enum rxrpc_client_trace {
	rxrpc_client_activate_chans,
	rxrpc_client_alloc,
	rxrpc_client_chan_activate,
	rxrpc_client_chan_disconnect,
	rxrpc_client_chan_pass,
	rxrpc_client_chan_unstarted,
	rxrpc_client_cleanup,
	rxrpc_client_count,
	rxrpc_client_discard,
	rxrpc_client_duplicate,
	rxrpc_client_exposed,
	rxrpc_client_replace,
	rxrpc_client_to_active,
	rxrpc_client_to_culled,
	rxrpc_client_to_idle,
	rxrpc_client_to_inactive,
	rxrpc_client_to_waiting,
	rxrpc_client_uncount,
	rxrpc_client__nr_trace
};

extern const char rxrpc_client_traces[rxrpc_client__nr_trace][7];
extern const char rxrpc_conn_cache_states[RXRPC_CONN__NR_CACHE_STATES][5];

enum rxrpc_call_trace {
	rxrpc_call_new_client,
	rxrpc_call_new_service,
	rxrpc_call_queued,
	rxrpc_call_queued_ref,
	rxrpc_call_seen,
	rxrpc_call_connected,
	rxrpc_call_release,
	rxrpc_call_got,
	rxrpc_call_got_userid,
	rxrpc_call_got_kernel,
	rxrpc_call_put,
	rxrpc_call_put_userid,
	rxrpc_call_put_kernel,
	rxrpc_call_put_noqueue,
	rxrpc_call_error,
	rxrpc_call__nr_trace
};

extern const char rxrpc_call_traces[rxrpc_call__nr_trace][4];

enum rxrpc_transmit_trace {
	rxrpc_transmit_wait,
	rxrpc_transmit_queue,
	rxrpc_transmit_queue_last,
	rxrpc_transmit_rotate,
	rxrpc_transmit_rotate_last,
	rxrpc_transmit_await_reply,
	rxrpc_transmit_end,
	rxrpc_transmit__nr_trace
};

extern const char rxrpc_transmit_traces[rxrpc_transmit__nr_trace][4];

enum rxrpc_receive_trace {
	rxrpc_receive_incoming,
	rxrpc_receive_queue,
	rxrpc_receive_queue_last,
	rxrpc_receive_front,
	rxrpc_receive_rotate,
	rxrpc_receive_end,
	rxrpc_receive__nr_trace
};

extern const char rxrpc_receive_traces[rxrpc_receive__nr_trace][4];

enum rxrpc_recvmsg_trace {
	rxrpc_recvmsg_enter,
	rxrpc_recvmsg_wait,
	rxrpc_recvmsg_dequeue,
	rxrpc_recvmsg_hole,
	rxrpc_recvmsg_next,
	rxrpc_recvmsg_cont,
	rxrpc_recvmsg_full,
	rxrpc_recvmsg_data_return,
	rxrpc_recvmsg_terminal,
	rxrpc_recvmsg_to_be_accepted,
	rxrpc_recvmsg_return,
	rxrpc_recvmsg__nr_trace
};

extern const char rxrpc_recvmsg_traces[rxrpc_recvmsg__nr_trace][5];

enum rxrpc_rtt_tx_trace {
	rxrpc_rtt_tx_ping,
	rxrpc_rtt_tx_data,
	rxrpc_rtt_tx__nr_trace
};

extern const char rxrpc_rtt_tx_traces[rxrpc_rtt_tx__nr_trace][5];

enum rxrpc_rtt_rx_trace {
	rxrpc_rtt_rx_ping_response,
	rxrpc_rtt_rx_requested_ack,
	rxrpc_rtt_rx__nr_trace
};

extern const char rxrpc_rtt_rx_traces[rxrpc_rtt_rx__nr_trace][5];

enum rxrpc_timer_trace {
	rxrpc_timer_begin,
	rxrpc_timer_init_for_reply,
	rxrpc_timer_init_for_send_reply,
	rxrpc_timer_expired,
	rxrpc_timer_set_for_ack,
	rxrpc_timer_set_for_ping,
	rxrpc_timer_set_for_resend,
	rxrpc_timer_set_for_send,
	rxrpc_timer__nr_trace
};

extern const char rxrpc_timer_traces[rxrpc_timer__nr_trace][8];

enum rxrpc_propose_ack_trace {
	rxrpc_propose_ack_client_tx_end,
	rxrpc_propose_ack_input_data,
	rxrpc_propose_ack_ping_for_lost_ack,
	rxrpc_propose_ack_ping_for_lost_reply,
	rxrpc_propose_ack_ping_for_params,
	rxrpc_propose_ack_processing_op,
	rxrpc_propose_ack_respond_to_ack,
	rxrpc_propose_ack_respond_to_ping,
	rxrpc_propose_ack_retry_tx,
	rxrpc_propose_ack_rotate_rx,
	rxrpc_propose_ack_terminal_ack,
	rxrpc_propose_ack__nr_trace
};

enum rxrpc_propose_ack_outcome {
	rxrpc_propose_ack_use,
	rxrpc_propose_ack_update,
	rxrpc_propose_ack_subsume,
	rxrpc_propose_ack__nr_outcomes
};

extern const char rxrpc_propose_ack_traces[rxrpc_propose_ack__nr_trace][8];
extern const char *const rxrpc_propose_ack_outcomes[rxrpc_propose_ack__nr_outcomes];

enum rxrpc_congest_change {
	rxrpc_cong_begin_retransmission,
	rxrpc_cong_cleared_nacks,
	rxrpc_cong_new_low_nack,
	rxrpc_cong_no_change,
	rxrpc_cong_progress,
	rxrpc_cong_retransmit_again,
	rxrpc_cong_rtt_window_end,
	rxrpc_cong_saw_nack,
	rxrpc_congest__nr_change
};

extern const char rxrpc_congest_modes[NR__RXRPC_CONGEST_MODES][10];
extern const char rxrpc_congest_changes[rxrpc_congest__nr_change][9];

extern const char *const rxrpc_pkts[];
extern const char rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4];

#include <trace/events/rxrpc.h>

/*
 * af_rxrpc.c
 */
extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
extern u32 rxrpc_epoch;
extern atomic_t rxrpc_debug_id;
extern struct workqueue_struct *rxrpc_workqueue;

/*
 * call_accept.c
 */
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
					   struct rxrpc_connection *,
					   struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
				     rxrpc_notify_rx_t);
int rxrpc_reject_call(struct rxrpc_sock *);

/*
 * call_event.c
 */
void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
		       enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);

/*
 * call_object.c
 */
extern const char *const rxrpc_call_states[];
extern const char *const rxrpc_call_completions[];
extern unsigned int rxrpc_max_call_lifetime;
extern struct kmem_cache *rxrpc_call_jar;
extern struct list_head rxrpc_calls;
extern rwlock_t rxrpc_call_lock;

struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_alloc_call(gfp_t);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
					 struct rxrpc_conn_parameters *,
					 struct sockaddr_rxrpc *,
					 unsigned long, gfp_t);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
			 struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
bool __rxrpc_queue_call(struct rxrpc_call *);
bool rxrpc_queue_call(struct rxrpc_call *);
void rxrpc_see_call(struct rxrpc_call *);
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_cleanup_call(struct rxrpc_call *);
void __exit rxrpc_destroy_all_calls(void);

static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
{
	return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
}

static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
{
	return !rxrpc_is_service_call(call);
}

/*
 * Transition a call to the complete state.
 */
static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
					       enum rxrpc_call_completion compl,
					       u32 abort_code,
					       int error)
{
	if (call->state < RXRPC_CALL_COMPLETE) {
		call->abort_code = abort_code;
		call->error = error;
		call->completion = compl;
		call->state = RXRPC_CALL_COMPLETE;
		wake_up(&call->waitq);
		return true;
	}
	return false;
}

static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
					     enum rxrpc_call_completion compl,
					     u32 abort_code,
					     int error)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
	write_unlock_bh(&call->state_lock);
	return ret;
}

/*
 * Record that a call successfully completed.
 */
static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
{
	return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
}

static inline bool rxrpc_call_completed(struct rxrpc_call *call)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_call_completed(call);
	write_unlock_bh(&call->state_lock);
	return ret;
}

/*
 * Record that a call is locally aborted.
 */
static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
				      rxrpc_seq_t seq,
				      u32 abort_code, int error)
{
	trace_rxrpc_abort(why, call->cid, call->call_id, seq,
			  abort_code, error);
	return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
					   abort_code, error);
}

static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
				    rxrpc_seq_t seq, u32 abort_code, int error)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
	write_unlock_bh(&call->state_lock);
	return ret;
}
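
/*
 * Note (editor's addition): as with the completion helpers above, the
 * double-underscore variants assume the caller already holds
 * call->state_lock; the plain variants take and release it around the
 * state change.
 */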
/*
 * conn_client.c
 */
extern unsigned int rxrpc_max_client_connections;
extern unsigned int rxrpc_reap_client_connections;
extern unsigned int rxrpc_conn_idle_client_expiry;
extern unsigned int rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;

void rxrpc_destroy_client_conn_ids(void);
int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
		       struct sockaddr_rxrpc *, gfp_t);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
void __exit rxrpc_destroy_all_client_connections(void);

/*
 * conn_event.c
 */
void rxrpc_process_connection(struct work_struct *);

/*
 * conn_object.c
 */
extern unsigned int rxrpc_connection_expiry;
extern struct list_head rxrpc_connections;
extern struct list_head rxrpc_connection_proc_list;
extern rwlock_t rxrpc_connection_lock;

int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
						   struct sk_buff *);
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_connection(struct rxrpc_connection *);
bool rxrpc_queue_conn(struct rxrpc_connection *);
void rxrpc_see_connection(struct rxrpc_connection *);
void rxrpc_get_connection(struct rxrpc_connection *);
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
void rxrpc_put_service_conn(struct rxrpc_connection *);
void __exit rxrpc_destroy_all_connections(void);

static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
{
	return conn->out_clientflag;
}

static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
{
	return !rxrpc_conn_is_client(conn);
}

static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	if (!conn)
		return;

	if (rxrpc_conn_is_client(conn))
		rxrpc_put_client_conn(conn);
	else
		rxrpc_put_service_conn(conn);
}

/*
 * conn_service.c
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
						     struct sk_buff *);
struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t);
void rxrpc_new_incoming_connection(struct rxrpc_connection *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);

/*
 * input.c
 */
void rxrpc_data_ready(struct sock *);

/*
 * insecure.c
 */
extern const struct rxrpc_security rxrpc_no_security;

/*
 * key.c
 */
extern struct key_type key_type_rxrpc;
extern struct key_type key_type_rxrpc_s;

int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
			      u32);

/*
 * local_event.c
 */
extern void rxrpc_process_local_events(struct rxrpc_local *);

/*
 * local_object.c
 */
struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *);
void __rxrpc_put_local(struct rxrpc_local *);
void __exit rxrpc_destroy_all_locals(void);

static inline void rxrpc_get_local(struct rxrpc_local *local)
{
	atomic_inc(&local->usage);
}

static inline
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
	return atomic_inc_not_zero(&local->usage) ? local : NULL;
}

static inline void rxrpc_put_local(struct rxrpc_local *local)
{
	if (local && atomic_dec_and_test(&local->usage))
		__rxrpc_put_local(local);
}

static inline void rxrpc_queue_local(struct rxrpc_local *local)
{
	rxrpc_queue_work(&local->processor);
}

/*
 * misc.c
 */
extern unsigned int rxrpc_max_backlog __read_mostly;
extern unsigned int rxrpc_requested_ack_delay;
extern unsigned int rxrpc_soft_ack_delay;
extern unsigned int rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
extern unsigned int rxrpc_resend_timeout;

extern const s8 rxrpc_ack_priority[];

/*
 * output.c
 */
int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
void rxrpc_reject_packets(struct rxrpc_local *);

/*
 * peer_event.c
 */
void rxrpc_error_report(struct sock *);
void rxrpc_peer_error_distributor(struct work_struct *);
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);

/*
 * peer_object.c
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
					 const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
				     struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
					      struct rxrpc_peer *);

static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
{
	atomic_inc(&peer->usage);
	return peer;
}

static inline
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
{
	return atomic_inc_not_zero(&peer->usage) ? peer : NULL;
}

extern void __rxrpc_put_peer(struct rxrpc_peer *peer);
static inline void rxrpc_put_peer(struct rxrpc_peer *peer)
{
	if (peer && atomic_dec_and_test(&peer->usage))
		__rxrpc_put_peer(peer);
}

/*
 * proc.c
 */
extern const struct file_operations rxrpc_call_seq_fops;
extern const struct file_operations rxrpc_connection_seq_fops;

/*
 * recvmsg.c
 */
void rxrpc_notify_socket(struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);

/*
 * rxkad.c
 */
#ifdef CONFIG_RXKAD
extern const struct rxrpc_security rxkad;
#endif

/*
 * security.c
 */
int __init rxrpc_init_security(void);
void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
int rxrpc_init_server_conn_security(struct rxrpc_connection *);

/*
 * sendmsg.c
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * skbuff.c
 */
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_packet_destructor(struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_lose_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);

/*
 * sysctl.c
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif

/*
 * utils.c
 */
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);

static inline bool before(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) < 0;
}
static inline bool before_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) <= 0;
}
static inline bool after(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) > 0;
}
static inline bool after_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
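
/*
 * These helpers use wrap-safe serial-number comparison: the difference is
 * taken in u32 and reinterpreted as s32, so ordering stays correct across
 * sequence-number wrap.  For example, before(0xfffffffe, 1) is true because
 * (s32)(0xfffffffe - 1) is negative.
 */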
/*
 * debug tracing
 */
extern unsigned int rxrpc_debug;

#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk(" "FMT ,##__VA_ARGS__)
#define kproto(FMT,...)	dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...)	dbgprintk("@@@ "FMT ,##__VA_ARGS__)

#if defined(__KDEBUG)
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)
#define _proto(FMT,...)	kproto(FMT,##__VA_ARGS__)
#define _net(FMT,...)	knet(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04
#define RXRPC_DEBUG_KPROTO	0x08
#define RXRPC_DEBUG_KNET	0x10

#define _enter(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))		\
		kenter(FMT,##__VA_ARGS__);			\
} while (0)

#define _leave(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))		\
		kleave(FMT,##__VA_ARGS__);			\
} while (0)

#define _debug(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))		\
		kdebug(FMT,##__VA_ARGS__);			\
} while (0)

#define _proto(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO))		\
		kproto(FMT,##__VA_ARGS__);			\
} while (0)

#define _net(FMT,...)						\
do {								\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))		\
		knet(FMT,##__VA_ARGS__);			\
} while (0)

#else
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk(" "FMT ,##__VA_ARGS__)
#define _proto(FMT,...)	no_printk("### "FMT ,##__VA_ARGS__)
#define _net(FMT,...)	no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif
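
/*
 * Typical usage (sketch, editor's addition): the tracing macros take
 * printf-style arguments, e.g.
 *
 *	_enter("%d", call->debug_id);
 *	...
 *	_leave(" = %d", ret);
 */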
/*
 * debug assertion checking
 */
#if 1 // defined(__KDEBUGALL)

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)

#endif /* __KDEBUGALL */
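
/*
 * Usage sketch (editor's addition): ASSERTCMP() and ASSERTIFCMP() take the
 * comparison operator as their middle argument, e.g.
 *
 *	ASSERTCMP(atomic_read(&call->usage), >, 0);
 */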