/* xprt_rdma.h */
/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
  39. #ifndef _LINUX_SUNRPC_XPRT_RDMA_H
  40. #define _LINUX_SUNRPC_XPRT_RDMA_H
  41. #include <linux/wait.h> /* wait_queue_head_t, etc */
  42. #include <linux/spinlock.h> /* spinlock_t, etc */
  43. #include <asm/atomic.h> /* atomic_t, etc */
  44. #include <rdma/rdma_cm.h> /* RDMA connection api */
  45. #include <rdma/ib_verbs.h> /* RDMA verbs api */
  46. #include <linux/sunrpc/clnt.h> /* rpc_xprt */
  47. #include <linux/sunrpc/rpc_rdma.h> /* RPC/RDMA protocol */
  48. #include <linux/sunrpc/xprtrdma.h> /* xprt parameters */
/* How long to wait for RDMA CM address/route resolution, in milliseconds */
#define RDMA_RESOLVE_TIMEOUT	(5000)	/* 5 seconds */
/* How many times to retry the connect attempt */
#define RDMA_CONNECT_RETRY_MAX	(2)	/* retries if no listener backlog */
  51. /*
  52. * Interface Adapter -- one per transport instance
  53. */
struct rpcrdma_ia {
	struct rdma_cm_id	*ri_id;		/* RDMA CM connection identifier */
	struct ib_pd		*ri_pd;		/* protection domain */
	struct ib_mr		*ri_bind_mem;	/* MR used for registration;
						 * NOTE(review): presumably a
						 * DMA MR for bind operations --
						 * confirm against verbs.c */
	u32			ri_dma_lkey;	/* device-provided local DMA lkey */
	int			ri_have_dma_lkey; /* nonzero when ri_dma_lkey is valid */
	struct completion	ri_done;	/* completion for async CM events;
						 * result reported in ri_async_rc */
	int			ri_async_rc;	/* status from async CM callback */
	enum rpcrdma_memreg	ri_memreg_strategy; /* selected memory registration mode */
};
  64. /*
  65. * RDMA Endpoint -- one per transport instance
  66. */
struct rpcrdma_ep {
	atomic_t		rep_cqcount;	/* countdown used by DECR_CQCOUNT;
						 * reset via INIT_CQCOUNT */
	int			rep_cqinit;	/* initial value for rep_cqcount */
	int			rep_connected;	/* connection state */
	struct rpcrdma_ia	*rep_ia;	/* owning interface adapter */
	struct ib_cq		*rep_cq;	/* completion queue */
	struct ib_qp_init_attr	rep_attr;	/* QP creation attributes */
	wait_queue_head_t	rep_connect_wait; /* waiters for connection events */
	struct ib_sge		rep_pad;	/* holds zeroed pad */
	struct ib_mr		*rep_pad_mr;	/* holds zeroed pad */
	void			(*rep_func)(struct rpcrdma_ep *); /* event callback */
	struct rpc_xprt		*rep_xprt;	/* for rep_func */
	struct rdma_conn_param	rep_remote_cma;	/* CM connection parameters */
	struct sockaddr_storage	rep_remote_addr; /* remote peer address */
};
/* Re-arm the endpoint's CQ event countdown to its per-connection initial value */
#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
/* Decrement the CQ event countdown; evaluates to the post-decrement value */
#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
/*
 * struct rpcrdma_rep -- this structure encapsulates state required to recv
 * and complete a reply, asynchronously. It needs several pieces of
 * state:
 *   o recv buffer (posted to provider)
 *   o ib_sge (also donated to provider)
 *   o status of reply (length, success or not)
 *   o bookkeeping state to get run by tasklet (list, etc)
 *
 * These are allocated during initialization, per-transport instance;
 * however, the tasklet execution list itself is global, as it should
 * always be pretty short.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 */
/* temporary static scatter/gather max */
#define RPCRDMA_MAX_DATA_SEGS	(8)	/* max scatter/gather */
#define RPCRDMA_MAX_SEGS	(RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */

/*
 * Worst-case marshaled RPC/RDMA header size: the fixed rpcrdma_msg
 * header, two u32 words, a read chunk list of RPCRDMA_MAX_SEGS
 * entries, and one final u32.
 */
#define MAX_RPCRDMAHDR (\
	/* max supported RPC/RDMA header */ \
	sizeof(struct rpcrdma_msg) + (2 * sizeof(u32)) + \
	(sizeof(struct rpcrdma_read_chunk) * RPCRDMA_MAX_SEGS) + sizeof(u32))
struct rpcrdma_buffer;	/* forward declaration; defined below */

struct rpcrdma_rep {
	unsigned int	rr_len;		/* actual received reply length */
	struct rpcrdma_buffer *rr_buffer; /* home base for this structure */
	struct rpc_xprt	*rr_xprt;	/* needed for request/reply matching */
	void (*rr_func)(struct rpcrdma_rep *);/* called by tasklet in softint */
	struct list_head rr_list;	/* tasklet list */
	wait_queue_head_t rr_unbind;	/* optional unbind wait */
	struct ib_sge	rr_iov;		/* for posting */
	struct ib_mr	*rr_handle;	/* handle for mem in rr_iov */
	char	rr_base[MAX_RPCRDMAHDR]; /* minimal inline receive buffer */
};
  119. /*
  120. * struct rpcrdma_req -- structure central to the request/reply sequence.
  121. *
  122. * N of these are associated with a transport instance, and stored in
  123. * struct rpcrdma_buffer. N is the max number of outstanding requests.
  124. *
  125. * It includes pre-registered buffer memory for send AND recv.
  126. * The recv buffer, however, is not owned by this structure, and
  127. * is "donated" to the hardware when a recv is posted. When a
  128. * reply is handled, the recv buffer used is given back to the
  129. * struct rpcrdma_req associated with the request.
  130. *
  131. * In addition to the basic memory, this structure includes an array
  132. * of iovs for send operations. The reason is that the iovs passed to
  133. * ib_post_{send,recv} must not be modified until the work request
  134. * completes.
  135. *
  136. * NOTES:
 * o RPCRDMA_MAX_SEGS is the max number of addressable chunk elements we
  138. * marshal. The number needed varies depending on the iov lists that
  139. * are passed to us, the memory registration mode we are in, and if
  140. * physical addressing is used, the layout.
  141. */
struct rpcrdma_mr_seg {			/* chunk descriptors */
	union {				/* chunk memory handles */
		struct ib_mr	*rl_mr;		/* if registered directly */
		struct rpcrdma_mw {		/* if registered from region */
			union {
				struct ib_mw	*mw;	/* memory window */
				struct ib_fmr	*fmr;	/* fast memory region */
				struct {		/* fast registration MR */
					struct ib_fast_reg_page_list *fr_pgl;
					struct ib_mr *fr_mr;
					/* tracks whether fr_mr currently
					 * holds a valid registration */
					enum { FRMR_IS_INVALID, FRMR_IS_VALID } state;
				} frmr;
			} r;
			struct list_head mw_list;	/* links rb_mws */
		} *rl_mw;
	} mr_chunk;
	u64		mr_base;	/* registration result */
	u32		mr_rkey;	/* registration result */
	u32		mr_len;		/* length of chunk or segment */
	int		mr_nsegs;	/* number of segments in chunk or 0 */
	enum dma_data_direction	mr_dir;	/* segment mapping direction */
	dma_addr_t	mr_dma;		/* segment mapping address */
	size_t		mr_dmalen;	/* segment mapping length */
	struct page	*mr_page;	/* owning page, if any */
	char		*mr_offset;	/* kva if no page, else offset */
};
  168. struct rpcrdma_req {
  169. size_t rl_size; /* actual length of buffer */
  170. unsigned int rl_niovs; /* 0, 2 or 4 */
  171. unsigned int rl_nchunks; /* non-zero if chunks */
  172. unsigned int rl_connect_cookie; /* retry detection */
  173. struct rpcrdma_buffer *rl_buffer; /* home base for this structure */
  174. struct rpcrdma_rep *rl_reply;/* holder for reply buffer */
  175. struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];/* chunk segments */
  176. struct ib_sge rl_send_iov[4]; /* for active requests */
  177. struct ib_sge rl_iov; /* for posting */
  178. struct ib_mr *rl_handle; /* handle for mem in rl_iov */
  179. char rl_base[MAX_RPCRDMAHDR]; /* start of actual buffer */
  180. __u32 rl_xdr_buf[0]; /* start of returned rpc rq_buffer */
  181. };
/* Map an rpc_rqst's rq_buffer back to the rpcrdma_req that contains it */
#define rpcr_to_rdmar(r) \
	container_of((r)->rq_buffer, struct rpcrdma_req, rl_xdr_buf[0])
  184. /*
  185. * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
  186. * inline requests/replies, and client/server credits.
  187. *
  188. * One of these is associated with a transport instance
  189. */
struct rpcrdma_buffer {
	spinlock_t	rb_lock;	/* protects indexes */
	atomic_t	rb_credits;	/* most recent server credits */
	unsigned long	rb_cwndscale;	/* cached framework rpc_cwndscale */
	int		rb_max_requests;/* client max requests */
	struct list_head rb_mws;	/* optional memory windows/fmrs/frmrs */
	int		rb_send_index;	/* next free slot in rb_send_bufs */
	struct rpcrdma_req	**rb_send_bufs;	/* pool of request buffers */
	int		rb_recv_index;	/* next free slot in rb_recv_bufs */
	struct rpcrdma_rep	**rb_recv_bufs;	/* pool of reply buffers */
	char		*rb_pool;	/* backing allocation for the pools */
};
/* Map a buffer pool back to its transport's interface adapter */
#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
  203. /*
  204. * Internal structure for transport instance creation. This
  205. * exists primarily for modularity.
  206. *
  207. * This data should be set with mount options
  208. */
struct rpcrdma_create_data_internal {
	struct sockaddr_storage	addr;	/* RDMA server address */
	unsigned int	max_requests;	/* max requests (slots) in flight */
	unsigned int	rsize;		/* mount rsize - max read hdr+data */
	unsigned int	wsize;		/* mount wsize - max write hdr+data */
	unsigned int	inline_rsize;	/* max non-rdma read data payload */
	unsigned int	inline_wsize;	/* max non-rdma write data payload */
	unsigned int	padding;	/* non-rdma write header padding */
};
  218. #define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
  219. (rpcx_to_rdmad(rq->rq_task->tk_xprt).inline_rsize)
  220. #define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
  221. (rpcx_to_rdmad(rq->rq_task->tk_xprt).inline_wsize)
  222. #define RPCRDMA_INLINE_PAD_VALUE(rq)\
  223. rpcx_to_rdmad(rq->rq_task->tk_xprt).padding
  224. /*
  225. * Statistics for RPCRDMA
  226. */
struct rpcrdma_stats {
	/* chunk counts, by chunk type */
	unsigned long		read_chunk_count;
	unsigned long		write_chunk_count;
	unsigned long		reply_chunk_count;

	/* cumulative RDMA traffic totals */
	unsigned long long	total_rdma_request;
	unsigned long long	total_rdma_reply;

	/* data-copy and error accounting */
	unsigned long long	pullup_copy_count;
	unsigned long long	fixup_copy_count;
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
};
  239. /*
  240. * RPCRDMA transport -- encapsulates the structures above for
  241. * integration with RPC.
  242. *
  243. * The contained structures are embedded, not pointers,
  244. * for convenience. This structure need not be visible externally.
  245. *
  246. * It is allocated and initialized during mount, and released
  247. * during unmount.
  248. */
struct rpcrdma_xprt {
	struct rpc_xprt		xprt;		/* generic RPC transport */
	struct rpcrdma_ia	rx_ia;		/* interface adapter */
	struct rpcrdma_ep	rx_ep;		/* RDMA endpoint */
	struct rpcrdma_buffer	rx_buf;		/* pre-registered buffer pools */
	struct rpcrdma_create_data_internal rx_data; /* mount-time parameters */
	struct delayed_work	rdma_connect;	/* deferred connect work */
	struct rpcrdma_stats	rx_stats;	/* transport statistics */
};
/* Map a generic rpc_xprt to its containing rpcrdma_xprt */
#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, xprt)
/* Access a transport's mount-time creation data */
#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)
  260. /* Setting this to 0 ensures interoperability with early servers.
  261. * Setting this to 1 enhances certain unaligned read/write performance.
  262. * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
  263. extern int xprt_rdma_pad_optimize;
/*
 * Interface Adapter calls - xprtrdma/verbs.c
 */
int rpcrdma_ia_open(struct rpcrdma_xprt *, struct sockaddr *, int);
void rpcrdma_ia_close(struct rpcrdma_ia *);

/*
 * Endpoint calls - xprtrdma/verbs.c
 */
int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
int rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);

/* post a send / a receive to the endpoint's queue pair */
int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_req *);
int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_rep *);

/*
 * Buffer calls - xprtrdma/verbs.c
 */
int rpcrdma_buffer_create(struct rpcrdma_buffer *, struct rpcrdma_ep *,
				struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);

/* get/put request and reply buffers from/to the pools */
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);

/* register/deregister transport-internal memory */
int rpcrdma_register_internal(struct rpcrdma_ia *, void *, int,
				struct ib_mr **, struct ib_sge *);
int rpcrdma_deregister_internal(struct rpcrdma_ia *,
				struct ib_mr *, struct ib_sge *);

/* register/deregister externally-supplied chunk memory */
int rpcrdma_register_external(struct rpcrdma_mr_seg *,
				int, int, struct rpcrdma_xprt *);
int rpcrdma_deregister_external(struct rpcrdma_mr_seg *,
				struct rpcrdma_xprt *, void *);
/*
 * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
 */
void rpcrdma_conn_func(struct rpcrdma_ep *);
void rpcrdma_reply_handler(struct rpcrdma_rep *);

/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */
int rpcrdma_marshal_req(struct rpc_rqst *);
  309. #endif /* _LINUX_SUNRPC_XPRT_RDMA_H */