  1. /*
  2. * This file is part of the Chelsio FCoE driver for Linux.
  3. *
  4. * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
  5. *
  6. * This software is available to you under a choice of one of two
  7. * licenses. You may choose to be licensed under the terms of the GNU
  8. * General Public License (GPL) Version 2, available from the file
  9. * COPYING in the main directory of this source tree, or the
  10. * OpenIB.org BSD license below:
  11. *
  12. * Redistribution and use in source and binary forms, with or
  13. * without modification, are permitted provided that the following
  14. * conditions are met:
  15. *
  16. * - Redistributions of source code must retain the above
  17. * copyright notice, this list of conditions and the following
  18. * disclaimer.
  19. *
  20. * - Redistributions in binary form must reproduce the above
  21. * copyright notice, this list of conditions and the following
  22. * disclaimer in the documentation and/or other materials
  23. * provided with the distribution.
  24. *
  25. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32. * SOFTWARE.
  33. */
  34. #ifndef __CSIO_WR_H__
  35. #define __CSIO_WR_H__
  36. #include <linux/cache.h>
  37. #include "csio_defs.h"
  38. #include "t4fw_api.h"
  39. #include "t4fw_api_stor.h"
/*
 * SGE register field values.
 * Ingress PCIe boundary encodings: value N selects a (32 << N)-byte
 * boundary (32B..4096B).
 */
#define X_INGPCIEBOUNDARY_32B		0
#define X_INGPCIEBOUNDARY_64B		1
#define X_INGPCIEBOUNDARY_128B		2
#define X_INGPCIEBOUNDARY_256B		3
#define X_INGPCIEBOUNDARY_512B		4
#define X_INGPCIEBOUNDARY_1024B		5
#define X_INGPCIEBOUNDARY_2048B		6
#define X_INGPCIEBOUNDARY_4096B		7

/*
 * GTS register: TimerReg encodings. 0-5 select one of the SGE interrupt
 * holdoff timers; 6 and 7 are the restart-counter / immediate-CIDX-update
 * special values.
 */
#define X_TIMERREG_COUNTER0		0
#define X_TIMERREG_COUNTER1		1
#define X_TIMERREG_COUNTER2		2
#define X_TIMERREG_COUNTER3		3
#define X_TIMERREG_COUNTER4		4
#define X_TIMERREG_COUNTER5		5
#define X_TIMERREG_RESTART_COUNTER	6
#define X_TIMERREG_UPDATE_CIDX		7

/*
 * Egress Context field values
 */
#define X_FETCHBURSTMIN_16B		0
#define X_FETCHBURSTMIN_32B		1
#define X_FETCHBURSTMIN_64B		2
#define X_FETCHBURSTMIN_128B		3

#define X_FETCHBURSTMAX_64B		0
#define X_FETCHBURSTMAX_128B		1
#define X_FETCHBURSTMAX_256B		2
#define X_FETCHBURSTMAX_512B		3

/* Host flow-control mode for an egress context */
#define X_HOSTFCMODE_NONE		0
#define X_HOSTFCMODE_INGRESS_QUEUE	1
#define X_HOSTFCMODE_STATUS_PAGE	2
#define X_HOSTFCMODE_BOTH		3

/*
 * Ingress Context field values
 */
#define X_UPDATESCHEDULING_TIMER	0
#define X_UPDATESCHEDULING_COUNTER_OPTTIMER	1

#define X_UPDATEDELIVERY_NONE		0
#define X_UPDATEDELIVERY_INTERRUPT	1
#define X_UPDATEDELIVERY_STATUS_PAGE	2
#define X_UPDATEDELIVERY_BOTH		3

#define X_INTERRUPTDESTINATION_PCIE	0
#define X_INTERRUPTDESTINATION_IQ	1

/* Ingress response descriptor types */
#define X_RSPD_TYPE_FLBUF		0	/* freelist buffer */
#define X_RSPD_TYPE_CPL			1	/* CPL message */
#define X_RSPD_TYPE_INTR		2	/* interrupt */

/* WR status is at the same position as retval in a CMD header */
#define csio_wr_status(_wr)		\
		(FW_CMD_RETVAL_G(ntohl(((struct fw_cmd_hdr *)(_wr))->lo)))

struct csio_hw;

/* Interrupt coalescing tunables; defined in the WR module's .c file. */
extern int csio_intr_coalesce_cnt;
extern int csio_intr_coalesce_time;
/*
 * Ingress queue params: parameters for creating/starting/stopping an
 * ingress queue and its (up to two) freelists. Field names track the
 * firmware IQ command fields (see t4fw_api.h).
 */
struct csio_iq_params {
	uint8_t		iq_start:1;		/* start the queue */
	uint8_t		iq_stop:1;		/* stop the queue */
	uint8_t		pfn:3;			/* PCI physical function */
	uint8_t		vfn;			/* PCI virtual function */

	uint16_t	physiqid;		/* hardware (physical) IQ id */
	uint16_t	iqid;			/* IQ id */
	uint16_t	fl0id;			/* freelist 0 queue id */
	uint16_t	fl1id;			/* freelist 1 queue id */

	uint8_t		viid;			/* virtual interface id */
	uint8_t		type;
	uint8_t		iqasynch;
	uint8_t		reserved4;
	uint8_t		iqandst;
	uint8_t		iqanus;
	uint8_t		iqanud;
	uint16_t	iqandstindex;
	uint8_t		iqdroprss;
	uint8_t		iqpciech;		/* PCIe channel */
	uint8_t		iqdcaen;		/* DCA enable */
	uint8_t		iqdcacpu;		/* DCA target CPU */
	uint8_t		iqintcntthresh;		/* interrupt count threshold */
	uint8_t		iqo;
	uint8_t		iqcprio;
	uint8_t		iqesize;		/* IQ entry size */
	uint16_t	iqsize;			/* IQ size */
	uint64_t	iqaddr;			/* DMA address of the IQ */

	uint8_t		iqflintiqhsen;
	uint8_t		reserved5;
	uint8_t		iqflintcongen;
	uint8_t		iqflintcngchmap;
	uint32_t	reserved6;

	/* Freelist 0 attributes */
	uint8_t		fl0hostfcmode;		/* X_HOSTFCMODE_* */
	uint8_t		fl0cprio;
	uint8_t		fl0paden;		/* padding enable */
	uint8_t		fl0packen;		/* packing enable */
	uint8_t		fl0congen;
	uint8_t		fl0dcaen;
	uint8_t		fl0dcacpu;
	uint8_t		fl0fbmin;		/* X_FETCHBURSTMIN_* */
	uint8_t		fl0fbmax;		/* X_FETCHBURSTMAX_* */
	uint8_t		fl0cidxfthresho;
	uint8_t		fl0cidxfthresh;
	uint16_t	fl0size;
	uint64_t	fl0addr;		/* DMA address of freelist 0 */
	uint64_t	reserved7;

	/* Freelist 1 attributes (mirror of fl0 above) */
	uint8_t		fl1hostfcmode;
	uint8_t		fl1cprio;
	uint8_t		fl1paden;
	uint8_t		fl1packen;
	uint8_t		fl1congen;
	uint8_t		fl1dcaen;
	uint8_t		fl1dcacpu;
	uint8_t		fl1fbmin;
	uint8_t		fl1fbmax;
	uint8_t		fl1cidxfthresho;
	uint8_t		fl1cidxfthresh;
	uint16_t	fl1size;
	uint64_t	fl1addr;		/* DMA address of freelist 1 */
};
/* Egress queue params: parameters for creating/starting/stopping an EQ. */
struct csio_eq_params {
	uint8_t		pfn;			/* PCI physical function */
	uint8_t		vfn;			/* PCI virtual function */
	uint8_t		eqstart:1;		/* start the queue */
	uint8_t		eqstop:1;		/* stop the queue */
	uint16_t	physeqid;		/* hardware (physical) EQ id */
	uint32_t	eqid;			/* EQ id */
	uint8_t		hostfcmode:2;		/* X_HOSTFCMODE_* */
	uint8_t		cprio:1;
	uint8_t		pciechn:3;		/* PCIe channel */
	uint16_t	iqid;			/* associated IQ id */
	uint8_t		dcaen:1;		/* DCA enable */
	uint8_t		dcacpu:5;		/* DCA target CPU */
	uint8_t		fbmin:3;		/* X_FETCHBURSTMIN_* */
	uint8_t		fbmax:3;		/* X_FETCHBURSTMAX_* */
	uint8_t		cidxfthresho:1;
	uint8_t		cidxfthresh:3;
	uint16_t	eqsize;			/* EQ size */
	uint64_t	eqaddr;			/* DMA address of the EQ */
};
/* A single DMA-coherent buffer: CPU mapping, bus address and length. */
struct csio_dma_buf {
	struct list_head	list;		/* Link in owner's buffer list */
	void			*vaddr;		/* Virtual address */
	dma_addr_t		paddr;		/* Physical address */
	uint32_t		len;		/* Buffer size */
};
/*
 * Generic I/O request structure: tracks one outstanding I/O from
 * submission on an egress queue to completion on an ingress queue.
 */
struct csio_ioreq {
	struct csio_sm		sm;		/* SM, List
						 * should be the first member
						 */
	int			iq_idx;		/* Ingress queue index */
	int			eq_idx;		/* Egress queue index */
	uint32_t		nsge;		/* Number of SG elements */
	uint32_t		tmo;		/* Driver timeout */
	uint32_t		datadir;	/* Data direction */
	struct csio_dma_buf	dma_buf;	/* Req/resp DMA buffers */
	uint16_t		wr_status;	/* WR completion status */
	int16_t			drv_status;	/* Driver internal status */
	struct csio_lnode	*lnode;		/* Owner lnode */
	struct csio_rnode	*rnode;		/* Src/destination rnode */
	void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *);
						/* completion callback */
	void			*scratch1;	/* Scratch area 1.
						 */
	void			*scratch2;	/* Scratch area 2. */
	struct list_head	gen_list;	/* Any list associated with
						 * this ioreq.
						 */
	uint64_t		fw_handle;	/* Unique handle passed
						 * to FW
						 */
	uint8_t			dcopy;		/* Data copy required */
	uint8_t			reserved1;
	uint16_t		reserved2;
	struct completion	cmplobj;	/* ioreq completion object */
} ____cacheline_aligned_in_smp;
/*
 * Egress status page for egress cidx updates.
 * Written by hardware; all fields are big-endian.
 */
struct csio_qstatus_page {
	__be32 qid;				/* Queue id */
	__be16 cidx;				/* Consumer index */
	__be16 pidx;				/* Producer index */
};
/* WR-module wide constants */
enum {
	CSIO_MAX_FLBUF_PER_IQWR = 4,	/* Max freelist buffers per IQ WR */
	CSIO_QCREDIT_SZ  = 64,		/* pidx/cidx increments
					 * in bytes
					 */
	CSIO_MAX_QID = 0xFFFF,		/* Max queue id (16-bit id space) */
	CSIO_MAX_IQ = 128,		/* Size of the IQ-id -> IQ map */

	CSIO_SGE_NTIMERS = 6,		/* SGE interrupt holdoff timers */
	CSIO_SGE_NCOUNTERS = 4,		/* SGE interrupt counters */
	CSIO_SGE_FL_SIZE_REGS = 16,	/* SGE freelist buffer-size registers */
};
/* Defines for type (struct csio_q::type) */
enum {
	CSIO_EGRESS	= 1,
	CSIO_INGRESS	= 2,
	CSIO_FREELIST	= 3,
};
/*
 * Structure for footer (last 2 flits) of Ingress Queue Entry.
 */
struct csio_iqwr_footer {
	__be32			hdrbuflen_pidx;	/* Header buffer len + pidx */
	__be32			pldbuflen_qid;	/* Payload buffer len + qid */
	union {
		u8		type_gen;	/* Response type + generation bit */
		__be64		last_flit;	/* Whole last flit, for 64-bit access */
	} u;
};
  250. #define IQWRF_NEWBUF (1 << 31)
  251. #define IQWRF_LEN_GET(x) (((x) >> 0) & 0x7fffffffU)
  252. #define IQWRF_GEN_SHIFT 7
  253. #define IQWRF_TYPE_GET(x) (((x) >> 4) & 0x3U)
/*
 * WR pair:
 * ========
 * A WR can start towards the end of a queue, and then continue at the
 * beginning, since the queue is considered to be circular. This will
 * require a pair of address/len to be passed back to the caller -
 * hence the Work request pair structure.
 */
struct csio_wr_pair {
	void		*addr1;		/* First segment of the WR */
	uint32_t	size1;		/* Size of the first segment */
	void		*addr2;		/* Wrapped segment (if any) */
	uint32_t	size2;		/* Size of the wrapped segment */
};
/*
 * The following structure is used by ingress processing to return the
 * free list buffers to consumers.
 */
struct csio_fl_dma_buf {
	struct csio_dma_buf	flbufs[CSIO_MAX_FLBUF_PER_IQWR];
						/* Freelist DMA buffers */
	int			offset;		/* Offset within the
						 * first FL buf.
						 */
	uint32_t		totlen;		/* Total length */
	uint8_t			defer_free;	/* Free of buffer can
						 * deferred
						 */
};
/* Data-types */
/* Handler invoked for each WR received on an ingress queue. */
typedef void (*iq_handler_t)(struct csio_hw *, void *, uint32_t,
			     struct csio_fl_dma_buf *, void *);

/* Ingress-queue private context (csio_q::un.iq) */
struct csio_iq {
	uint16_t	iqid;		/* Queue ID */
	uint16_t	physiqid;	/* Physical Queue ID */
	uint16_t	genbit;		/* Generation bit,
					 * initially set to 1
					 */
	int		flq_idx;	/* Freelist queue index */
	iq_handler_t	iq_intx_handler; /* IQ INTx handler routine */
};
/* Egress-queue private context (csio_q::un.eq) */
struct csio_eq {
	uint16_t	eqid;		/* Qid */
	uint16_t	physeqid;	/* Physical Queue ID */
	uint8_t		wrap[512];	/* Temp area for q-wrap around*/
};
/* Freelist private context (csio_q::un.fl) */
struct csio_fl {
	uint16_t		flid;	/* Qid */
	uint16_t		packen;	/* Packing enabled? */
	int			offset;	/* Offset within FL buf */
	int			sreg;	/* Size register */
	struct csio_dma_buf	*bufs;	/* Free list buffer ptr array
					 * indexed using flq->cidx/pidx
					 */
};
/* Per-queue statistics counters */
struct csio_qstats {
	uint32_t	n_tot_reqs;	/* Total no. of Requests */
	uint32_t	n_tot_rsps;	/* Total no. of responses */
	uint32_t	n_qwrap;	/* Queue wraps */
	uint32_t	n_eq_wr_split;	/* Number of split EQ WRs */
	uint32_t	n_qentry;	/* Queue entry */
	uint32_t	n_qempty;	/* Queue empty */
	uint32_t	n_qfull;	/* Queue fulls */
	uint32_t	n_rsp_unknown;	/* Unknown response type */
	uint32_t	n_stray_comp;	/* Stray completion intr */
	uint32_t	n_flq_refill;	/* Number of FL refills */
};
/*
 * Queue metadata: one descriptor per hardware queue (ingress, egress or
 * freelist); the type-specific context lives in the 'un' union.
 */
struct csio_q {
	uint16_t		type;		/* Type: Ingress/Egress/FL */
	uint16_t		pidx;		/* producer index */
	uint16_t		cidx;		/* consumer index */
	uint16_t		inc_idx;	/* Incremental index */
	uint32_t		wr_sz;		/* Size of all WRs in this q
						 * if fixed
						 */
	void			*vstart;	/* Base virtual address
						 * of queue
						 */
	void			*vwrap;		/* Virtual end address to
						 * wrap around at
						 */
	uint32_t		credits;	/* Size of queue in credits */
	void			*owner;		/* Owner */
	union {					/* Queue contexts */
		struct csio_iq	iq;
		struct csio_eq	eq;
		struct csio_fl	fl;
	} un;
	dma_addr_t		pstart;		/* Base physical address of
						 * queue
						 */
	uint32_t		portid;		/* PCIE Channel */
	uint32_t		size;		/* Size of queue in bytes */
	struct csio_qstats	stats;		/* Statistics */
} ____cacheline_aligned_in_smp;
/* Cached SGE configuration, read from hardware at init time. */
struct csio_sge {
	uint32_t	csio_fl_align;		/* Calculated and cached
						 * for fast path
						 */
	uint32_t	sge_control;		/* padding, boundaries,
						 * lengths, etc.
						 */
	uint32_t	sge_host_page_size;	/* Host page size */
	uint32_t	sge_fl_buf_size[CSIO_SGE_FL_SIZE_REGS];
						/* free list buffer sizes */
	uint16_t	timer_val[CSIO_SGE_NTIMERS];	/* holdoff timer values */
	uint8_t		counter_val[CSIO_SGE_NCOUNTERS];	/* intr counter values */
};
/* Work request module: top-level state for all queues of this function. */
struct csio_wrm {
	int			num_q;		/* Number of queues */
	struct csio_q		**q_arr;	/* Array of queue pointers
						 * allocated dynamically
						 * based on configured values
						 */
	uint32_t		fw_iq_start;	/* Start ID of IQ for this fn*/
	uint32_t		fw_eq_start;	/* Start ID of EQ for this fn*/
	struct csio_q		*intr_map[CSIO_MAX_IQ];
						/* IQ-id to IQ map table. */
	int			free_qidx;	/* queue idx of free queue */
	struct csio_sge		sge;		/* SGE params */
};
  377. #define csio_get_q(__hw, __idx) ((__hw)->wrm.q_arr[__idx])
  378. #define csio_q_type(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->type)
  379. #define csio_q_pidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pidx)
  380. #define csio_q_cidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->cidx)
  381. #define csio_q_inc_idx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->inc_idx)
  382. #define csio_q_vstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->vstart)
  383. #define csio_q_pstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pstart)
  384. #define csio_q_size(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->size)
  385. #define csio_q_credits(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->credits)
  386. #define csio_q_portid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->portid)
  387. #define csio_q_wr_sz(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->wr_sz)
  388. #define csio_q_iqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid)
  389. #define csio_q_physiqid(__hw, __idx) \
  390. ((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid)
  391. #define csio_q_iq_flq_idx(__hw, __idx) \
  392. ((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx)
  393. #define csio_q_eqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.eqid)
  394. #define csio_q_flid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.fl.flid)
  395. #define csio_q_physeqid(__hw, __idx) \
  396. ((__hw)->wrm.q_arr[(__idx)]->un.eq.physeqid)
  397. #define csio_iq_has_fl(__iq) ((__iq)->un.iq.flq_idx != -1)
  398. #define csio_q_iq_to_flid(__hw, __iq_idx) \
  399. csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_qidx)]->un.iq.flq_idx)
  400. #define csio_q_set_intr_map(__hw, __iq_idx, __rel_iq_id) \
  401. (__hw)->wrm.intr_map[__rel_iq_id] = csio_get_q(__hw, __iq_idx)
  402. #define csio_q_eq_wrap(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.wrap)
struct csio_mb;

/* Queue allocation and FW queue create/destroy */
int csio_wr_alloc_q(struct csio_hw *, uint32_t, uint32_t,
		    uint16_t, void *, uint32_t, int, iq_handler_t);
int csio_wr_iq_create(struct csio_hw *, void *, int,
		      uint32_t, uint8_t, bool,
		      void (*)(struct csio_hw *, struct csio_mb *));
int csio_wr_eq_create(struct csio_hw *, void *, int, int, uint8_t,
		      void (*)(struct csio_hw *, struct csio_mb *));
int csio_wr_destroy_queues(struct csio_hw *, bool cmd);

/* WR submission path: reserve a WR pair, copy the WR in, issue to HW */
int csio_wr_get(struct csio_hw *, int, uint32_t,
		struct csio_wr_pair *);
void csio_wr_copy_to_wrp(void *, struct csio_wr_pair *, uint32_t, uint32_t);
int csio_wr_issue(struct csio_hw *, int, bool);

/* Ingress-queue processing, by queue pointer or by queue index */
int csio_wr_process_iq(struct csio_hw *, struct csio_q *,
		       void (*)(struct csio_hw *, void *,
				uint32_t, struct csio_fl_dma_buf *,
				void *),
		       void *);
int csio_wr_process_iq_idx(struct csio_hw *, int,
			   void (*)(struct csio_hw *, void *,
				    uint32_t, struct csio_fl_dma_buf *,
				    void *),
			   void *);

/* SGE and WR-module init/teardown */
void csio_wr_sge_init(struct csio_hw *);
int csio_wrm_init(struct csio_wrm *, struct csio_hw *);
void csio_wrm_exit(struct csio_wrm *, struct csio_hw *);
  429. #endif /* ifndef __CSIO_WR_H__ */