bnx2x_init_ops.h 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845
/* bnx2x_init_ops.h: Qlogic Everest network driver.
 * Static functions needed during the initialization.
 * This file is "included" in bnx2x_main.c.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Vladislav Zolotarov
 */
#ifndef BNX2X_INIT_OPS_H
#define BNX2X_INIT_OPS_H

/* Fallback stubs so this header can be parsed in contexts that do not
 * supply the real driver macros; bnx2x_main.c defines the real ones
 * before including this file.
 */
#ifndef BP_ILT
#define BP_ILT(bp)	NULL
#endif

#ifndef BP_FUNC
#define BP_FUNC(bp)	0
#endif

#ifndef BP_PORT
#define BP_PORT(bp)	0
#endif

#ifndef BNX2X_ILT_FREE
#define BNX2X_ILT_FREE(x, y, sz)
#endif

#ifndef BNX2X_ILT_ZALLOC
#define BNX2X_ILT_ZALLOC(x, y, sz)
#endif

/* identity fallback; the driver presumably supplies a real log2 -
 * see the use in bnx2x_ilt_init_client_psz() (TODO confirm)
 */
#ifndef ILOG2
#define ILOG2(x)	x
#endif
  36. static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
  37. static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
  38. static void bnx2x_write_dmae_phys_len(struct bnx2x *bp,
  39. dma_addr_t phys_addr, u32 addr,
  40. u32 len);
  41. static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr,
  42. const u32 *data, u32 len)
  43. {
  44. u32 i;
  45. for (i = 0; i < len; i++)
  46. REG_WR(bp, addr + i*4, data[i]);
  47. }
  48. static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr,
  49. const u32 *data, u32 len)
  50. {
  51. u32 i;
  52. for (i = 0; i < len; i++)
  53. bnx2x_reg_wr_ind(bp, addr + i*4, data[i]);
  54. }
  55. static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
  56. u8 wb)
  57. {
  58. if (bp->dmae_ready)
  59. bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
  60. /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
  61. else if (wb && CHIP_IS_E1(bp))
  62. bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
  63. /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
  64. else
  65. bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
  66. }
  67. static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill,
  68. u32 len, u8 wb)
  69. {
  70. u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
  71. u32 buf_len32 = buf_len/4;
  72. u32 i;
  73. memset(GUNZIP_BUF(bp), (u8)fill, buf_len);
  74. for (i = 0; i < len; i += buf_len32) {
  75. u32 cur_len = min(buf_len32, len - i);
  76. bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb);
  77. }
  78. }
  79. static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
  80. {
  81. if (bp->dmae_ready)
  82. bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
  83. /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
  84. else if (CHIP_IS_E1(bp))
  85. bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
  86. /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
  87. else
  88. bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
  89. }
  90. static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
  91. const u32 *data, u32 len64)
  92. {
  93. u32 buf_len32 = FW_BUF_SIZE/4;
  94. u32 len = len64*2;
  95. u64 data64 = 0;
  96. u32 i;
  97. /* 64 bit value is in a blob: first low DWORD, then high DWORD */
  98. data64 = HILO_U64((*(data + 1)), (*data));
  99. len64 = min((u32)(FW_BUF_SIZE/8), len64);
  100. for (i = 0; i < len64; i++) {
  101. u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i;
  102. *pdata = data64;
  103. }
  104. for (i = 0; i < len; i += buf_len32) {
  105. u32 cur_len = min(buf_len32, len - i);
  106. bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len);
  107. }
  108. }
/*********************************************************
   There are different blobs for each PRAM section.
   In addition, each blob write operation is divided into a few operations
   in order to decrease the amount of phys. contiguous buffer needed.
   Thus, when we select a blob the address may be with some offset
   from the beginning of PRAM section.
   The same holds for the INT_TABLE sections.
**********************************************************/
/* True when @addr lies inside the 0x400-byte INT_TABLE window starting
 * at @base.  NOTE: expands to a bare "if", so it must head an if/else
 * chain (see bnx2x_sel_blob()).
 */
#define IF_IS_INT_TABLE_ADDR(base, addr) \
	if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
/* Same check for the 0x40000-byte PRAM window starting at @base. */
#define IF_IS_PRAM_ADDR(base, addr) \
	if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
/* Map a device address to the firmware blob that backs it.
 *
 * The IF_IS_*_ADDR macros above expand to bare "if" statements, so the
 * body is one if/else-if chain over the INT_TABLE and PRAM windows of
 * the T/C/U/X SEM blocks.  When @addr matches no window, @data is
 * returned as passed in (callers pass NULL).
 */
static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr,
				const u8 *data)
{
	IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
		data = INIT_TSEM_INT_TABLE_DATA(bp);
	else
	IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
		data = INIT_CSEM_INT_TABLE_DATA(bp);
	else
	IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
		data = INIT_USEM_INT_TABLE_DATA(bp);
	else
	IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
		data = INIT_XSEM_INT_TABLE_DATA(bp);
	else
	IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
		data = INIT_TSEM_PRAM_DATA(bp);
	else
	IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
		data = INIT_CSEM_PRAM_DATA(bp);
	else
	IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
		data = INIT_USEM_PRAM_DATA(bp);
	else
	IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
		data = INIT_XSEM_PRAM_DATA(bp);

	return data;
}
  149. extern void bnx2x_init_wr_wb(struct bnx2x *, u32, const u32 *, u32);
  150. /*(DEBLOBBED)*/
  151. static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
  152. u32 val_hi)
  153. {
  154. u32 wb_write[2];
  155. wb_write[0] = val_lo;
  156. wb_write[1] = val_hi;
  157. REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
  158. }
/* Decompress a zipped init blob and write the result to the chip.
 *
 * @addr:     target device address; also selects which blob to use
 * @len:      length handed to bnx2x_gunzip() (presumably bytes of
 *	      compressed data - confirm against bnx2x_main.c)
 * @blob_off: offset into the selected blob, in dwords
 *
 * On gunzip failure the write is silently skipped.
 */
static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
			     u32 blob_off)
{
	const u8 *data = NULL;
	int rc;
	u32 i;

	data = bnx2x_sel_blob(bp, addr, data) + blob_off*4;

	rc = bnx2x_gunzip(bp, data, len);
	if (rc)
		return;

	/* gunzip_outlen is in dwords */
	len = GUNZIP_OUTLEN(bp);
	/* byte-swap the decompressed dwords in place; __force keeps
	 * sparse quiet since the buffer is typed as plain u32
	 */
	for (i = 0; i < len; i++)
		((u32 *)GUNZIP_BUF(bp))[i] = (__force u32)
				cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);

	bnx2x_write_big_buf_wb(bp, addr, len);
}
  176. /*(DEBLOBBED)*/
/****************************************************************************
* PXP Arbiter
****************************************************************************/
/*
 * This code configures the PCI read/write arbiter
 * which implements a weighted round robin
 * between the virtual queues in the chip.
 *
 * The values were derived for each PCI max payload and max request size.
 * since max payload and max request size are only known at run time,
 * this is done as a separate init stage.
 */

/* number of write / read arbiter queues covered by the tables below */
#define NUM_WR_Q	13
#define NUM_RD_Q	29
/* highest supported max-request (read) / max-payload (write) order */
#define MAX_RD_ORD	3
#define MAX_WR_ORD	2
/* configuration for one arbiter queue */
struct arb_line {
	int l;		/* written to the PXP2 *_L bandwidth register */
	int add;	/* written to the *_ADD register */
	int ubound;	/* written to the *_UBOUND / *_UB register */
};
/* derived configuration for each read queue for each max request size */
/* Row = read queue (1-based in the margin comments), column = max read
 * order 0..MAX_RD_ORD; each triplet is {l, add, ubound}.
 */
static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
/* 1 */	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {4, 8, 4},   {4, 8, 4},    {4, 8, 4},    {4, 8, 4}   },
	{ {4, 3, 3},   {4, 3, 3},    {4, 3, 3},    {4, 3, 3}   },
	{ {8, 3, 6},   {16, 3, 11},  {16, 3, 11},  {16, 3, 11} },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {64, 3, 41} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {64, 3, 41} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {64, 3, 41} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {64, 3, 41} },
/* 10 */{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 64, 6},  {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
/* 20 */{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 3, 6},   {16, 3, 11},  {32, 3, 21},  {32, 3, 21} },
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
};
/* derived configuration for each write queue for each max request size */
/* Row = write queue, column = max write order 0..MAX_WR_ORD; each
 * triplet is {l, add, ubound}.
 */
static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
/* 1 */	{ {4, 6, 3},   {4, 6, 3},    {4, 6, 3}   },
	{ {4, 2, 3},   {4, 2, 3},    {4, 2, 3}   },
	{ {8, 2, 6},   {16, 2, 11},  {16, 2, 11} },
	{ {8, 2, 6},   {16, 2, 11},  {32, 2, 21} },
	{ {8, 2, 6},   {16, 2, 11},  {32, 2, 21} },
	{ {8, 2, 6},   {16, 2, 11},  {32, 2, 21} },
	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
	{ {8, 2, 6},   {16, 2, 11},  {16, 2, 11} },
	{ {8, 2, 6},   {16, 2, 11},  {16, 2, 11} },
/* 10 */{ {8, 9, 6},   {16, 9, 11},  {32, 9, 21} },
	{ {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
	{ {8, 9, 6},   {16, 9, 11},  {16, 9, 11} },
	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
};
/* register addresses for read queues */
/* One {l, add, ubound} register triplet per read queue except the last
 * (queue 29 is packed into PXP2_REG_PSWRQ_BW_WR by bnx2x_init_pxp_arb).
 * NOTE(review): queue 21 has no entry (L20 is followed by L22) - this
 * matches the upstream driver; confirm against the register map before
 * treating it as a gap.
 */
static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
/* 1 */	{PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
		PXP2_REG_RQ_BW_RD_UBOUND0},
	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
		PXP2_REG_RQ_BW_RD_UBOUND4},
	{PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
		PXP2_REG_RQ_BW_RD_UBOUND5},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
	{PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
		PXP2_REG_RQ_BW_RD_UBOUND12},
	{PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
		PXP2_REG_RQ_BW_RD_UBOUND13},
	{PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
		PXP2_REG_RQ_BW_RD_UBOUND14},
	{PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
		PXP2_REG_RQ_BW_RD_UBOUND15},
	{PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
		PXP2_REG_RQ_BW_RD_UBOUND16},
	{PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
		PXP2_REG_RQ_BW_RD_UBOUND17},
	{PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
		PXP2_REG_RQ_BW_RD_UBOUND18},
/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
		PXP2_REG_RQ_BW_RD_UBOUND19},
	{PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
		PXP2_REG_RQ_BW_RD_UBOUND20},
	{PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
		PXP2_REG_RQ_BW_RD_UBOUND22},
	{PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
		PXP2_REG_RQ_BW_RD_UBOUND23},
	{PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
		PXP2_REG_RQ_BW_RD_UBOUND24},
	{PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
		PXP2_REG_RQ_BW_RD_UBOUND25},
	{PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
		PXP2_REG_RQ_BW_RD_UBOUND26},
	{PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
		PXP2_REG_RQ_BW_RD_UBOUND27},
	{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28}
};
/* register addresses for write queues */
/* One {l, add, ubound} register triplet per write queue except the last
 * (packed into PXP2_REG_PSWRQ_BW_RD by bnx2x_init_pxp_arb).  Queues
 * 29/30 have dedicated RQ_BW_WR registers; the PSWRQ ones are shared
 * with read queues and written with shifted fields.
 */
static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
/* 1 */	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
		PXP2_REG_PSWRQ_BW_UB1},
	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
		PXP2_REG_PSWRQ_BW_UB2},
	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
		PXP2_REG_PSWRQ_BW_UB3},
	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
		PXP2_REG_PSWRQ_BW_UB6},
	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
		PXP2_REG_PSWRQ_BW_UB7},
	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
		PXP2_REG_PSWRQ_BW_UB8},
	{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
		PXP2_REG_PSWRQ_BW_UB9},
	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
		PXP2_REG_PSWRQ_BW_UB10},
	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
		PXP2_REG_PSWRQ_BW_UB11},
/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
		PXP2_REG_PSWRQ_BW_UB28},
	{PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
		PXP2_REG_RQ_BW_WR_UBOUND29},
	{PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
		PXP2_REG_RQ_BW_WR_UBOUND30}
};
/* Configure the PXP2 read/write arbiter (weighted round robin between
 * the chip's virtual queues).
 *
 * @r_order: max read request size code, clamped to MAX_RD_ORD
 * @w_order: max payload size code, clamped to MAX_WR_ORD and forced
 *	     to 0 on FPGA emulation
 *
 * Each queue's {l, add, ubound} triplet comes from the tables above,
 * selected by the (clamped) order.
 */
static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order,
			       int w_order)
{
	u32 val, i;

	if (r_order > MAX_RD_ORD) {
		DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
		   r_order, MAX_RD_ORD);
		r_order = MAX_RD_ORD;
	}
	if (w_order > MAX_WR_ORD) {
		DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n",
		   w_order, MAX_WR_ORD);
		w_order = MAX_WR_ORD;
	}
	if (CHIP_REV_IS_FPGA(bp)) {
		DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
		w_order = 0;
	}
	DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);

	/* program every read queue except the last (handled below) */
	for (i = 0; i < NUM_RD_Q-1; i++) {
		REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
		REG_WR(bp, read_arb_addr[i].add,
		       read_arb_data[i][r_order].add);
		REG_WR(bp, read_arb_addr[i].ubound,
		       read_arb_data[i][r_order].ubound);
	}

	for (i = 0; i < NUM_WR_Q-1; i++) {
		/* queues 29/30 own their registers; the rest share a
		 * register with a read queue, so read-modify-write the
		 * shifted write fields
		 */
		if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
		    (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
			REG_WR(bp, write_arb_addr[i].l,
			       write_arb_data[i][w_order].l);
			REG_WR(bp, write_arb_addr[i].add,
			       write_arb_data[i][w_order].add);
			REG_WR(bp, write_arb_addr[i].ubound,
			       write_arb_data[i][w_order].ubound);
		} else {
			val = REG_RD(bp, write_arb_addr[i].l);
			REG_WR(bp, write_arb_addr[i].l,
			       val | (write_arb_data[i][w_order].l << 10));

			val = REG_RD(bp, write_arb_addr[i].add);
			REG_WR(bp, write_arb_addr[i].add,
			       val | (write_arb_data[i][w_order].add << 10));

			val = REG_RD(bp, write_arb_addr[i].ubound);
			REG_WR(bp, write_arb_addr[i].ubound,
			       val | (write_arb_data[i][w_order].ubound << 7));
		}
	}

	/* The last queue of each direction is packed into one register.
	 * NOTE(review): write-queue data goes to PSWRQ_BW_RD and
	 * read-queue data to PSWRQ_BW_WR - looks swapped, but matches
	 * the upstream driver; confirm against the register spec before
	 * "fixing" it.
	 */
	val = write_arb_data[NUM_WR_Q-1][w_order].add;
	val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
	val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
	REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);

	val = read_arb_data[NUM_RD_Q-1][r_order].add;
	val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
	val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
	REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);

	/* publish the max burst sizes */
	REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
	REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
	REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
	REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);

	if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
		REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);

	/* USDM doorbell-push threshold scales with the chip and order */
	if (CHIP_IS_E3(bp))
		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
	else
		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));

	if (!CHIP_IS_E1(bp)) {
		/* MPS w_order optimal TH presently TH
		 * 128 0 0 2
		 * 256 1 1 3
		 * >=512 2 2 3
		 */
		/* DMAE is special */
		if (!CHIP_IS_E1H(bp)) {
			/* E2 can use optimal TH */
			val = w_order;
			REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
		} else {
			/* NOTE(review): DMAE is written the constant 2
			 * while val (2 or 3) is used for the other
			 * clients below; matches upstream - presumably
			 * intentional for E1H.
			 */
			val = ((w_order == 0) ? 2 : 3);
			REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
		}

		REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
		REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
		REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
		REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
		REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
	}

	/* Validate number of tags supported by device */
#define PCIE_REG_PCIER_TL_HDR_FC_ST	0x2980
	val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
	val &= 0xFF;
	if (val <= 0x20)
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
}
  433. /****************************************************************************
  434. * ILT management
  435. ****************************************************************************/
  436. /*
  437. * This codes hides the low level HW interaction for ILT management and
  438. * configuration. The API consists of a shadow ILT table which is set by the
  439. * driver and a set of routines to use it to configure the HW.
  440. *
  441. */
/* ILT HW init operations */

/* ILT memory management operations */
#define ILT_MEMOP_ALLOC		0
#define ILT_MEMOP_FREE		1

/* the phys address is shifted right 12 bits and has an added
 * 1=valid bit added to the 53rd bit
 * then since this is a wide register(TM)
 * we split it into two 32 bit writes
 */
#define ILT_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ILT_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
/* pack first/last line numbers into the E1 per-function L2P register
 * layout (last in bits [..:10], first in the low bits)
 */
#define ILT_RANGE(f, l)		(((l) << 10) | f)
  454. static int bnx2x_ilt_line_mem_op(struct bnx2x *bp,
  455. struct ilt_line *line, u32 size, u8 memop)
  456. {
  457. if (memop == ILT_MEMOP_FREE) {
  458. BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
  459. return 0;
  460. }
  461. BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
  462. if (!line->page)
  463. return -1;
  464. line->size = size;
  465. return 0;
  466. }
  467. static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
  468. u8 memop)
  469. {
  470. int i, rc;
  471. struct bnx2x_ilt *ilt = BP_ILT(bp);
  472. struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
  473. if (!ilt || !ilt->lines)
  474. return -1;
  475. if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
  476. return 0;
  477. for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
  478. rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i],
  479. ilt_cli->page_size, memop);
  480. }
  481. return rc;
  482. }
  483. static int bnx2x_ilt_mem_op_cnic(struct bnx2x *bp, u8 memop)
  484. {
  485. int rc = 0;
  486. if (CONFIGURE_NIC_MODE(bp))
  487. rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
  488. if (!rc)
  489. rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
  490. return rc;
  491. }
  492. static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
  493. {
  494. int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
  495. if (!rc)
  496. rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
  497. if (!rc && CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
  498. rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
  499. return rc;
  500. }
  501. static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
  502. dma_addr_t page_mapping)
  503. {
  504. u32 reg;
  505. if (CHIP_IS_E1(bp))
  506. reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8;
  507. else
  508. reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
  509. bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
  510. }
  511. static void bnx2x_ilt_line_init_op(struct bnx2x *bp,
  512. struct bnx2x_ilt *ilt, int idx, u8 initop)
  513. {
  514. dma_addr_t null_mapping;
  515. int abs_idx = ilt->start_line + idx;
  516. switch (initop) {
  517. case INITOP_INIT:
  518. /* set in the init-value array */
  519. case INITOP_SET:
  520. bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);
  521. break;
  522. case INITOP_CLEAR:
  523. null_mapping = 0;
  524. bnx2x_ilt_line_wr(bp, abs_idx, null_mapping);
  525. break;
  526. }
  527. }
/* Program the ILT range ("boundary") registers for one client.
 *
 * On E1 the first/last lines are packed into a single per-function
 * PSWRQ L2P register via ILT_RANGE(); later chips expose separate
 * FIRST/LAST ILT registers per client.
 */
static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
				      struct ilt_client_info *ilt_cli,
				      u32 ilt_start, u8 initop)
{
	u32 start_reg = 0;
	u32 end_reg = 0;

	/* The boundary is either SET or INIT,
	   CLEAR => SET and for now SET ~~ INIT */

	/* find the appropriate regs */
	if (CHIP_IS_E1(bp)) {
		switch (ilt_cli->client_num) {
		case ILT_CLIENT_CDU:
			start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
			break;
		case ILT_CLIENT_QM:
			start_reg = PXP2_REG_PSWRQ_QM0_L2P;
			break;
		case ILT_CLIENT_SRC:
			start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
			break;
		case ILT_CLIENT_TM:
			start_reg = PXP2_REG_PSWRQ_TM0_L2P;
			break;
		}
		REG_WR(bp, start_reg + BP_FUNC(bp)*4,
		       ILT_RANGE((ilt_start + ilt_cli->start),
				 (ilt_start + ilt_cli->end)));
	} else {
		switch (ilt_cli->client_num) {
		case ILT_CLIENT_CDU:
			start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
			end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
			break;
		case ILT_CLIENT_QM:
			start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
			end_reg = PXP2_REG_RQ_QM_LAST_ILT;
			break;
		case ILT_CLIENT_SRC:
			start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
			end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
			break;
		case ILT_CLIENT_TM:
			start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
			end_reg = PXP2_REG_RQ_TM_LAST_ILT;
			break;
		}
		REG_WR(bp, start_reg, (ilt_start + ilt_cli->start));
		REG_WR(bp, end_reg, (ilt_start + ilt_cli->end));
	}
}
  578. static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
  579. struct bnx2x_ilt *ilt,
  580. struct ilt_client_info *ilt_cli,
  581. u8 initop)
  582. {
  583. int i;
  584. if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
  585. return;
  586. for (i = ilt_cli->start; i <= ilt_cli->end; i++)
  587. bnx2x_ilt_line_init_op(bp, ilt, i, initop);
  588. /* init/clear the ILT boundries */
  589. bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
  590. }
  591. static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
  592. struct ilt_client_info *ilt_cli, u8 initop)
  593. {
  594. struct bnx2x_ilt *ilt = BP_ILT(bp);
  595. bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop);
  596. }
  597. static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
  598. int cli_num, u8 initop)
  599. {
  600. struct bnx2x_ilt *ilt = BP_ILT(bp);
  601. struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
  602. bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
  603. }
/* Program HW ILT lines for the CNIC clients: SRC (only when NIC mode
 * is configured) and TM (always).
 */
static void bnx2x_ilt_init_op_cnic(struct bnx2x *bp, u8 initop)
{
	if (CONFIGURE_NIC_MODE(bp))
		bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
}
/* Program HW ILT lines for the base clients: CDU, QM and - when CNIC
 * is supported but NIC mode is not configured - SRC.
 */
static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
{
	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
	if (CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
		bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
}
/* Program the page-size register of one ILT client.
 *
 * @psz_reg: the client's PXP2 *_P_SIZE register.  The value written is
 * ILOG2(page_size >> 12), i.e. the page size encoded relative to 4KB
 * units (assuming the real ILOG2 is a log2 - see the fallback stub at
 * the top of this file).
 */
static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
				      u32 psz_reg, u8 initop)
{
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];

	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
		return;

	switch (initop) {
	case INITOP_INIT:
		/* set in the init-value array */
		/* fallthrough */
	case INITOP_SET:
		REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12));
		break;
	case INITOP_CLEAR:
		break;
	}
}
  634. /*
  635. * called during init common stage, ilt clients should be initialized
  636. * prioir to calling this function
  637. */
  638. static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
  639. {
  640. bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
  641. PXP2_REG_RQ_CDU_P_SIZE, initop);
  642. bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM,
  643. PXP2_REG_RQ_QM_P_SIZE, initop);
  644. bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC,
  645. PXP2_REG_RQ_SRC_P_SIZE, initop);
  646. bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM,
  647. PXP2_REG_RQ_TM_P_SIZE, initop);
  648. }
/****************************************************************************
* QM initializations
****************************************************************************/
#define QM_QUEUES_PER_FUNC	16 /* E1 has 32, but only 16 are used */
#define QM_INIT_MIN_CID_COUNT	31
/* QM setup is only performed when the CID count exceeds the minimum */
#define QM_INIT(cid_cnt)	(cid_cnt > QM_INIT_MIN_CID_COUNT)
  655. /* called during init port stage */
  656. static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
  657. u8 initop)
  658. {
  659. int port = BP_PORT(bp);
  660. if (QM_INIT(qm_cid_count)) {
  661. switch (initop) {
  662. case INITOP_INIT:
  663. /* set in the init-value array */
  664. case INITOP_SET:
  665. REG_WR(bp, QM_REG_CONNNUM_0 + port*4,
  666. qm_cid_count/16 - 1);
  667. break;
  668. case INITOP_CLEAR:
  669. break;
  670. }
  671. }
  672. }
  673. static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count,
  674. u32 base_reg, u32 reg)
  675. {
  676. int i;
  677. u32 wb_data[2] = {0, 0};
  678. for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
  679. REG_WR(bp, base_reg + i*4,
  680. qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
  681. bnx2x_init_wr_wb(bp, reg + i*8, wb_data, 2);
  682. }
  683. }
  684. /* called during init common stage */
  685. static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
  686. u8 initop)
  687. {
  688. if (!QM_INIT(qm_cid_count))
  689. return;
  690. switch (initop) {
  691. case INITOP_INIT:
  692. /* set in the init-value array */
  693. case INITOP_SET:
  694. bnx2x_qm_set_ptr_table(bp, qm_cid_count,
  695. QM_REG_BASEADDR, QM_REG_PTRTBL);
  696. if (CHIP_IS_E1H(bp))
  697. bnx2x_qm_set_ptr_table(bp, qm_cid_count,
  698. QM_REG_BASEADDR_EXT_A,
  699. QM_REG_PTRTBL_EXT_A);
  700. break;
  701. case INITOP_CLEAR:
  702. break;
  703. }
  704. }
  705. /****************************************************************************
  706. * SRC initializations
  707. ****************************************************************************/
  708. /* called during init func stage */
  709. static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
  710. dma_addr_t t2_mapping, int src_cid_count)
  711. {
  712. int i;
  713. int port = BP_PORT(bp);
  714. /* Initialize T2 */
  715. for (i = 0; i < src_cid_count-1; i++)
  716. t2[i].next = (u64)(t2_mapping +
  717. (i+1)*sizeof(struct src_ent));
  718. /* tell the searcher where the T2 table is */
  719. REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
  720. bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16,
  721. U64_LO(t2_mapping), U64_HI(t2_mapping));
  722. bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16,
  723. U64_LO((u64)t2_mapping +
  724. (src_cid_count-1) * sizeof(struct src_ent)),
  725. U64_HI((u64)t2_mapping +
  726. (src_cid_count-1) * sizeof(struct src_ent)));
  727. }
  728. #endif /* BNX2X_INIT_OPS_H */