bfa_ioc_ct.c
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

BFA_TRC_FILE(CNA, IOC_CT);

#define bfa_ioc_ct_sync_pos(__ioc)	\
		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH		16
#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
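
/*
 * The ioc_fail_sync register is split into two 16-bit halves: the low
 * half holds the per-PCI-function "sync acked" bits and the high half
 * holds the matching "sync required" bits, one bit per function.
 */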

/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_set_cur_ioc_fwstate(
			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_set_alt_ioc_fwstate(
			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);

static struct bfa_ioc_hwif_s hwif_ct;
static struct bfa_ioc_hwif_s hwif_ct2;

/*
 * Return true if the firmware of the current driver matches the running
 * firmware.
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/*
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/*
	 * The use count cannot be non-zero while the chip is still in the
	 * uninitialized state.
	 */
	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/*
	 * Check if another driver with a different firmware is active
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/*
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}

static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
	u32 usecnt;

	/*
	 * decrement usage count
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	WARN_ON(usecnt <= 0);

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_trc(ioc, usecnt);

	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
}

/*
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_is_cna(ioc)) {
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
		/* Wait for halt to take effect */
		readl(ioc->ioc_regs.ll_halt);
		readl(ioc->ioc_regs.alt_ll_halt);
	} else {
		writel(~0U, ioc->ioc_regs.err_set);
		readl(ioc->ioc_regs.err_set);
	}
}

/*
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/*
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/*
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
	ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT},
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT},
};

static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int	pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

static void
bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int	port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

/*
 * Initialize IOC to port mapping.
 */
#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}

static void
bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}

/*
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = readl(rb + FNC_PERS_REG);
	bfa_trc(ioc, r32);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	bfa_trc(ioc, r32);

	writel(r32, rb + FNC_PERS_REG);
}
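
/*
 * If the LPU read-status register is set, clear it by writing back a 1
 * and return TRUE to indicate a read status was pending.
 */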
bfa_boolean_t
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_read_stat);
	if (r32) {
		writel(1, ioc->ioc_regs.lpu_read_stat);
		return BFA_TRUE;
	}

	return BFA_FALSE;
}

/*
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);

	writel(0, ioc->ioc_regs.ioc_fail_sync);
	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static bfa_boolean_t
bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}

/*
 * Synchronized IOC failure processing routines
 */
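
/* Join the failure sync: set this function's sync-required bit. */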
static void
bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}
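
/*
 * Leave the failure sync: clear this function's sync-required and
 * sync-ack bits.
 */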
static void
bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
					bfa_ioc_ct_sync_pos(ioc);

	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}
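
/* Acknowledge the failure sync: set this function's sync-ack bit. */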
static void
bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
		ioc->ioc_regs.ioc_fail_sync);
}

static bfa_boolean_t
bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	uint32_t tmp_ackd;

	if (sync_ackd == 0)
		return BFA_TRUE;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
		!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
			ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC needs to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return BFA_FALSE;
}

/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
static void
bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
{
	hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
	hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
	hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
	hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
	hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
	hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
	hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
	hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
	hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
	hwif->ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate;
	hwif->ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate;
	hwif->ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate;
	hwif->ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate;
}

/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);

	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
	ioc->ioc_hwif = &hwif_ct;
}

/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);

	hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
	hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
	hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
	hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
	hwif_ct2.ioc_isr_mode_set = NULL;
	ioc->ioc_hwif = &hwif_ct2;
}

/*
 * Workaround for MSI-X resource allocation for catapult-2 with no asic block
 */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
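
/*
 * If the vector-count field is already programmed, only refresh the
 * mailbox-error vector index; otherwise carve out HOSTFN_MSIX_DEFAULT
 * vectors per PCI function, starting at pcifn * HOSTFN_MSIX_DEFAULT.
 */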
void
bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
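
/*
 * Bring up the CT ASIC PLLs: program the slow (SCLK) and fast (LCLK)
 * PLLs for FC or FCoE/Ethernet mode, release their soft resets, and
 * run the memory BIST.
 */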
bfa_status_t
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32	pll_sclk, pll_fclk, r32;
	bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
			__APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
	}
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
		__APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
		__APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}

static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropriately
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init don't clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);
}

static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for l_clk lock or delay 1ms
	 */
	udelay(1000);
}

static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32	r32;

	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}

void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
		rb + CT2_CSI_MAC_CONTROL_REG(0));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
		rb + CT2_CSI_MAC_CONTROL_REG(1));
}
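
/*
 * Enable access to the flash part: drive GPIO pin 0 low and configure
 * the pin as an output.
 */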
static void
bfa_ioc_ct2_enable_flash(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_GPIO_OUT_REG));
	writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
	r32 = readl((rb + PSS_GPIO_OE_REG));
	writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
}

#define CT2_NFC_MAX_DELAY	1000
#define CT2_NFC_PAUSE_MAX_DELAY	4000
#define CT2_NFC_VER_VALID	0x147
#define CT2_NFC_STATE_RUNNING	0x20000001
#define BFA_IOC_PLL_POLL	1000000

static bfa_boolean_t
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
	u32	r32;

	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
	if (r32 & __NFC_CONTROLLER_HALTED)
		return BFA_TRUE;

	return BFA_FALSE;
}
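
/*
 * Request an NFC halt and poll up to CT2_NFC_MAX_DELAY ms for the
 * controller to report the halted state.
 */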
static void
bfa_ioc_ct2_nfc_halt(void __iomem *rb)
{
	int	i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		if (bfa_ioc_ct2_nfc_halted(rb))
			break;
		udelay(1000);
	}
	WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
}

static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
	u32	r32;
	int	i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
		if (!(r32 & __NFC_CONTROLLER_HALTED))
			return;
		udelay(1000);
	}
	WARN_ON(1);
}

static void
bfa_ioc_ct2_clk_reset(void __iomem *rb)
{
	u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		(rb + CT2_APP_PLL_SCLK_CTL_REG));

	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		(rb + CT2_APP_PLL_LCLK_CTL_REG));
}
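
/*
 * Reset the clocks through the NFC: hold both LPUs in reset, request a
 * PLL reset/restart through the CSI firmware control register, then
 * poll the flash status register for the reset to start and complete.
 */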
static void
bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)
{
	u32 r32, i;

	r32 = readl((rb + PSS_CTL_REG));
	r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
	writel(r32, (rb + PSS_CTL_REG));

	writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG);

	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);

		if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
			break;
	}
	WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));

	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);

		if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
			break;
	}
	WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));

	r32 = readl(rb + CT2_CSI_FW_CTL_REG);
	WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
}

static void
bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
{
	u32 r32;
	int i;

	if (bfa_ioc_ct2_nfc_halted(rb))
		bfa_ioc_ct2_nfc_resume(rb);
	for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_STS_REG);
		if (r32 == CT2_NFC_STATE_RUNNING)
			return;
		udelay(1000);
	}

	r32 = readl(rb + CT2_NFC_STS_REG);
	WARN_ON(!(r32 == CT2_NFC_STATE_RUNNING));
}
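
/*
 * CT2 PLL bring-up takes one of three paths: if the WGN status
 * indicates a corrupted flash, reset the clocks and enable the flash
 * explicitly; if a valid NFC version is loaded and running, let the
 * NFC reset the clocks; otherwise halt the NFC and reset the clocks
 * from the host.
 */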
bfa_status_t
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32 wgn, r32, nfc_ver;

	wgn = readl(rb + CT2_WGN_STATUS);
	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		/*
		 * If flash is corrupted, enable flash explicitly
		 */
		bfa_ioc_ct2_clk_reset(rb);
		bfa_ioc_ct2_enable_flash(rb);

		bfa_ioc_ct2_mac_reset(rb);

		bfa_ioc_ct2_clk_reset(rb);
		bfa_ioc_ct2_enable_flash(rb);
	} else {
		nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

		if ((nfc_ver >= CT2_NFC_VER_VALID) &&
		    (wgn == (__A2T_AHB_LOAD | __WGN_READY))) {
			bfa_ioc_ct2_wait_till_nfc_running(rb);

			bfa_ioc_ct2_nfc_clk_reset(rb);
		} else {
			bfa_ioc_ct2_nfc_halt(rb);

			bfa_ioc_ct2_clk_reset(rb);
			bfa_ioc_ct2_mac_reset(rb);
			bfa_ioc_ct2_clk_reset(rb);
		}
	}

	/*
	 * The very first PCIe DMA Read done by LPU fails with a fatal error,
	 * when Address Translation Cache (ATC) has been enabled by system BIOS.
	 *
	 * Workaround:
	 * Disable Invalidated Tag Match Enable capability by setting the bit 26
	 * of CHIP_MISC_PRG to 0, by default it is set to 1.
	 */
	r32 = readl(rb + CT2_CHIP_MISC_PRG);
	writel((r32 & 0xfbffffff), (rb + CT2_CHIP_MISC_PRG));

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	/* For first time initialization, no need to clear interrupts */
	r32 = readl(rb + HOST_SEM5_REG);
	if (r32 & 0x1) {
		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		}
		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		}
	}

	bfa_ioc_ct2_mem_init(rb);

	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
	return BFA_STATUS_OK;
}
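
/*
 * Accessors for the firmware state register of this IOC and of the
 * alternate (other port's) IOC.
 */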
static void
bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
		enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
}

static void
bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
		enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
{
	return (enum bfi_ioc_state) readl(ioc->ioc_regs.alt_ioc_fwstate);
}