/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

BFA_TRC_FILE(CNA, IOC_CT);

#define bfa_ioc_ct_sync_pos(__ioc)	\
		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH	16
#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
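
/*
 * Editorial note on the layout implied by the macros above: the
 * ioc_fail_sync register holds two per-PCI-function bitmasks, the
 * "sync acked" bits in the low 16 bits and the "sync required" bits
 * in the high 16 bits. For PCI function 2, for example,
 * bfa_ioc_ct_sync_pos() yields 1 << 2 = 0x00000004 and
 * bfa_ioc_ct_sync_reqd_pos() yields 0x00000004 << 16 = 0x00040000.
 */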

/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_set_cur_ioc_fwstate(
			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_set_alt_ioc_fwstate(
			struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc);

static struct bfa_ioc_hwif_s hwif_ct;
static struct bfa_ioc_hwif_s hwif_ct2;

/*
 * Return true if firmware of current driver matches the running firmware.
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/*
	 * If the usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/*
	 * The use count cannot be non-zero while the chip is in the
	 * uninitialized state.
	 */
	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/*
	 * Check if another driver with a different firmware is active.
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/*
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}
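
/*
 * Editorial sketch of the locking protocol above (hedged summary, not
 * original source text): the SEM1 hardware semaphore
 * (ioc_usage_sem_reg) serializes access to the shared firmware
 * use-count register across PCI functions. Each path acquires the
 * semaphore with bfa_ioc_sem_get(), reads/updates ioc_usage_reg, and
 * releases the semaphore by writing 1 to it; the readl() just before
 * the release appears intended to ensure the posted use-count write
 * reaches the adapter before the semaphore is dropped.
 */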

static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
	u32 usecnt;

	/*
	 * decrement usage count
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	WARN_ON(usecnt <= 0);

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_trc(ioc, usecnt);

	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
}

/*
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_is_cna(ioc)) {
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
		/* Wait for halt to take effect */
		readl(ioc->ioc_regs.ll_halt);
		readl(ioc->ioc_regs.alt_ll_halt);
	} else {
		writel(~0U, ioc->ioc_regs.err_set);
		readl(ioc->ioc_regs.err_set);
	}
}

/*
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/*
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/*
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
	ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT},
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT},
};
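
/*
 * Editorial note: the original CT ASIC indexes its mailbox table by
 * PCI function (ct_fnreg[], four entries) and keeps separate per-port
 * command/status tables (ct_p0reg[]/ct_p1reg[]), while CT2 collapses
 * this into a single per-port table (ct2_reg[], two entries) that also
 * carries the LPU read-status register used by
 * bfa_ioc_ct2_lpu_read_stat().
 */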

static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

static void
bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

/*
 * Initialize IOC to port mapping.
 */
#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}
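
/*
 * Worked example (editorial, illustrative values): FNC_PERS_FN_SHIFT()
 * assigns each PCI function an 8-bit field in FNC_PERS_REG. For PCI
 * function 2 the shift is 2 * 8 = 16, so that function's port mapping
 * (and, in bfa_ioc_ct_isr_mode_set() below, its interrupt mode bits)
 * lives in bits 16..23 of the register.
 */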

static void
bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}

/*
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32, mode;

	r32 = readl(rb + FNC_PERS_REG);
	bfa_trc(ioc, r32);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	bfa_trc(ioc, r32);

	writel(r32, rb + FNC_PERS_REG);
}

bfa_boolean_t
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_read_stat);
	if (r32) {
		writel(1, ioc->ioc_regs.lpu_read_stat);
		return BFA_TRUE;
	}

	return BFA_FALSE;
}
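
/*
 * Editorial note: writing 1 back to lpu_read_stat after a non-zero
 * read suggests write-one-to-clear semantics for the LPU read-status
 * bit; the function reports whether a pending status was seen and
 * acknowledged.
 */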

/*
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);

	writel(0, ioc->ioc_regs.ioc_fail_sync);

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static bfa_boolean_t
bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}

/*
 * Synchronized IOC failure processing routines
 */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
			    bfa_ioc_ct_sync_pos(ioc);

	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
		ioc->ioc_regs.ioc_fail_sync);
}

static bfa_boolean_t
bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	uint32_t tmp_ackd;

	if (sync_ackd == 0)
		return BFA_TRUE;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
	    !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
			ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC needs to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return BFA_FALSE;
}
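
/*
 * Worked example (editorial, illustrative values): suppose PCI
 * functions 1 and 2 have both joined the sync (sync_reqd = 0x6) and
 * function 1 has already acked (sync_ackd = 0x2), i.e. ioc_fail_sync
 * reads 0x00060002. When function 2 reaches bfa_ioc_ct_sync_complete()
 * its own bit is set in sync_reqd but not in sync_ackd, so sync_ackd
 * is promoted to 0x6; sync_reqd == sync_ackd then holds, the acked
 * half of ioc_fail_sync is cleared, both fwstate registers are set to
 * BFI_IOC_FAIL, and BFA_TRUE is returned.
 */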

/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
static void
bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
{
	hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
	hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
	hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
	hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
	hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
	hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
	hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
	hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
	hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
	hwif->ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate;
	hwif->ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate;
	hwif->ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate;
	hwif->ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate;
}

/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);

	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
	ioc->ioc_hwif = &hwif_ct;
}

/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);

	hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
	hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
	hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
	hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
	hwif_ct2.ioc_isr_mode_set = NULL;
	ioc->ioc_hwif = &hwif_ct2;
}
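
/*
 * Usage sketch (editorial, hedged): per the comments above, these
 * setters are invoked from bfa_ioc_attach(), presumably with the
 * caller choosing bfa_ioc_set_ct_hwif() for Catapult and
 * bfa_ioc_set_ct2_hwif() for Catapult-2 adapters; afterwards the IOC
 * state machine reaches the ASIC through the ops in ioc->ioc_hwif.
 */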

/*
 * Workaround for MSI-X resource allocation for catapult-2 with no asic block
 */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
void
bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
	       HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
	       rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
	       rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
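
/*
 * Worked example (editorial, illustrative values): the NUMVT field
 * occupies bits 11..21 and apparently encodes the vector count minus
 * one. With HOSTFN_MSIX_DEFAULT = 64, PCI function 1 writes
 * __MSIX_VT_NUMVT_(63) | 64 = (63 << 11) | 64 = 0x0001f840 to
 * HOSTFN_MSIX_VT_OFST_NUMVT, i.e. 64 vectors starting at offset 64,
 * giving that function MSI-X vectors 64..127.
 */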

bfa_status_t
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32 pll_sclk, pll_fclk, r32;
	bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
		       __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
	}
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
	       rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
	       rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
	       __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
	       __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}
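
/*
 * Sequence note (editorial summary of bfa_ioc_ct_pll_init() above):
 * the routine programs the operating mode, parks both IOC state
 * registers in BFI_IOC_UNINIT, masks and clears host interrupts,
 * configures and enables both PLLs while holding their logic soft
 * reset, waits 2 ms, releases the soft reset, and then takes local
 * memory out of reset and runs the EDRAM BIST.
 */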

static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropriately
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init don't clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);
}

static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for l_clk lock or delay 1ms
	 */
	udelay(1000);
}

static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}

void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
	       rb + CT2_CSI_MAC_CONTROL_REG(0));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
	       rb + CT2_CSI_MAC_CONTROL_REG(1));
}

static void
bfa_ioc_ct2_enable_flash(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_GPIO_OUT_REG));
	writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
	r32 = readl((rb + PSS_GPIO_OE_REG));
	writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
}

#define CT2_NFC_MAX_DELAY	1000
#define CT2_NFC_PAUSE_MAX_DELAY	4000
#define CT2_NFC_VER_VALID	0x147
#define CT2_NFC_STATE_RUNNING	0x20000001
#define BFA_IOC_PLL_POLL	1000000
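
/*
 * Timing note (editorial): the NFC polling loops below delay 1000 us
 * per iteration, so CT2_NFC_MAX_DELAY (1000) bounds the halt/resume
 * waits at roughly 1 s and CT2_NFC_PAUSE_MAX_DELAY (4000) bounds the
 * wait-for-running loop at roughly 4 s. BFA_IOC_PLL_POLL (1000000), by
 * contrast, caps busy-poll iterations with no per-iteration delay in
 * bfa_ioc_ct2_nfc_clk_reset().
 */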

static bfa_boolean_t
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
	u32 r32;

	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
	if (r32 & __NFC_CONTROLLER_HALTED)
		return BFA_TRUE;

	return BFA_FALSE;
}

static void
bfa_ioc_ct2_nfc_halt(void __iomem *rb)
{
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		if (bfa_ioc_ct2_nfc_halted(rb))
			break;
		udelay(1000);
	}
	WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
}

static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
	u32 r32;
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
		if (!(r32 & __NFC_CONTROLLER_HALTED))
			return;
		udelay(1000);
	}
	WARN_ON(1);
}

static void
bfa_ioc_ct2_clk_reset(void __iomem *rb)
{
	u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
	       (rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
	       (rb + CT2_APP_PLL_LCLK_CTL_REG));
}

static void
bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)
{
	u32 r32, i;

	r32 = readl((rb + PSS_CTL_REG));
	r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
	writel(r32, (rb + PSS_CTL_REG));

	writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG);

	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
		if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
			break;
	}
	WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));

	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
		if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
			break;
	}
	WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));

	r32 = readl(rb + CT2_CSI_FW_CTL_REG);
	WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
}

static void
bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
{
	u32 r32;
	int i;

	if (bfa_ioc_ct2_nfc_halted(rb))
		bfa_ioc_ct2_nfc_resume(rb);
	for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_STS_REG);
		if (r32 == CT2_NFC_STATE_RUNNING)
			return;
		udelay(1000);
	}

	r32 = readl(rb + CT2_NFC_STS_REG);
	WARN_ON(!(r32 == CT2_NFC_STATE_RUNNING));
}

bfa_status_t
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32 wgn, r32, nfc_ver;

	wgn = readl(rb + CT2_WGN_STATUS);
	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		/*
		 * If flash is corrupted, enable flash explicitly
		 */
		bfa_ioc_ct2_clk_reset(rb);
		bfa_ioc_ct2_enable_flash(rb);

		bfa_ioc_ct2_mac_reset(rb);

		bfa_ioc_ct2_clk_reset(rb);
		bfa_ioc_ct2_enable_flash(rb);
	} else {
		nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

		if ((nfc_ver >= CT2_NFC_VER_VALID) &&
		    (wgn == (__A2T_AHB_LOAD | __WGN_READY))) {
			bfa_ioc_ct2_wait_till_nfc_running(rb);

			bfa_ioc_ct2_nfc_clk_reset(rb);
		} else {
			bfa_ioc_ct2_nfc_halt(rb);

			bfa_ioc_ct2_clk_reset(rb);
			bfa_ioc_ct2_mac_reset(rb);
			bfa_ioc_ct2_clk_reset(rb);
		}
	}

	/*
	 * The very first PCIe DMA read done by the LPU fails with a fatal
	 * error when the Address Translation Cache (ATC) has been enabled
	 * by the system BIOS.
	 *
	 * Workaround:
	 * Disable the Invalidated Tag Match Enable capability by setting
	 * bit 26 of CHIP_MISC_PRG to 0 (by default it is set to 1).
	 */
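	/*
	 * Editorial check: bit 26 is 1 << 26 = 0x04000000, so clearing it
	 * means ANDing with ~0x04000000 = 0xfbffffff, which is exactly the
	 * mask applied below.
	 */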
	r32 = readl(rb + CT2_CHIP_MISC_PRG);
	writel((r32 & 0xfbffffff), (rb + CT2_CHIP_MISC_PRG));

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	/* For first time initialization, no need to clear interrupts */
	r32 = readl(rb + HOST_SEM5_REG);
	if (r32 & 0x1) {
		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		}
		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		}
	}

	bfa_ioc_ct2_mem_init(rb);

	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
	return BFA_STATUS_OK;
}

static void
bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc,
			       enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
}

static void
bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc,
			       enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
}