/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

/* MSGQ module source file. */

#include "bfi.h"
#include "bfa_msgq.h"
#include "bfa_ioc.h"

#define call_cmdq_ent_cbfn(_cmdq_ent, _status)				\
{									\
	bfa_msgq_cmdcbfn_t cbfn;					\
	void *cbarg;							\
	cbfn = (_cmdq_ent)->cbfn;					\
	cbarg = (_cmdq_ent)->cbarg;					\
	(_cmdq_ent)->cbfn = NULL;					\
	(_cmdq_ent)->cbarg = NULL;					\
	if (cbfn) {							\
		cbfn(cbarg, (_status));					\
	}								\
}

static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);

enum cmdq_event {
	CMDQ_E_START = 1,
	CMDQ_E_STOP = 2,
	CMDQ_E_FAIL = 3,
	CMDQ_E_POST = 4,
	CMDQ_E_INIT_RESP = 5,
	CMDQ_E_DB_READY = 6,
};
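
/*
 * Command-queue state machine (summary of the handlers below):
 * stopped -> init_wait on CMDQ_E_START; init_wait -> ready or dbell_wait on
 * CMDQ_E_INIT_RESP (dbell_wait if a post arrived while waiting); ready ->
 * dbell_wait on CMDQ_E_POST; dbell_wait -> ready on CMDQ_E_DB_READY, or back
 * to dbell_wait if further posts were coalesced meanwhile. Any state returns
 * to stopped on CMDQ_E_STOP or CMDQ_E_FAIL.
 */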
bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
			enum cmdq_event);

static void
cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
{
	struct bfa_msgq_cmd_entry *cmdq_ent;

	cmdq->producer_index = 0;
	cmdq->consumer_index = 0;
	cmdq->flags = 0;
	cmdq->token = 0;
	cmdq->offset = 0;
	cmdq->bytes_to_copy = 0;
	while (!list_empty(&cmdq->pending_q)) {
		cmdq_ent = list_first_entry(&cmdq->pending_q,
					    struct bfa_msgq_cmd_entry, qe);
		list_del(&cmdq_ent->qe);
		call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
	}
}

static void
cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_START:
		bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
		break;

	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		/* No-op */
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_wc_down(&cmdq->msgq->init_wc);
}

static void
cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_INIT_RESP:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}

static void
cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}

static void
cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_DB_READY:
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
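
/*
 * Doorbell path: the new producer index is sent to the IOC over the mailbox
 * (BFI_MSGQ_H2I_DOORBELL_PI). Once the mailbox entry is consumed, the queued
 * callback fires CMDQ_E_DB_READY; posts that arrive while a doorbell is in
 * flight are coalesced via BFA_MSGQ_CMDQ_F_DB_UPDATE and rung in one go.
 */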
static void
bfa_msgq_cmdq_dbell_ready(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;

	bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
}

static void
bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.cmdq_pi = htons(cmdq->producer_index);

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
				bfa_msgq_cmdq_dbell_ready, cmdq)) {
		bfa_msgq_cmdq_dbell_ready(cmdq);
	}
}
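
/*
 * Copy one command into the circular command queue. The message is split
 * into BFI_MSGQ_CMD_ENTRY_SIZE chunks and producer_index wraps around
 * cmdq->depth; both callers check BFA_MSGQ_FREE_CNT() first, so the copy
 * does not overrun the consumer index.
 */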
static void
__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
{
	size_t len = cmd->msg_size;
	int num_entries = 0;
	size_t to_copy;
	u8 *src, *dst;

	src = (u8 *)cmd->msg_hdr;
	dst = (u8 *)cmdq->addr.kva;
	dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);

	while (len) {
		to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
				len : BFI_MSGQ_CMD_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		src += BFI_MSGQ_CMD_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
		dst = (u8 *)cmdq->addr.kva;
		dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
		num_entries++;
	}
}
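
/*
 * The IOC acknowledged consumption of command-queue entries: update the
 * consumer index, drain as many pending commands as now fit (completing
 * each with BFA_STATUS_OK), and ring the doorbell if anything was copied.
 */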
static void
bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfa_msgq_cmd_entry *cmd;
	int posted = 0;

	cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);

	/* Walk through pending list to see if the command can be posted */
	while (!list_empty(&cmdq->pending_q)) {
		cmd = list_first_entry(&cmdq->pending_q,
				       struct bfa_msgq_cmd_entry, qe);
		if (ntohs(cmd->msg_hdr->num_entries) <=
			BFA_MSGQ_FREE_CNT(cmdq)) {
			list_del(&cmd->qe);
			__cmd_copy(cmdq, cmd);
			posted = 1;
			call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		} else {
			break;
		}
	}

	if (posted)
		bfa_fsm_send_event(cmdq, CMDQ_E_POST);
}
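
/*
 * Command-queue copy path: the IOC can request that a region of the host
 * command queue be echoed back over the mailbox (presumably so firmware can
 * re-fetch a command it failed to read from host memory). The request
 * carries an offset and length; the host answers in BFI_CMD_COPY_SZ chunks,
 * one mailbox message per chunk, until bytes_to_copy reaches zero.
 */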
static void
bfa_msgq_cmdq_copy_next(void *arg)
{
	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;

	if (cmdq->bytes_to_copy)
		bfa_msgq_cmdq_copy_rsp(cmdq);
}

static void
bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_cmdq_copy_req *req =
		(struct bfi_msgq_i2h_cmdq_copy_req *)mb;

	cmdq->token = 0;
	cmdq->offset = ntohs(req->offset);
	cmdq->bytes_to_copy = ntohs(req->len);
	bfa_msgq_cmdq_copy_rsp(cmdq);
}

static void
bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
	int copied;
	u8 *addr = (u8 *)cmdq->addr.kva;

	memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
	bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
	rsp->mh.mtag.i2htok = htons(cmdq->token);
	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
		cmdq->bytes_to_copy;
	addr += cmdq->offset;
	memcpy(rsp->data, addr, copied);

	cmdq->token++;
	cmdq->offset += copied;
	cmdq->bytes_to_copy -= copied;

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
				bfa_msgq_cmdq_copy_next, cmdq)) {
		bfa_msgq_cmdq_copy_next(cmdq);
	}
}

static void
bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
{
	cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
	INIT_LIST_HEAD(&cmdq->pending_q);
	cmdq->msgq = msgq;
	bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
}

static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);

enum rspq_event {
	RSPQ_E_START = 1,
	RSPQ_E_STOP = 2,
	RSPQ_E_FAIL = 3,
	RSPQ_E_RESP = 4,
	RSPQ_E_INIT_RESP = 5,
	RSPQ_E_DB_READY = 6,
};
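
/*
 * Response-queue state machine: same structure as the command queue, except
 * that RSPQ_E_RESP (responses consumed by the host) is what triggers the
 * consumer-index doorbell, and the doorbell write is skipped while the IOC
 * is disabled.
 */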
bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
			enum rspq_event);
bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
			enum rspq_event);

static void
rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
{
	rspq->producer_index = 0;
	rspq->consumer_index = 0;
	rspq->flags = 0;
}

static void
rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_START:
		bfa_fsm_set_state(rspq, rspq_sm_init_wait);
		break;

	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
{
	bfa_wc_down(&rspq->msgq->init_wc);
}

static void
rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_FAIL:
	case RSPQ_E_STOP:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_INIT_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}

static void
rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
{
	if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
		bfa_msgq_rspq_dbell(rspq);
}

static void
rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
		break;

	case RSPQ_E_DB_READY:
		if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
			rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
			bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_msgq_rspq_dbell_ready(void *arg)
{
	struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;

	bfa_fsm_send_event(rspq, RSPQ_E_DB_READY);
}

static void
bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.rspq_ci = htons(rspq->consumer_index);

	if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
				bfa_msgq_rspq_dbell_ready, rspq)) {
		bfa_msgq_rspq_dbell_ready(rspq);
	}
}
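
/*
 * The IOC advanced the response-queue producer index: walk the new entries,
 * dispatch each message to the handler registered for its message class
 * (see bfa_msgq_regisr()), advance the consumer index by the entry count of
 * each message, then report the consumed entries back via RSPQ_E_RESP.
 */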
static void
bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfi_msgq_mhdr *msghdr;
	int num_entries;
	int mc;
	u8 *rspq_qe;

	rspq->producer_index = ntohs(dbell->idx.rspq_pi);

	while (rspq->consumer_index != rspq->producer_index) {
		rspq_qe = (u8 *)rspq->addr.kva;
		rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
		msghdr = (struct bfi_msgq_mhdr *)rspq_qe;

		mc = msghdr->msg_class;
		num_entries = ntohs(msghdr->num_entries);

		if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
			break;

		(rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);

		BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
				rspq->depth);
	}

	bfa_fsm_send_event(rspq, RSPQ_E_RESP);
}

static void
bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
{
	rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY;
	rspq->msgq = msgq;
	bfa_fsm_set_state(rspq, rspq_sm_stopped);
}
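
/*
 * Initialization handshake: when the IOC is enabled, both queue state
 * machines are started and each decrements init_wc on entering init_wait.
 * Once the wait counter drains, bfa_msgq_init() runs and sends a single
 * BFI_MSGQ_H2I_INIT_REQ carrying the DMA addresses and depths of both
 * queues; the firmware's BFI_MSGQ_I2H_INIT_RSP then moves both queues
 * toward their ready states.
 */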
static void
bfa_msgq_init_rsp(struct bfa_msgq *msgq,
		 struct bfi_mbmsg *mb)
{
	bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
	bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
}

static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}

static void
bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (msg->mh.msg_id) {
	case BFI_MSGQ_I2H_INIT_RSP:
		bfa_msgq_init_rsp(msgq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_PI:
		bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_CI:
		bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
		break;

	case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
		bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
		break;

	default:
		BUG_ON(1);
	}
}

static void
bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (event) {
	case BFA_IOC_E_ENABLED:
		bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
		bfa_wc_wait(&msgq->init_wc);
		break;

	case BFA_IOC_E_DISABLED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
		break;

	case BFA_IOC_E_FAILED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
		break;

	default:
		break;
	}
}
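
/*
 * DMA memory layout: the caller allocates one contiguous region of
 * bfa_msgq_meminfo() bytes and hands it to bfa_msgq_memclaim(), which
 * places the command queue first and the response queue after it, each
 * rounded up to BFA_DMA_ALIGN_SZ.
 */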
u32
bfa_msgq_meminfo(void)
{
	return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) +
		roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ);
}

void
bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
{
	msgq->cmdq.addr.kva = kva;
	msgq->cmdq.addr.pa = pa;

	kva += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
	pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);

	msgq->rspq.addr.kva = kva;
	msgq->rspq.addr.pa = pa;
}

void
bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
{
	msgq->ioc = ioc;

	bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
	bfa_msgq_rspq_attach(&msgq->rspq, msgq);

	bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
	bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
	bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}

void
bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
		bfa_msgq_mcfunc_t cbfn, void *cbarg)
{
	msgq->rspq.rsphdlr[mc].cbfn = cbfn;
	msgq->rspq.rsphdlr[mc].cbarg = cbarg;
}
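
/*
 * Post a command to the firmware. If the command fits in the free space of
 * the command queue it is copied immediately, its completion callback is
 * invoked with BFA_STATUS_OK and the doorbell is rung; otherwise the entry
 * is parked on pending_q and posted later from bfa_msgq_cmdq_ci_update().
 *
 * Illustrative caller sequence (a sketch only; the caller-side names
 * my_req, my_post_done and my_ctx are hypothetical):
 *
 *	struct bfa_msgq_cmd_entry cmd;
 *
 *	cmd.msg_hdr = &my_req.mh;	// BFI request to send
 *	cmd.msg_size = sizeof(my_req);
 *	cmd.cbfn = my_post_done;	// completion, gets a BFA_STATUS_*
 *	cmd.cbarg = my_ctx;
 *	bfa_msgq_cmd_post(msgq, &cmd);
 */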
void
bfa_msgq_cmd_post(struct bfa_msgq *msgq, struct bfa_msgq_cmd_entry *cmd)
{
	if (ntohs(cmd->msg_hdr->num_entries) <=
		BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
		__cmd_copy(&msgq->cmdq, cmd);
		call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
	} else {
		list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
	}
}
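
/*
 * Copy a (possibly multi-entry) response out of the response queue into a
 * caller-supplied buffer, starting at the current consumer index and
 * wrapping around the queue as needed. Typically called from a registered
 * response handler, before bfa_msgq_rspq_pi_update() advances the consumer
 * index past the message.
 */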
void
bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
{
	struct bfa_msgq_rspq *rspq = &msgq->rspq;
	size_t len = buf_len;
	size_t to_copy;
	int ci;
	u8 *src, *dst;

	ci = rspq->consumer_index;
	src = (u8 *)rspq->addr.kva;
	src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	dst = buf;

	while (len) {
		to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ?
				len : BFI_MSGQ_RSP_ENTRY_SIZE;
		memcpy(dst, src, to_copy);
		len -= to_copy;
		dst += BFI_MSGQ_RSP_ENTRY_SIZE;
		BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth);
		src = (u8 *)rspq->addr.kva;
		src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
	}
}