hinic_hw_io.c

/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/semaphore.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/err.h>

#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_cmdq.h"
#include "hinic_hw_qp_ctxt.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_io.h"
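
/*
 * Each queue has a 32-bit completion index (CI) slot in a per-function,
 * DMA-coherent CI table; CI_ADDR() returns the slot for a given queue id.
 * DB_IDX() maps a doorbell pointer back to its page index within the
 * doorbell BAR.
 */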
#define CI_Q_ADDR_SIZE                  sizeof(u32)

#define CI_ADDR(base_addr, q_id)        ((base_addr) + \
                                         (q_id) * CI_Q_ADDR_SIZE)

#define CI_TABLE_SIZE(num_qps)          ((num_qps) * CI_Q_ADDR_SIZE)

#define DB_IDX(db, db_base)             \
        (((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)

enum io_cmd {
        IO_CMD_MODIFY_QUEUE_CTXT = 0,
};
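
/**
 * init_db_area_idx - Initialize the doorbell area free index pool
 * @free_db_area: doorbell area free index pool to initialize
 **/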
static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
{
        int i;

        for (i = 0; i < HINIC_DB_MAX_AREAS; i++)
                free_db_area->db_idx[i] = i;

        free_db_area->alloc_pos = 0;
        free_db_area->return_pos = HINIC_DB_MAX_AREAS;

        free_db_area->num_free = HINIC_DB_MAX_AREAS;

        sema_init(&free_db_area->idx_lock, 1);
}
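
/**
 * get_db_area - Get a free doorbell area from the doorbell BAR
 * @func_to_io: func to io channel that holds the IO components
 *
 * Return pointer to a free doorbell page - Success, ERR_PTR - Failure
 **/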
static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io)
{
        struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
        int pos, idx;

        down(&free_db_area->idx_lock);

        free_db_area->num_free--;

        if (free_db_area->num_free < 0) {
                free_db_area->num_free++;
                up(&free_db_area->idx_lock);
                return ERR_PTR(-ENOMEM);
        }

        pos = free_db_area->alloc_pos++;
        pos &= HINIC_DB_MAX_AREAS - 1;

        idx = free_db_area->db_idx[pos];

        free_db_area->db_idx[pos] = -1;

        up(&free_db_area->idx_lock);

        return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE;
}
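
/**
 * return_db_area - Return a doorbell area to the free index pool
 * @func_to_io: func to io channel that holds the IO components
 * @db_base: base address of the doorbell area being returned
 **/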
static void return_db_area(struct hinic_func_to_io *func_to_io,
                           void __iomem *db_base)
{
        struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
        int pos, idx = DB_IDX(db_base, func_to_io->db_base);

        down(&free_db_area->idx_lock);

        pos = free_db_area->return_pos++;
        pos &= HINIC_DB_MAX_AREAS - 1;

        free_db_area->db_idx[pos] = idx;

        free_db_area->num_free++;

        up(&free_db_area->idx_lock);
}
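
/**
 * write_sq_ctxts - write the SQ contexts to HW
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: first qp number
 * @num_sqs: number of SQs to write
 *
 * Return 0 - Success, negative - Failure
 **/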
static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
                          u16 num_sqs)
{
        struct hinic_hwif *hwif = func_to_io->hwif;
        struct hinic_sq_ctxt_block *sq_ctxt_block;
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_cmdq_buf cmdq_buf;
        struct hinic_sq_ctxt *sq_ctxt;
        struct hinic_qp *qp;
        u64 out_param;
        int err, i;

        err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
                return err;
        }

        sq_ctxt_block = cmdq_buf.buf;
        sq_ctxt = sq_ctxt_block->sq_ctxt;

        hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ,
                                num_sqs, func_to_io->max_qps);

        for (i = 0; i < num_sqs; i++) {
                qp = &func_to_io->qps[i];

                hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq,
                                      base_qpn + qp->q_id);
        }

        cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs);

        err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
                                     IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
                                     &out_param);
        if (err || out_param != 0) {
                dev_err(&pdev->dev, "Failed to set SQ ctxts\n");
                err = -EFAULT;
        }

        hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
        return err;
}
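
/**
 * write_rq_ctxts - write the RQ contexts to HW
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: first qp number
 * @num_rqs: number of RQs to write
 *
 * Return 0 - Success, negative - Failure
 **/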
static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
                          u16 num_rqs)
{
        struct hinic_hwif *hwif = func_to_io->hwif;
        struct hinic_rq_ctxt_block *rq_ctxt_block;
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_cmdq_buf cmdq_buf;
        struct hinic_rq_ctxt *rq_ctxt;
        struct hinic_qp *qp;
        u64 out_param;
        int err, i;

        err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
                return err;
        }

        rq_ctxt_block = cmdq_buf.buf;
        rq_ctxt = rq_ctxt_block->rq_ctxt;

        hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ,
                                num_rqs, func_to_io->max_qps);

        for (i = 0; i < num_rqs; i++) {
                qp = &func_to_io->qps[i];

                hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq,
                                      base_qpn + qp->q_id);
        }

        cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs);

        err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
                                     IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
                                     &out_param);
        if (err || out_param != 0) {
                dev_err(&pdev->dev, "Failed to set RQ ctxts\n");
                err = -EFAULT;
        }

        hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
        return err;
}

/**
 * write_qp_ctxts - write the qp ctxt to HW
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: first qp number
 * @num_qps: number of qps to write
 *
 * Return 0 - Success, negative - Failure
 **/
static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
                          u16 num_qps)
{
        return (write_sq_ctxts(func_to_io, base_qpn, num_qps) ||
                write_rq_ctxts(func_to_io, base_qpn, num_qps));
}

/**
 * init_qp - Initialize a Queue Pair
 * @func_to_io: func to io channel that holds the IO components
 * @qp: pointer to the qp to initialize
 * @q_id: the id of the qp
 * @sq_msix_entry: msix entry for sq
 * @rq_msix_entry: msix entry for rq
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_qp(struct hinic_func_to_io *func_to_io,
                   struct hinic_qp *qp, int q_id,
                   struct msix_entry *sq_msix_entry,
                   struct msix_entry *rq_msix_entry)
{
        struct hinic_hwif *hwif = func_to_io->hwif;
        struct pci_dev *pdev = hwif->pdev;
        void __iomem *db_base;
        int err;

        qp->q_id = q_id;

        err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
                                HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
                                HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
                return err;
        }

        err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
                                HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
                                HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
                goto err_rq_alloc;
        }

        db_base = get_db_area(func_to_io);
        if (IS_ERR(db_base)) {
                dev_err(&pdev->dev, "Failed to get DB area for SQ\n");
                err = PTR_ERR(db_base);
                goto err_get_db;
        }

        func_to_io->sq_db[q_id] = db_base;

        err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
                            sq_msix_entry,
                            CI_ADDR(func_to_io->ci_addr_base, q_id),
                            CI_ADDR(func_to_io->ci_dma_base, q_id), db_base);
        if (err) {
                dev_err(&pdev->dev, "Failed to init SQ\n");
                goto err_sq_init;
        }

        err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
                            rq_msix_entry);
        if (err) {
                dev_err(&pdev->dev, "Failed to init RQ\n");
                goto err_rq_init;
        }

        return 0;

err_rq_init:
        hinic_clean_sq(&qp->sq);

err_sq_init:
        return_db_area(func_to_io, db_base);

err_get_db:
        hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);

err_rq_alloc:
        hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
        return err;
}

/**
 * destroy_qp - Clean the resources of a Queue Pair
 * @func_to_io: func to io channel that holds the IO components
 * @qp: pointer to the qp to clean
 **/
static void destroy_qp(struct hinic_func_to_io *func_to_io,
                       struct hinic_qp *qp)
{
        int q_id = qp->q_id;

        hinic_clean_rq(&qp->rq);
        hinic_clean_sq(&qp->sq);

        return_db_area(func_to_io, func_to_io->sq_db[q_id]);

        hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
        hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
}

/**
 * hinic_io_create_qps - Create Queue Pairs
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: base qp number
 * @num_qps: number of queue pairs to create
 * @sq_msix_entries: msix entries for sq
 * @rq_msix_entries: msix entries for rq
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
                        u16 base_qpn, int num_qps,
                        struct msix_entry *sq_msix_entries,
                        struct msix_entry *rq_msix_entries)
{
        struct hinic_hwif *hwif = func_to_io->hwif;
        struct pci_dev *pdev = hwif->pdev;
        size_t qps_size, wq_size, db_size;
        void *ci_addr_base;
        int i, j, err;

        qps_size = num_qps * sizeof(*func_to_io->qps);
        func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL);
        if (!func_to_io->qps)
                return -ENOMEM;

        wq_size = num_qps * sizeof(*func_to_io->sq_wq);
        func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
        if (!func_to_io->sq_wq) {
                err = -ENOMEM;
                goto err_sq_wq;
        }

        wq_size = num_qps * sizeof(*func_to_io->rq_wq);
        func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
        if (!func_to_io->rq_wq) {
                err = -ENOMEM;
                goto err_rq_wq;
        }

        db_size = num_qps * sizeof(*func_to_io->sq_db);
        func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL);
        if (!func_to_io->sq_db) {
                err = -ENOMEM;
                goto err_sq_db;
        }

        ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
                                           &func_to_io->ci_dma_base,
                                           GFP_KERNEL);
        if (!ci_addr_base) {
                dev_err(&pdev->dev, "Failed to allocate CI area\n");
                err = -ENOMEM;
                goto err_ci_base;
        }

        func_to_io->ci_addr_base = ci_addr_base;

        for (i = 0; i < num_qps; i++) {
                err = init_qp(func_to_io, &func_to_io->qps[i], i,
                              &sq_msix_entries[i], &rq_msix_entries[i]);
                if (err) {
                        dev_err(&pdev->dev, "Failed to create QP %d\n", i);
                        goto err_init_qp;
                }
        }

        err = write_qp_ctxts(func_to_io, base_qpn, num_qps);
        if (err) {
                dev_err(&pdev->dev, "Failed to init QP ctxts\n");
                goto err_write_qp_ctxts;
        }

        return 0;

err_write_qp_ctxts:
err_init_qp:
        for (j = 0; j < i; j++)
                destroy_qp(func_to_io, &func_to_io->qps[j]);

        dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
                          func_to_io->ci_addr_base, func_to_io->ci_dma_base);

err_ci_base:
        devm_kfree(&pdev->dev, func_to_io->sq_db);

err_sq_db:
        devm_kfree(&pdev->dev, func_to_io->rq_wq);

err_rq_wq:
        devm_kfree(&pdev->dev, func_to_io->sq_wq);

err_sq_wq:
        devm_kfree(&pdev->dev, func_to_io->qps);
        return err;
}

/**
 * hinic_io_destroy_qps - Destroy the IO Queue Pairs
 * @func_to_io: func to io channel that holds the IO components
 * @num_qps: number of queue pairs to destroy
 **/
void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
{
        struct hinic_hwif *hwif = func_to_io->hwif;
        struct pci_dev *pdev = hwif->pdev;
        size_t ci_table_size;
        int i;

        ci_table_size = CI_TABLE_SIZE(num_qps);

        for (i = 0; i < num_qps; i++)
                destroy_qp(func_to_io, &func_to_io->qps[i]);

        dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base,
                          func_to_io->ci_dma_base);

        devm_kfree(&pdev->dev, func_to_io->sq_db);
        devm_kfree(&pdev->dev, func_to_io->rq_wq);
        devm_kfree(&pdev->dev, func_to_io->sq_wq);
        devm_kfree(&pdev->dev, func_to_io->qps);
}

/**
 * hinic_io_init - Initialize the IO components
 * @func_to_io: func to io channel that holds the IO components
 * @hwif: HW interface for accessing IO
 * @max_qps: maximum QPs in HW
 * @num_ceqs: number of completion event queues
 * @ceq_msix_entries: msix entries for ceqs
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_io_init(struct hinic_func_to_io *func_to_io,
                  struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
                  struct msix_entry *ceq_msix_entries)
{
        struct pci_dev *pdev = hwif->pdev;
        enum hinic_cmdq_type cmdq, type;
        void __iomem *db_area;
        int err;

        func_to_io->hwif = hwif;
        func_to_io->qps = NULL;
        func_to_io->max_qps = max_qps;

        err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
                              HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
                              ceq_msix_entries);
        if (err) {
                dev_err(&pdev->dev, "Failed to init CEQs\n");
                return err;
        }

        err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate WQS for IO\n");
                goto err_wqs_alloc;
        }

        func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR);
        if (!func_to_io->db_base) {
                dev_err(&pdev->dev, "Failed to remap IO DB area\n");
                err = -ENOMEM;
                goto err_db_ioremap;
        }

        init_db_area_idx(&func_to_io->free_db_area);

        for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
                db_area = get_db_area(func_to_io);
                if (IS_ERR(db_area)) {
                        dev_err(&pdev->dev, "Failed to get cmdq db area\n");
                        err = PTR_ERR(db_area);
                        goto err_db_area;
                }

                func_to_io->cmdq_db_area[cmdq] = db_area;
        }

        err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
                               func_to_io->cmdq_db_area);
        if (err) {
                dev_err(&pdev->dev, "Failed to initialize cmdqs\n");
                goto err_init_cmdqs;
        }

        return 0;

err_init_cmdqs:
err_db_area:
        for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
                return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);

        iounmap(func_to_io->db_base);

err_db_ioremap:
        hinic_wqs_free(&func_to_io->wqs);

err_wqs_alloc:
        hinic_ceqs_free(&func_to_io->ceqs);
        return err;
}

/**
 * hinic_io_free - Free the IO components
 * @func_to_io: func to io channel that holds the IO components
 **/
void hinic_io_free(struct hinic_func_to_io *func_to_io)
{
        enum hinic_cmdq_type cmdq;

        hinic_free_cmdqs(&func_to_io->cmdqs);

        for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
                return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);

        iounmap(func_to_io->db_base);

        hinic_wqs_free(&func_to_io->wqs);

        hinic_ceqs_free(&func_to_io->ceqs);
}