lpfc_mem.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774
  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Fibre Channel Host Bus Adapters. *
  4. * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
  5. * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
  6. * Copyright (C) 2004-2014 Emulex. All rights reserved. *
  7. * EMULEX and SLI are trademarks of Emulex. *
  8. * www.broadcom.com *
  9. * Portions Copyright (C) 2004-2005 Christoph Hellwig *
  10. * *
  11. * This program is free software; you can redistribute it and/or *
  12. * modify it under the terms of version 2 of the GNU General *
  13. * Public License as published by the Free Software Foundation. *
  14. * This program is distributed in the hope that it will be useful. *
  15. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
  16. * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
  17. * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
  18. * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  19. * TO BE LEGALLY INVALID. See the GNU General Public License for *
  20. * more details, a copy of which can be found in the file COPYING *
  21. * included with this package. *
  22. *******************************************************************/
  23. #include <linux/mempool.h>
  24. #include <linux/slab.h>
  25. #include <linux/pci.h>
  26. #include <linux/interrupt.h>
  27. #include <scsi/scsi.h>
  28. #include <scsi/scsi_device.h>
  29. #include <scsi/scsi_transport_fc.h>
  30. #include <scsi/fc/fc_fs.h>
  31. #include <linux/nvme-fc-driver.h>
  32. #include "lpfc_hw4.h"
  33. #include "lpfc_hw.h"
  34. #include "lpfc_sli.h"
  35. #include "lpfc_sli4.h"
  36. #include "lpfc_nl.h"
  37. #include "lpfc_disc.h"
  38. #include "lpfc.h"
  39. #include "lpfc_scsi.h"
  40. #include "lpfc_nvme.h"
  41. #include "lpfc_nvmet.h"
  42. #include "lpfc_crtn.h"
  43. #include "lpfc_logmsg.h"
  44. #define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
  45. #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
  46. #define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */
  47. int
  48. lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
  49. size_t bytes;
  50. int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
  51. if (max_xri <= 0)
  52. return -ENOMEM;
  53. bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
  54. sizeof(unsigned long);
  55. phba->cfg_rrq_xri_bitmap_sz = bytes;
  56. phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
  57. bytes);
  58. if (!phba->active_rrq_pool)
  59. return -ENOMEM;
  60. else
  61. return 0;
  62. }
/**
 * lpfc_mem_alloc - create and allocate all PCI and memory pools
 * @phba: HBA to allocate pools for
 * @align: alignment, in bytes, for the SLI3 DMA pools (mbuf/hbq/hrb/drb)
 *
 * Description: Creates and allocates PCI pools lpfc_sg_dma_buf_pool,
 * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
 * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
 *
 * Notes: Not interrupt-safe. Must be called with no locks held. If any
 * allocation fails, frees all successfully allocated memory before returning.
 *
 * Returns:
 * 0 on success
 * -ENOMEM on failure (if any memory allocations fail)
 **/
int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	int i;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Calculate alignment: cap at SLI4_PAGE_SIZE so the buffer
		 * size itself is used as the alignment for small buffers.
		 */
		if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
			i = phba->cfg_sg_dma_buf_size;
		else
			i = SLI4_PAGE_SIZE;

		phba->lpfc_sg_dma_buf_pool =
			dma_pool_create("lpfc_sg_dma_buf_pool",
					&phba->pcidev->dev,
					phba->cfg_sg_dma_buf_size,
					i, 0);
		if (!phba->lpfc_sg_dma_buf_pool)
			goto fail;
	} else {
		/* SLI3 uses the caller-supplied alignment directly. */
		phba->lpfc_sg_dma_buf_pool =
			dma_pool_create("lpfc_sg_dma_buf_pool",
					&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
					align, 0);
		if (!phba->lpfc_sg_dma_buf_pool)
			goto fail;
	}

	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
					       LPFC_BPL_SIZE,
					       align, 0);
	if (!phba->lpfc_mbuf_pool)
		goto fail_free_dma_buf_pool;

	/* Pre-allocate a safety pool of mbufs so MEM_PRI allocations in
	 * lpfc_mbuf_alloc() can succeed when the DMA pool is exhausted.
	 */
	pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
				       sizeof(struct lpfc_dmabuf),
				       GFP_KERNEL);
	if (!pool->elements)
		goto fail_free_lpfc_mbuf_pool;

	pool->max_count = 0;
	pool->current_count = 0;
	for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
		pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
				       GFP_KERNEL, &pool->elements[i].phys);
		if (!pool->elements[i].virt)
			goto fail_free_mbuf_pool;
		pool->max_count++;
		pool->current_count++;
	}

	phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							  sizeof(LPFC_MBOXQ_t));
	if (!phba->mbox_mem_pool)
		goto fail_free_mbuf_pool;

	phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							 sizeof(struct lpfc_nodelist));
	if (!phba->nlp_mem_pool)
		goto fail_free_mbox_pool;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* SLI4: RRQ mempool plus separate header/data receive pools;
		 * the SLI3 hbq pool is unused.
		 */
		phba->rrq_pool =
			mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
						    sizeof(struct lpfc_node_rrq));
		if (!phba->rrq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
						      &phba->pcidev->dev,
						      LPFC_HDR_BUF_SIZE, align, 0);
		if (!phba->lpfc_hrb_pool)
			goto fail_free_rrq_mem_pool;

		phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
						      &phba->pcidev->dev,
						      LPFC_DATA_BUF_SIZE, align, 0);
		if (!phba->lpfc_drb_pool)
			goto fail_free_hrb_pool;
		phba->lpfc_hbq_pool = NULL;
	} else {
		/* SLI3: single HBQ buffer pool; SLI4 pools unused. */
		phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
						      &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
		if (!phba->lpfc_hbq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = NULL;
		phba->lpfc_drb_pool = NULL;
	}

	if (phba->cfg_EnableXLane) {
		phba->device_data_mem_pool = mempool_create_kmalloc_pool(
						LPFC_DEVICE_DATA_POOL_SIZE,
						sizeof(struct lpfc_device_data));
		if (!phba->device_data_mem_pool)
			goto fail_free_drb_pool;
	} else {
		phba->device_data_mem_pool = NULL;
	}

	return 0;

	/* Unwind strictly in reverse order of allocation; each label falls
	 * through into the next so a later failure frees everything earlier.
	 */
 fail_free_drb_pool:
	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;
 fail_free_hrb_pool:
	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;
 fail_free_rrq_mem_pool:
	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;
 fail_free_nlp_mem_pool:
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
 fail_free_mbox_pool:
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;
 fail_free_mbuf_pool:
	/* i is the count of safety-pool mbufs successfully allocated (or the
	 * failing index from the fill loop), so this frees elements [0, i).
	 */
	while (i--)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);
 fail_free_lpfc_mbuf_pool:
	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;
 fail_free_dma_buf_pool:
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;
 fail:
	return -ENOMEM;
}
  196. int
  197. lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
  198. {
  199. phba->lpfc_nvmet_drb_pool =
  200. dma_pool_create("lpfc_nvmet_drb_pool",
  201. &phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
  202. SGL_ALIGN_SZ, 0);
  203. if (!phba->lpfc_nvmet_drb_pool) {
  204. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  205. "6024 Can't enable NVME Target - no memory\n");
  206. return -ENOMEM;
  207. }
  208. return 0;
  209. }
  210. /**
  211. * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
  212. * @phba: HBA to free memory for
  213. *
  214. * Description: Free the memory allocated by lpfc_mem_alloc routine. This
  215. * routine is a the counterpart of lpfc_mem_alloc.
  216. *
  217. * Returns: None
  218. **/
  219. void
  220. lpfc_mem_free(struct lpfc_hba *phba)
  221. {
  222. int i;
  223. struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
  224. struct lpfc_device_data *device_data;
  225. /* Free HBQ pools */
  226. lpfc_sli_hbqbuf_free_all(phba);
  227. if (phba->lpfc_nvmet_drb_pool)
  228. dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
  229. phba->lpfc_nvmet_drb_pool = NULL;
  230. if (phba->lpfc_drb_pool)
  231. dma_pool_destroy(phba->lpfc_drb_pool);
  232. phba->lpfc_drb_pool = NULL;
  233. if (phba->lpfc_hrb_pool)
  234. dma_pool_destroy(phba->lpfc_hrb_pool);
  235. phba->lpfc_hrb_pool = NULL;
  236. if (phba->txrdy_payload_pool)
  237. dma_pool_destroy(phba->txrdy_payload_pool);
  238. phba->txrdy_payload_pool = NULL;
  239. if (phba->lpfc_hbq_pool)
  240. dma_pool_destroy(phba->lpfc_hbq_pool);
  241. phba->lpfc_hbq_pool = NULL;
  242. if (phba->rrq_pool)
  243. mempool_destroy(phba->rrq_pool);
  244. phba->rrq_pool = NULL;
  245. /* Free NLP memory pool */
  246. mempool_destroy(phba->nlp_mem_pool);
  247. phba->nlp_mem_pool = NULL;
  248. if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
  249. mempool_destroy(phba->active_rrq_pool);
  250. phba->active_rrq_pool = NULL;
  251. }
  252. /* Free mbox memory pool */
  253. mempool_destroy(phba->mbox_mem_pool);
  254. phba->mbox_mem_pool = NULL;
  255. /* Free MBUF memory pool */
  256. for (i = 0; i < pool->current_count; i++)
  257. dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
  258. pool->elements[i].phys);
  259. kfree(pool->elements);
  260. dma_pool_destroy(phba->lpfc_mbuf_pool);
  261. phba->lpfc_mbuf_pool = NULL;
  262. /* Free DMA buffer memory pool */
  263. dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
  264. phba->lpfc_sg_dma_buf_pool = NULL;
  265. /* Free Device Data memory pool */
  266. if (phba->device_data_mem_pool) {
  267. /* Ensure all objects have been returned to the pool */
  268. while (!list_empty(&phba->luns)) {
  269. device_data = list_first_entry(&phba->luns,
  270. struct lpfc_device_data,
  271. listentry);
  272. list_del(&device_data->listentry);
  273. mempool_free(device_data, phba->device_data_mem_pool);
  274. }
  275. mempool_destroy(phba->device_data_mem_pool);
  276. }
  277. phba->device_data_mem_pool = NULL;
  278. return;
  279. }
/**
 * lpfc_mem_free_all - Frees all PCI and driver memory
 * @phba: HBA to free memory for
 *
 * Description: Free memory from PCI and driver memory pools and also those
 * used : lpfc_sg_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees
 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
 * the VPI bitmask.
 *
 * Notes: Mailbox commands (pending queue, completion queue, then the active
 * command) are drained back to mbox_mem_pool BEFORE lpfc_mem_free() destroys
 * that pool; the ordering here must not be changed.
 *
 * Returns: None
 **/
void
lpfc_mem_free_all(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mbox, *next_mbox;
	struct lpfc_dmabuf *mp;

	/* Free memory used in mailbox queue back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
		/* context1 may carry a DMA buffer owned by the command */
		mp = (struct lpfc_dmabuf *) (mbox->context1);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Free memory used in mailbox cmpl list back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
		mp = (struct lpfc_dmabuf *) (mbox->context1);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Free the active mailbox command back to the mailbox memory pool */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	if (psli->mbox_active) {
		mbox = psli->mbox_active;
		mp = (struct lpfc_dmabuf *) (mbox->context1);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(mbox, phba->mbox_mem_pool);
		psli->mbox_active = NULL;
	}

	/* Free and destroy all the allocated memory pools */
	lpfc_mem_free(phba);

	/* Free the iocb lookup array */
	kfree(psli->iocbq_lookup);
	psli->iocbq_lookup = NULL;

	return;
}
  338. /**
  339. * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
  340. * @phba: HBA which owns the pool to allocate from
  341. * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
  342. * @handle: used to return the DMA-mapped address of the mbuf
  343. *
  344. * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
  345. * Allocates from generic dma_pool_alloc function first and if that fails and
  346. * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the
  347. * HBA's pool.
  348. *
  349. * Notes: Not interrupt-safe. Must be called with no locks held. Takes
  350. * phba->hbalock.
  351. *
  352. * Returns:
  353. * pointer to the allocated mbuf on success
  354. * NULL on failure
  355. **/
  356. void *
  357. lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
  358. {
  359. struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
  360. unsigned long iflags;
  361. void *ret;
  362. ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
  363. spin_lock_irqsave(&phba->hbalock, iflags);
  364. if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
  365. pool->current_count--;
  366. ret = pool->elements[pool->current_count].virt;
  367. *handle = pool->elements[pool->current_count].phys;
  368. }
  369. spin_unlock_irqrestore(&phba->hbalock, iflags);
  370. return ret;
  371. }
  372. /**
  373. * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
  374. * @phba: HBA which owns the pool to return to
  375. * @virt: mbuf to free
  376. * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
  377. *
  378. * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if
  379. * it is below its max_count, frees the mbuf otherwise.
  380. *
  381. * Notes: Must be called with phba->hbalock held to synchronize access to
  382. * lpfc_mbuf_safety_pool.
  383. *
  384. * Returns: None
  385. **/
  386. void
  387. __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
  388. {
  389. struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
  390. if (pool->current_count < pool->max_count) {
  391. pool->elements[pool->current_count].virt = virt;
  392. pool->elements[pool->current_count].phys = dma;
  393. pool->current_count++;
  394. } else {
  395. dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
  396. }
  397. return;
  398. }
  399. /**
  400. * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
  401. * @phba: HBA which owns the pool to return to
  402. * @virt: mbuf to free
  403. * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
  404. *
  405. * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if
  406. * it is below its max_count, frees the mbuf otherwise.
  407. *
  408. * Notes: Takes phba->hbalock. Can be called with or without other locks held.
  409. *
  410. * Returns: None
  411. **/
  412. void
  413. lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
  414. {
  415. unsigned long iflags;
  416. spin_lock_irqsave(&phba->hbalock, iflags);
  417. __lpfc_mbuf_free(phba, virt, dma);
  418. spin_unlock_irqrestore(&phba->hbalock, iflags);
  419. return;
  420. }
  421. /**
  422. * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the
  423. * lpfc_sg_dma_buf_pool PCI pool
  424. * @phba: HBA which owns the pool to allocate from
  425. * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
  426. * @handle: used to return the DMA-mapped address of the nvmet_buf
  427. *
  428. * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
  429. * PCI pool. Allocates from generic dma_pool_alloc function.
  430. *
  431. * Returns:
  432. * pointer to the allocated nvmet_buf on success
  433. * NULL on failure
  434. **/
  435. void *
  436. lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
  437. {
  438. void *ret;
  439. ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
  440. return ret;
  441. }
  442. /**
  443. * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool
  444. * PCI pool
  445. * @phba: HBA which owns the pool to return to
  446. * @virt: nvmet_buf to free
  447. * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed
  448. *
  449. * Returns: None
  450. **/
  451. void
  452. lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
  453. {
  454. dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
  455. }
  456. /**
  457. * lpfc_els_hbq_alloc - Allocate an HBQ buffer
  458. * @phba: HBA to allocate HBQ buffer for
  459. *
  460. * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
  461. * pool along a non-DMA-mapped container for it.
  462. *
  463. * Notes: Not interrupt-safe. Must be called with no locks held.
  464. *
  465. * Returns:
  466. * pointer to HBQ on success
  467. * NULL on failure
  468. **/
  469. struct hbq_dmabuf *
  470. lpfc_els_hbq_alloc(struct lpfc_hba *phba)
  471. {
  472. struct hbq_dmabuf *hbqbp;
  473. hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
  474. if (!hbqbp)
  475. return NULL;
  476. hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
  477. &hbqbp->dbuf.phys);
  478. if (!hbqbp->dbuf.virt) {
  479. kfree(hbqbp);
  480. return NULL;
  481. }
  482. hbqbp->total_size = LPFC_BPL_SIZE;
  483. return hbqbp;
  484. }
  485. /**
  486. * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
  487. * @phba: HBA buffer was allocated for
  488. * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
  489. *
  490. * Description: Frees both the container and the DMA-mapped buffer returned by
  491. * lpfc_els_hbq_alloc.
  492. *
  493. * Notes: Can be called with or without locks held.
  494. *
  495. * Returns: None
  496. **/
  497. void
  498. lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
  499. {
  500. dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
  501. kfree(hbqbp);
  502. return;
  503. }
  504. /**
  505. * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
  506. * @phba: HBA to allocate a receive buffer for
  507. *
  508. * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
  509. * pool along a non-DMA-mapped container for it.
  510. *
  511. * Notes: Not interrupt-safe. Must be called with no locks held.
  512. *
  513. * Returns:
  514. * pointer to HBQ on success
  515. * NULL on failure
  516. **/
  517. struct hbq_dmabuf *
  518. lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
  519. {
  520. struct hbq_dmabuf *dma_buf;
  521. dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
  522. if (!dma_buf)
  523. return NULL;
  524. dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
  525. &dma_buf->hbuf.phys);
  526. if (!dma_buf->hbuf.virt) {
  527. kfree(dma_buf);
  528. return NULL;
  529. }
  530. dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
  531. &dma_buf->dbuf.phys);
  532. if (!dma_buf->dbuf.virt) {
  533. dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
  534. dma_buf->hbuf.phys);
  535. kfree(dma_buf);
  536. return NULL;
  537. }
  538. dma_buf->total_size = LPFC_DATA_BUF_SIZE;
  539. return dma_buf;
  540. }
  541. /**
  542. * lpfc_sli4_rb_free - Frees a receive buffer
  543. * @phba: HBA buffer was allocated for
  544. * @dmab: DMA Buffer container returned by lpfc_sli4_hbq_alloc
  545. *
  546. * Description: Frees both the container and the DMA-mapped buffers returned by
  547. * lpfc_sli4_rb_alloc.
  548. *
  549. * Notes: Can be called with or without locks held.
  550. *
  551. * Returns: None
  552. **/
  553. void
  554. lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
  555. {
  556. dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
  557. dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
  558. kfree(dmab);
  559. }
  560. /**
  561. * lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer
  562. * @phba: HBA to allocate a receive buffer for
  563. *
  564. * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
  565. * pool along a non-DMA-mapped container for it.
  566. *
  567. * Notes: Not interrupt-safe. Must be called with no locks held.
  568. *
  569. * Returns:
  570. * pointer to HBQ on success
  571. * NULL on failure
  572. **/
  573. struct rqb_dmabuf *
  574. lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
  575. {
  576. struct rqb_dmabuf *dma_buf;
  577. dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
  578. if (!dma_buf)
  579. return NULL;
  580. dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
  581. &dma_buf->hbuf.phys);
  582. if (!dma_buf->hbuf.virt) {
  583. kfree(dma_buf);
  584. return NULL;
  585. }
  586. dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
  587. GFP_KERNEL, &dma_buf->dbuf.phys);
  588. if (!dma_buf->dbuf.virt) {
  589. dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
  590. dma_buf->hbuf.phys);
  591. kfree(dma_buf);
  592. return NULL;
  593. }
  594. dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
  595. return dma_buf;
  596. }
  597. /**
  598. * lpfc_sli4_nvmet_free - Frees a receive buffer
  599. * @phba: HBA buffer was allocated for
  600. * @dmab: DMA Buffer container returned by lpfc_sli4_rbq_alloc
  601. *
  602. * Description: Frees both the container and the DMA-mapped buffers returned by
  603. * lpfc_sli4_nvmet_alloc.
  604. *
  605. * Notes: Can be called with or without locks held.
  606. *
  607. * Returns: None
  608. **/
  609. void
  610. lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
  611. {
  612. dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
  613. dma_pool_free(phba->lpfc_nvmet_drb_pool,
  614. dmab->dbuf.virt, dmab->dbuf.phys);
  615. kfree(dmab);
  616. }
  617. /**
  618. * lpfc_in_buf_free - Free a DMA buffer
  619. * @phba: HBA buffer is associated with
  620. * @mp: Buffer to free
  621. *
  622. * Description: Frees the given DMA buffer in the appropriate way given if the
  623. * HBA is running in SLI3 mode with HBQs enabled.
  624. *
  625. * Notes: Takes phba->hbalock. Can be called with or without other locks held.
  626. *
  627. * Returns: None
  628. **/
  629. void
  630. lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
  631. {
  632. struct hbq_dmabuf *hbq_entry;
  633. unsigned long flags;
  634. if (!mp)
  635. return;
  636. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
  637. hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
  638. /* Check whether HBQ is still in use */
  639. spin_lock_irqsave(&phba->hbalock, flags);
  640. if (!phba->hbq_in_use) {
  641. spin_unlock_irqrestore(&phba->hbalock, flags);
  642. return;
  643. }
  644. list_del(&hbq_entry->dbuf.list);
  645. if (hbq_entry->tag == -1) {
  646. (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
  647. (phba, hbq_entry);
  648. } else {
  649. lpfc_sli_free_hbq(phba, hbq_entry);
  650. }
  651. spin_unlock_irqrestore(&phba->hbalock, flags);
  652. } else {
  653. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  654. kfree(mp);
  655. }
  656. return;
  657. }
  658. /**
  659. * lpfc_rq_buf_free - Free a RQ DMA buffer
  660. * @phba: HBA buffer is associated with
  661. * @mp: Buffer to free
  662. *
  663. * Description: Frees the given DMA buffer in the appropriate way given by
  664. * reposting it to its associated RQ so it can be reused.
  665. *
  666. * Notes: Takes phba->hbalock. Can be called with or without other locks held.
  667. *
  668. * Returns: None
  669. **/
  670. void
  671. lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
  672. {
  673. struct lpfc_rqb *rqbp;
  674. struct lpfc_rqe hrqe;
  675. struct lpfc_rqe drqe;
  676. struct rqb_dmabuf *rqb_entry;
  677. unsigned long flags;
  678. int rc;
  679. if (!mp)
  680. return;
  681. rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
  682. rqbp = rqb_entry->hrq->rqbp;
  683. spin_lock_irqsave(&phba->hbalock, flags);
  684. list_del(&rqb_entry->hbuf.list);
  685. hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
  686. hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
  687. drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
  688. drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
  689. rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
  690. if (rc < 0) {
  691. (rqbp->rqb_free_buffer)(phba, rqb_entry);
  692. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  693. "6409 Cannot post to HRQ %d: %x %x %x "
  694. "DRQ %x %x\n",
  695. rqb_entry->hrq->queue_id,
  696. rqb_entry->hrq->host_index,
  697. rqb_entry->hrq->hba_index,
  698. rqb_entry->hrq->entry_count,
  699. rqb_entry->drq->host_index,
  700. rqb_entry->drq->hba_index);
  701. } else {
  702. list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
  703. rqbp->buffer_count++;
  704. }
  705. spin_unlock_irqrestore(&phba->hbalock, flags);
  706. }