ccp-dmaengine.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

#define CCP_DMA_WIDTH(_mask)		\
({					\
	u64 mask = _mask + 1;		\
	(mask == 0) ? 64 : fls64(mask);	\
})

/* The CCP as a DMA provider can be configured for public or private
 * channels. The default is specified in the vdata for the device (PCI ID).
 * This module parameter overrides the default for all channels on all
 * devices:
 *   dma_chan_attr = 0x2 to force all channels public
 *                 = 0x1 to force all channels private
 *                 = 0x0 to defer to the vdata setting
 *                 = any other value: warn and revert to 0x0
 */
static unsigned int dma_chan_attr = CCP_DMA_DFLT;
module_param(dma_chan_attr, uint, 0444);
MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");

static unsigned int dmaengine = 1;
module_param(dmaengine, uint, 0444);
MODULE_PARM_DESC(dmaengine, "Register services with the DMA subsystem (any non-zero value, default: 1)");

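/* Resolve the effective channel visibility: honor a valid module
 * parameter override, otherwise fall back to the device's vdata default.
 */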
static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
{
	switch (dma_chan_attr) {
	case CCP_DMA_DFLT:
		return ccp->vdata->dma_chan_attr;
	case CCP_DMA_PRIV:
		return DMA_PRIVATE;
	case CCP_DMA_PUB:
		return 0;
	default:
		dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
			      dma_chan_attr);
		return ccp->vdata->dma_chan_attr;
	}
}

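/* Detach and free every DMA command on @list, returning each one to the
 * device's command cache.
 */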
static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}

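/* Free every descriptor on @list, including any commands still attached
 * to its active and pending lists.
 */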
static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

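/* dmaengine ->device_free_chan_resources hook: drop every descriptor the
 * channel owns, whatever its state.
 */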
static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);
}

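/* Reap descriptors from @list, but only those whose transaction the
 * client has acknowledged via async_tx_test_ack().
 */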
static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

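/* Cleanup tasklet body: reap acknowledged descriptors from the channel's
 * complete list.
 */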
static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);
	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);
	spin_unlock_irqrestore(&chan->lock, flags);
}

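/* Move the first pending command of @desc to its active list and hand it
 * to the CCP. -EINPROGRESS and -EBUSY still count as success here; the
 * command callback will drive the next step when the hardware is done.
 */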
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}

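/* Remove and free the command at the head of the descriptor's active
 * list, if there is one.
 */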
static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

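/* Retire @desc to the complete list and return the next descriptor on
 * the active list, if any. Caller must hold chan->lock.
 */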
static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}

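/* Walk the active list starting at @desc: free the command that just
 * finished, complete the cookie and invoke the client callback once a
 * descriptor has no commands left, and return the next descriptor that
 * still has work to issue (or NULL when the channel is drained).
 */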
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			/* Remove the DMA command from the list and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}

			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
			dma_descriptor_unmap(tx_desc);
		}

		desc = __ccp_next_dma_desc(chan, desc);

		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			dmaengine_desc_get_callback_invoke(tx_desc, NULL);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}

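/* Splice the pending list onto the tail of the active list. Returns the
 * first newly activated descriptor when the active list was empty (so
 * the caller knows to kick off processing), NULL otherwise. Caller must
 * hold chan->lock.
 */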
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
		? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
		: NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}

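/* Per-command completion callback: record an error in the descriptor,
 * retire finished descriptors, and keep issuing commands until the
 * channel pauses, errors out, or runs dry. Final cleanup of acked
 * descriptors is deferred to the cleanup tasklet.
 */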
static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}

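/* dmaengine ->tx_submit hook: assign a cookie and move the descriptor
 * from the channel's created list to its pending list.
 */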
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_del(&desc->entry);
	list_add_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	struct ccp_dma_cmd *cmd;

	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
	if (cmd)
		memset(cmd, 0, sizeof(*cmd));

	return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->entry);
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

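/* Build a descriptor that copies @src_sg to @dst_sg as a chain of CCP
 * passthru (no-op transform) commands, splitting commands at scatterlist
 * element boundaries. The descriptor is parked on the channel's created
 * list until it is submitted.
 */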
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;

	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;

	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;

			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;

			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_cmd->ccp = chan->ccp;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;

		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);

	list_add_tail(&desc->entry, &chan->created);

	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}

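/* dmaengine ->device_prep_dma_memcpy hook: wrap @src and @dst in
 * single-entry scatterlists and build a passthru descriptor for the copy.
 */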
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

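/* dmaengine ->device_issue_pending hook: activate everything on the
 * pending list and, if the channel was idle, kick off processing.
 */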
static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}

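/* dmaengine ->device_tx_status hook: report DMA_PAUSED for a paused
 * channel; otherwise consult the cookie and, for completed cookies, the
 * per-descriptor status still held on the complete list.
 */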
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get status from complete chain, if still there */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/*TODO: Wait for active DMA to complete before returning? */

	return 0;
}

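/* dmaengine ->device_resume hook: mark the channel as running again and
 * restart processing if a descriptor was left on the active list.
 */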
static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}

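/* dmaengine ->device_terminate_all hook: free all created, pending, and
 * active descriptors; the complete list is left to the cleanup tasklet.
 */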
static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/*TODO: Wait for active DMA to complete before continuing */

	spin_lock_irqsave(&chan->lock, flags);

	/*TODO: Purge the complete list? */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

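/* Register the CCP with the dmaengine core: allocate per-queue channels
 * and slab caches, advertise memcpy and interrupt capabilities, and wire
 * up the device_* callbacks. Honors the "dmaengine" module parameter.
 */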
int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	if (!dmaengine)
		return 0;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* The DMA channels for this device can be set to public or private,
	 * and overridden by the module parameter dma_chan_attr.
	 * Default: according to the value in vdata (dma_chan_attr=0)
	 * dma_chan_attr=0x1: all channels private (override vdata)
	 * dma_chan_attr=0x2: all channels public (override vdata)
	 */
	if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->created);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}

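/* Undo ccp_dmaengine_register(): unregister from the dmaengine core and
 * destroy the slab caches.
 */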
void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	if (!dmaengine)
		return;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}