ccp-dev-v3.c

/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
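
/* Allocate a contiguous run of 'count' KSB (key storage block) entries.
 * Blocks until entries become available; returns 0 if the wait is
 * interrupted by a signal, otherwise the starting entry index (biased
 * by KSB_START).
 */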
static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
        int start;
        struct ccp_device *ccp = cmd_q->ccp;

        for (;;) {
                mutex_lock(&ccp->sb_mutex);

                start = (u32)bitmap_find_next_zero_area(ccp->sb,
                                                        ccp->sb_count,
                                                        ccp->sb_start,
                                                        count, 0);
                if (start <= ccp->sb_count) {
                        bitmap_set(ccp->sb, start, count);
                        mutex_unlock(&ccp->sb_mutex);
                        break;
                }

                ccp->sb_avail = 0;
                mutex_unlock(&ccp->sb_mutex);

                /* Wait for KSB entries to become available */
                if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
                        return 0;
        }

        return KSB_START + start;
}
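
/* Return KSB entries obtained from ccp_alloc_ksb() and wake any waiters;
 * a 'start' of 0 (the allocator's failure value) is silently ignored.
 */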
static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
                         unsigned int count)
{
        struct ccp_device *ccp = cmd_q->ccp;

        if (!start)
                return;

        mutex_lock(&ccp->sb_mutex);
        bitmap_clear(ccp->sb, start - KSB_START, count);
        ccp->sb_avail = 1;
        mutex_unlock(&ccp->sb_mutex);

        wake_up_interruptible_all(&ccp->sb_queue);
}
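
/* Report the number of free command slots from the queue status register. */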
static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
        return CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
}
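
/* Submit a single command to the device. The caller supplies the contents
 * of registers CMD_REQ1..CMD_REQx in 'cr'; those are written first, then
 * CMD_REQ0 is written to kick off the job. If a completion interrupt was
 * requested (op->ioc, stop-on-complete, or the queue becoming full), wait
 * for the bottom half to signal completion and translate any queue error.
 */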
static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
{
        struct ccp_cmd_queue *cmd_q = op->cmd_q;
        struct ccp_device *ccp = cmd_q->ccp;
        void __iomem *cr_addr;
        u32 cr0, cmd;
        unsigned int i;
        int ret = 0;

        /* We could read a status register to see how many free slots
         * are actually available, but reading that register resets it
         * and you could lose some error information.
         */
        cmd_q->free_slots--;

        cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
              | (op->jobid << REQ0_JOBID_SHIFT)
              | REQ0_WAIT_FOR_WRITE;

        if (op->soc)
                cr0 |= REQ0_STOP_ON_COMPLETE
                       | REQ0_INT_ON_COMPLETE;

        if (op->ioc || !cmd_q->free_slots)
                cr0 |= REQ0_INT_ON_COMPLETE;

        /* Start at CMD_REQ1 */
        cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;

        mutex_lock(&ccp->req_mutex);

        /* Write CMD_REQ1 through CMD_REQx first */
        for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
                iowrite32(*(cr + i), cr_addr);

        /* Tell the CCP to start */
        wmb();
        iowrite32(cr0, ccp->io_regs + CMD_REQ0);

        mutex_unlock(&ccp->req_mutex);

        if (cr0 & REQ0_INT_ON_COMPLETE) {
                /* Wait for the job to complete */
                ret = wait_event_interruptible(cmd_q->int_queue,
                                               cmd_q->int_rcvd);
                if (ret || cmd_q->cmd_error) {
                        /* On error delete all related jobs from the queue */
                        cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
                              | op->jobid;
                        if (cmd_q->cmd_error)
                                ccp_log_error(cmd_q->ccp,
                                              cmd_q->cmd_error);

                        iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);

                        if (!ret)
                                ret = -EIO;
                } else if (op->soc) {
                        /* Delete just head job from the queue on SoC */
                        cmd = DEL_Q_ACTIVE
                              | (cmd_q->id << DEL_Q_ID_SHIFT)
                              | op->jobid;

                        iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
                }

                cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);

                cmd_q->int_rcvd = 0;
        }

        return ret;
}
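
/* AES engine: REQ1 carries the engine, key type, mode, action and the KSB
 * slot holding the key; REQ2-REQ6 describe the source/destination DMA
 * buffers and the KSB slot holding the context.
 */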
static int ccp_perform_aes(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
                | (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
                | (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
                | (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
                | (op->sb_key << REQ1_KEY_KSB_SHIFT);
        cr[1] = op->src.u.dma.length - 1;
        cr[2] = ccp_addr_lo(&op->src.u.dma);
        cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->src.u.dma);
        cr[4] = ccp_addr_lo(&op->dst.u.dma);
        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->dst.u.dma);

        if (op->u.aes.mode == CCP_AES_MODE_CFB)
                cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);

        if (op->eom)
                cr[0] |= REQ1_EOM;

        if (op->init)
                cr[0] |= REQ1_INIT;

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
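
/* XTS-AES engine: same register layout as the AES case, but with the XTS
 * unit size in place of the cipher mode.
 */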
static int ccp_perform_xts_aes(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
                | (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
                | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
                | (op->sb_key << REQ1_KEY_KSB_SHIFT);
        cr[1] = op->src.u.dma.length - 1;
        cr[2] = ccp_addr_lo(&op->src.u.dma);
        cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->src.u.dma);
        cr[4] = ccp_addr_lo(&op->dst.u.dma);
        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->dst.u.dma);

        if (op->eom)
                cr[0] |= REQ1_EOM;

        if (op->init)
                cr[0] |= REQ1_INIT;

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
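
/* SHA engine: REQ1_INIT is always set, which appears to (re)load the hash
 * context from its KSB slot; on the final block (op->eom) the total message
 * length in bits is passed in REQ5/REQ6.
 */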
static int ccp_perform_sha(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
                | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
                | REQ1_INIT;
        cr[1] = op->src.u.dma.length - 1;
        cr[2] = ccp_addr_lo(&op->src.u.dma);
        cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->src.u.dma);

        if (op->eom) {
                cr[0] |= REQ1_EOM;
                cr[4] = lower_32_bits(op->u.sha.msg_bits);
                cr[5] = upper_32_bits(op->u.sha.msg_bits);
        } else {
                cr[4] = 0;
                cr[5] = 0;
        }

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
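
/* RSA engine: key material is sourced from the KSB area (op->sb_key);
 * RSA operations are always single-shot, hence the unconditional REQ1_EOM.
 */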
static int ccp_perform_rsa(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
                | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
                | (op->sb_key << REQ1_KEY_KSB_SHIFT)
                | REQ1_EOM;
        cr[1] = op->u.rsa.input_len - 1;
        cr[2] = ccp_addr_lo(&op->src.u.dma);
        cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
                | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->src.u.dma);
        cr[4] = ccp_addr_lo(&op->dst.u.dma);
        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->dst.u.dma);

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
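
/* Pass-through engine: copies data, optionally applying a bitwise operation
 * and/or byte swapping. Source and destination can each be system memory
 * (DMA) or a storage block entry (offsets in units of CCP_SB_BYTES).
 */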
static int ccp_perform_passthru(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
                | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
                | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);

        if (op->src.type == CCP_MEMTYPE_SYSTEM)
                cr[1] = op->src.u.dma.length - 1;
        else
                cr[1] = op->dst.u.dma.length - 1;

        if (op->src.type == CCP_MEMTYPE_SYSTEM) {
                cr[2] = ccp_addr_lo(&op->src.u.dma);
                cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                        | ccp_addr_hi(&op->src.u.dma);

                if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
                        cr[3] |= (op->sb_key << REQ4_KSB_SHIFT);
        } else {
                cr[2] = op->src.u.sb * CCP_SB_BYTES;
                cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT);
        }

        if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
                cr[4] = ccp_addr_lo(&op->dst.u.dma);
                cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
                        | ccp_addr_hi(&op->dst.u.dma);
        } else {
                cr[4] = op->dst.u.sb * CCP_SB_BYTES;
                cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT);
        }

        if (op->eom)
                cr[0] |= REQ1_EOM;

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
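
/* ECC engine: operands live in system memory; REQ1_ECC_AFFINE_CONVERT
 * requests affine-coordinate conversion of the result.
 */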
static int ccp_perform_ecc(struct ccp_op *op)
{
        u32 cr[6];

        /* Fill out the register contents for REQ1 through REQ6 */
        cr[0] = REQ1_ECC_AFFINE_CONVERT
                | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
                | (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
                | REQ1_EOM;
        cr[1] = op->src.u.dma.length - 1;
        cr[2] = ccp_addr_lo(&op->src.u.dma);
        cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->src.u.dma);
        cr[4] = ccp_addr_lo(&op->dst.u.dma);
        cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
                | ccp_addr_hi(&op->dst.u.dma);

        return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
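
/* Queue interrupts are masked and unmasked as a group through IRQ_MASK_REG;
 * ccp->qim accumulates the per-queue "ok" and "error" bits built up in
 * ccp_init().
 */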
static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
{
        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
}

static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
{
        iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
}
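
/* Interrupt bottom half: for each queue with a pending bit in the status
 * register, latch the queue status, record the first error seen, ack the
 * interrupt, wake the queue's waiters, then re-enable queue interrupts.
 */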
static void ccp_irq_bh(unsigned long data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;
        struct ccp_cmd_queue *cmd_q;
        u32 q_int, status;
        unsigned int i;

        status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                q_int = status & (cmd_q->int_ok | cmd_q->int_err);
                if (q_int) {
                        cmd_q->int_status = status;
                        cmd_q->q_status = ioread32(cmd_q->reg_status);
                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

                        /* On error, only save the first error value */
                        if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

                        cmd_q->int_rcvd = 1;

                        /* Acknowledge the interrupt and wake the kthread */
                        iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
                        wake_up_interruptible(&cmd_q->int_queue);
                }
        }
        ccp_enable_queue_interrupts(ccp);
}
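
/* Hard interrupt handler: mask queue interrupts, then either schedule the
 * tasklet or run the bottom half inline, depending on ccp->use_tasklet.
 */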
static irqreturn_t ccp_irq_handler(int irq, void *data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;

        ccp_disable_queue_interrupts(ccp);
        if (ccp->use_tasklet)
                tasklet_schedule(&ccp->irq_tasklet);
        else
                ccp_irq_bh((unsigned long)ccp);

        return IRQ_HANDLED;
}
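
/* Bring the device up: discover the queues advertised by Q_MASK_REG, give
 * each one a DMA pool, two reserved KSB entries and a worker kthread, hook
 * up the IRQ, then register the RNG and DMA engine interfaces.
 */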
static int ccp_init(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct dma_pool *dma_pool;
        char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
        unsigned int qmr, i;
        int ret;

        /* Find available queues */
        ccp->qim = 0;
        qmr = ioread32(ccp->io_regs + Q_MASK_REG);
        for (i = 0; i < MAX_HW_QUEUES; i++) {
                if (!(qmr & (1 << i)))
                        continue;

                /* Allocate a dma pool for this queue */
                snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
                         ccp->name, i);
                dma_pool = dma_pool_create(dma_pool_name, dev,
                                           CCP_DMAPOOL_MAX_SIZE,
                                           CCP_DMAPOOL_ALIGN, 0);
                if (!dma_pool) {
                        dev_err(dev, "unable to allocate dma pool\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
                ccp->cmd_q_count++;

                cmd_q->ccp = ccp;
                cmd_q->id = i;
                cmd_q->dma_pool = dma_pool;

                /* Reserve 2 KSB regions for the queue */
                cmd_q->sb_key = KSB_START + ccp->sb_start++;
                cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
                ccp->sb_count -= 2;

                /* Preset some register values and masks that are queue
                 * number dependent
                 */
                cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
                                    (CMD_Q_STATUS_INCR * i);
                cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
                                        (CMD_Q_STATUS_INCR * i);

                cmd_q->int_ok = 1 << (i * 2);
                cmd_q->int_err = 1 << ((i * 2) + 1);

                cmd_q->free_slots = ccp_get_free_slots(cmd_q);

                init_waitqueue_head(&cmd_q->int_queue);

                /* Build queue interrupt mask (two interrupts per queue) */
                ccp->qim |= cmd_q->int_ok | cmd_q->int_err;

#ifdef CONFIG_ARM64
                /* For arm64 set the recommended queue cache settings */
                iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
                          (CMD_Q_CACHE_INC * i));
#endif

                dev_dbg(dev, "queue #%u available\n", i);
        }
        if (ccp->cmd_q_count == 0) {
                dev_notice(dev, "no command queues available\n");
                ret = -EIO;
                goto e_pool;
        }
        dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

        /* Disable and clear interrupts until ready */
        ccp_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);

        /* Request an irq */
        ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp);
        if (ret) {
                dev_err(dev, "unable to allocate an IRQ\n");
                goto e_pool;
        }

        /* Initialize the ISR tasklet? */
        if (ccp->use_tasklet)
                tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
                             (unsigned long)ccp);

        dev_dbg(dev, "Starting threads...\n");

        /* Create a kthread for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct task_struct *kthread;

                cmd_q = &ccp->cmd_q[i];

                kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
                                         "%s-q%u", ccp->name, cmd_q->id);
                if (IS_ERR(kthread)) {
                        dev_err(dev, "error creating queue thread (%ld)\n",
                                PTR_ERR(kthread));
                        ret = PTR_ERR(kthread);
                        goto e_kthread;
                }

                cmd_q->kthread = kthread;
                wake_up_process(kthread);
        }

        dev_dbg(dev, "Enabling interrupts...\n");
        /* Enable interrupts */
        ccp_enable_queue_interrupts(ccp);

        dev_dbg(dev, "Registering device...\n");
        ccp_add_device(ccp);

        ret = ccp_register_rng(ccp);
        if (ret)
                goto e_kthread;

        /* Register the DMA engine support */
        ret = ccp_dmaengine_register(ccp);
        if (ret)
                goto e_hwrng;

        return 0;

e_hwrng:
        ccp_unregister_rng(ccp);

e_kthread:
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        sp_free_ccp_irq(ccp->sp, ccp);

e_pool:
        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        return ret;
}
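
/* Tear everything down in roughly the reverse order of ccp_init(), then
 * complete any queued or backlogged commands with -ENODEV.
 */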
static void ccp_destroy(struct ccp_device *ccp)
{
        struct ccp_cmd_queue *cmd_q;
        struct ccp_cmd *cmd;
        unsigned int i;

        /* Unregister the DMA engine */
        ccp_dmaengine_unregister(ccp);

        /* Unregister the RNG */
        ccp_unregister_rng(ccp);

        /* Remove this device from the list of available units */
        ccp_del_device(ccp);

        /* Disable and clear interrupts */
        ccp_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);

        /* Stop the queue kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        sp_free_ccp_irq(ccp->sp, ccp);

        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        /* Flush the cmd and backlog queue */
        while (!list_empty(&ccp->cmd)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
        while (!list_empty(&ccp->backlog)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
}
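
/* Dispatch table for version 3 devices, invoked indirectly by the common
 * CCP code, e.g. (a sketch; see ccp-dev.h for the vdata layout):
 *
 *      ret = cmd_q->ccp->vdata->perform->aes(&op);
 *
 * v3 hardware has no 3DES engine, so .des3 is left NULL.
 */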
static const struct ccp_actions ccp3_actions = {
        .aes = ccp_perform_aes,
        .xts_aes = ccp_perform_xts_aes,
        .des3 = NULL,
        .sha = ccp_perform_sha,
        .rsa = ccp_perform_rsa,
        .passthru = ccp_perform_passthru,
        .ecc = ccp_perform_ecc,
        .sballoc = ccp_alloc_ksb,
        .sbfree = ccp_free_ksb,
        .init = ccp_init,
        .destroy = ccp_destroy,
        .get_free_slots = ccp_get_free_slots,
        .irqhandler = ccp_irq_handler,
};

const struct ccp_vdata ccpv3_platform = {
        .version = CCP_VERSION(3, 0),
        .setup = NULL,
        .perform = &ccp3_actions,
        .offset = 0,
        .rsamax = CCP_RSA_MAX_WIDTH,
};

const struct ccp_vdata ccpv3 = {
        .version = CCP_VERSION(3, 0),
        .setup = NULL,
        .perform = &ccp3_actions,
        .offset = 0x20000,
        .rsamax = CCP_RSA_MAX_WIDTH,
};