cesa.c

/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada. This
 * driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"

/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128

struct mv_cesa_dev *cesa_dev;
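
/*
 * Pop the next pending request off the engine queue. Must be called with
 * the engine lock held. *backlog is set to the backlogged request (if any)
 * so the caller can notify its owner that processing has started.
 */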
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
			   struct crypto_async_request **backlog)
{
	struct crypto_async_request *req;

	*backlog = crypto_get_backlog(&engine->queue);
	req = crypto_dequeue_request(&engine->queue);

	if (!req)
		return NULL;

	return req;
}
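
/*
 * If the engine is idle, dequeue the next pending request, make it the
 * in-flight request and launch its first processing step. A backlogged
 * request is notified that it has left the backlog (-EINPROGRESS).
 */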
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req = NULL, *backlog = NULL;
	struct mv_cesa_ctx *ctx;

	spin_lock_bh(&engine->lock);
	if (!engine->req) {
		req = mv_cesa_dequeue_req_locked(engine, &backlog);
		engine->req = req;
	}
	spin_unlock_bh(&engine->lock);

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->step(req);
}
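
/*
 * Standard (non-DMA) processing path: let the request-specific ->process()
 * hook examine the status register. A return value of 0 means the request
 * is done; -EINPROGRESS means another step must be launched.
 */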
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	int res;

	req = engine->req;
	ctx = crypto_tfm_ctx(req->tfm);
	res = ctx->ops->process(req, status);

	if (res == 0) {
		ctx->ops->complete(req);
		mv_cesa_engine_enqueue_complete_request(engine, req);
	} else if (res == -EINPROGRESS) {
		ctx->ops->step(req);
	}

	return res;
}
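
/*
 * Dispatch to the TDMA path when a descriptor chain is active, otherwise
 * fall back to the standard (CPU-driven) path.
 */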
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
	if (engine->chain.first && engine->chain.last)
		return mv_cesa_tdma_process(engine, status);

	return mv_cesa_std_process(engine, status);
}
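
/*
 * Clean up a finished request and run its completion callback. The
 * callback is invoked with softirqs disabled, since crypto completion
 * handlers expect to run in a BH-safe context.
 */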
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
		     int res)
{
	ctx->ops->cleanup(req);
	local_bh_disable();
	req->complete(req, res);
	local_bh_enable();
}
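
/*
 * Threaded interrupt handler: acknowledge the engine interrupts, process
 * the in-flight request, rearm the engine with the next pending request,
 * then drain the complete queue and run the completion callbacks.
 */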
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);
		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
		 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		/* Process fetched requests */
		res = mv_cesa_int_process(engine, status & mask);
		ret = IRQ_HANDLED;

		spin_lock_bh(&engine->lock);
		req = engine->req;
		if (res != -EINPROGRESS)
			engine->req = NULL;
		spin_unlock_bh(&engine->lock);

		ctx = crypto_tfm_ctx(req->tfm);

		if (res && res != -EINPROGRESS)
			mv_cesa_complete_req(ctx, req, res);

		/* Launch the next pending request */
		mv_cesa_rearm_engine(engine);

		/* Iterate over the complete queue */
		while (true) {
			req = mv_cesa_engine_dequeue_complete_request(engine);
			if (!req)
				break;

			ctx = crypto_tfm_ctx(req->tfm);
			mv_cesa_complete_req(ctx, req, 0);
		}
	}

	return ret;
}
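
/*
 * Queue a request on its engine. For DMA-backed requests that were
 * accepted or backlogged, the TDMA descriptor chain is appended to the
 * engine chain under the lock, then the engine is (re)armed.
 */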
int mv_cesa_queue_req(struct crypto_async_request *req,
		      struct mv_cesa_req *creq)
{
	int ret;
	struct mv_cesa_engine *engine = creq->engine;

	spin_lock_bh(&engine->lock);
	ret = crypto_enqueue_request(&engine->queue, req);
	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
	    (ret == -EINPROGRESS || ret == -EBUSY))
		mv_cesa_tdma_chain(engine, creq);
	spin_unlock_bh(&engine->lock);

	if (ret != -EINPROGRESS)
		return ret;

	mv_cesa_rearm_engine(engine);

	return -EINPROGRESS;
}
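
/*
 * Register all skcipher and ahash algorithms supported by this variant of
 * the IP, unregistering the already-registered ones on failure.
 */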
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
	int ret;
	int i, j;

	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
		ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
		if (ret)
			goto err_unregister_crypto;
	}

	for (i = 0; i < cesa->caps->nahash_algs; i++) {
		ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
		if (ret)
			goto err_unregister_ahash;
	}

	return 0;

err_unregister_ahash:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
	i = cesa->caps->ncipher_algs;

err_unregister_crypto:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);

	return ret;
}

static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
	int i;

	for (i = 0; i < cesa->caps->nahash_algs; i++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

	for (i = 0; i < cesa->caps->ncipher_algs; i++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
}

static struct skcipher_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};

static struct skcipher_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};

static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
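
/*
 * Program the four TDMA address decoding windows so the engine's DMA
 * master can reach every DRAM chip-select described by the mbus layout.
 */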
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
			  const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = engine->regs;
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
	}
}
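
/*
 * Allocate the managed DMA pools used by the TDMA path, one per object
 * type: TDMA descriptors, operation contexts, hash cache buffers and
 * padding buffers. This is a no-op on variants without TDMA support.
 */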
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
	struct device *dev = cesa->dev;
	struct mv_cesa_dev_dma *dma;

	if (!cesa->caps->has_tdma)
		return 0;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
					sizeof(struct mv_cesa_tdma_desc),
					16, 0);
	if (!dma->tdma_desc_pool)
		return -ENOMEM;

	dma->op_pool = dmam_pool_create("cesa_op", dev,
					sizeof(struct mv_cesa_op_ctx), 16, 0);
	if (!dma->op_pool)
		return -ENOMEM;

	dma->cache_pool = dmam_pool_create("cesa_cache", dev,
					   CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
	if (!dma->cache_pool)
		return -ENOMEM;

	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
	if (!dma->padding_pool)
		return -ENOMEM;

	cesa->dma = dma;

	return 0;
}
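
/*
 * Attach the per-engine SRAM: preferably from the genalloc pool described
 * by the "marvell,crypto-srams" property, otherwise by mapping the "sram"
 * (or "sramN") MMIO resource and creating a DMA mapping for it.
 */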
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];
	const char *res_name = "sram";
	struct resource *res;

	engine->pool = of_gen_pool_get(cesa->dev->of_node,
				       "marvell,crypto-srams", idx);
	if (engine->pool) {
		engine->sram = gen_pool_dma_alloc(engine->pool,
						  cesa->sram_size,
						  &engine->sram_dma);
		if (engine->sram)
			return 0;

		engine->pool = NULL;
		return -ENOMEM;
	}

	if (cesa->caps->nengines > 1) {
		if (!idx)
			res_name = "sram0";
		else
			res_name = "sram1";
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   res_name);
	if (!res || resource_size(res) < cesa->sram_size)
		return -EINVAL;

	engine->sram = devm_ioremap_resource(cesa->dev, res);
	if (IS_ERR(engine->sram))
		return PTR_ERR(engine->sram);

	engine->sram_dma = dma_map_resource(cesa->dev, res->start,
					    cesa->sram_size,
					    DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(cesa->dev, engine->sram_dma))
		return -ENOMEM;

	return 0;
}

static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];

	if (engine->pool)
		gen_pool_free(engine->pool, (unsigned long)engine->sram,
			      cesa->sram_size);
	else
		dma_unmap_resource(cesa->dev, engine->sram_dma,
				   cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}
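
/*
 * Probe: select the capabilities matching the compatible string, map the
 * registers, set up the DMA pools, then bring up each engine (SRAM,
 * clocks, mbus windows, IRQ, software queue) before registering the
 * supported algorithms.
 */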
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	struct resource *res;
	int irq, ret, i;
	u32 sram_size;

	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;

		caps = match->data;
	}

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	cesa->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[7];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%d", i);
		engine->clk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get(dev, NULL);
			if (IS_ERR(engine->clk))
				engine->clk = NULL;
		}

		snprintf(res_name, sizeof(res_name), "cesaz%d", i);
		engine->zclk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->zclk))
			engine->zclk = NULL;

		ret = clk_prepare_enable(engine->clk);
		if (ret)
			goto err_cleanup;

		ret = clk_prepare_enable(engine->zclk);
		if (ret)
			goto err_cleanup;

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(engine, dram);

		writel(0, engine->regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       engine->regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       engine->regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						engine);
		if (ret)
			goto err_cleanup;

		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
		atomic_set(&engine->load, 0);
		INIT_LIST_HEAD(&engine->complete_queue);
	}

	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	for (i = 0; i < caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return ret;
}
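
/*
 * Remove: unregister the algorithms, then tear down each engine (clocks
 * and SRAM). The devm framework releases the remaining resources (IRQs,
 * mappings, allocations).
 */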
static int mv_cesa_remove(struct platform_device *pdev)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	int i;

	mv_cesa_remove_algs(cesa);

	for (i = 0; i < cesa->caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return 0;
}

static const struct platform_device_id mv_cesa_plat_id_table[] = {
	{ .name = "mv_crypto" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);

static struct platform_driver marvell_cesa = {
	.probe = mv_cesa_probe,
	.remove = mv_cesa_remove,
	.id_table = mv_cesa_plat_id_table,
	.driver = {
		.name = "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");