omap-aes.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Cryptographic API.
  4. *
  5. * Support for OMAP AES HW acceleration.
  6. *
  7. * Copyright (c) 2010 Nokia Corporation
  8. * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
  9. * Copyright (c) 2011 Texas Instruments Incorporated
  10. */
  11. #define pr_fmt(fmt) "%20s: " fmt, __func__
  12. #define prn(num) pr_debug(#num "=%d\n", num)
  13. #define prx(num) pr_debug(#num "=%x\n", num)
  14. #include <linux/err.h>
  15. #include <linux/module.h>
  16. #include <linux/init.h>
  17. #include <linux/errno.h>
  18. #include <linux/kernel.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/scatterlist.h>
  21. #include <linux/dma-mapping.h>
  22. #include <linux/dmaengine.h>
  23. #include <linux/pm_runtime.h>
  24. #include <linux/of.h>
  25. #include <linux/of_device.h>
  26. #include <linux/of_address.h>
  27. #include <linux/io.h>
  28. #include <linux/crypto.h>
  29. #include <linux/interrupt.h>
  30. #include <crypto/scatterwalk.h>
  31. #include <crypto/aes.h>
  32. #include <crypto/gcm.h>
  33. #include <crypto/engine.h>
  34. #include <crypto/internal/skcipher.h>
  35. #include <crypto/internal/aead.h>
  36. #include "omap-crypto.h"
  37. #include "omap-aes.h"
  38. /* keep registered devices' data here */
  39. static LIST_HEAD(dev_list);
  40. static DEFINE_SPINLOCK(list_lock);
  41. static int aes_fallback_sz = 200;
  42. #ifdef DEBUG
  43. #define omap_aes_read(dd, offset) \
  44. ({ \
  45. int _read_ret; \
  46. _read_ret = __raw_readl(dd->io_base + offset); \
  47. pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n", \
  48. offset, _read_ret); \
  49. _read_ret; \
  50. })
  51. #else
  52. inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
  53. {
  54. return __raw_readl(dd->io_base + offset);
  55. }
  56. #endif
  57. #ifdef DEBUG
  58. #define omap_aes_write(dd, offset, value) \
  59. do { \
  60. pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n", \
  61. offset, value); \
  62. __raw_writel(value, dd->io_base + offset); \
  63. } while (0)
  64. #else
  65. inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
  66. u32 value)
  67. {
  68. __raw_writel(value, dd->io_base + offset);
  69. }
  70. #endif
  71. static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
  72. u32 value, u32 mask)
  73. {
  74. u32 val;
  75. val = omap_aes_read(dd, offset);
  76. val &= ~mask;
  77. val |= value;
  78. omap_aes_write(dd, offset, val);
  79. }
  80. static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
  81. u32 *value, int count)
  82. {
  83. for (; count--; value++, offset += 4)
  84. omap_aes_write(dd, offset, *value);
  85. }
  86. static int omap_aes_hw_init(struct omap_aes_dev *dd)
  87. {
  88. int err;
  89. if (!(dd->flags & FLAGS_INIT)) {
  90. dd->flags |= FLAGS_INIT;
  91. dd->err = 0;
  92. }
  93. err = pm_runtime_resume_and_get(dd->dev);
  94. if (err < 0) {
  95. dev_err(dd->dev, "failed to get sync: %d\n", err);
  96. return err;
  97. }
  98. return 0;
  99. }
  100. void omap_aes_clear_copy_flags(struct omap_aes_dev *dd)
  101. {
  102. dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_IN_DATA_ST_SHIFT);
  103. dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_OUT_DATA_ST_SHIFT);
  104. dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_ASSOC_DATA_ST_SHIFT);
  105. }
  106. int omap_aes_write_ctrl(struct omap_aes_dev *dd)
  107. {
  108. struct omap_aes_reqctx *rctx;
  109. unsigned int key32;
  110. int i, err;
  111. u32 val;
  112. err = omap_aes_hw_init(dd);
  113. if (err)
  114. return err;
  115. key32 = dd->ctx->keylen / sizeof(u32);
  116. /* Reset the key registers so key material from a previous (GCM/GHASH) operation is not reused */
  117. if (dd->flags & FLAGS_GCM)
  118. for (i = 0; i < 0x40; i = i + 4)
  119. omap_aes_write(dd, i, 0x0);
  120. for (i = 0; i < key32; i++) {
  121. omap_aes_write(dd, AES_REG_KEY(dd, i),
  122. __le32_to_cpu(dd->ctx->key[i]));
  123. }
  124. if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
  125. omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);
  126. if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv) {
  127. rctx = aead_request_ctx(dd->aead_req);
  128. omap_aes_write_n(dd, AES_REG_IV(dd, 0), (u32 *)rctx->iv, 4);
  129. }
  130. val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
  131. if (dd->flags & FLAGS_CBC)
  132. val |= AES_REG_CTRL_CBC;
  133. if (dd->flags & (FLAGS_CTR | FLAGS_GCM))
  134. val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
  135. if (dd->flags & FLAGS_GCM)
  136. val |= AES_REG_CTRL_GCM;
  137. if (dd->flags & FLAGS_ENCRYPT)
  138. val |= AES_REG_CTRL_DIRECTION;
  139. omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);
  140. return 0;
  141. }
  142. static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
  143. {
  144. u32 mask, val;
  145. val = dd->pdata->dma_start;
  146. if (dd->dma_lch_out != NULL)
  147. val |= dd->pdata->dma_enable_out;
  148. if (dd->dma_lch_in != NULL)
  149. val |= dd->pdata->dma_enable_in;
  150. mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
  151. dd->pdata->dma_start;
  152. omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
  153. }
  154. static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
  155. {
  156. omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
  157. omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);
  158. if (dd->flags & FLAGS_GCM)
  159. omap_aes_write(dd, AES_REG_A_LEN, dd->assoc_len);
  160. omap_aes_dma_trigger_omap2(dd, length);
  161. }
  162. static void omap_aes_dma_stop(struct omap_aes_dev *dd)
  163. {
  164. u32 mask;
  165. mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
  166. dd->pdata->dma_start;
  167. omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
  168. }
  169. struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_reqctx *rctx)
  170. {
  171. struct omap_aes_dev *dd;
  172. spin_lock_bh(&list_lock);
  173. dd = list_first_entry(&dev_list, struct omap_aes_dev, list);
  174. list_move_tail(&dd->list, &dev_list);
  175. rctx->dd = dd;
  176. spin_unlock_bh(&list_lock);
  177. return dd;
  178. }
  179. static void omap_aes_dma_out_callback(void *data)
  180. {
  181. struct omap_aes_dev *dd = data;
  182. /* dma_lch_out - completed */
  183. tasklet_schedule(&dd->done_task);
  184. }
  185. static int omap_aes_dma_init(struct omap_aes_dev *dd)
  186. {
  187. int err;
  188. dd->dma_lch_out = NULL;
  189. dd->dma_lch_in = NULL;
  190. dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
  191. if (IS_ERR(dd->dma_lch_in)) {
  192. dev_err(dd->dev, "Unable to request in DMA channel\n");
  193. return PTR_ERR(dd->dma_lch_in);
  194. }
  195. dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
  196. if (IS_ERR(dd->dma_lch_out)) {
  197. dev_err(dd->dev, "Unable to request out DMA channel\n");
  198. err = PTR_ERR(dd->dma_lch_out);
  199. goto err_dma_out;
  200. }
  201. return 0;
  202. err_dma_out:
  203. dma_release_channel(dd->dma_lch_in);
  204. return err;
  205. }
  206. static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
  207. {
  208. if (dd->pio_only)
  209. return;
  210. dma_release_channel(dd->dma_lch_out);
  211. dma_release_channel(dd->dma_lch_in);
  212. }
  213. static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
  214. struct scatterlist *in_sg,
  215. struct scatterlist *out_sg,
  216. int in_sg_len, int out_sg_len)
  217. {
  218. struct dma_async_tx_descriptor *tx_in, *tx_out;
  219. struct dma_slave_config cfg;
  220. int ret;
  221. if (dd->pio_only) {
  222. scatterwalk_start(&dd->in_walk, dd->in_sg);
  223. scatterwalk_start(&dd->out_walk, dd->out_sg);
  224. /* Enable DATAIN interrupt and let it take
  225. care of the rest */
  226. omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
  227. return 0;
  228. }
  229. dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);
  230. memset(&cfg, 0, sizeof(cfg));
  231. cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
  232. cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
  233. cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  234. cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  235. cfg.src_maxburst = DST_MAXBURST;
  236. cfg.dst_maxburst = DST_MAXBURST;
  237. /* IN */
  238. ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
  239. if (ret) {
  240. dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
  241. ret);
  242. return ret;
  243. }
  244. tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
  245. DMA_MEM_TO_DEV,
  246. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  247. if (!tx_in) {
  248. dev_err(dd->dev, "IN prep_slave_sg() failed\n");
  249. return -EINVAL;
  250. }
  251. /* No callback needed for the IN channel; completion is signalled by the OUT channel callback */
  252. tx_in->callback_param = dd;
  253. /* OUT */
  254. ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
  255. if (ret) {
  256. dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
  257. ret);
  258. return ret;
  259. }
  260. tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
  261. DMA_DEV_TO_MEM,
  262. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  263. if (!tx_out) {
  264. dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
  265. return -EINVAL;
  266. }
  267. if (dd->flags & FLAGS_GCM)
  268. tx_out->callback = omap_aes_gcm_dma_out_callback;
  269. else
  270. tx_out->callback = omap_aes_dma_out_callback;
  271. tx_out->callback_param = dd;
  272. dmaengine_submit(tx_in);
  273. dmaengine_submit(tx_out);
  274. dma_async_issue_pending(dd->dma_lch_in);
  275. dma_async_issue_pending(dd->dma_lch_out);
  276. /* start DMA */
  277. dd->pdata->trigger(dd, dd->total);
  278. return 0;
  279. }
  280. int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
  281. {
  282. int err;
  283. pr_debug("total: %d\n", dd->total);
  284. if (!dd->pio_only) {
  285. err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
  286. DMA_TO_DEVICE);
  287. if (!err) {
  288. dev_err(dd->dev, "dma_map_sg() error\n");
  289. return -EINVAL;
  290. }
  291. err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
  292. DMA_FROM_DEVICE);
  293. if (!err) {
  294. dev_err(dd->dev, "dma_map_sg() error\n");
  295. return -EINVAL;
  296. }
  297. }
  298. err = omap_aes_crypt_dma(dd, dd->in_sg, dd->out_sg, dd->in_sg_len,
  299. dd->out_sg_len);
  300. if (err && !dd->pio_only) {
  301. dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
  302. dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
  303. DMA_FROM_DEVICE);
  304. }
  305. return err;
  306. }
  307. static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
  308. {
  309. struct ablkcipher_request *req = dd->req;
  310. pr_debug("err: %d\n", err);
  311. crypto_finalize_ablkcipher_request(dd->engine, req, err);
  312. pm_runtime_mark_last_busy(dd->dev);
  313. pm_runtime_put_autosuspend(dd->dev);
  314. }
  315. int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
  316. {
  317. pr_debug("total: %d\n", dd->total);
  318. omap_aes_dma_stop(dd);
  319. return 0;
  320. }
  321. static int omap_aes_handle_queue(struct omap_aes_dev *dd,
  322. struct ablkcipher_request *req)
  323. {
  324. if (req)
  325. return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
  326. return 0;
  327. }
  328. static int omap_aes_prepare_req(struct crypto_engine *engine,
  329. void *areq)
  330. {
  331. struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
  332. struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
  333. crypto_ablkcipher_reqtfm(req));
  334. struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
  335. struct omap_aes_dev *dd = rctx->dd;
  336. int ret;
  337. u16 flags;
  338. if (!dd)
  339. return -ENODEV;
  340. /* assign new request to device */
  341. dd->req = req;
  342. dd->total = req->nbytes;
  343. dd->total_save = req->nbytes;
  344. dd->in_sg = req->src;
  345. dd->out_sg = req->dst;
  346. dd->orig_out = req->dst;
  347. flags = OMAP_CRYPTO_COPY_DATA;
  348. if (req->src == req->dst)
  349. flags |= OMAP_CRYPTO_FORCE_COPY;
  350. ret = omap_crypto_align_sg(&dd->in_sg, dd->total, AES_BLOCK_SIZE,
  351. dd->in_sgl, flags,
  352. FLAGS_IN_DATA_ST_SHIFT, &dd->flags);
  353. if (ret)
  354. return ret;
  355. ret = omap_crypto_align_sg(&dd->out_sg, dd->total, AES_BLOCK_SIZE,
  356. &dd->out_sgl, 0,
  357. FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
  358. if (ret)
  359. return ret;
  360. dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
  361. if (dd->in_sg_len < 0)
  362. return dd->in_sg_len;
  363. dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
  364. if (dd->out_sg_len < 0)
  365. return dd->out_sg_len;
  366. rctx->mode &= FLAGS_MODE_MASK;
  367. dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
  368. dd->ctx = ctx;
  369. rctx->dd = dd;
  370. return omap_aes_write_ctrl(dd);
  371. }
  372. static int omap_aes_crypt_req(struct crypto_engine *engine,
  373. void *areq)
  374. {
  375. struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
  376. struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
  377. struct omap_aes_dev *dd = rctx->dd;
  378. if (!dd)
  379. return -ENODEV;
  380. return omap_aes_crypt_dma_start(dd);
  381. }
  382. static void omap_aes_done_task(unsigned long data)
  383. {
  384. struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
  385. pr_debug("enter done_task\n");
  386. if (!dd->pio_only) {
  387. dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
  388. DMA_FROM_DEVICE);
  389. dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
  390. dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
  391. DMA_FROM_DEVICE);
  392. omap_aes_crypt_dma_stop(dd);
  393. }
  394. omap_crypto_cleanup(dd->in_sgl, NULL, 0, dd->total_save,
  395. FLAGS_IN_DATA_ST_SHIFT, dd->flags);
  396. omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
  397. FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
  398. omap_aes_finish_req(dd, 0);
  399. pr_debug("exit\n");
  400. }
  401. static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
  402. {
  403. struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
  404. crypto_ablkcipher_reqtfm(req));
  405. struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
  406. struct omap_aes_dev *dd;
  407. int ret;
  408. pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
  409. !!(mode & FLAGS_ENCRYPT),
  410. !!(mode & FLAGS_CBC));
  411. if (req->nbytes < aes_fallback_sz) {
  412. SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
  413. skcipher_request_set_sync_tfm(subreq, ctx->fallback);
  414. skcipher_request_set_callback(subreq, req->base.flags, NULL,
  415. NULL);
  416. skcipher_request_set_crypt(subreq, req->src, req->dst,
  417. req->nbytes, req->info);
  418. if (mode & FLAGS_ENCRYPT)
  419. ret = crypto_skcipher_encrypt(subreq);
  420. else
  421. ret = crypto_skcipher_decrypt(subreq);
  422. skcipher_request_zero(subreq);
  423. return ret;
  424. }
  425. dd = omap_aes_find_dev(rctx);
  426. if (!dd)
  427. return -ENODEV;
  428. rctx->mode = mode;
  429. return omap_aes_handle_queue(dd, req);
  430. }
  431. /* ********************** ALG API ************************************ */
  432. static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
  433. unsigned int keylen)
  434. {
  435. struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  436. int ret;
  437. if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
  438. keylen != AES_KEYSIZE_256)
  439. return -EINVAL;
  440. pr_debug("enter, keylen: %d\n", keylen);
  441. memcpy(ctx->key, key, keylen);
  442. ctx->keylen = keylen;
  443. crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
  444. crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
  445. CRYPTO_TFM_REQ_MASK);
  446. ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
  447. if (ret)
  448. return ret;
  449. return 0;
  450. }
  451. static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
  452. {
  453. return omap_aes_crypt(req, FLAGS_ENCRYPT);
  454. }
  455. static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
  456. {
  457. return omap_aes_crypt(req, 0);
  458. }
  459. static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
  460. {
  461. return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
  462. }
  463. static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
  464. {
  465. return omap_aes_crypt(req, FLAGS_CBC);
  466. }
  467. static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
  468. {
  469. return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
  470. }
  471. static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
  472. {
  473. return omap_aes_crypt(req, FLAGS_CTR);
  474. }
  475. static int omap_aes_prepare_req(struct crypto_engine *engine,
  476. void *req);
  477. static int omap_aes_crypt_req(struct crypto_engine *engine,
  478. void *req);
  479. static int omap_aes_cra_init(struct crypto_tfm *tfm)
  480. {
  481. const char *name = crypto_tfm_alg_name(tfm);
  482. struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
  483. struct crypto_sync_skcipher *blk;
  484. blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
  485. if (IS_ERR(blk))
  486. return PTR_ERR(blk);
  487. ctx->fallback = blk;
  488. tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
  489. ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
  490. ctx->enginectx.op.unprepare_request = NULL;
  491. ctx->enginectx.op.do_one_request = omap_aes_crypt_req;
  492. return 0;
  493. }
  494. static int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
  495. {
  496. struct omap_aes_dev *dd = NULL;
  497. struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
  498. int err;
  499. /* Find AES device, currently picks the first device */
  500. spin_lock_bh(&list_lock);
  501. list_for_each_entry(dd, &dev_list, list) {
  502. break;
  503. }
  504. spin_unlock_bh(&list_lock);
  505. err = pm_runtime_resume_and_get(dd->dev);
  506. if (err < 0) {
  507. dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
  508. __func__, err);
  509. return err;
  510. }
  511. tfm->reqsize = sizeof(struct omap_aes_reqctx);
  512. ctx->ctr = crypto_alloc_skcipher("ecb(aes)", 0, 0);
  513. if (IS_ERR(ctx->ctr)) {
  514. pr_warn("could not load aes driver for encrypting IV\n");
  515. return PTR_ERR(ctx->ctr);
  516. }
  517. return 0;
  518. }
  519. static void omap_aes_cra_exit(struct crypto_tfm *tfm)
  520. {
  521. struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
  522. if (ctx->fallback)
  523. crypto_free_sync_skcipher(ctx->fallback);
  524. ctx->fallback = NULL;
  525. }
  526. static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
  527. {
  528. struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
  529. omap_aes_cra_exit(crypto_aead_tfm(tfm));
  530. if (ctx->ctr)
  531. crypto_free_skcipher(ctx->ctr);
  532. }
  533. /* ********************** ALGS ************************************ */
  534. static struct crypto_alg algs_ecb_cbc[] = {
  535. {
  536. .cra_name = "ecb(aes)",
  537. .cra_driver_name = "ecb-aes-omap",
  538. .cra_priority = 300,
  539. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
  540. CRYPTO_ALG_KERN_DRIVER_ONLY |
  541. CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
  542. .cra_blocksize = AES_BLOCK_SIZE,
  543. .cra_ctxsize = sizeof(struct omap_aes_ctx),
  544. .cra_alignmask = 0,
  545. .cra_type = &crypto_ablkcipher_type,
  546. .cra_module = THIS_MODULE,
  547. .cra_init = omap_aes_cra_init,
  548. .cra_exit = omap_aes_cra_exit,
  549. .cra_u.ablkcipher = {
  550. .min_keysize = AES_MIN_KEY_SIZE,
  551. .max_keysize = AES_MAX_KEY_SIZE,
  552. .setkey = omap_aes_setkey,
  553. .encrypt = omap_aes_ecb_encrypt,
  554. .decrypt = omap_aes_ecb_decrypt,
  555. }
  556. },
  557. {
  558. .cra_name = "cbc(aes)",
  559. .cra_driver_name = "cbc-aes-omap",
  560. .cra_priority = 300,
  561. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
  562. CRYPTO_ALG_KERN_DRIVER_ONLY |
  563. CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
  564. .cra_blocksize = AES_BLOCK_SIZE,
  565. .cra_ctxsize = sizeof(struct omap_aes_ctx),
  566. .cra_alignmask = 0,
  567. .cra_type = &crypto_ablkcipher_type,
  568. .cra_module = THIS_MODULE,
  569. .cra_init = omap_aes_cra_init,
  570. .cra_exit = omap_aes_cra_exit,
  571. .cra_u.ablkcipher = {
  572. .min_keysize = AES_MIN_KEY_SIZE,
  573. .max_keysize = AES_MAX_KEY_SIZE,
  574. .ivsize = AES_BLOCK_SIZE,
  575. .setkey = omap_aes_setkey,
  576. .encrypt = omap_aes_cbc_encrypt,
  577. .decrypt = omap_aes_cbc_decrypt,
  578. }
  579. }
  580. };
  581. static struct crypto_alg algs_ctr[] = {
  582. {
  583. .cra_name = "ctr(aes)",
  584. .cra_driver_name = "ctr-aes-omap",
  585. .cra_priority = 300,
  586. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
  587. CRYPTO_ALG_KERN_DRIVER_ONLY |
  588. CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
  589. .cra_blocksize = AES_BLOCK_SIZE,
  590. .cra_ctxsize = sizeof(struct omap_aes_ctx),
  591. .cra_alignmask = 0,
  592. .cra_type = &crypto_ablkcipher_type,
  593. .cra_module = THIS_MODULE,
  594. .cra_init = omap_aes_cra_init,
  595. .cra_exit = omap_aes_cra_exit,
  596. .cra_u.ablkcipher = {
  597. .min_keysize = AES_MIN_KEY_SIZE,
  598. .max_keysize = AES_MAX_KEY_SIZE,
  599. .ivsize = AES_BLOCK_SIZE,
  600. .setkey = omap_aes_setkey,
  601. .encrypt = omap_aes_ctr_encrypt,
  602. .decrypt = omap_aes_ctr_decrypt,
  603. }
  604. } ,
  605. };
  606. static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
  607. {
  608. .algs_list = algs_ecb_cbc,
  609. .size = ARRAY_SIZE(algs_ecb_cbc),
  610. },
  611. };
  612. static struct aead_alg algs_aead_gcm[] = {
  613. {
  614. .base = {
  615. .cra_name = "gcm(aes)",
  616. .cra_driver_name = "gcm-aes-omap",
  617. .cra_priority = 300,
  618. .cra_flags = CRYPTO_ALG_ASYNC |
  619. CRYPTO_ALG_KERN_DRIVER_ONLY,
  620. .cra_blocksize = 1,
  621. .cra_ctxsize = sizeof(struct omap_aes_ctx),
  622. .cra_alignmask = 0xf,
  623. .cra_module = THIS_MODULE,
  624. },
  625. .init = omap_aes_gcm_cra_init,
  626. .exit = omap_aes_gcm_cra_exit,
  627. .ivsize = GCM_AES_IV_SIZE,
  628. .maxauthsize = AES_BLOCK_SIZE,
  629. .setkey = omap_aes_gcm_setkey,
  630. .encrypt = omap_aes_gcm_encrypt,
  631. .decrypt = omap_aes_gcm_decrypt,
  632. },
  633. {
  634. .base = {
  635. .cra_name = "rfc4106(gcm(aes))",
  636. .cra_driver_name = "rfc4106-gcm-aes-omap",
  637. .cra_priority = 300,
  638. .cra_flags = CRYPTO_ALG_ASYNC |
  639. CRYPTO_ALG_KERN_DRIVER_ONLY,
  640. .cra_blocksize = 1,
  641. .cra_ctxsize = sizeof(struct omap_aes_ctx),
  642. .cra_alignmask = 0xf,
  643. .cra_module = THIS_MODULE,
  644. },
  645. .init = omap_aes_gcm_cra_init,
  646. .exit = omap_aes_gcm_cra_exit,
  647. .maxauthsize = AES_BLOCK_SIZE,
  648. .ivsize = GCM_RFC4106_IV_SIZE,
  649. .setkey = omap_aes_4106gcm_setkey,
  650. .encrypt = omap_aes_4106gcm_encrypt,
  651. .decrypt = omap_aes_4106gcm_decrypt,
  652. },
  653. };
  654. static struct omap_aes_aead_algs omap_aes_aead_info = {
  655. .algs_list = algs_aead_gcm,
  656. .size = ARRAY_SIZE(algs_aead_gcm),
  657. };
  658. static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
  659. .algs_info = omap_aes_algs_info_ecb_cbc,
  660. .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
  661. .trigger = omap_aes_dma_trigger_omap2,
  662. .key_ofs = 0x1c,
  663. .iv_ofs = 0x20,
  664. .ctrl_ofs = 0x30,
  665. .data_ofs = 0x34,
  666. .rev_ofs = 0x44,
  667. .mask_ofs = 0x48,
  668. .dma_enable_in = BIT(2),
  669. .dma_enable_out = BIT(3),
  670. .dma_start = BIT(5),
  671. .major_mask = 0xf0,
  672. .major_shift = 4,
  673. .minor_mask = 0x0f,
  674. .minor_shift = 0,
  675. };
  676. #ifdef CONFIG_OF
  677. static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
  678. {
  679. .algs_list = algs_ecb_cbc,
  680. .size = ARRAY_SIZE(algs_ecb_cbc),
  681. },
  682. {
  683. .algs_list = algs_ctr,
  684. .size = ARRAY_SIZE(algs_ctr),
  685. },
  686. };
  687. static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
  688. .algs_info = omap_aes_algs_info_ecb_cbc_ctr,
  689. .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
  690. .trigger = omap_aes_dma_trigger_omap2,
  691. .key_ofs = 0x1c,
  692. .iv_ofs = 0x20,
  693. .ctrl_ofs = 0x30,
  694. .data_ofs = 0x34,
  695. .rev_ofs = 0x44,
  696. .mask_ofs = 0x48,
  697. .dma_enable_in = BIT(2),
  698. .dma_enable_out = BIT(3),
  699. .dma_start = BIT(5),
  700. .major_mask = 0xf0,
  701. .major_shift = 4,
  702. .minor_mask = 0x0f,
  703. .minor_shift = 0,
  704. };
  705. static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
  706. .algs_info = omap_aes_algs_info_ecb_cbc_ctr,
  707. .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
  708. .aead_algs_info = &omap_aes_aead_info,
  709. .trigger = omap_aes_dma_trigger_omap4,
  710. .key_ofs = 0x3c,
  711. .iv_ofs = 0x40,
  712. .ctrl_ofs = 0x50,
  713. .data_ofs = 0x60,
  714. .rev_ofs = 0x80,
  715. .mask_ofs = 0x84,
  716. .irq_status_ofs = 0x8c,
  717. .irq_enable_ofs = 0x90,
  718. .dma_enable_in = BIT(5),
  719. .dma_enable_out = BIT(6),
  720. .major_mask = 0x0700,
  721. .major_shift = 8,
  722. .minor_mask = 0x003f,
  723. .minor_shift = 0,
  724. };
  725. static irqreturn_t omap_aes_irq(int irq, void *dev_id)
  726. {
  727. struct omap_aes_dev *dd = dev_id;
  728. u32 status, i;
  729. u32 *src, *dst;
  730. status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
  731. if (status & AES_REG_IRQ_DATA_IN) {
  732. omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
  733. BUG_ON(!dd->in_sg);
  734. BUG_ON(_calc_walked(in) > dd->in_sg->length);
  735. src = sg_virt(dd->in_sg) + _calc_walked(in);
  736. for (i = 0; i < AES_BLOCK_WORDS; i++) {
  737. omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
  738. scatterwalk_advance(&dd->in_walk, 4);
  739. if (dd->in_sg->length == _calc_walked(in)) {
  740. dd->in_sg = sg_next(dd->in_sg);
  741. if (dd->in_sg) {
  742. scatterwalk_start(&dd->in_walk,
  743. dd->in_sg);
  744. src = sg_virt(dd->in_sg) +
  745. _calc_walked(in);
  746. }
  747. } else {
  748. src++;
  749. }
  750. }
  751. /* Clear IRQ status */
  752. status &= ~AES_REG_IRQ_DATA_IN;
  753. omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
  754. /* Enable DATA_OUT interrupt */
  755. omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);
  756. } else if (status & AES_REG_IRQ_DATA_OUT) {
  757. omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
  758. BUG_ON(!dd->out_sg);
  759. BUG_ON(_calc_walked(out) > dd->out_sg->length);
  760. dst = sg_virt(dd->out_sg) + _calc_walked(out);
  761. for (i = 0; i < AES_BLOCK_WORDS; i++) {
  762. *dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
  763. scatterwalk_advance(&dd->out_walk, 4);
  764. if (dd->out_sg->length == _calc_walked(out)) {
  765. dd->out_sg = sg_next(dd->out_sg);
  766. if (dd->out_sg) {
  767. scatterwalk_start(&dd->out_walk,
  768. dd->out_sg);
  769. dst = sg_virt(dd->out_sg) +
  770. _calc_walked(out);
  771. }
  772. } else {
  773. dst++;
  774. }
  775. }
  776. dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);
  777. /* Clear IRQ status */
  778. status &= ~AES_REG_IRQ_DATA_OUT;
  779. omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
  780. if (!dd->total)
  781. /* All bytes read! */
  782. tasklet_schedule(&dd->done_task);
  783. else
  784. /* Enable DATA_IN interrupt for next block */
  785. omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
  786. }
  787. return IRQ_HANDLED;
  788. }
  789. static const struct of_device_id omap_aes_of_match[] = {
  790. {
  791. .compatible = "ti,omap2-aes",
  792. .data = &omap_aes_pdata_omap2,
  793. },
  794. {
  795. .compatible = "ti,omap3-aes",
  796. .data = &omap_aes_pdata_omap3,
  797. },
  798. {
  799. .compatible = "ti,omap4-aes",
  800. .data = &omap_aes_pdata_omap4,
  801. },
  802. {},
  803. };
  804. MODULE_DEVICE_TABLE(of, omap_aes_of_match);
  805. static int omap_aes_get_res_of(struct omap_aes_dev *dd,
  806. struct device *dev, struct resource *res)
  807. {
  808. struct device_node *node = dev->of_node;
  809. int err = 0;
  810. dd->pdata = of_device_get_match_data(dev);
  811. if (!dd->pdata) {
  812. dev_err(dev, "no compatible OF match\n");
  813. err = -EINVAL;
  814. goto err;
  815. }
  816. err = of_address_to_resource(node, 0, res);
  817. if (err < 0) {
  818. dev_err(dev, "can't translate OF node address\n");
  819. err = -EINVAL;
  820. goto err;
  821. }
  822. err:
  823. return err;
  824. }
  825. #else
  826. static const struct of_device_id omap_aes_of_match[] = {
  827. {},
  828. };
  829. static int omap_aes_get_res_of(struct omap_aes_dev *dd,
  830. struct device *dev, struct resource *res)
  831. {
  832. return -EINVAL;
  833. }
  834. #endif
  835. static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
  836. struct platform_device *pdev, struct resource *res)
  837. {
  838. struct device *dev = &pdev->dev;
  839. struct resource *r;
  840. int err = 0;
  841. /* Get the base address */
  842. r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  843. if (!r) {
  844. dev_err(dev, "no MEM resource info\n");
  845. err = -ENODEV;
  846. goto err;
  847. }
  848. memcpy(res, r, sizeof(*res));
  849. /* Only OMAP2/3 can be non-DT */
  850. dd->pdata = &omap_aes_pdata_omap2;
  851. err:
  852. return err;
  853. }
  854. static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
  855. char *buf)
  856. {
  857. return sprintf(buf, "%d\n", aes_fallback_sz);
  858. }
  859. static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
  860. const char *buf, size_t size)
  861. {
  862. ssize_t status;
  863. long value;
  864. status = kstrtol(buf, 0, &value);
  865. if (status)
  866. return status;
  867. /* HW accelerator only works with buffers of at least 9 bytes */
  868. if (value < 9) {
  869. dev_err(dev, "minimum fallback size 9\n");
  870. return -EINVAL;
  871. }
  872. aes_fallback_sz = value;
  873. return size;
  874. }
  875. static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
  876. char *buf)
  877. {
  878. struct omap_aes_dev *dd = dev_get_drvdata(dev);
  879. return sprintf(buf, "%d\n", dd->engine->queue.max_qlen);
  880. }
  881. static ssize_t queue_len_store(struct device *dev,
  882. struct device_attribute *attr, const char *buf,
  883. size_t size)
  884. {
  885. struct omap_aes_dev *dd;
  886. ssize_t status;
  887. long value;
  888. unsigned long flags;
  889. status = kstrtol(buf, 0, &value);
  890. if (status)
  891. return status;
  892. if (value < 1)
  893. return -EINVAL;
  894. /*
  895. * Changing the queue size on the fly is safe: if the new size is
  896. * smaller than the current one, the queue simply stops accepting new
  897. * entries until it has shrunk enough.
  898. */
  899. spin_lock_bh(&list_lock);
  900. list_for_each_entry(dd, &dev_list, list) {
  901. spin_lock_irqsave(&dd->lock, flags);
  902. dd->engine->queue.max_qlen = value;
  903. dd->aead_queue.base.max_qlen = value;
  904. spin_unlock_irqrestore(&dd->lock, flags);
  905. }
  906. spin_unlock_bh(&list_lock);
  907. return size;
  908. }
  909. static DEVICE_ATTR_RW(queue_len);
  910. static DEVICE_ATTR_RW(fallback);
  911. static struct attribute *omap_aes_attrs[] = {
  912. &dev_attr_queue_len.attr,
  913. &dev_attr_fallback.attr,
  914. NULL,
  915. };
  916. static struct attribute_group omap_aes_attr_group = {
  917. .attrs = omap_aes_attrs,
  918. };
  919. static int omap_aes_probe(struct platform_device *pdev)
  920. {
  921. struct device *dev = &pdev->dev;
  922. struct omap_aes_dev *dd;
  923. struct crypto_alg *algp;
  924. struct aead_alg *aalg;
  925. struct resource res;
  926. int err = -ENOMEM, i, j, irq = -1;
  927. u32 reg;
  928. dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
  929. if (dd == NULL) {
  930. dev_err(dev, "unable to alloc data struct.\n");
  931. goto err_data;
  932. }
  933. dd->dev = dev;
  934. platform_set_drvdata(pdev, dd);
  935. aead_init_queue(&dd->aead_queue, OMAP_AES_QUEUE_LENGTH);
  936. err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
  937. omap_aes_get_res_pdev(dd, pdev, &res);
  938. if (err)
  939. goto err_res;
  940. dd->io_base = devm_ioremap_resource(dev, &res);
  941. if (IS_ERR(dd->io_base)) {
  942. err = PTR_ERR(dd->io_base);
  943. goto err_res;
  944. }
  945. dd->phys_base = res.start;
  946. pm_runtime_use_autosuspend(dev);
  947. pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
  948. pm_runtime_enable(dev);
  949. err = pm_runtime_resume_and_get(dev);
  950. if (err < 0) {
  951. dev_err(dev, "%s: failed to get_sync(%d)\n",
  952. __func__, err);
  953. goto err_pm_disable;
  954. }
  955. omap_aes_dma_stop(dd);
  956. reg = omap_aes_read(dd, AES_REG_REV(dd));
  957. pm_runtime_put_sync(dev);
  958. dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
  959. (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
  960. (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
  961. tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
  962. err = omap_aes_dma_init(dd);
  963. if (err == -EPROBE_DEFER) {
  964. goto err_irq;
  965. } else if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
  966. dd->pio_only = 1;
  967. irq = platform_get_irq(pdev, 0);
  968. if (irq < 0) {
  969. err = irq;
  970. goto err_irq;
  971. }
  972. err = devm_request_irq(dev, irq, omap_aes_irq, 0,
  973. dev_name(dev), dd);
  974. if (err) {
  975. dev_err(dev, "Unable to grab omap-aes IRQ\n");
  976. goto err_irq;
  977. }
  978. }
  979. spin_lock_init(&dd->lock);
  980. INIT_LIST_HEAD(&dd->list);
  981. spin_lock(&list_lock);
  982. list_add_tail(&dd->list, &dev_list);
  983. spin_unlock(&list_lock);
  984. /* Initialize crypto engine */
  985. dd->engine = crypto_engine_alloc_init(dev, 1);
  986. if (!dd->engine) {
  987. err = -ENOMEM;
  988. goto err_engine;
  989. }
  990. err = crypto_engine_start(dd->engine);
  991. if (err)
  992. goto err_engine;
  993. for (i = 0; i < dd->pdata->algs_info_size; i++) {
  994. if (!dd->pdata->algs_info[i].registered) {
  995. for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
  996. algp = &dd->pdata->algs_info[i].algs_list[j];
  997. pr_debug("reg alg: %s\n", algp->cra_name);
  998. err = crypto_register_alg(algp);
  999. if (err)
  1000. goto err_algs;
  1001. dd->pdata->algs_info[i].registered++;
  1002. }
  1003. }
  1004. }
  1005. if (dd->pdata->aead_algs_info &&
  1006. !dd->pdata->aead_algs_info->registered) {
  1007. for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
  1008. aalg = &dd->pdata->aead_algs_info->algs_list[i];
  1009. algp = &aalg->base;
  1010. pr_debug("reg alg: %s\n", algp->cra_name);
  1011. err = crypto_register_aead(aalg);
  1012. if (err)
  1013. goto err_aead_algs;
  1014. dd->pdata->aead_algs_info->registered++;
  1015. }
  1016. }
  1017. err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group);
  1018. if (err) {
  1019. dev_err(dev, "could not create sysfs device attrs\n");
  1020. goto err_aead_algs;
  1021. }
  1022. return 0;
  1023. err_aead_algs:
  1024. for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
  1025. aalg = &dd->pdata->aead_algs_info->algs_list[i];
  1026. crypto_unregister_aead(aalg);
  1027. }
  1028. err_algs:
  1029. for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
  1030. for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
  1031. crypto_unregister_alg(
  1032. &dd->pdata->algs_info[i].algs_list[j]);
  1033. err_engine:
  1034. if (dd->engine)
  1035. crypto_engine_exit(dd->engine);
  1036. omap_aes_dma_cleanup(dd);
  1037. err_irq:
  1038. tasklet_kill(&dd->done_task);
  1039. err_pm_disable:
  1040. pm_runtime_disable(dev);
  1041. err_res:
  1042. dd = NULL;
  1043. err_data:
  1044. dev_err(dev, "initialization failed.\n");
  1045. return err;
  1046. }
  1047. static int omap_aes_remove(struct platform_device *pdev)
  1048. {
  1049. struct omap_aes_dev *dd = platform_get_drvdata(pdev);
  1050. struct aead_alg *aalg;
  1051. int i, j;
  1052. if (!dd)
  1053. return -ENODEV;
  1054. spin_lock(&list_lock);
  1055. list_del(&dd->list);
  1056. spin_unlock(&list_lock);
  1057. for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
  1058. for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
  1059. crypto_unregister_alg(
  1060. &dd->pdata->algs_info[i].algs_list[j]);
  1061. for (i = dd->pdata->aead_algs_info ? dd->pdata->aead_algs_info->size - 1 : -1; i >= 0; i--) {
  1062. aalg = &dd->pdata->aead_algs_info->algs_list[i];
  1063. crypto_unregister_aead(aalg);
  1064. }
  1065. crypto_engine_exit(dd->engine);
  1066. tasklet_kill(&dd->done_task);
  1067. omap_aes_dma_cleanup(dd);
  1068. pm_runtime_disable(dd->dev);
  1069. dd = NULL;
  1070. return 0;
  1071. }
  1072. #ifdef CONFIG_PM_SLEEP
  1073. static int omap_aes_suspend(struct device *dev)
  1074. {
  1075. pm_runtime_put_sync(dev);
  1076. return 0;
  1077. }
  1078. static int omap_aes_resume(struct device *dev)
  1079. {
  1080. pm_runtime_resume_and_get(dev);
  1081. return 0;
  1082. }
  1083. #endif
  1084. static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);
  1085. static struct platform_driver omap_aes_driver = {
  1086. .probe = omap_aes_probe,
  1087. .remove = omap_aes_remove,
  1088. .driver = {
  1089. .name = "omap-aes",
  1090. .pm = &omap_aes_pm_ops,
  1091. .of_match_table = omap_aes_of_match,
  1092. },
  1093. };
  1094. module_platform_driver(omap_aes_driver);
  1095. MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
  1096. MODULE_LICENSE("GPL v2");
  1097. MODULE_AUTHOR("Dmitry Kasatkin");