// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * Some ideas are from atmel-aes.c drivers.
 */
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include "mtk-platform.h"

#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))
#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
				AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE		6

#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)

/* AES-CBC/ECB/CTR/OFB/CFB command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_OFB		cpu_to_le32(0x4 << 0)
#define AES_TFM_CFB128		cpu_to_le32(0x5 << 0)
#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)

/* AES flags */
#define AES_FLAGS_CIPHER_MSK	GENMASK(4, 0)
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CTR		BIT(2)
#define AES_FLAGS_OFB		BIT(3)
#define AES_FLAGS_CFB128	BIT(4)
#define AES_FLAGS_GCM		BIT(5)
#define AES_FLAGS_ENCRYPT	BIT(6)
#define AES_FLAGS_BUSY		BIT(7)

#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))
/**
 * mtk_aes_info - hardware information of AES
 * @cmd: command token, hardware instruction
 * @tfm: transform state of cipher algorithm.
 * @state: contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * | AES KEY   | 128/196/256 bits
 * |-----------|
 * | HASH KEY  | a string 128 zero bits encrypted using the block cipher
 * |-----------|
 * | IVs       | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all these info to do:
 * - Commands decoding and control of the engine's data path.
 * - Coordinating hardware data fetch and store operations.
 * - Result token construction and output.
 */
struct mtk_aes_info {
	__le32 cmd[AES_MAX_CT_SIZE];
	__le32 tfm[2];
	__le32 state[AES_MAX_STATE_BUF_SIZE];
};

/* Per-request state: the requested mode/direction flag combination. */
struct mtk_aes_reqctx {
	u64 mode;
};

/* Transform state common to all AES modes. */
struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;		/* device this tfm is bound to */
	u32 keylen;			/* key length in 32-bit words */
	__le32 key[12];			/* key as LE words (room beyond 256-bit
					 * keys — extra use not visible here) */
	__le32 keymode;			/* AES_TFM_{128,192,256}BITS */

	mtk_aes_fn start;		/* mode-specific request starter */

	struct mtk_aes_info info;	/* command/transform token for the HW */
	dma_addr_t ct_dma;		/* DMA address of @info */
	dma_addr_t tfm_dma;		/* DMA address of @info.tfm */

	__le32 ct_hdr;			/* control header word for descriptors */
	u32 ct_size;			/* number of cmd[] words in use */
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx base;
};

/* CTR mode adds a software-maintained counter and scratch scatterlists
 * used by scatterwalk_ffwd() to jump to the current offset. */
struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	u32 iv[AES_BLOCK_SIZE / sizeof(u32)];	/* counter block (stored BE) */
	size_t offset;				/* bytes processed so far */
	struct scatterlist src[2];
	struct scatterlist dst[2];
};

struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;			/* tag length in bytes */
	size_t textlen;			/* plaintext/ciphertext length */

	/* fallback ctr(aes) tfm — usage not visible in this chunk */
	struct crypto_skcipher *ctr;
};

struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};
  132. static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
  133. {
  134. return readl_relaxed(cryp->base + offset);
  135. }
  136. static inline void mtk_aes_write(struct mtk_cryp *cryp,
  137. u32 offset, u32 value)
  138. {
  139. writel_relaxed(value, cryp->base + offset);
  140. }
  141. static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
  142. {
  143. struct mtk_cryp *cryp = NULL;
  144. struct mtk_cryp *tmp;
  145. spin_lock_bh(&mtk_aes.lock);
  146. if (!ctx->cryp) {
  147. list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
  148. cryp = tmp;
  149. break;
  150. }
  151. ctx->cryp = cryp;
  152. } else {
  153. cryp = ctx->cryp;
  154. }
  155. spin_unlock_bh(&mtk_aes.lock);
  156. return cryp;
  157. }
  158. static inline size_t mtk_aes_padlen(size_t len)
  159. {
  160. len &= AES_BLOCK_SIZE - 1;
  161. return len ? AES_BLOCK_SIZE - len : 0;
  162. }
/*
 * Check whether @sg can be fed to the engine directly: @len must be a
 * multiple of AES_BLOCK_SIZE, every segment 32-bit aligned, and every
 * segment before the last one block-sized.  On success, records the
 * number of entries used in @dma and trims the final segment to exactly
 * @len, saving the cut-off tail in @dma->remainder so that
 * mtk_aes_restore_sg() can undo the trim later.
 */
static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			/* NOTE(review): redundant — @len only shrinks by
			 * block multiples, so it is still block-aligned. */
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			/* Trim the caller's sg in place (restored later). */
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}
  186. static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
  187. const struct mtk_aes_reqctx *rctx)
  188. {
  189. /* Clear all but persistent flags and set request flags. */
  190. aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
  191. }
  192. static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
  193. {
  194. struct scatterlist *sg = dma->sg;
  195. int nents = dma->nents;
  196. if (!dma->remainder)
  197. return;
  198. while (--nents > 0 && sg)
  199. sg = sg_next(sg);
  200. if (!sg)
  201. return;
  202. sg->length += dma->remainder;
  203. }
  204. static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
  205. {
  206. int i;
  207. for (i = 0; i < SIZE_IN_WORDS(size); i++)
  208. dst[i] = cpu_to_le32(src[i]);
  209. }
  210. static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
  211. {
  212. int i;
  213. for (i = 0; i < SIZE_IN_WORDS(size); i++)
  214. dst[i] = cpu_to_be32(src[i]);
  215. }
/*
 * Finish the current request: clear the ring's BUSY flag, signal the
 * crypto-API completion with @err, and kick the queue tasklet so any
 * pending request gets started.  Returns @err for tail-call convenience.
 */
static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);
	return err;
}
/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 *
 * Assumes src.sg_len and dst.sg_len are both >= 1 (otherwise cmd/res
 * would be dereferenced as NULL after the loops) — guaranteed by
 * mtk_aes_map() having mapped at least one segment each way.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			/* Only the first descriptor carries the command
			 * token and transform record for the packet. */
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for authenticated tag */
	if (aes->flags & AES_FLAGS_GCM)
		res->hdr += AES_BLOCK_SIZE;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}
/*
 * Undo the DMA mappings created by mtk_aes_map() and, when the bounce
 * buffer served as destination, copy the result back into the caller's
 * real scatterlist.
 */
static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		/* In-place operation: one bidirectional mapping. */
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		/* Restore trimmed sg length unless we bounced. */
		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	/* Output went through the bounce buffer: copy it back. */
	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}
/*
 * DMA-map the command/transform token and the source/destination
 * scatterlists, then start the transfer via mtk_aes_xmit().  Any
 * mapping failure completes the request with -EINVAL.
 */
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

	/* tfm[] immediately follows cmd[] inside struct mtk_aes_info. */
	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		/* In-place: map once, bidirectionally. */
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}
/* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	/* Word 0: key size + direction (word arithmetic below assumes LE
	 * host for the += on an already-le32 value — as in the original). */
	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;

	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;	/* ECB carries no IV */
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;	/* IV written by mtk_aes_ctr_transfer() */
	case AES_FLAGS_OFB:
		info->tfm[1] = AES_TFM_OFB;
		break;
	case AES_FLAGS_CFB128:
		info->tfm[1] = AES_TFM_CFB128;
		break;
	default:
		/* Should not happen... */
		/* NOTE(review): returning here leaves ctx->ct_size stale. */
		return;
	}

	/* Place the request IV right after the key words in state[]. */
	mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
			       AES_BLOCK_SIZE);
ctr:
	/* Grow the transform-record size to cover the IV words. */
	info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}
/*
 * Prepare @src/@dst for the engine.  The hardware needs 32-bit-aligned,
 * block-multiple segments; any side that fails the check is bounced
 * through aes->buf, padded up to a block boundary.  Completes the
 * request with -ENOMEM when the data won't fit the bounce buffer;
 * otherwise builds the transform token and hands off to mtk_aes_map().
 */
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			/* Stage the input in the bounce buffer. */
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}
/*
 * Ring queue handling: optionally enqueue @new_areq; if ring @id is
 * idle, dequeue the next request, mark the ring busy, notify a
 * backlogged request, copy the tfm key into the state buffer and call
 * the transform's ->start().  When the ring is already busy, only the
 * enqueue status is returned — the queue tasklet picks the request up
 * later via mtk_aes_complete().
 */
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);
	/* Write key into state buffer */
	memcpy(ctx->info.state, ctx->key, sizeof(ctx->key));

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}
  452. static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
  453. struct mtk_aes_rec *aes)
  454. {
  455. return mtk_aes_complete(cryp, aes, 0);
  456. }
  457. static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
  458. {
  459. struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
  460. struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
  461. mtk_aes_set_mode(aes, rctx);
  462. aes->resume = mtk_aes_transfer_complete;
  463. return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
  464. }
/* Up-cast from the embedded base context to the CTR-mode context. */
static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}
/*
 * Process a CTR request in chunks; invoked first from
 * mtk_aes_ctr_start() and again as the resume handler after each DMA
 * completion.  The transfer is split at a 32-bit counter wrap — the
 * chunking implies the hardware does not carry past the low counter
 * word (TODO confirm against EIP97 docs) — with the counter then bumped
 * in software before the next chunk.
 *
 * NOTE(review): cctx->iv is declared u32 but holds big-endian values
 * here (cpu_to_be32/be32_to_cpu) — works, but sparse-unclean.
 */
static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->nbytes)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->nbytes - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check 32bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		/* Wrap: only process up to the counter's maximum value
		 * this round; -start == (2^32 - start) blocks remain. */
		ctr |= 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
			       AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}
  512. static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
  513. {
  514. struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
  515. struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
  516. struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);
  517. mtk_aes_set_mode(aes, rctx);
  518. memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
  519. cctx->offset = 0;
  520. aes->total = 0;
  521. aes->resume = mtk_aes_ctr_transfer;
  522. return mtk_aes_ctr_transfer(cryp, aes);
  523. }
  524. /* Check and set the AES key to transform state buffer */
  525. static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
  526. const u8 *key, u32 keylen)
  527. {
  528. struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  529. switch (keylen) {
  530. case AES_KEYSIZE_128:
  531. ctx->keymode = AES_TFM_128BITS;
  532. break;
  533. case AES_KEYSIZE_192:
  534. ctx->keymode = AES_TFM_192BITS;
  535. break;
  536. case AES_KEYSIZE_256:
  537. ctx->keymode = AES_TFM_256BITS;
  538. break;
  539. default:
  540. crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
  541. return -EINVAL;
  542. }
  543. ctx->keylen = SIZE_IN_WORDS(keylen);
  544. mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
  545. return 0;
  546. }
/*
 * Common ablkcipher entry point: bind a device, record the requested
 * mode and queue the request.  The ring id is !(mode & ENCRYPT): ring 0
 * handles encryption, ring 1 decryption, spreading load across rings.
 */
static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct mtk_aes_reqctx *rctx;
	struct mtk_cryp *cryp;

	cryp = mtk_aes_find_dev(ctx);
	if (!cryp)
		return -ENODEV;

	rctx = ablkcipher_request_ctx(req);
	rctx->mode = mode;

	return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}
/*
 * Thin ablkcipher entry points: each selects the mode/direction flag
 * combination and hands the request to mtk_aes_crypt().
 */
static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CTR);
}

static int mtk_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int mtk_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_OFB);
}

static int mtk_aes_cfb_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
}

static int mtk_aes_cfb_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CFB128);
}
  601. static int mtk_aes_cra_init(struct crypto_tfm *tfm)
  602. {
  603. struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
  604. tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
  605. ctx->base.start = mtk_aes_start;
  606. return 0;
  607. }
  608. static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
  609. {
  610. struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
  611. tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
  612. ctx->base.start = mtk_aes_ctr_start;
  613. return 0;
  614. }
/*
 * Registered ablkcipher algorithms: AES in CBC/ECB/CTR/OFB/CFB.  All
 * entries share priority 400 and a 0xf alignmask (the engine DMA path
 * bounces unaligned data anyway; see mtk_aes_dma()).  Stream-like
 * modes (CTR/OFB/CFB) advertise blocksize 1.
 */
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_cbc_encrypt,
		.decrypt	= mtk_aes_cbc_decrypt,
		.ivsize		= AES_BLOCK_SIZE,
	}
},
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ecb_encrypt,
		.decrypt	= mtk_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_ctr_cra_init,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctr_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ctr_encrypt,
		.decrypt	= mtk_aes_ctr_decrypt,
	}
},
{
	.cra_name		= "ofb(aes)",
	.cra_driver_name	= "ofb-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_ofb_encrypt,
		.decrypt	= mtk_aes_ofb_decrypt,
	}
},
{
	.cra_name		= "cfb(aes)",
	.cra_driver_name	= "cfb-aes-mtk",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC,
	.cra_init		= mtk_aes_cra_init,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct mtk_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u.ablkcipher	= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= mtk_aes_setkey,
		.encrypt	= mtk_aes_cfb_encrypt,
		.decrypt	= mtk_aes_cfb_decrypt,
	}
},
};
/* Up-cast from the embedded base context to the GCM-mode context. */
static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}
/*
 * Engine will verify and compare tag automatically, so we just need
 * to check returned status which stored in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
{
	/* NOTE(review): ->ct appears to be __le32 and AES_AUTH_TAG_ERR is
	 * cpu_to_le32(BIT(26)), so the raw comparison is consistent, but
	 * an explicit le32 type here would be sparse-clean. */
	u32 status = cryp->ring[aes->id]->res_prev->ct;

	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
				-EBADMSG : 0);
}
/*
 * Initialize transform information of GCM mode.
 *
 * Builds the command tokens and transform-record words the engine
 * consumes for one GCM request, and copies the request IV into the
 * transform state right after the key and the precomputed GHASH key H.
 */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_info *info = &ctx->info;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	u32 cnt = 0;

	/* Control header carries the total byte length handed to the engine. */
	ctx->ct_hdr = AES_CT_CTRL_HDR | len;

	/* Command tokens: AAD length (twice), then the text length. */
	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD2;
	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		/* Encrypt: one extra token for the tag the engine emits. */
		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_OUT;
	} else {
		/* Decrypt: two tokens; the engine reads and checks the tag
		 * itself (see mtk_aes_gcm_tag_verify). */
		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_IN;
	}
	ctx->ct_size = cnt;

	/* Transform size = key words + H (one AES block) + IV words. */
	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
			ctx->keymode;
	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
		       AES_TFM_ENC_HASH;

	/* IV is placed after the key and after H (one AES block of state). */
	mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}
/*
 * Prepare src/dst scatterlists for DMA, bouncing through the record's
 * aligned buffer (aes->buf) when either list fails the alignment check.
 * Returns the result of mtk_aes_map(), or completes the request with
 * -ENOMEM when the bounce buffer is too small.
 */
static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
{
	bool src_aligned, dst_aligned;

	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		/* Bounce buffer is a fixed-size allocation; aes->total may
		 * exceed len on encrypt (tag appended, set in gcm_start). */
		if (aes->total > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			/* Stage the whole input into the aligned buffer. */
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		/* Sized with aes->total, not len, so the engine has room to
		 * write the authentication tag into the bounce buffer. */
		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
	}

	mtk_aes_gcm_info_init(cryp, aes, len);

	return mtk_aes_map(cryp, aes);
}
  803. /* Todo: GMAC */
  804. static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
  805. {
  806. struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
  807. struct aead_request *req = aead_request_cast(aes->areq);
  808. struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
  809. u32 len = req->assoclen + req->cryptlen;
  810. mtk_aes_set_mode(aes, rctx);
  811. if (aes->flags & AES_FLAGS_ENCRYPT) {
  812. u32 tag[4];
  813. aes->resume = mtk_aes_transfer_complete;
  814. /* Compute total process length. */
  815. aes->total = len + gctx->authsize;
  816. /* Hardware will append authenticated tag to output buffer */
  817. scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
  818. } else {
  819. aes->resume = mtk_aes_gcm_tag_verify;
  820. aes->total = len;
  821. }
  822. return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
  823. }
  824. static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
  825. {
  826. struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
  827. struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
  828. struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
  829. struct mtk_cryp *cryp;
  830. bool enc = !!(mode & AES_FLAGS_ENCRYPT);
  831. cryp = mtk_aes_find_dev(ctx);
  832. if (!cryp)
  833. return -ENODEV;
  834. /* Compute text length. */
  835. gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);
  836. /* Empty messages are not supported yet */
  837. if (!gctx->textlen && !req->assoclen)
  838. return -EINVAL;
  839. rctx->mode = AES_FLAGS_GCM | mode;
  840. return mtk_aes_handle_queue(cryp, enc, &req->base);
  841. }
/*
 * Because of the hardware limitation, we need to pre-calculate key(H)
 * for the GHASH operation. The result of the encryption operation
 * need to be stored in the transform state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct crypto_skcipher *ctr = gctx->ctr;
	struct {
		u32 hash[4];	/* holds H after the ctr(aes) encryption */
		u8 iv[8];
		struct crypto_wait wait;
		struct scatterlist sg[1];
		struct skcipher_request req;	/* must stay last: the
						 * skcipher request context is
						 * allocated right behind it */
	} *data;
	int err;

	/* Translate key length into the engine's key-mode bits. */
	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;
	default:
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	/* Same as crypto_gcm_setkey() from crypto/gcm.c */
	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ctr, key, keylen);
	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
			      CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	/* Trailing space holds the ctr(aes) per-request context. */
	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	crypto_init_wait(&data->wait);
	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(&data->req, ctr);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &data->wait);
	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
				   AES_BLOCK_SIZE, data->iv);

	/*
	 * Encrypt one block in place; hash and iv start zeroed (kzalloc),
	 * so this yields the GHASH key H = E_K(0^128). Wait synchronously.
	 */
	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
			      &data->wait);
	if (err)
		goto out;

	/* State layout: raw key words first, then H right behind them. */
	mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
	mtk_aes_write_state_be(ctx->key + ctx->keylen, data->hash,
			       AES_BLOCK_SIZE);
out:
	kzfree(data);	/* contains key-derived material: zeroizing free */
	return err;
}
  908. static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
  909. u32 authsize)
  910. {
  911. struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
  912. struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
  913. /* Same as crypto_gcm_authsize() from crypto/gcm.c */
  914. switch (authsize) {
  915. case 8:
  916. case 12:
  917. case 16:
  918. break;
  919. default:
  920. return -EINVAL;
  921. }
  922. gctx->authsize = authsize;
  923. return 0;
  924. }
/* AEAD .encrypt hook: queue the request with the encrypt flag set. */
static int mtk_aes_gcm_encrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}
/* AEAD .decrypt hook: queue the request with no direction flag. */
static int mtk_aes_gcm_decrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, 0);
}
  933. static int mtk_aes_gcm_init(struct crypto_aead *aead)
  934. {
  935. struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
  936. ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
  937. CRYPTO_ALG_ASYNC);
  938. if (IS_ERR(ctx->ctr)) {
  939. pr_err("Error allocating ctr(aes)\n");
  940. return PTR_ERR(ctx->ctr);
  941. }
  942. crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
  943. ctx->base.start = mtk_aes_gcm_start;
  944. return 0;
  945. }
/* Transform teardown: release the ctr(aes) helper allocated in init. */
static void mtk_aes_gcm_exit(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	crypto_free_skcipher(ctx->ctr);
}
/* AEAD algorithm descriptor for hardware-accelerated gcm(aes). */
static struct aead_alg aes_gcm_alg = {
	.setkey		= mtk_aes_gcm_setkey,
	.setauthsize	= mtk_aes_gcm_setauthsize,
	.encrypt	= mtk_aes_gcm_encrypt,
	.decrypt	= mtk_aes_gcm_decrypt,
	.init		= mtk_aes_gcm_init,
	.exit		= mtk_aes_gcm_exit,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,
	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-mtk",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,	/* GCM is a stream mode */
		.cra_ctxsize		= sizeof(struct mtk_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};
/*
 * Tasklet: pull the next pending request for this record off the queue
 * (NULL means "dequeue only, nothing new to enqueue").
 */
static void mtk_aes_queue_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}
/*
 * Tasklet scheduled from the ring IRQ: unmap the DMA buffers and invoke
 * the per-request resume handler (transfer-complete or tag-verify).
 */
static void mtk_aes_done_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
	struct mtk_cryp *cryp = aes->cryp;

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}
/*
 * Ring interrupt handler. dev_id is the mtk_aes_rec bound to this ring.
 * Acks the ring status, rearms the processed-descriptor threshold and
 * defers completion work to the done tasklet.
 */
static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
	struct mtk_cryp *cryp = aes->cryp;
	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

	/* Write the status value back — presumably write-to-clear the
	 * pending bits; confirm against the EIP-97 datasheet. */
	mtk_aes_write(cryp, RDR_STAT(aes->id), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		/* Reset the processed counter and rearm the threshold. */
		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->done_task);
	} else {
		/* Spurious interrupt: no request marked in flight. */
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}
  999. /*
  1000. * The purpose of creating encryption and decryption records is
  1001. * to process outbound/inbound data in parallel, it can improve
  1002. * performance in most use cases, such as IPSec VPN, especially
  1003. * under heavy network traffic.
  1004. */
  1005. static int mtk_aes_record_init(struct mtk_cryp *cryp)
  1006. {
  1007. struct mtk_aes_rec **aes = cryp->aes;
  1008. int i, err = -ENOMEM;
  1009. for (i = 0; i < MTK_REC_NUM; i++) {
  1010. aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
  1011. if (!aes[i])
  1012. goto err_cleanup;
  1013. aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
  1014. AES_BUF_ORDER);
  1015. if (!aes[i]->buf)
  1016. goto err_cleanup;
  1017. aes[i]->cryp = cryp;
  1018. spin_lock_init(&aes[i]->lock);
  1019. crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);
  1020. tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
  1021. (unsigned long)aes[i]);
  1022. tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
  1023. (unsigned long)aes[i]);
  1024. }
  1025. /* Link to ring0 and ring1 respectively */
  1026. aes[0]->id = MTK_RING0;
  1027. aes[1]->id = MTK_RING1;
  1028. return 0;
  1029. err_cleanup:
  1030. for (; i--; ) {
  1031. free_page((unsigned long)aes[i]->buf);
  1032. kfree(aes[i]);
  1033. }
  1034. return err;
  1035. }
  1036. static void mtk_aes_record_free(struct mtk_cryp *cryp)
  1037. {
  1038. int i;
  1039. for (i = 0; i < MTK_REC_NUM; i++) {
  1040. tasklet_kill(&cryp->aes[i]->done_task);
  1041. tasklet_kill(&cryp->aes[i]->queue_task);
  1042. free_page((unsigned long)cryp->aes[i]->buf);
  1043. kfree(cryp->aes[i]);
  1044. }
  1045. }
  1046. static void mtk_aes_unregister_algs(void)
  1047. {
  1048. int i;
  1049. crypto_unregister_aead(&aes_gcm_alg);
  1050. for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
  1051. crypto_unregister_alg(&aes_algs[i]);
  1052. }
  1053. static int mtk_aes_register_algs(void)
  1054. {
  1055. int err, i;
  1056. for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
  1057. err = crypto_register_alg(&aes_algs[i]);
  1058. if (err)
  1059. goto err_aes_algs;
  1060. }
  1061. err = crypto_register_aead(&aes_gcm_alg);
  1062. if (err)
  1063. goto err_aes_algs;
  1064. return 0;
  1065. err_aes_algs:
  1066. for (; i--; )
  1067. crypto_unregister_alg(&aes_algs[i]);
  1068. return err;
  1069. }
/*
 * Bring up the AES side of the device: allocate the two records, hook
 * their ring IRQs, enable ring interrupts, publish the device on the
 * driver list and register the algorithms.  Undone in reverse order on
 * failure.  Returns 0 or a negative errno.
 */
int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	/* IRQs are devm-managed, so error paths need not free them. */
	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[0]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[1]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupt */
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

	/* Make the device findable by mtk_aes_find_dev() before the algs
	 * are registered and requests can start arriving. */
	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}
/*
 * Tear down the AES side: unlist the device first so mtk_aes_find_dev()
 * can no longer hand it out, then unregister the algorithms and free
 * the records.
 */
void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}