nx-aes-ccm.c

/**
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

	return 0;
}

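/* For RFC 4309, the last three bytes of the key material are the salt
 * (implicit nonce); save them and program the remaining bytes as the AES key. */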
static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

	if (key_len < 3)
		return -EINVAL;

	key_len -= 3;

	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

	return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

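/* CCM (NIST SP 800-38C) permits any even tag length from 4 to 16 bytes. */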
static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

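/* RFC 4309 restricts the ICV (tag) length to 8, 12 or 16 bytes. */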
static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

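/* Encode msglen big-endian into the csize-byte length field at the end of B0;
 * fails with -EOVERFLOW if the length does not fit. */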
/* taken from crypto/ccm.c */
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (1 > iv[0] || iv[0] > 7)
		return -EINVAL;

	return 0;
}

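/* B0 layout (CCM): one flags byte, the nonce, then an L-byte encoding of the
 * message length. The flags byte carries the tag length (m), the size of the
 * length field (L), and the adata bit when associated data is present. */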
/* based on code from crypto/ccm.c */
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
		       unsigned int cryptlen, u8 *b0)
{
	unsigned int l, lp, m = authsize;
	int rc;

	memcpy(b0, iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (assoclen)
		*b0 |= 64;

	rc = set_msg_len(b0 + 16 - l, cryptlen, l);

	return rc;
}

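/* Set up the initial partial authentication tag (PAT): build B0 (and B1 when
 * associated data is present) and, where needed, run the AAD through the
 * accelerator so that "out" holds the MAC state for the data passes. */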
static int generate_pat(u8                   *iv,
			struct aead_request  *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int          authsize,
			unsigned int          nbytes,
			unsigned int          assoclen,
			u8                   *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;
	unsigned int max_sg_len;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf has,
	 * Note: RFC3610 allows the AAD data to be up to 2^64 - 1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 - 1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 - 1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32.
	 */
	if (!assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do 1 CCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = assoclen;
	} else if (assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 */
	rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (assoclen <= 65280) {
			*(u16 *)b1 = assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->src, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->src, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		}
	}

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!assoclen) {
		return rc;
	} else if (assoclen <= 14) {
		unsigned int len = 16;

		nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
					    nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

	} else {
		unsigned int processed = 0, to_process;

		processed += iauth_len;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
				   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
		max_sg_len = min_t(u64, max_sg_len,
				   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

		do {
			to_process = min_t(u32, assoclen - processed,
					   nx_ctx->ap->databytelen);

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    nx_ctx->ap->sglen,
						    req->src, processed,
						    &to_process);

			if ((to_process + processed) < assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
					   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
			       nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
			       AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

			processed += to_process;
		} while (processed < assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}

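/* Decrypt: save the received tag, seed the PAT from B0/B1/AAD, walk the
 * ciphertext through the accelerator in chunks bounded by the sg limits, then
 * compare the computed MAC with the saved tag. */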
static int ccm_nx_decrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	nbytes -= authsize;

	/* copy out the auth tag to compare with later */
	scatterwalk_map_and_copy(priv->oauth_tag,
				 req->src, nbytes + req->assoclen, authsize,
				 SCATTERWALK_FROM_SG);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
			   authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

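/* Encrypt: seed the PAT from B0/B1/AAD, walk the plaintext through the
 * accelerator in chunks bounded by the sg limits, then copy the resulting MAC
 * out to the destination scatterlist after the ciphertext. */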
static int ccm_nx_encrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes + req->assoclen, authsize,
				 SCATTERWALK_TO_SG);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

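/* RFC 4309 requests: the counter block uses a 4-byte length field (iv[0] = 3,
 * i.e. L' = 3) and an 11-byte nonce built from the 3-byte salt saved at setkey
 * time followed by the 8-byte per-request IV. Those 8 IV bytes are counted in
 * req->assoclen but are not authenticated as AAD, hence the "- 8" below. */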
static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct blkcipher_desc desc;
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;

	return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
}

static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, &desc, req->assoclen);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct blkcipher_desc desc;
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;

	return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_decrypt(req, &desc, req->assoclen);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct aead_alg nx_ccm_aes_alg = {
	.base = {
		.cra_name        = "ccm(aes)",
		.cra_driver_name = "ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = AES_BLOCK_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm_aes_nx_set_key,
	.setauthsize = ccm_aes_nx_setauthsize,
	.encrypt     = ccm_aes_nx_encrypt,
	.decrypt     = ccm_aes_nx_decrypt,
};

struct aead_alg nx_ccm4309_aes_alg = {
	.base = {
		.cra_name        = "rfc4309(ccm(aes))",
		.cra_driver_name = "rfc4309-ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm4309_aes_nx_set_key,
	.setauthsize = ccm4309_aes_nx_setauthsize,
	.encrypt     = ccm4309_aes_nx_encrypt,
	.decrypt     = ccm4309_aes_nx_decrypt,
};