/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysize in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include "sun4i-ss.h"
/*
 * sun4i_ss_opti_poll() - optimized CPU-driven cipher path.
 *
 * Pushes the whole request through the SS RX FIFO and drains the TX FIFO
 * by polling, copying 32-bit words directly between the SG entries and
 * the FIFOs with no intermediate buffering.  It therefore requires every
 * source and destination SG entry to have a length that is a multiple of
 * 4 bytes; sun4i_ss_cipher_poll() checks that condition before calling us.
 *
 * Returns 0 on success or a negative errno.
 */
static int sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;

	/* Nothing to cipher: report success without touching the hardware */
	if (!areq->cryptlen)
		return 0;

	/*
	 * NOTE(review): this rejects requests without an IV even for ECB
	 * modes (ivsize == 0) — confirm callers always supply areq->iv.
	 */
	if (!areq->iv) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/* The SS is a single shared engine: serialize all register access */
	spin_lock_irqsave(&ss->slock, flags);

	/* Load the key words into the KEY registers, 32 bits at a time */
	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	/* Load the IV (at most 4 words) into the IV registers */
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	/* Program algorithm/direction/keysize and enable the engine */
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	/* From here on, ileft/oleft are counted in 32-bit words, not bytes */
	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
	do {
		/*
		 * Feed the RX FIFO: bounded by the free FIFO space (rx_cnt),
		 * the remaining request words and the rest of the current SG.
		 */
		todo = min(rx_cnt, ileft);
		todo = min_t(size_t, todo, (mi.length - oi) / 4);
		if (todo) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		/* Current source SG fully consumed: advance to the next one */
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

		/* Re-read the FIFO status to learn the new RX/TX capacities */
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		/* Drain the TX FIFO into the destination SG, same bounds */
		todo = min(tx_cnt, oleft);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		/* Current destination SG full: advance to the next one */
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft);

	/* Read back the updated IV so chained CBC requests can continue */
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	/* Disable the engine before releasing it to other users */
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
/*
 * sun4i_ss_cipher_poll() - generic CPU-driven cipher path.
 *
 * Handles SG lists whose entry sizes are not multiples of 4 by
 * linearizing partial words through the stack buffers buf (source side)
 * and bufo (destination side).  When every SG entry is 4-byte aligned in
 * length, it delegates to the faster sun4i_ss_opti_poll().
 *
 * Returns 0 on success or a negative errno.
 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
	char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
	unsigned int ob = 0; /* offset in buf */
	unsigned int obo = 0; /* offset in bufo*/
	unsigned int obl = 0; /* length of data in bufo */
	unsigned long flags;

	/* Nothing to cipher: report success without touching the hardware */
	if (!areq->cryptlen)
		return 0;

	/*
	 * NOTE(review): this rejects requests without an IV even for ECB
	 * modes (ivsize == 0) — confirm callers always supply areq->iv.
	 */
	if (!areq->iv) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/*
	 * if we have only SGs with size multiple of 4,
	 * we can use the SS optimized function
	 */
	while (in_sg && no_chunk == 1) {
		if (in_sg->length % 4)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if (out_sg->length % 4)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1)
		return sun4i_ss_opti_poll(areq);

	/* The SS is a single shared engine: serialize all register access */
	spin_lock_irqsave(&ss->slock, flags);

	/* Load the key words into the KEY registers, 32 bits at a time */
	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	/* Load the IV (at most 4 words) into the IV registers */
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	/* Program algorithm/direction/keysize and enable the engine */
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	/* Here ileft/oleft are counted in bytes (unlike the opti path) */
	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;

	while (oleft) {
		if (ileft) {
			/*
			 * todo is the number of consecutive 4byte word that we
			 * can read from current SG
			 */
			todo = min(rx_cnt, ileft / 4);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo && !ob) {
				/* Fast case: write full words straight in */
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * not enough consecutive bytes, so we need to
				 * linearize in buf. todo is in bytes
				 * After that copy, if we have a multiple of 4
				 * we need to be able to write all buf in one
				 * pass, so it is why we min() with rx_cnt
				 */
				todo = min(rx_cnt * 4 - ob, ileft);
				todo = min_t(size_t, todo, mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				/* Flush buf once it holds whole words only */
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		/* Refresh the RX/TX FIFO capacities from the status register */
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev,
			"%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
			mode,
			oi, mi.length, ileft, areq->cryptlen, rx_cnt,
			oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);

		/* No output ready yet: keep feeding/polling */
		if (!tx_cnt)
			continue;

		/* todo in 4bytes word */
		todo = min(tx_cnt, oleft / 4);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			/* Fast case: read full words straight out */
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			/*
			 * read obl bytes in bufo, we read at maximum for
			 * emptying the device
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * how many bytes we can copy ?
				 * no more than remaining SG size
				 * no more than remaining buffer
				 * no need to test against oleft
				 */
				todo = min_t(size_t,
					     mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}

	/* Read back the updated IV so chained CBC requests can continue */
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	/* Disable the engine before releasing it to other users */
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
  296. /* CBC AES */
  297. int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
  298. {
  299. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
  300. struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
  301. struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
  302. rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
  303. op->keymode;
  304. return sun4i_ss_cipher_poll(areq);
  305. }
  306. int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
  307. {
  308. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
  309. struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
  310. struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
  311. rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
  312. op->keymode;
  313. return sun4i_ss_cipher_poll(areq);
  314. }
  315. /* ECB AES */
  316. int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
  317. {
  318. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
  319. struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
  320. struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
  321. rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
  322. op->keymode;
  323. return sun4i_ss_cipher_poll(areq);
  324. }
  325. int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
  326. {
  327. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
  328. struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
  329. struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
  330. rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
  331. op->keymode;
  332. return sun4i_ss_cipher_poll(areq);
  333. }
  334. /* CBC DES */
  335. int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
  336. {
  337. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
  338. struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
  339. struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
  340. rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
  341. op->keymode;
  342. return sun4i_ss_cipher_poll(areq);
  343. }
  344. int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
  345. {
  346. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
  347. struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
  348. struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
  349. rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
  350. op->keymode;
  351. return sun4i_ss_cipher_poll(areq);
  352. }
  353. /* ECB DES */
  354. int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
  355. {
  356. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
  357. struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
  358. struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
  359. rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
  360. op->keymode;
  361. return sun4i_ss_cipher_poll(areq);
  362. }
  363. int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
  364. {
  365. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
  366. struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
  367. struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
  368. rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
  369. op->keymode;
  370. return sun4i_ss_cipher_poll(areq);
  371. }
  372. /* CBC 3DES */
  373. int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
  374. {
  375. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
  376. struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
  377. struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
  378. rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
  379. op->keymode;
  380. return sun4i_ss_cipher_poll(areq);
  381. }
  382. int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
  383. {
  384. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
  385. struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
  386. struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
  387. rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
  388. op->keymode;
  389. return sun4i_ss_cipher_poll(areq);
  390. }
  391. /* ECB 3DES */
  392. int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
  393. {
  394. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
  395. struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
  396. struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
  397. rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
  398. op->keymode;
  399. return sun4i_ss_cipher_poll(areq);
  400. }
  401. int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
  402. {
  403. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
  404. struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
  405. struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
  406. rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
  407. op->keymode;
  408. return sun4i_ss_cipher_poll(areq);
  409. }
  410. int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
  411. {
  412. struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
  413. struct sun4i_ss_alg_template *algt;
  414. memset(op, 0, sizeof(struct sun4i_tfm_ctx));
  415. algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
  416. alg.crypto.base);
  417. op->ss = algt->ss;
  418. crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
  419. sizeof(struct sun4i_cipher_req_ctx));
  420. return 0;
  421. }
  422. /* check and set the AES key, prepare the mode to be used */
  423. int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
  424. unsigned int keylen)
  425. {
  426. struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
  427. struct sun4i_ss_ctx *ss = op->ss;
  428. switch (keylen) {
  429. case 128 / 8:
  430. op->keymode = SS_AES_128BITS;
  431. break;
  432. case 192 / 8:
  433. op->keymode = SS_AES_192BITS;
  434. break;
  435. case 256 / 8:
  436. op->keymode = SS_AES_256BITS;
  437. break;
  438. default:
  439. dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
  440. crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
  441. return -EINVAL;
  442. }
  443. op->keylen = keylen;
  444. memcpy(op->key, key, keylen);
  445. return 0;
  446. }
  447. /* check and set the DES key, prepare the mode to be used */
  448. int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
  449. unsigned int keylen)
  450. {
  451. struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
  452. struct sun4i_ss_ctx *ss = op->ss;
  453. u32 flags;
  454. u32 tmp[DES_EXPKEY_WORDS];
  455. int ret;
  456. if (unlikely(keylen != DES_KEY_SIZE)) {
  457. dev_err(ss->dev, "Invalid keylen %u\n", keylen);
  458. crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
  459. return -EINVAL;
  460. }
  461. flags = crypto_skcipher_get_flags(tfm);
  462. ret = des_ekey(tmp, key);
  463. if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
  464. crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
  465. dev_dbg(ss->dev, "Weak key %u\n", keylen);
  466. return -EINVAL;
  467. }
  468. op->keylen = keylen;
  469. memcpy(op->key, key, keylen);
  470. return 0;
  471. }
  472. /* check and set the 3DES key, prepare the mode to be used */
  473. int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
  474. unsigned int keylen)
  475. {
  476. struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
  477. struct sun4i_ss_ctx *ss = op->ss;
  478. if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
  479. dev_err(ss->dev, "Invalid keylen %u\n", keylen);
  480. crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
  481. return -EINVAL;
  482. }
  483. op->keylen = keylen;
  484. memcpy(op->key, key, keylen);
  485. return 0;
  486. }