ablkcipher.c

/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <crypto/scatterwalk.h>

#include "internal.h"
struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
						unsigned int bsize)
{
	unsigned int n = bsize;

	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}

	return bsize;
}

static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
						unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
			n = ablkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = ablkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}

	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);

	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
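/*
 * Illustrative sketch (not part of the original file): a driver-side
 * encrypt handler would typically drive the physical walk API exported
 * above along these lines.  The helper mydrv_process_chunk() is
 * hypothetical and stands in for whatever the hardware does with each
 * chunk; ablkcipher_walk_init() is assumed to be the usual initialiser
 * from <crypto/algapi.h>.
 *
 *	static int mydrv_encrypt(struct ablkcipher_request *req)
 *	{
 *		struct ablkcipher_walk walk;
 *		int err;
 *
 *		ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *		err = ablkcipher_walk_phys(req, &walk);
 *
 *		while (walk.nbytes) {
 *			// Consume walk.src.page/offset, produce into
 *			// walk.dst.page/offset; returns the number of
 *			// bytes of this chunk left unprocessed (0 on
 *			// success) or a negative error.
 *			err = mydrv_process_chunk(req, &walk);
 *
 *			// Hand the leftover count (or error) back so the
 *			// walk can advance, flush slow-path copies and
 *			// set up the next chunk.
 *			err = ablkcipher_walk_done(req, &walk, err);
 *		}
 *
 *		return err;
 *	}
 */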
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
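/*
 * Illustrative sketch (not part of the original file): a driver hooks into
 * crypto_ablkcipher_type by registering a struct crypto_alg roughly like
 * the one below.  The mydrv_* names and the context struct are
 * hypothetical; the fields shown are the ones consumed by
 * crypto_init_ablkcipher_ops() and setkey() above.
 *
 *	static struct crypto_alg mydrv_cbc_aes_alg = {
 *		.cra_name		= "cbc(aes)",
 *		.cra_driver_name	= "cbc-aes-mydrv",
 *		.cra_priority		= 300,
 *		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 *					  CRYPTO_ALG_ASYNC,
 *		.cra_blocksize		= AES_BLOCK_SIZE,
 *		.cra_ctxsize		= sizeof(struct mydrv_ctx),
 *		.cra_type		= &crypto_ablkcipher_type,
 *		.cra_module		= THIS_MODULE,
 *		.cra_ablkcipher		= {
 *			.min_keysize	= AES_MIN_KEY_SIZE,
 *			.max_keysize	= AES_MAX_KEY_SIZE,
 *			.ivsize		= AES_BLOCK_SIZE,
 *			.setkey		= mydrv_setkey,
 *			.encrypt	= mydrv_encrypt,
 *			.decrypt	= mydrv_decrypt,
 *		},
 *	};
 *
 * Registration would then go through crypto_register_alg(&mydrv_cbc_aes_alg),
 * after which crypto_init_ablkcipher_ops() fills in the per-tfm entry points.
 */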
static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
				     u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		      alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : givcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_givcipher_show,
#endif
	.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);