blkcipher.c

/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
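
/*
 * Usage sketch (illustrative only): a mode implementation typically
 * drives the walker defined below along these lines.  example_encrypt
 * and bsize are hypothetical names standing in for the mode's own
 * encrypt handler and block size:
 *
 *	static int example_encrypt(struct blkcipher_desc *desc,
 *				   struct scatterlist *dst,
 *				   struct scatterlist *src,
 *				   unsigned int nbytes)
 *	{
 *		struct blkcipher_walk walk;
 *		int err;
 *
 *		blkcipher_walk_init(&walk, dst, src, nbytes);
 *		err = blkcipher_walk_virt(desc, &walk);
 *
 *		while (walk.nbytes) {
 *			// transform walk.src.virt.addr into
 *			// walk.dst.virt.addr in whole blocks, using
 *			// walk.iv as the chaining value, then report
 *			// the number of bytes left unprocessed:
 *			err = blkcipher_walk_done(desc, &walk,
 *						  walk.nbytes % bsize);
 *		}
 *		return err;
 *	}
 */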
#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
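
/*
 * Worked example for blkcipher_get_spot() (illustrative addresses,
 * assuming PAGE_SIZE == 4096): with start == 0x12ff8 and len == 16,
 * the last byte would sit at 0x13007, so end_page == 0x13000 and the
 * spot returned is 0x13000, whose 16 bytes fit entirely in the second
 * page.  If start + len - 1 stays on start's own page, end_page is at
 * or below start and start itself is returned unchanged.
 */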
static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
				       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
}

static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
				       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
		blkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		blkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;
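
	/*
	 * diff is zero only when src and dst sit at the same offset in
	 * the same page, i.e. an in-place operation; then a single
	 * mapping below serves both sides.  Otherwise BLKCIPHER_WALK_DIFF
	 * records that dst needs its own mapping (released again in
	 * blkcipher_done_fast()).
	 */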
	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}

	return err;
}
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}
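
/*
 * Rough layout of the buffer set up above (a sketch read off the three
 * blkcipher_get_spot() calls, not a normative map): after the base
 * pointer is rounded up to (alignmask + 1)-byte alignment, two
 * aligned_bs-sized scratch slots are skipped, each bumped to the next
 * page if it would straddle one, and the IV copy lands in the third
 * non-straddling spot:
 *
 *	buffer:	[alignment pad][scratch 0][scratch 1][IV copy]
 *
 * The size computed in blkcipher_copy_iv() is an upper bound for this
 * worst case.
 */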
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}
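
/*
 * Example of the over-allocation above (illustrative numbers): with
 * alignmask == 15 and keylen == 24, absize == 39.  Wherever kmalloc()
 * places the buffer, ALIGN(buffer, 16) advances by at most 15 bytes,
 * so at least keylen bytes remain for the aligned key copy handed to
 * ->setkey().
 */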
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}
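
/*
 * Illustrative IV placement (example numbers only): with align == 16
 * and cra_ctxsize == 24, the context address is first rounded up to a
 * 16-byte boundary and the IV then lives at ctx + ALIGN(24, 16), i.e.
 * ctx + 32.  crypto_blkcipher_ctxsize() reserves exactly this extra
 * room when the instance is created.
 */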
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));
	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");