blkcipher.c

/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
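
/*
 * A minimal, illustrative sketch of how a cipher implementation typically
 * drives this walker from its ->encrypt()/->decrypt() handler.  "process"
 * is a stand-in for whatever block function the algorithm applies and is
 * assumed to return the number of bytes it left unprocessed:
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *	while (walk.nbytes) {
 *		unsigned int left = process(walk.dst.virt.addr,
 *					    walk.src.virt.addr, walk.nbytes);
 *		err = blkcipher_walk_done(desc, &walk, left);
 *	}
 *	return err;
 */
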
#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"
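
/*
 * Per-walk state flags:
 *
 * BLKCIPHER_WALK_PHYS - the caller wants page/offset pairs rather than
 *			 kernel virtual addresses (blkcipher_walk_phys()).
 * BLKCIPHER_WALK_SLOW - the current chunk was bounced through an allocated
 *			 buffer because it straddles a scatterlist entry.
 * BLKCIPHER_WALK_COPY - the current chunk was staged through walk->page to
 *			 satisfy the algorithm's alignment mask.
 * BLKCIPHER_WALK_DIFF - source and destination are mapped separately.
 */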
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
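
/*
 * Worked example for blkcipher_get_spot(), assuming PAGE_SIZE == 4096:
 * with start == base + 0xff0 and len == 0x20 the last byte would land at
 * base + 0x100f, so end_page == base + 0x1000 and the spot returned is the
 * start of the next page; a region that already fits within one page is
 * returned unchanged.  This is why the bounce-buffer allocations below
 * over-allocate.
 */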
static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}
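
/*
 * Finish one step of the walk.  The cipher implementation passes in the
 * number of bytes it did NOT process ("err"); any bounced or copied output
 * is flushed back to the destination scatterlist and, if data remains, the
 * next chunk is set up.  Returns 0 when the next chunk is ready or the walk
 * is complete, or a negative error.
 */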
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
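
/*
 * Slow path: the next chunk straddles a scatterlist entry, so a contiguous
 * bounce buffer is allocated and the input is copied into it.  The buffer
 * is laid out so that both the source and destination spots are aligned
 * and do not cross a page boundary; blkcipher_done_slow() copies the
 * result back out.
 */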
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}
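
/*
 * Copy path: source and/or destination are not aligned to the algorithm's
 * alignment mask, so the input is staged through the preallocated
 * walk->page and processed in place there; blkcipher_done_fast() copies
 * the result back to the destination.
 */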
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}
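
/*
 * Fast path: the chunk is contiguous and aligned, so the scatterlist pages
 * are used directly.  For virtual walks the source is mapped and the
 * destination shares that mapping unless it lives elsewhere, in which case
 * it is mapped separately and BLKCIPHER_WALK_DIFF is set.
 */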
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}
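
/*
 * Set up the next chunk of the walk: clamp the step to what is contiguous
 * in both scatterlists, then pick the slow (bounce buffer), copy
 * (alignment fix-up) or fast (direct mapping) path.  For physical walks
 * the virtual addresses produced by the slow/copy paths are converted back
 * to page/offset pairs.
 */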
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	bsize = min(walk->walk_blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}

	return err;
}
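
/*
 * The IV handed in by the caller may violate the algorithm's alignment
 * mask; if so, it is copied into a correctly aligned spot inside a freshly
 * allocated walk buffer before the walk starts.
 */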
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}
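
/*
 * Start a walk that yields kernel virtual addresses (blkcipher_walk_virt)
 * or page/offset pairs (blkcipher_walk_phys); both step in units of the
 * transform's block size.
 */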
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);
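
/*
 * The key pointer supplied by the caller may not satisfy the algorithm's
 * alignment mask; if so, bounce it through a temporary aligned buffer that
 * is wiped and freed afterwards.
 */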
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}
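
/*
 * When the transform is instantiated through the synchronous blkcipher
 * interface (the full type mask is requested) and the algorithm has an IV,
 * room for that IV is appended behind the alignment-padded context;
 * crypto_init_blkcipher_ops_sync() computes the matching pointer.
 */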
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type : blkcipher\n");
	seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv : %s\n", alg->cra_blkcipher.geniv ?:
				      "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				 const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}
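
/*
 * Build a template instance that wraps an existing (a)blkcipher with an IV
 * generator.  The wrapped algorithm's key sizes, IV size and handlers are
 * captured in "balg" regardless of whether it is a synchronous blkcipher
 * or an ablkcipher, and the resulting instance is set up as a givcipher.
 */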
struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return ERR_CAST(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);

int skcipher_geniv_init(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_ablkcipher *cipher;

	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tfm->crt_ablkcipher.base = cipher;
	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

	return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);

void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");