keywrap.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420
  1. /*
  2. * Key Wrapping: RFC3394 / NIST SP800-38F
  3. *
  4. * Copyright (C) 2015, Stephan Mueller <smueller@chronox.de>
  5. *
  6. * Redistribution and use in source and binary forms, with or without
  7. * modification, are permitted provided that the following conditions
  8. * are met:
  9. * 1. Redistributions of source code must retain the above copyright
  10. * notice, and the entire permission notice in its entirety,
  11. * including the disclaimer of warranties.
  12. * 2. Redistributions in binary form must reproduce the above copyright
  13. * notice, this list of conditions and the following disclaimer in the
  14. * documentation and/or other materials provided with the distribution.
  15. * 3. The name of the author may not be used to endorse or promote
  16. * products derived from this software without specific prior
  17. * written permission.
  18. *
  19. * ALTERNATIVELY, this product may be distributed under the terms of
  20. * the GNU General Public License, in which case the provisions of the GPL2
  21. * are required INSTEAD OF the above restrictions. (This clause is
  22. * necessary due to a potential bad interaction between the GPL and
  23. * the restrictions contained in a BSD-style copyright.)
  24. *
  25. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  26. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  27. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
  28. * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
  29. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  30. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
  31. * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  32. * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  33. * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  34. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  35. * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
  36. * DAMAGE.
  37. */
  38. /*
  39. * Note for using key wrapping:
  40. *
  41. * * The result of the encryption operation is the ciphertext starting
  42. * with the 2nd semiblock. The first semiblock is provided as the IV.
  43. * The IV used to start the encryption operation is the default IV.
  44. *
  45. * * The input for the decryption is the first semiblock handed in as an
  46. * IV. The ciphertext is the data starting with the 2nd semiblock. The
  47. * return code of the decryption operation will be EBADMSG in case an
  48. * integrity error occurs.
  49. *
  50. * To obtain the full result of an encryption as expected by SP800-38F, the
  51. * caller must allocate a buffer of plaintext + 8 bytes:
  52. *
  53. * unsigned int datalen = ptlen + crypto_skcipher_ivsize(tfm);
  54. * u8 data[datalen];
  55. * u8 *iv = data;
  56. * u8 *pt = data + crypto_skcipher_ivsize(tfm);
  57. * <ensure that pt contains the plaintext of size ptlen>
* sg_init_one(&sg, pt, ptlen);
  59. * skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
  60. *
  61. * ==> After encryption, data now contains full KW result as per SP800-38F.
  62. *
  63. * In case of decryption, ciphertext now already has the expected length
  64. * and must be segmented appropriately:
  65. *
  66. * unsigned int datalen = CTLEN;
  67. * u8 data[datalen];
  68. * <ensure that data contains full ciphertext>
  69. * u8 *iv = data;
  70. * u8 *ct = data + crypto_skcipher_ivsize(tfm);
  71. * unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm);
* sg_init_one(&sg, ct, ctlen);
* skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv);
  74. *
  75. * ==> After decryption (which hopefully does not return EBADMSG), the ct
  76. * pointer now points to the plaintext of size ctlen.
  77. *
  78. * Note 2: KWP is not implemented as this would defy in-place operation.
  79. * If somebody wants to wrap non-aligned data, he should simply pad
  80. * the input with zeros to fill it up to the 8 byte boundary.
  81. */
  82. #include <linux/module.h>
  83. #include <linux/crypto.h>
  84. #include <linux/scatterlist.h>
  85. #include <crypto/scatterwalk.h>
  86. #include <crypto/internal/skcipher.h>
/*
 * Per-tfm context: holds only the underlying single-block cipher that
 * performs the actual encrypt/decrypt of one crypto_kw_block.
 */
struct crypto_kw_ctx {
	struct crypto_cipher *child;
};
/*
 * Working block for one KW step: A carries the running IV/integrity value,
 * R the semiblock currently being processed. A and R are contiguous so the
 * pair can be fed to the cipher as one block (crypto_kw_alloc enforces
 * cra_blocksize == sizeof(struct crypto_kw_block)).
 */
struct crypto_kw_block {
#define SEMIBSIZE 8
	u8 A[SEMIBSIZE];
	u8 R[SEMIBSIZE];
};
  95. /* convert 64 bit integer into its string representation */
  96. static inline void crypto_kw_cpu_to_be64(u64 val, u8 *buf)
  97. {
  98. __be64 *a = (__be64 *)buf;
  99. *a = cpu_to_be64(val);
  100. }
  101. /*
  102. * Fast forward the SGL to the "end" length minus SEMIBSIZE.
  103. * The start in the SGL defined by the fast-forward is returned with
  104. * the walk variable
  105. */
  106. static void crypto_kw_scatterlist_ff(struct scatter_walk *walk,
  107. struct scatterlist *sg,
  108. unsigned int end)
  109. {
  110. unsigned int skip = 0;
  111. /* The caller should only operate on full SEMIBLOCKs. */
  112. BUG_ON(end < SEMIBSIZE);
  113. skip = end - SEMIBSIZE;
  114. while (sg) {
  115. if (sg->length > skip) {
  116. scatterwalk_start(walk, sg);
  117. scatterwalk_advance(walk, skip);
  118. break;
  119. } else
  120. skip -= sg->length;
  121. sg = sg_next(sg);
  122. }
  123. }
/*
 * KW unwrap (SP800-38F KW-AD): decrypt nbytes of ciphertext semiblocks from
 * src into dst, using the IV from desc->info as the initial A value.
 * Returns 0 on success, -EINVAL on bad length, -EBADMSG on integrity failure.
 */
static int crypto_kw_decrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst, struct scatterlist *src,
			     unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
	struct crypto_cipher *child = ctx->child;

	/*
	 * Scratch block alignment must satisfy both the child cipher's
	 * alignmask and SEMIBSIZE (for crypto_xor / crypto_kw_cpu_to_be64).
	 */
	unsigned long alignmask = max_t(unsigned long, SEMIBSIZE,
					crypto_cipher_alignmask(child));
	unsigned int i;
	u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask];
	struct crypto_kw_block *block = (struct crypto_kw_block *)
					PTR_ALIGN(blockbuf + 0, alignmask + 1);

	/*
	 * Unwrap runs the wrap steps in reverse, so the counter starts at its
	 * maximum: 6 outer rounds times n semiblocks, counting down.
	 */
	u64 t = 6 * ((nbytes) >> 3);
	struct scatterlist *lsrc, *ldst;
	int ret = 0;

	/*
	 * Require at least 2 semiblocks (note: the 3rd semiblock that is
	 * required by SP800-38F is the IV).
	 */
	if (nbytes < (2 * SEMIBSIZE) || nbytes % SEMIBSIZE)
		return -EINVAL;

	/* Place the IV into block A */
	memcpy(block->A, desc->info, SEMIBSIZE);

	/*
	 * src scatterlist is read-only. dst scatterlist is r/w. During the
	 * first loop, lsrc points to src and ldst to dst. For any
	 * subsequent round, the code operates on dst only.
	 */
	lsrc = src;
	ldst = dst;

	for (i = 0; i < 6; i++) {
		u8 tbe_buffer[SEMIBSIZE + alignmask];
		/* alignment for the crypto_xor and the _to_be64 operation */
		u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1);
		unsigned int tmp_nbytes = nbytes;
		struct scatter_walk src_walk, dst_walk;

		/* Semiblocks are processed back to front during unwrap. */
		while (tmp_nbytes) {
			/* move pointer by tmp_nbytes in the SGL */
			crypto_kw_scatterlist_ff(&src_walk, lsrc, tmp_nbytes);
			/* get the source block */
			scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE,
					       false);

			/* perform KW operation: get counter as byte string */
			crypto_kw_cpu_to_be64(t, tbe);
			/* perform KW operation: modify IV with counter */
			crypto_xor(block->A, tbe, SEMIBSIZE);
			t--;
			/* perform KW operation: decrypt block in place */
			crypto_cipher_decrypt_one(child, (u8*)block,
						  (u8*)block);

			/* move pointer by tmp_nbytes in the SGL */
			crypto_kw_scatterlist_ff(&dst_walk, ldst, tmp_nbytes);
			/* Copy block->R into place */
			scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE,
					       true);

			tmp_nbytes -= SEMIBSIZE;
		}

		/* we now start to operate on the dst SGL only */
		lsrc = dst;
		ldst = dst;
	}

	/*
	 * Authentication check: a successful unwrap must recover the default
	 * IV in A; crypto_memneq is used to avoid a timing side channel.
	 */
	if (crypto_memneq("\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", block->A,
			  SEMIBSIZE))
		ret = -EBADMSG;

	/* Wipe key-dependent intermediate state from the stack. */
	memzero_explicit(block, sizeof(struct crypto_kw_block));

	return ret;
}
/*
 * KW wrap (SP800-38F KW-AE): encrypt nbytes of plaintext semiblocks from
 * src into dst. The final integrity value A is written to desc->info; the
 * caller prepends it as the first semiblock of the full SP800-38F result.
 * Returns 0 on success, -EINVAL on bad length.
 */
static int crypto_kw_encrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst, struct scatterlist *src,
			     unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
	struct crypto_cipher *child = ctx->child;

	/*
	 * Scratch block alignment must satisfy both the child cipher's
	 * alignmask and SEMIBSIZE (for crypto_xor / crypto_kw_cpu_to_be64).
	 */
	unsigned long alignmask = max_t(unsigned long, SEMIBSIZE,
					crypto_cipher_alignmask(child));
	unsigned int i;
	u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask];
	struct crypto_kw_block *block = (struct crypto_kw_block *)
					PTR_ALIGN(blockbuf + 0, alignmask + 1);

	/* Wrap counter starts at 1 and increments once per semiblock step. */
	u64 t = 1;
	struct scatterlist *lsrc, *ldst;

	/*
	 * Require at least 2 semiblocks (note: the 3rd semiblock that is
	 * required by SP800-38F is the IV that occupies the first semiblock.
	 * This means that the dst memory must be one semiblock larger than src.
	 * Also ensure that the given data is aligned to semiblock.
	 */
	if (nbytes < (2 * SEMIBSIZE) || nbytes % SEMIBSIZE)
		return -EINVAL;

	/*
	 * Place the predefined IV into block A -- for encrypt, the caller
	 * does not need to provide an IV, but he needs to fetch the final IV.
	 */
	memcpy(block->A, "\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", SEMIBSIZE);

	/*
	 * src scatterlist is read-only. dst scatterlist is r/w. During the
	 * first loop, lsrc points to src and ldst to dst. For any
	 * subsequent round, the code operates on dst only.
	 */
	lsrc = src;
	ldst = dst;

	for (i = 0; i < 6; i++) {
		u8 tbe_buffer[SEMIBSIZE + alignmask];
		/* alignment for the crypto_xor and the _to_be64 operation */
		u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1);
		unsigned int tmp_nbytes = nbytes;
		struct scatter_walk src_walk, dst_walk;

		/* Wrap walks the semiblocks front to back. */
		scatterwalk_start(&src_walk, lsrc);
		scatterwalk_start(&dst_walk, ldst);

		while (tmp_nbytes) {
			/* get the source block */
			scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE,
					       false);

			/* perform KW operation: encrypt block in place */
			crypto_cipher_encrypt_one(child, (u8 *)block,
						  (u8 *)block);
			/* perform KW operation: get counter as byte string */
			crypto_kw_cpu_to_be64(t, tbe);
			/* perform KW operation: modify IV with counter */
			crypto_xor(block->A, tbe, SEMIBSIZE);
			t++;

			/* Copy block->R into place */
			scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE,
					       true);

			tmp_nbytes -= SEMIBSIZE;
		}

		/* we now start to operate on the dst SGL only */
		lsrc = dst;
		ldst = dst;
	}

	/* establish the IV for the caller to pick up */
	memcpy(desc->info, block->A, SEMIBSIZE);

	/* Wipe key-dependent intermediate state from the stack. */
	memzero_explicit(block, sizeof(struct crypto_kw_block));

	return 0;
}
  261. static int crypto_kw_setkey(struct crypto_tfm *parent, const u8 *key,
  262. unsigned int keylen)
  263. {
  264. struct crypto_kw_ctx *ctx = crypto_tfm_ctx(parent);
  265. struct crypto_cipher *child = ctx->child;
  266. int err;
  267. crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
  268. crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
  269. CRYPTO_TFM_REQ_MASK);
  270. err = crypto_cipher_setkey(child, key, keylen);
  271. crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
  272. CRYPTO_TFM_RES_MASK);
  273. return err;
  274. }
  275. static int crypto_kw_init_tfm(struct crypto_tfm *tfm)
  276. {
  277. struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
  278. struct crypto_spawn *spawn = crypto_instance_ctx(inst);
  279. struct crypto_kw_ctx *ctx = crypto_tfm_ctx(tfm);
  280. struct crypto_cipher *cipher;
  281. cipher = crypto_spawn_cipher(spawn);
  282. if (IS_ERR(cipher))
  283. return PTR_ERR(cipher);
  284. ctx->child = cipher;
  285. return 0;
  286. }
/* Release the child cipher acquired in crypto_kw_init_tfm(). */
static void crypto_kw_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_kw_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(ctx->child);
}
  292. static struct crypto_instance *crypto_kw_alloc(struct rtattr **tb)
  293. {
  294. struct crypto_instance *inst = NULL;
  295. struct crypto_alg *alg = NULL;
  296. int err;
  297. err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
  298. if (err)
  299. return ERR_PTR(err);
  300. alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
  301. CRYPTO_ALG_TYPE_MASK);
  302. if (IS_ERR(alg))
  303. return ERR_CAST(alg);
  304. inst = ERR_PTR(-EINVAL);
  305. /* Section 5.1 requirement for KW */
  306. if (alg->cra_blocksize != sizeof(struct crypto_kw_block))
  307. goto err;
  308. inst = crypto_alloc_instance("kw", alg);
  309. if (IS_ERR(inst))
  310. goto err;
  311. inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
  312. inst->alg.cra_priority = alg->cra_priority;
  313. inst->alg.cra_blocksize = SEMIBSIZE;
  314. inst->alg.cra_alignmask = 0;
  315. inst->alg.cra_type = &crypto_blkcipher_type;
  316. inst->alg.cra_blkcipher.ivsize = SEMIBSIZE;
  317. inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
  318. inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
  319. inst->alg.cra_ctxsize = sizeof(struct crypto_kw_ctx);
  320. inst->alg.cra_init = crypto_kw_init_tfm;
  321. inst->alg.cra_exit = crypto_kw_exit_tfm;
  322. inst->alg.cra_blkcipher.setkey = crypto_kw_setkey;
  323. inst->alg.cra_blkcipher.encrypt = crypto_kw_encrypt;
  324. inst->alg.cra_blkcipher.decrypt = crypto_kw_decrypt;
  325. err:
  326. crypto_mod_put(alg);
  327. return inst;
  328. }
/* Tear down a template instance: drop the cipher spawn and free it. */
static void crypto_kw_free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}
/* Template descriptor registered with the crypto API as "kw". */
static struct crypto_template crypto_kw_tmpl = {
	.name = "kw",
	.alloc = crypto_kw_alloc,
	.free = crypto_kw_free,
	.module = THIS_MODULE,
};
/* Module entry point: register the "kw" template. */
static int __init crypto_kw_init(void)
{
	return crypto_register_template(&crypto_kw_tmpl);
}
/* Module exit point: unregister the "kw" template. */
static void __exit crypto_kw_exit(void)
{
	crypto_unregister_template(&crypto_kw_tmpl);
}
/* Module registration and metadata. */
module_init(crypto_kw_init);
module_exit(crypto_kw_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)");
MODULE_ALIAS_CRYPTO("kw");