lrw.c 9.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404
/* LRW: as defined by Cyril Guyot in
 * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */
  19. #include <crypto/algapi.h>
  20. #include <linux/err.h>
  21. #include <linux/init.h>
  22. #include <linux/kernel.h>
  23. #include <linux/module.h>
  24. #include <linux/scatterlist.h>
  25. #include <linux/slab.h>
  26. #include <crypto/b128ops.h>
  27. #include <crypto/gf128mul.h>
  28. #include <crypto/lrw.h>
  29. struct priv {
  30. struct crypto_cipher *child;
  31. struct lrw_table_ctx table;
  32. };
  33. static inline void setbit128_bbe(void *b, int bit)
  34. {
  35. __set_bit(bit ^ (0x80 -
  36. #ifdef __BIG_ENDIAN
  37. BITS_PER_LONG
  38. #else
  39. BITS_PER_BYTE
  40. #endif
  41. ), b);
  42. }
  43. int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
  44. {
  45. be128 tmp = { 0 };
  46. int i;
  47. if (ctx->table)
  48. gf128mul_free_64k(ctx->table);
  49. /* initialize multiplication table for Key2 */
  50. ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
  51. if (!ctx->table)
  52. return -ENOMEM;
  53. /* initialize optimization table */
  54. for (i = 0; i < 128; i++) {
  55. setbit128_bbe(&tmp, i);
  56. ctx->mulinc[i] = tmp;
  57. gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
  58. }
  59. return 0;
  60. }
  61. EXPORT_SYMBOL_GPL(lrw_init_table);
  62. void lrw_free_table(struct lrw_table_ctx *ctx)
  63. {
  64. if (ctx->table)
  65. gf128mul_free_64k(ctx->table);
  66. }
  67. EXPORT_SYMBOL_GPL(lrw_free_table);
  68. static int setkey(struct crypto_tfm *parent, const u8 *key,
  69. unsigned int keylen)
  70. {
  71. struct priv *ctx = crypto_tfm_ctx(parent);
  72. struct crypto_cipher *child = ctx->child;
  73. int err, bsize = LRW_BLOCK_SIZE;
  74. const u8 *tweak = key + keylen - bsize;
  75. crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
  76. crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
  77. CRYPTO_TFM_REQ_MASK);
  78. err = crypto_cipher_setkey(child, key, keylen - bsize);
  79. if (err)
  80. return err;
  81. crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
  82. CRYPTO_TFM_RES_MASK);
  83. return lrw_init_table(&ctx->table, tweak);
  84. }
  85. struct sinfo {
  86. be128 t;
  87. struct crypto_tfm *tfm;
  88. void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
  89. };
  90. static inline void inc(be128 *iv)
  91. {
  92. be64_add_cpu(&iv->b, 1);
  93. if (!iv->b)
  94. be64_add_cpu(&iv->a, 1);
  95. }
  96. static inline void lrw_round(struct sinfo *s, void *dst, const void *src)
  97. {
  98. be128_xor(dst, &s->t, src); /* PP <- T xor P */
  99. s->fn(s->tfm, dst, dst); /* CC <- E(Key2,PP) */
  100. be128_xor(dst, dst, &s->t); /* C <- T xor CC */
  101. }
  102. /* this returns the number of consequative 1 bits starting
  103. * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
  104. static inline int get_index128(be128 *block)
  105. {
  106. int x;
  107. __be32 *p = (__be32 *) block;
  108. for (p += 3, x = 0; x < 128; p--, x += 32) {
  109. u32 val = be32_to_cpup(p);
  110. if (!~val)
  111. continue;
  112. return x + ffz(val);
  113. }
  114. return x;
  115. }
  116. static int crypt(struct blkcipher_desc *d,
  117. struct blkcipher_walk *w, struct priv *ctx,
  118. void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
  119. {
  120. int err;
  121. unsigned int avail;
  122. const int bs = LRW_BLOCK_SIZE;
  123. struct sinfo s = {
  124. .tfm = crypto_cipher_tfm(ctx->child),
  125. .fn = fn
  126. };
  127. be128 *iv;
  128. u8 *wsrc;
  129. u8 *wdst;
  130. err = blkcipher_walk_virt(d, w);
  131. if (!(avail = w->nbytes))
  132. return err;
  133. wsrc = w->src.virt.addr;
  134. wdst = w->dst.virt.addr;
  135. /* calculate first value of T */
  136. iv = (be128 *)w->iv;
  137. s.t = *iv;
  138. /* T <- I*Key2 */
  139. gf128mul_64k_bbe(&s.t, ctx->table.table);
  140. goto first;
  141. for (;;) {
  142. do {
  143. /* T <- I*Key2, using the optimization
  144. * discussed in the specification */
  145. be128_xor(&s.t, &s.t,
  146. &ctx->table.mulinc[get_index128(iv)]);
  147. inc(iv);
  148. first:
  149. lrw_round(&s, wdst, wsrc);
  150. wsrc += bs;
  151. wdst += bs;
  152. } while ((avail -= bs) >= bs);
  153. err = blkcipher_walk_done(d, w, avail);
  154. if (!(avail = w->nbytes))
  155. break;
  156. wsrc = w->src.virt.addr;
  157. wdst = w->dst.virt.addr;
  158. }
  159. return err;
  160. }
  161. static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
  162. struct scatterlist *src, unsigned int nbytes)
  163. {
  164. struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
  165. struct blkcipher_walk w;
  166. blkcipher_walk_init(&w, dst, src, nbytes);
  167. return crypt(desc, &w, ctx,
  168. crypto_cipher_alg(ctx->child)->cia_encrypt);
  169. }
  170. static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
  171. struct scatterlist *src, unsigned int nbytes)
  172. {
  173. struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
  174. struct blkcipher_walk w;
  175. blkcipher_walk_init(&w, dst, src, nbytes);
  176. return crypt(desc, &w, ctx,
  177. crypto_cipher_alg(ctx->child)->cia_decrypt);
  178. }
  179. int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
  180. struct scatterlist *ssrc, unsigned int nbytes,
  181. struct lrw_crypt_req *req)
  182. {
  183. const unsigned int bsize = LRW_BLOCK_SIZE;
  184. const unsigned int max_blks = req->tbuflen / bsize;
  185. struct lrw_table_ctx *ctx = req->table_ctx;
  186. struct blkcipher_walk walk;
  187. unsigned int nblocks;
  188. be128 *iv, *src, *dst, *t;
  189. be128 *t_buf = req->tbuf;
  190. int err, i;
  191. BUG_ON(max_blks < 1);
  192. blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
  193. err = blkcipher_walk_virt(desc, &walk);
  194. nbytes = walk.nbytes;
  195. if (!nbytes)
  196. return err;
  197. nblocks = min(walk.nbytes / bsize, max_blks);
  198. src = (be128 *)walk.src.virt.addr;
  199. dst = (be128 *)walk.dst.virt.addr;
  200. /* calculate first value of T */
  201. iv = (be128 *)walk.iv;
  202. t_buf[0] = *iv;
  203. /* T <- I*Key2 */
  204. gf128mul_64k_bbe(&t_buf[0], ctx->table);
  205. i = 0;
  206. goto first;
  207. for (;;) {
  208. do {
  209. for (i = 0; i < nblocks; i++) {
  210. /* T <- I*Key2, using the optimization
  211. * discussed in the specification */
  212. be128_xor(&t_buf[i], t,
  213. &ctx->mulinc[get_index128(iv)]);
  214. inc(iv);
  215. first:
  216. t = &t_buf[i];
  217. /* PP <- T xor P */
  218. be128_xor(dst + i, t, src + i);
  219. }
  220. /* CC <- E(Key2,PP) */
  221. req->crypt_fn(req->crypt_ctx, (u8 *)dst,
  222. nblocks * bsize);
  223. /* C <- T xor CC */
  224. for (i = 0; i < nblocks; i++)
  225. be128_xor(dst + i, dst + i, &t_buf[i]);
  226. src += nblocks;
  227. dst += nblocks;
  228. nbytes -= nblocks * bsize;
  229. nblocks = min(nbytes / bsize, max_blks);
  230. } while (nblocks > 0);
  231. err = blkcipher_walk_done(desc, &walk, nbytes);
  232. nbytes = walk.nbytes;
  233. if (!nbytes)
  234. break;
  235. nblocks = min(nbytes / bsize, max_blks);
  236. src = (be128 *)walk.src.virt.addr;
  237. dst = (be128 *)walk.dst.virt.addr;
  238. }
  239. return err;
  240. }
  241. EXPORT_SYMBOL_GPL(lrw_crypt);
  242. static int init_tfm(struct crypto_tfm *tfm)
  243. {
  244. struct crypto_cipher *cipher;
  245. struct crypto_instance *inst = (void *)tfm->__crt_alg;
  246. struct crypto_spawn *spawn = crypto_instance_ctx(inst);
  247. struct priv *ctx = crypto_tfm_ctx(tfm);
  248. u32 *flags = &tfm->crt_flags;
  249. cipher = crypto_spawn_cipher(spawn);
  250. if (IS_ERR(cipher))
  251. return PTR_ERR(cipher);
  252. if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) {
  253. *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
  254. crypto_free_cipher(cipher);
  255. return -EINVAL;
  256. }
  257. ctx->child = cipher;
  258. return 0;
  259. }
  260. static void exit_tfm(struct crypto_tfm *tfm)
  261. {
  262. struct priv *ctx = crypto_tfm_ctx(tfm);
  263. lrw_free_table(&ctx->table);
  264. crypto_free_cipher(ctx->child);
  265. }
  266. static struct crypto_instance *alloc(struct rtattr **tb)
  267. {
  268. struct crypto_instance *inst;
  269. struct crypto_alg *alg;
  270. int err;
  271. err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
  272. if (err)
  273. return ERR_PTR(err);
  274. alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
  275. CRYPTO_ALG_TYPE_MASK);
  276. if (IS_ERR(alg))
  277. return ERR_CAST(alg);
  278. inst = crypto_alloc_instance("lrw", alg);
  279. if (IS_ERR(inst))
  280. goto out_put_alg;
  281. inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
  282. inst->alg.cra_priority = alg->cra_priority;
  283. inst->alg.cra_blocksize = alg->cra_blocksize;
  284. if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7;
  285. else inst->alg.cra_alignmask = alg->cra_alignmask;
  286. inst->alg.cra_type = &crypto_blkcipher_type;
  287. if (!(alg->cra_blocksize % 4))
  288. inst->alg.cra_alignmask |= 3;
  289. inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
  290. inst->alg.cra_blkcipher.min_keysize =
  291. alg->cra_cipher.cia_min_keysize + alg->cra_blocksize;
  292. inst->alg.cra_blkcipher.max_keysize =
  293. alg->cra_cipher.cia_max_keysize + alg->cra_blocksize;
  294. inst->alg.cra_ctxsize = sizeof(struct priv);
  295. inst->alg.cra_init = init_tfm;
  296. inst->alg.cra_exit = exit_tfm;
  297. inst->alg.cra_blkcipher.setkey = setkey;
  298. inst->alg.cra_blkcipher.encrypt = encrypt;
  299. inst->alg.cra_blkcipher.decrypt = decrypt;
  300. out_put_alg:
  301. crypto_mod_put(alg);
  302. return inst;
  303. }
  304. static void free(struct crypto_instance *inst)
  305. {
  306. crypto_drop_spawn(crypto_instance_ctx(inst));
  307. kfree(inst);
  308. }
  309. static struct crypto_template crypto_tmpl = {
  310. .name = "lrw",
  311. .alloc = alloc,
  312. .free = free,
  313. .module = THIS_MODULE,
  314. };
  315. static int __init crypto_module_init(void)
  316. {
  317. return crypto_register_template(&crypto_tmpl);
  318. }
  319. static void __exit crypto_module_exit(void)
  320. {
  321. crypto_unregister_template(&crypto_tmpl);
  322. }
  323. module_init(crypto_module_init);
  324. module_exit(crypto_module_exit);
  325. MODULE_LICENSE("GPL");
  326. MODULE_DESCRIPTION("LRW block cipher mode");
  327. MODULE_ALIAS_CRYPTO("lrw");