// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *		Patrick Steuer <patrick.steuer@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
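/*
 * Module-wide state: a page of pre-computed counter blocks for CTR mode
 * (serialized by ctrblk_lock) and the CPACF function-code masks filled in
 * by the KM, KMC, KMCTR and KMA query calls in aes_s390_init().
 */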
static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *blk;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};
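/*
 * Plain AES software fallback: mirror the request flags into the generic
 * cipher, set its key and copy any resulting error flags back to the
 * wrapping tfm.
 */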
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
					       CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
				   CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
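/*
 * Single-block encrypt/decrypt. When a CPACF function code was selected
 * at setkey time the KM instruction is used directly, otherwise the
 * request goes to the software fallback cipher.
 */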
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_ASYNC |
						 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}
	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name = "aes",
	.cra_driver_name = "aes-s390",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_module = THIS_MODULE,
	.cra_init = fallback_init_cip,
	.cra_exit = fallback_exit_cip,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = aes_encrypt,
			.cia_decrypt = aes_decrypt,
		}
	}
};
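/*
 * Fallback helpers shared by the ECB, CBC and CTR blkcipher modes: the
 * key and each request are forwarded to an allocated skcipher fallback
 * whenever no CPACF function code is available for the key length.
 */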
static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
						      CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

static int fallback_blk_dec(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);
	return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
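/*
 * ECB: process every run of complete blocks from the walk with a single
 * KM invocation; whatever is left (less than one block) is handed back
 * to blkcipher_walk_done().
 */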
static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;

	ret = blkcipher_walk_virt(desc, walk);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}

	return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, 0, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.blk = crypto_alloc_skcipher(name, 0,
						   CRYPTO_ALG_ASYNC |
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(sctx->fallback.blk)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.blk);
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-s390",
	.cra_priority = 401, /* combo: aes + ecb + 1 */
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = ecb_aes_set_key,
			.encrypt = ecb_aes_encrypt,
			.decrypt = ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
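/*
 * CBC: KMC takes a parameter block holding the chaining value (IV)
 * followed by the key and updates the chaining value as it processes the
 * data, so the final IV is copied back to the walk after the loop.
 */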
static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	ret = blkcipher_walk_virt(desc, walk);
	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
	return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, 0, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-s390",
	.cra_priority = 402, /* ecb-aes-s390 + 1 */
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = cbc_aes_set_key,
			.encrypt = cbc_aes_encrypt,
			.decrypt = cbc_aes_decrypt,
		}
	}
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
						     CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
				struct scatterlist *dst, struct scatterlist *src,
				unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
				struct scatterlist *dst, struct scatterlist *src,
				unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_check_key(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return xts_fallback_setkey(tfm, in_key, key_len);

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}
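/*
 * XTS: the PCC instruction derives the initial tweak from the second
 * subkey and the IV into pcc_param.xts, which then seeds the KM parameter
 * block (xts_param.init). The offset of key_len & 0x10 places a 128-bit
 * key in the second half of the 32-byte key field of both parameter
 * blocks.
 */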
static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	ret = blkcipher_walk_virt(desc, walk);
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (!nbytes)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc))
		return xts_fallback_encrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, 0, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (!nbytes)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc))
		return xts_fallback_decrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
						  CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct crypto_alg xts_aes_alg = {
	.cra_name = "xts(aes)",
	.cra_driver_name = "xts-aes-s390",
	.cra_priority = 402, /* ecb-aes-s390 + 1 */
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_xts_ctx),
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = xts_fallback_init,
	.cra_exit = xts_fallback_exit,
	.cra_u = {
		.blkcipher = {
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = xts_aes_set_key,
			.encrypt = xts_aes_encrypt,
			.decrypt = xts_aes_decrypt,
		}
	}
};

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
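/*
 * Fill the shared counter page with consecutive counter blocks, starting
 * from the current IV, limited to complete blocks and at most PAGE_SIZE.
 * Returns the number of counter bytes prepared.
 */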
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}
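/*
 * CTR: if ctrblk_lock could be taken, up to a page of data is processed
 * per KMCTR call using the pre-filled counter blocks; otherwise the loop
 * falls back to one block at a time with walk->iv as the counter. A final
 * partial block is encrypted into a stack buffer and only the remaining
 * nbytes are copied to the destination.
 */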
static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	unsigned int n, nbytes;
	int ret, locked;

	locked = mutex_trylock(&ctrblk_lock);

	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    walk->dst.virt.addr, walk->src.virt.addr,
			    n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    buf, walk->src.virt.addr,
			    AES_BLOCK_SIZE, walk->iv);
		memcpy(walk->dst.virt.addr, buf, nbytes);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}

	return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, 0, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg ctr_aes_alg = {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "ctr-aes-s390",
	.cra_priority = 402, /* ecb-aes-s390 + 1 */
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = fallback_init_blk,
	.cra_exit = fallback_exit_blk,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ctr_aes_set_key,
			.encrypt = ctr_aes_encrypt,
			.decrypt = ctr_aes_decrypt,
		}
	}
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
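/*
 * Scatter-gather walk helpers for KMA: gcm_sg_walk either maps a
 * contiguous region of the scatterlist directly or, when a region is
 * shorter than the caller's minimum, gathers bytes into the one-block
 * bounce buffer (gw->buf) until enough data is available.
 */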
static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	struct scatterlist *nextsg;

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	while (!gw->walk_bytes) {
		nextsg = sg_next(gw->walk.sg);
		if (!nextsg)
			return 0;
		scatterwalk_start(&gw->walk, nextsg);
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
{
	gw->walk_bytes_remain -= nbytes;
	scatterwalk_unmap(&gw->walk);
	scatterwalk_advance(&gw->walk, nbytes);
	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	gw->walk_ptr = NULL;
}

static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(&gw->walk);
	gw->walk_ptr = NULL;

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;

		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk_ptr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}
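/*
 * GCM: one pass over AAD and plain-/ciphertext. Each iteration feeds as
 * much mappable input and output as possible to KMA, setting
 * CPACF_KMA_LAAD once the AAD is complete and CPACF_KMA_LPC on the last
 * text chunk; the tag is then stored (encrypt) or compared against
 * param.t (decrypt).
 */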
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey = gcm_aes_setkey,
	.setauthsize = gcm_aes_setauthsize,
	.encrypt = gcm_aes_encrypt,
	.decrypt = gcm_aes_decrypt,

	.ivsize = GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize = GHASH_DIGEST_SIZE,
	.chunksize = AES_BLOCK_SIZE,

	.base = {
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct s390_aes_ctx),
		.cra_priority = 900,
		.cra_name = "gcm(aes)",
		.cra_driver_name = "gcm-aes-s390",
		.cra_module = THIS_MODULE,
	},
};
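/*
 * Registration bookkeeping: remember every successfully registered
 * algorithm so that aes_s390_fini() can unregister it again on module
 * exit or when a later registration step fails.
 */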
static struct crypto_alg *aes_s390_algs_ptr[5];
static int aes_s390_algs_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_alg(struct crypto_alg *alg)
{
	int ret;

	ret = crypto_register_alg(alg);
	if (!ret)
		aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	while (aes_s390_algs_num--)
		crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);
	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}
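/*
 * Query the CPACF facilities once and register only the algorithms whose
 * function codes are actually available; the CTR counter page is
 * allocated only if KMCTR support is present.
 */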
static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = aes_s390_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		ret = aes_s390_register_alg(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_alg(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_alg(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_alg(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");

MODULE_LICENSE("GPL");