
  1. /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */
  2. /*-
  3. * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
  4. * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
  5. *
  6. * This code was written by Angelos D. Keromytis in Athens, Greece, in
  7. * February 2000. Network Security Technologies Inc. (NSTI) kindly
  8. * supported the development of this code.
  9. *
  10. * Copyright (c) 2000, 2001 Angelos D. Keromytis
  11. * Copyright (c) 2014 The FreeBSD Foundation
  12. * All rights reserved.
  13. *
  14. * Portions of this software were developed by John-Mark Gurney
  15. * under sponsorship of the FreeBSD Foundation and
  16. * Rubicon Communications, LLC (Netgate).
  17. *
  18. * Permission to use, copy, and modify this software with or without fee
  19. * is hereby granted, provided that this entire notice is included in
  20. * all source code copies of any software which is or includes a copy or
  21. * modification of this software.
  22. *
  23. * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
  24. * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
  25. * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
  26. * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
  27. * PURPOSE.
  28. */
  29. #include <sys/cdefs.h>
  30. __FBSDID("$FreeBSD$");
  31. #include <sys/param.h>
  32. #include <sys/systm.h>
  33. #include <sys/malloc.h>
  34. #include <sys/mbuf.h>
  35. #include <sys/module.h>
  36. #include <sys/sysctl.h>
  37. #include <sys/errno.h>
  38. #include <sys/random.h>
  39. #include <sys/kernel.h>
  40. #include <sys/uio.h>
  41. #include <sys/lock.h>
  42. #include <sys/rwlock.h>
  43. #include <sys/endian.h>
  44. #include <sys/limits.h>
  45. #include <sys/mutex.h>
  46. #include <crypto/sha1.h>
  47. #include <opencrypto/rmd160.h>
  48. #include <opencrypto/cryptodev.h>
  49. #include <opencrypto/xform.h>
  50. #include <sys/kobj.h>
  51. #include <sys/bus.h>
  52. #include "cryptodev_if.h"
  53. struct swcr_auth {
  54. void *sw_ictx;
  55. void *sw_octx;
  56. struct auth_hash *sw_axf;
  57. uint16_t sw_mlen;
  58. };
  59. struct swcr_encdec {
  60. void *sw_kschedule;
  61. struct enc_xform *sw_exf;
  62. };
  63. struct swcr_compdec {
  64. struct comp_algo *sw_cxf;
  65. };
  66. struct swcr_session {
  67. struct mtx swcr_lock;
  68. int (*swcr_process)(struct swcr_session *, struct cryptop *);
  69. struct swcr_auth swcr_auth;
  70. struct swcr_encdec swcr_encdec;
  71. struct swcr_compdec swcr_compdec;
  72. };
  73. static int32_t swcr_id;
  74. static void swcr_freesession(device_t dev, crypto_session_t cses);
  75. /* Used for CRYPTO_NULL_CBC. */
  76. static int
  77. swcr_null(struct swcr_session *ses, struct cryptop *crp)
  78. {
  79. return (0);
  80. }
  81. /*
  82. * Apply a symmetric encryption/decryption algorithm.
  83. */
  84. static int
  85. swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
  86. {
  87. unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
  88. unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
  89. const struct crypto_session_params *csp;
  90. struct swcr_encdec *sw;
  91. struct enc_xform *exf;
  92. int i, blks, inlen, ivlen, outlen, resid;
  93. struct crypto_buffer_cursor cc_in, cc_out;
  94. const unsigned char *inblk;
  95. unsigned char *outblk;
  96. int error;
  97. bool encrypting;
  98. error = 0;
  99. sw = &ses->swcr_encdec;
  100. exf = sw->sw_exf;
  101. ivlen = exf->ivsize;
  102. if (exf->native_blocksize == 0) {
  103. /* Check for non-padded data */
  104. if ((crp->crp_payload_length % exf->blocksize) != 0)
  105. return (EINVAL);
  106. blks = exf->blocksize;
  107. } else
  108. blks = exf->native_blocksize;
  109. if (exf == &enc_xform_aes_icm &&
  110. (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
  111. return (EINVAL);
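/*
 * A request may carry its own cipher key (crp_cipher_key); when it
 * does, rebuild the key schedule from it instead of using the key
 * installed when the session was created.
 */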
  112. if (crp->crp_cipher_key != NULL) {
  113. csp = crypto_get_params(crp->crp_session);
  114. error = exf->setkey(sw->sw_kschedule,
  115. crp->crp_cipher_key, csp->csp_cipher_klen);
  116. if (error)
  117. return (error);
  118. }
  119. crypto_read_iv(crp, iv);
  120. if (exf->reinit) {
  121. /*
  122. * xforms that provide a reinit method perform all IV
  123. * handling themselves.
  124. */
  125. exf->reinit(sw->sw_kschedule, iv);
  126. }
  127. ivp = iv;
  128. crypto_cursor_init(&cc_in, &crp->crp_buf);
  129. crypto_cursor_advance(&cc_in, crp->crp_payload_start);
  130. inlen = crypto_cursor_seglen(&cc_in);
  131. inblk = crypto_cursor_segbase(&cc_in);
  132. if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
  133. crypto_cursor_init(&cc_out, &crp->crp_obuf);
  134. crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
  135. } else
  136. cc_out = cc_in;
  137. outlen = crypto_cursor_seglen(&cc_out);
  138. outblk = crypto_cursor_segbase(&cc_out);
  139. resid = crp->crp_payload_length;
  140. encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);
  141. /*
  142. * Loop through encrypting blocks. 'inlen' is the remaining
  143. * length of the current segment in the input buffer.
  144. * 'outlen' is the remaining length of current segment in the
  145. * output buffer.
  146. */
  147. while (resid >= blks) {
  148. /*
  149. * If the current block is not contained within the
  150. * current input/output segment, use 'blk' as a local
  151. * buffer.
  152. */
  153. if (inlen < blks) {
  154. crypto_cursor_copydata(&cc_in, blks, blk);
  155. inblk = blk;
  156. }
  157. if (outlen < blks)
  158. outblk = blk;
  159. /*
  160. * Ciphers without a 'reinit' hook are assumed to be
  161. * used in CBC mode where the chaining is done here.
  162. */
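/*
 * For the CBC path below, 'ivp' always points at the block to XOR
 * with: the IV for the first block, the previous ciphertext block
 * afterwards.  When decrypting, 'iv' and 'iv2' alternate so the
 * previous ciphertext block is preserved even if the cipher writes
 * its output over the input block.
 */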
  163. if (exf->reinit != NULL) {
  164. if (encrypting)
  165. exf->encrypt(sw->sw_kschedule, inblk, outblk);
  166. else
  167. exf->decrypt(sw->sw_kschedule, inblk, outblk);
  168. } else if (encrypting) {
  169. /* XOR with previous block */
  170. for (i = 0; i < blks; i++)
  171. outblk[i] = inblk[i] ^ ivp[i];
  172. exf->encrypt(sw->sw_kschedule, outblk, outblk);
  173. /*
  174. * Keep encrypted block for XOR'ing
  175. * with next block
  176. */
  177. memcpy(iv, outblk, blks);
  178. ivp = iv;
  179. } else { /* decrypt */
  180. /*
  181. * Keep encrypted block for XOR'ing
  182. * with next block
  183. */
  184. nivp = (ivp == iv) ? iv2 : iv;
  185. memcpy(nivp, inblk, blks);
  186. exf->decrypt(sw->sw_kschedule, inblk, outblk);
  187. /* XOR with previous block */
  188. for (i = 0; i < blks; i++)
  189. outblk[i] ^= ivp[i];
  190. ivp = nivp;
  191. }
  192. if (inlen < blks) {
  193. inlen = crypto_cursor_seglen(&cc_in);
  194. inblk = crypto_cursor_segbase(&cc_in);
  195. } else {
  196. crypto_cursor_advance(&cc_in, blks);
  197. inlen -= blks;
  198. inblk += blks;
  199. }
  200. if (outlen < blks) {
  201. crypto_cursor_copyback(&cc_out, blks, blk);
  202. outlen = crypto_cursor_seglen(&cc_out);
  203. outblk = crypto_cursor_segbase(&cc_out);
  204. } else {
  205. crypto_cursor_advance(&cc_out, blks);
  206. outlen -= blks;
  207. outblk += blks;
  208. }
  209. resid -= blks;
  210. }
  211. /* Handle trailing partial block for stream ciphers. */
  212. if (resid > 0) {
  213. KASSERT(exf->native_blocksize != 0,
  214. ("%s: partial block of %d bytes for cipher %s",
215. __func__, resid, exf->name));
  216. KASSERT(exf->reinit != NULL,
  217. ("%s: partial block cipher %s without reinit hook",
  218. __func__, exf->name));
  219. KASSERT(resid < blks, ("%s: partial block too big", __func__));
  220. inlen = crypto_cursor_seglen(&cc_in);
  221. outlen = crypto_cursor_seglen(&cc_out);
  222. if (inlen < resid) {
  223. crypto_cursor_copydata(&cc_in, resid, blk);
  224. inblk = blk;
  225. } else
  226. inblk = crypto_cursor_segbase(&cc_in);
  227. if (outlen < resid)
  228. outblk = blk;
  229. else
  230. outblk = crypto_cursor_segbase(&cc_out);
  231. if (encrypting)
  232. exf->encrypt_last(sw->sw_kschedule, inblk, outblk,
  233. resid);
  234. else
  235. exf->decrypt_last(sw->sw_kschedule, inblk, outblk,
  236. resid);
  237. if (outlen < resid)
  238. crypto_cursor_copyback(&cc_out, resid, blk);
  239. }
  240. explicit_bzero(blk, sizeof(blk));
  241. explicit_bzero(iv, sizeof(iv));
  242. explicit_bzero(iv2, sizeof(iv2));
  243. return (0);
  244. }
  245. static void
  246. swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
  247. const uint8_t *key, int klen)
  248. {
  249. switch (axf->type) {
  250. case CRYPTO_SHA1_HMAC:
  251. case CRYPTO_SHA2_224_HMAC:
  252. case CRYPTO_SHA2_256_HMAC:
  253. case CRYPTO_SHA2_384_HMAC:
  254. case CRYPTO_SHA2_512_HMAC:
  255. case CRYPTO_NULL_HMAC:
  256. case CRYPTO_RIPEMD160_HMAC:
  257. hmac_init_ipad(axf, key, klen, sw->sw_ictx);
  258. hmac_init_opad(axf, key, klen, sw->sw_octx);
  259. break;
  260. case CRYPTO_POLY1305:
  261. case CRYPTO_BLAKE2B:
  262. case CRYPTO_BLAKE2S:
  263. axf->Setkey(sw->sw_ictx, key, klen);
  264. axf->Init(sw->sw_ictx);
  265. break;
  266. default:
  267. panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
  268. }
  269. }
  270. /*
  271. * Compute or verify hash.
  272. */
  273. static int
  274. swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
  275. {
  276. u_char aalg[HASH_MAX_LEN];
  277. const struct crypto_session_params *csp;
  278. struct swcr_auth *sw;
  279. struct auth_hash *axf;
  280. union authctx ctx;
  281. int err;
  282. sw = &ses->swcr_auth;
  283. axf = sw->sw_axf;
  284. csp = crypto_get_params(crp->crp_session);
  285. if (crp->crp_auth_key != NULL) {
  286. swcr_authprepare(axf, sw, crp->crp_auth_key,
  287. csp->csp_auth_klen);
  288. }
  289. bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
  290. if (crp->crp_aad != NULL)
  291. err = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
  292. else
  293. err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
  294. axf->Update, &ctx);
  295. if (err)
  296. goto out;
  297. if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
  298. CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
  299. err = crypto_apply_buf(&crp->crp_obuf,
  300. crp->crp_payload_output_start, crp->crp_payload_length,
  301. axf->Update, &ctx);
  302. else
  303. err = crypto_apply(crp, crp->crp_payload_start,
  304. crp->crp_payload_length, axf->Update, &ctx);
  305. if (err)
  306. goto out;
  307. if (csp->csp_flags & CSP_F_ESN)
  308. axf->Update(&ctx, crp->crp_esn, 4);
  309. axf->Final(aalg, &ctx);
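/*
 * For HMAC transforms an outer context was prepared at setup time;
 * finish by hashing the inner digest under the opad-keyed context.
 * Plain digests have no sw_octx and skip this step.
 */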
  310. if (sw->sw_octx != NULL) {
  311. bcopy(sw->sw_octx, &ctx, axf->ctxsize);
  312. axf->Update(&ctx, aalg, axf->hashsize);
  313. axf->Final(aalg, &ctx);
  314. }
  315. if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
  316. u_char uaalg[HASH_MAX_LEN];
  317. crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
  318. if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
  319. err = EBADMSG;
  320. explicit_bzero(uaalg, sizeof(uaalg));
  321. } else {
  322. /* Inject the authentication data */
  323. crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
  324. }
  325. explicit_bzero(aalg, sizeof(aalg));
  326. out:
  327. explicit_bzero(&ctx, sizeof(ctx));
  328. return (err);
  329. }
  330. CTASSERT(INT_MAX <= (1ll<<39) - 256); /* GCM: plain text < 2^39-256 */
  331. CTASSERT(INT_MAX <= (uint64_t)-1); /* GCM: associated data <= 2^64-1 */
  332. static int
  333. swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
  334. {
  335. uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
  336. u_char *blk = (u_char *)blkbuf;
  337. u_char tag[GMAC_DIGEST_LEN];
  338. u_char iv[AES_BLOCK_LEN];
  339. struct crypto_buffer_cursor cc;
  340. const u_char *inblk;
  341. union authctx ctx;
  342. struct swcr_auth *swa;
  343. struct auth_hash *axf;
  344. uint32_t *blkp;
  345. int blksz, error, ivlen, len, resid;
  346. swa = &ses->swcr_auth;
  347. axf = swa->sw_axf;
  348. bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
  349. blksz = GMAC_BLOCK_LEN;
  350. KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
  351. __func__));
  352. /* Initialize the IV */
  353. ivlen = AES_GCM_IV_LEN;
  354. crypto_read_iv(crp, iv);
  355. axf->Reinit(&ctx, iv, ivlen);
  356. crypto_cursor_init(&cc, &crp->crp_buf);
  357. crypto_cursor_advance(&cc, crp->crp_payload_start);
  358. for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) {
  359. len = crypto_cursor_seglen(&cc);
  360. if (len >= blksz) {
  361. inblk = crypto_cursor_segbase(&cc);
  362. len = rounddown(MIN(len, resid), blksz);
  363. crypto_cursor_advance(&cc, len);
  364. } else {
  365. len = blksz;
  366. crypto_cursor_copydata(&cc, len, blk);
  367. inblk = blk;
  368. }
  369. axf->Update(&ctx, inblk, len);
  370. }
  371. if (resid > 0) {
  372. memset(blk, 0, blksz);
  373. crypto_cursor_copydata(&cc, resid, blk);
  374. axf->Update(&ctx, blk, blksz);
  375. }
  376. /* length block */
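/*
 * GHASH finishes over a block holding the 64-bit big-endian bit
 * lengths.  GMAC encrypts nothing, so the payload is accounted as
 * associated data and the ciphertext length stays zero; only the
 * low 32 bits are filled in since the payload length is bounded by
 * INT_MAX (see the CTASSERTs above).
 */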
  377. memset(blk, 0, blksz);
  378. blkp = (uint32_t *)blk + 1;
  379. *blkp = htobe32(crp->crp_payload_length * 8);
  380. axf->Update(&ctx, blk, blksz);
  381. /* Finalize MAC */
  382. axf->Final(tag, &ctx);
  383. error = 0;
  384. if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
  385. u_char tag2[GMAC_DIGEST_LEN];
  386. crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
  387. tag2);
  388. if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
  389. error = EBADMSG;
  390. explicit_bzero(tag2, sizeof(tag2));
  391. } else {
  392. /* Inject the authentication data */
  393. crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
  394. }
  395. explicit_bzero(blkbuf, sizeof(blkbuf));
  396. explicit_bzero(tag, sizeof(tag));
  397. explicit_bzero(iv, sizeof(iv));
  398. return (error);
  399. }
  400. static int
  401. swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
  402. {
  403. uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
  404. u_char *blk = (u_char *)blkbuf;
  405. u_char tag[GMAC_DIGEST_LEN];
  406. u_char iv[AES_BLOCK_LEN];
  407. struct crypto_buffer_cursor cc_in, cc_out;
  408. const u_char *inblk;
  409. u_char *outblk;
  410. union authctx ctx;
  411. struct swcr_auth *swa;
  412. struct swcr_encdec *swe;
  413. struct auth_hash *axf;
  414. struct enc_xform *exf;
  415. uint32_t *blkp;
  416. int blksz, error, ivlen, len, r, resid;
  417. swa = &ses->swcr_auth;
  418. axf = swa->sw_axf;
  419. bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
  420. blksz = GMAC_BLOCK_LEN;
  421. KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
  422. __func__));
  423. swe = &ses->swcr_encdec;
  424. exf = swe->sw_exf;
  425. KASSERT(axf->blocksize == exf->native_blocksize,
  426. ("%s: blocksize mismatch", __func__));
  427. if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
  428. return (EINVAL);
  429. /* Initialize the IV */
  430. ivlen = AES_GCM_IV_LEN;
  431. bcopy(crp->crp_iv, iv, ivlen);
  432. /* Supply MAC with IV */
  433. axf->Reinit(&ctx, iv, ivlen);
  434. /* Supply MAC with AAD */
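/*
 * The AAD is either supplied in a separate flat buffer
 * (CSP_F_SEPARATE_AAD, crp_aad) or found inline in the request
 * buffer starting at crp_aad_start.
 */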
  435. if (crp->crp_aad != NULL) {
  436. len = rounddown(crp->crp_aad_length, blksz);
  437. if (len != 0)
  438. axf->Update(&ctx, crp->crp_aad, len);
  439. if (crp->crp_aad_length != len) {
  440. memset(blk, 0, blksz);
  441. memcpy(blk, (char *)crp->crp_aad + len,
  442. crp->crp_aad_length - len);
  443. axf->Update(&ctx, blk, blksz);
  444. }
  445. } else {
  446. crypto_cursor_init(&cc_in, &crp->crp_buf);
  447. crypto_cursor_advance(&cc_in, crp->crp_aad_start);
  448. for (resid = crp->crp_aad_length; resid >= blksz;
  449. resid -= len) {
  450. len = crypto_cursor_seglen(&cc_in);
  451. if (len >= blksz) {
  452. inblk = crypto_cursor_segbase(&cc_in);
  453. len = rounddown(MIN(len, resid), blksz);
  454. crypto_cursor_advance(&cc_in, len);
  455. } else {
  456. len = blksz;
  457. crypto_cursor_copydata(&cc_in, len, blk);
  458. inblk = blk;
  459. }
  460. axf->Update(&ctx, inblk, len);
  461. }
  462. if (resid > 0) {
  463. memset(blk, 0, blksz);
  464. crypto_cursor_copydata(&cc_in, resid, blk);
  465. axf->Update(&ctx, blk, blksz);
  466. }
  467. }
  468. exf->reinit(swe->sw_kschedule, iv);
  469. /* Do encryption with MAC */
  470. crypto_cursor_init(&cc_in, &crp->crp_buf);
  471. crypto_cursor_advance(&cc_in, crp->crp_payload_start);
  472. if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
  473. crypto_cursor_init(&cc_out, &crp->crp_obuf);
  474. crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
  475. } else
  476. cc_out = cc_in;
  477. for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
  478. if (crypto_cursor_seglen(&cc_in) < blksz) {
  479. crypto_cursor_copydata(&cc_in, blksz, blk);
  480. inblk = blk;
  481. } else {
  482. inblk = crypto_cursor_segbase(&cc_in);
  483. crypto_cursor_advance(&cc_in, blksz);
  484. }
  485. if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
  486. if (crypto_cursor_seglen(&cc_out) < blksz)
  487. outblk = blk;
  488. else
  489. outblk = crypto_cursor_segbase(&cc_out);
  490. exf->encrypt(swe->sw_kschedule, inblk, outblk);
  491. axf->Update(&ctx, outblk, blksz);
  492. if (outblk == blk)
  493. crypto_cursor_copyback(&cc_out, blksz, blk);
  494. else
  495. crypto_cursor_advance(&cc_out, blksz);
  496. } else {
  497. axf->Update(&ctx, inblk, blksz);
  498. }
  499. }
  500. if (resid > 0) {
  501. crypto_cursor_copydata(&cc_in, resid, blk);
  502. if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
  503. exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
  504. crypto_cursor_copyback(&cc_out, resid, blk);
  505. }
  506. axf->Update(&ctx, blk, resid);
  507. }
  508. /* length block */
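/*
 * The final GHASH block encodes len(AAD) || len(C) as two 64-bit
 * big-endian bit counts; only the low 32 bits of each are filled in
 * because both lengths are bounded by INT_MAX (see the CTASSERTs
 * above).
 */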
  509. memset(blk, 0, blksz);
  510. blkp = (uint32_t *)blk + 1;
  511. *blkp = htobe32(crp->crp_aad_length * 8);
  512. blkp = (uint32_t *)blk + 3;
  513. *blkp = htobe32(crp->crp_payload_length * 8);
  514. axf->Update(&ctx, blk, blksz);
  515. /* Finalize MAC */
  516. axf->Final(tag, &ctx);
  517. /* Validate tag */
  518. error = 0;
  519. if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
  520. u_char tag2[GMAC_DIGEST_LEN];
  521. crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);
  522. r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
  523. explicit_bzero(tag2, sizeof(tag2));
  524. if (r != 0) {
  525. error = EBADMSG;
  526. goto out;
  527. }
  528. /* tag matches, decrypt data */
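/*
 * Decryption was skipped in the pass above; it is only performed
 * here, after the tag has been verified, so no unauthenticated
 * plaintext is ever written to the output buffer.
 */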
  529. crypto_cursor_init(&cc_in, &crp->crp_buf);
  530. crypto_cursor_advance(&cc_in, crp->crp_payload_start);
  531. for (resid = crp->crp_payload_length; resid > blksz;
  532. resid -= blksz) {
  533. if (crypto_cursor_seglen(&cc_in) < blksz) {
  534. crypto_cursor_copydata(&cc_in, blksz, blk);
  535. inblk = blk;
  536. } else {
  537. inblk = crypto_cursor_segbase(&cc_in);
  538. crypto_cursor_advance(&cc_in, blksz);
  539. }
  540. if (crypto_cursor_seglen(&cc_out) < blksz)
  541. outblk = blk;
  542. else
  543. outblk = crypto_cursor_segbase(&cc_out);
  544. exf->decrypt(swe->sw_kschedule, inblk, outblk);
  545. if (outblk == blk)
  546. crypto_cursor_copyback(&cc_out, blksz, blk);
  547. else
  548. crypto_cursor_advance(&cc_out, blksz);
  549. }
  550. if (resid > 0) {
  551. crypto_cursor_copydata(&cc_in, resid, blk);
  552. exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
  553. crypto_cursor_copyback(&cc_out, resid, blk);
  554. }
  555. } else {
  556. /* Inject the authentication data */
  557. crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
  558. }
  559. out:
  560. explicit_bzero(blkbuf, sizeof(blkbuf));
  561. explicit_bzero(tag, sizeof(tag));
  562. explicit_bzero(iv, sizeof(iv));
  563. return (error);
  564. }
  565. static int
  566. swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
  567. {
  568. u_char tag[AES_CBC_MAC_HASH_LEN];
  569. u_char iv[AES_BLOCK_LEN];
  570. union authctx ctx;
  571. struct swcr_auth *swa;
  572. struct auth_hash *axf;
  573. int error, ivlen;
  574. swa = &ses->swcr_auth;
  575. axf = swa->sw_axf;
  576. bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
  577. /* Initialize the IV */
  578. ivlen = AES_CCM_IV_LEN;
  579. crypto_read_iv(crp, iv);
  580. /*
  581. * AES CCM-CBC-MAC needs to know the length of both the auth
  582. * data and payload data before doing the auth computation.
  583. */
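/*
 * For the standalone CBC-MAC there is no encrypted payload: all
 * input counts as authentication data, so the encrypted-data length
 * is left at zero.
 */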
  584. ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
  585. ctx.aes_cbc_mac_ctx.cryptDataLength = 0;
  586. axf->Reinit(&ctx, iv, ivlen);
  587. if (crp->crp_aad != NULL)
  588. error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
  589. else
  590. error = crypto_apply(crp, crp->crp_payload_start,
  591. crp->crp_payload_length, axf->Update, &ctx);
  592. if (error)
  593. return (error);
  594. /* Finalize MAC */
  595. axf->Final(tag, &ctx);
  596. if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
  597. u_char tag2[AES_CBC_MAC_HASH_LEN];
  598. crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
  599. tag2);
  600. if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
  601. error = EBADMSG;
602. explicit_bzero(tag2, sizeof(tag2));
  603. } else {
  604. /* Inject the authentication data */
  605. crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
  606. }
  607. explicit_bzero(tag, sizeof(tag));
  608. explicit_bzero(iv, sizeof(iv));
  609. return (error);
  610. }
  611. static int
  612. swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
  613. {
  614. uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
  615. u_char *blk = (u_char *)blkbuf;
  616. u_char tag[AES_CBC_MAC_HASH_LEN];
  617. u_char iv[AES_BLOCK_LEN];
  618. struct crypto_buffer_cursor cc_in, cc_out;
  619. const u_char *inblk;
  620. u_char *outblk;
  621. union authctx ctx;
  622. struct swcr_auth *swa;
  623. struct swcr_encdec *swe;
  624. struct auth_hash *axf;
  625. struct enc_xform *exf;
  626. int blksz, error, ivlen, r, resid;
  627. swa = &ses->swcr_auth;
  628. axf = swa->sw_axf;
  629. bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
  630. blksz = AES_BLOCK_LEN;
  631. KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
  632. __func__));
  633. swe = &ses->swcr_encdec;
  634. exf = swe->sw_exf;
  635. KASSERT(axf->blocksize == exf->native_blocksize,
  636. ("%s: blocksize mismatch", __func__));
  637. if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
  638. return (EINVAL);
  639. /* Initialize the IV */
  640. ivlen = AES_CCM_IV_LEN;
  641. bcopy(crp->crp_iv, iv, ivlen);
  642. /*
  643. * AES CCM-CBC-MAC needs to know the length of both the auth
  644. * data and payload data before doing the auth computation.
  645. */
  646. ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
  647. ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;
  648. /* Supply MAC with IV */
  649. axf->Reinit(&ctx, iv, ivlen);
  650. /* Supply MAC with AAD */
  651. if (crp->crp_aad != NULL)
  652. error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
  653. else
  654. error = crypto_apply(crp, crp->crp_aad_start,
  655. crp->crp_aad_length, axf->Update, &ctx);
  656. if (error)
  657. return (error);
  658. exf->reinit(swe->sw_kschedule, iv);
  659. /* Do encryption/decryption with MAC */
  660. crypto_cursor_init(&cc_in, &crp->crp_buf);
  661. crypto_cursor_advance(&cc_in, crp->crp_payload_start);
  662. if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
  663. crypto_cursor_init(&cc_out, &crp->crp_obuf);
  664. crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
  665. } else
  666. cc_out = cc_in;
  667. for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
  668. if (crypto_cursor_seglen(&cc_in) < blksz) {
  669. crypto_cursor_copydata(&cc_in, blksz, blk);
  670. inblk = blk;
  671. } else {
  672. inblk = crypto_cursor_segbase(&cc_in);
  673. crypto_cursor_advance(&cc_in, blksz);
  674. }
  675. if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
  676. if (crypto_cursor_seglen(&cc_out) < blksz)
  677. outblk = blk;
  678. else
  679. outblk = crypto_cursor_segbase(&cc_out);
  680. axf->Update(&ctx, inblk, blksz);
  681. exf->encrypt(swe->sw_kschedule, inblk, outblk);
  682. if (outblk == blk)
  683. crypto_cursor_copyback(&cc_out, blksz, blk);
  684. else
  685. crypto_cursor_advance(&cc_out, blksz);
  686. } else {
  687. /*
  688. * One of the problems with CCM+CBC is that
  689. * the authentication is done on the
  690. * unencrypted data. As a result, we have to
  691. * decrypt the data twice: once to generate
  692. * the tag and a second time after the tag is
  693. * verified.
  694. */
  695. exf->decrypt(swe->sw_kschedule, inblk, blk);
  696. axf->Update(&ctx, blk, blksz);
  697. }
  698. }
  699. if (resid > 0) {
  700. crypto_cursor_copydata(&cc_in, resid, blk);
  701. if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
  702. axf->Update(&ctx, blk, resid);
  703. exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
  704. crypto_cursor_copyback(&cc_out, resid, blk);
  705. } else {
  706. exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
  707. axf->Update(&ctx, blk, resid);
  708. }
  709. }
  710. /* Finalize MAC */
  711. axf->Final(tag, &ctx);
  712. /* Validate tag */
  713. error = 0;
  714. if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
  715. u_char tag2[AES_CBC_MAC_HASH_LEN];
  716. crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
  717. tag2);
  718. r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
  719. explicit_bzero(tag2, sizeof(tag2));
  720. if (r != 0) {
  721. error = EBADMSG;
  722. goto out;
  723. }
  724. /* tag matches, decrypt data */
  725. exf->reinit(swe->sw_kschedule, iv);
  726. crypto_cursor_init(&cc_in, &crp->crp_buf);
  727. crypto_cursor_advance(&cc_in, crp->crp_payload_start);
  728. for (resid = crp->crp_payload_length; resid > blksz;
  729. resid -= blksz) {
  730. if (crypto_cursor_seglen(&cc_in) < blksz) {
  731. crypto_cursor_copydata(&cc_in, blksz, blk);
  732. inblk = blk;
  733. } else {
  734. inblk = crypto_cursor_segbase(&cc_in);
  735. crypto_cursor_advance(&cc_in, blksz);
  736. }
  737. if (crypto_cursor_seglen(&cc_out) < blksz)
  738. outblk = blk;
  739. else
  740. outblk = crypto_cursor_segbase(&cc_out);
  741. exf->decrypt(swe->sw_kschedule, inblk, outblk);
  742. if (outblk == blk)
  743. crypto_cursor_copyback(&cc_out, blksz, blk);
  744. else
  745. crypto_cursor_advance(&cc_out, blksz);
  746. }
  747. if (resid > 0) {
  748. crypto_cursor_copydata(&cc_in, resid, blk);
  749. exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
  750. crypto_cursor_copyback(&cc_out, resid, blk);
  751. }
  752. } else {
  753. /* Inject the authentication data */
  754. crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
  755. }
  756. out:
  757. explicit_bzero(blkbuf, sizeof(blkbuf));
  758. explicit_bzero(tag, sizeof(tag));
  759. explicit_bzero(iv, sizeof(iv));
  760. return (error);
  761. }
  762. /*
  763. * Apply a cipher and a digest to perform EtA.
  764. */
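/*
 * Encrypt-then-authenticate: on encryption the cipher runs first so
 * the digest covers the ciphertext; on decryption the digest is
 * verified before the payload is decrypted.
 */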
  765. static int
  766. swcr_eta(struct swcr_session *ses, struct cryptop *crp)
  767. {
  768. int error;
  769. if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
  770. error = swcr_encdec(ses, crp);
  771. if (error == 0)
  772. error = swcr_authcompute(ses, crp);
  773. } else {
  774. error = swcr_authcompute(ses, crp);
  775. if (error == 0)
  776. error = swcr_encdec(ses, crp);
  777. }
  778. return (error);
  779. }
  780. /*
  781. * Apply a compression/decompression algorithm
  782. */
  783. static int
  784. swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
  785. {
  786. uint8_t *data, *out;
  787. struct comp_algo *cxf;
  788. int adj;
  789. uint32_t result;
  790. cxf = ses->swcr_compdec.sw_cxf;
791. /* We must handle the whole buffer of data at once, so if the
792. * data is not contiguous in the request buffer we copy it into a
793. * temporary buffer first.
794. */
  795. data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
  796. if (data == NULL)
  797. return (EINVAL);
  798. crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
  799. data);
  800. if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
  801. result = cxf->compress(data, crp->crp_payload_length, &out);
  802. else
  803. result = cxf->decompress(data, crp->crp_payload_length, &out);
  804. free(data, M_CRYPTO_DATA);
  805. if (result == 0)
  806. return (EINVAL);
  807. crp->crp_olen = result;
  808. /* Check the compressed size when doing compression */
  809. if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
  810. if (result >= crp->crp_payload_length) {
  811. /* Compression was useless, we lost time */
  812. free(out, M_CRYPTO_DATA);
  813. return (0);
  814. }
  815. }
816. /* Copy back the (de)compressed data. m_copyback extends the
817. * mbuf as necessary.
818. */
  819. crypto_copyback(crp, crp->crp_payload_start, result, out);
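/*
 * If the result is shorter than the original payload, trim the
 * excess from the tail of the buffer: m_adj() with a negative count
 * drops bytes from the end of an mbuf chain, and for uio/vm_page
 * buffers the lengths are shortened directly.
 */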
  820. if (result < crp->crp_payload_length) {
  821. switch (crp->crp_buf.cb_type) {
  822. case CRYPTO_BUF_MBUF:
  823. adj = result - crp->crp_payload_length;
  824. m_adj(crp->crp_buf.cb_mbuf, adj);
  825. break;
  826. case CRYPTO_BUF_UIO: {
  827. struct uio *uio = crp->crp_buf.cb_uio;
  828. int ind;
  829. adj = crp->crp_payload_length - result;
  830. ind = uio->uio_iovcnt - 1;
  831. while (adj > 0 && ind >= 0) {
  832. if (adj < uio->uio_iov[ind].iov_len) {
  833. uio->uio_iov[ind].iov_len -= adj;
  834. break;
  835. }
  836. adj -= uio->uio_iov[ind].iov_len;
  837. uio->uio_iov[ind].iov_len = 0;
  838. ind--;
  839. uio->uio_iovcnt--;
  840. }
  841. }
  842. break;
  843. case CRYPTO_BUF_VMPAGE:
  844. adj = crp->crp_payload_length - result;
  845. crp->crp_buf.cb_vm_page_len -= adj;
  846. break;
  847. default:
  848. break;
  849. }
  850. }
  851. free(out, M_CRYPTO_DATA);
  852. return 0;
  853. }
  854. static int
  855. swcr_setup_cipher(struct swcr_session *ses,
  856. const struct crypto_session_params *csp)
  857. {
  858. struct swcr_encdec *swe;
  859. struct enc_xform *txf;
  860. int error;
  861. swe = &ses->swcr_encdec;
  862. txf = crypto_cipher(csp);
  863. MPASS(txf->ivsize == csp->csp_ivlen);
  864. if (txf->ctxsize != 0) {
  865. swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA,
  866. M_NOWAIT);
  867. if (swe->sw_kschedule == NULL)
  868. return (ENOMEM);
  869. }
  870. if (csp->csp_cipher_key != NULL) {
  871. error = txf->setkey(swe->sw_kschedule,
  872. csp->csp_cipher_key, csp->csp_cipher_klen);
  873. if (error)
  874. return (error);
  875. }
  876. swe->sw_exf = txf;
  877. return (0);
  878. }
  879. static int
  880. swcr_setup_auth(struct swcr_session *ses,
  881. const struct crypto_session_params *csp)
  882. {
  883. struct swcr_auth *swa;
  884. struct auth_hash *axf;
  885. swa = &ses->swcr_auth;
  886. axf = crypto_auth_hash(csp);
  887. swa->sw_axf = axf;
  888. if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
  889. return (EINVAL);
  890. if (csp->csp_auth_mlen == 0)
  891. swa->sw_mlen = axf->hashsize;
  892. else
  893. swa->sw_mlen = csp->csp_auth_mlen;
  894. swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
  895. if (swa->sw_ictx == NULL)
  896. return (ENOBUFS);
  897. switch (csp->csp_auth_alg) {
  898. case CRYPTO_SHA1_HMAC:
  899. case CRYPTO_SHA2_224_HMAC:
  900. case CRYPTO_SHA2_256_HMAC:
  901. case CRYPTO_SHA2_384_HMAC:
  902. case CRYPTO_SHA2_512_HMAC:
  903. case CRYPTO_NULL_HMAC:
  904. case CRYPTO_RIPEMD160_HMAC:
  905. swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
  906. M_NOWAIT);
  907. if (swa->sw_octx == NULL)
  908. return (ENOBUFS);
  909. if (csp->csp_auth_key != NULL) {
  910. swcr_authprepare(axf, swa, csp->csp_auth_key,
  911. csp->csp_auth_klen);
  912. }
  913. if (csp->csp_mode == CSP_MODE_DIGEST)
  914. ses->swcr_process = swcr_authcompute;
  915. break;
  916. case CRYPTO_SHA1:
  917. case CRYPTO_SHA2_224:
  918. case CRYPTO_SHA2_256:
  919. case CRYPTO_SHA2_384:
  920. case CRYPTO_SHA2_512:
  921. axf->Init(swa->sw_ictx);
  922. if (csp->csp_mode == CSP_MODE_DIGEST)
  923. ses->swcr_process = swcr_authcompute;
  924. break;
  925. case CRYPTO_AES_NIST_GMAC:
  926. axf->Init(swa->sw_ictx);
  927. axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
  928. csp->csp_auth_klen);
  929. if (csp->csp_mode == CSP_MODE_DIGEST)
  930. ses->swcr_process = swcr_gmac;
  931. break;
  932. case CRYPTO_POLY1305:
  933. case CRYPTO_BLAKE2B:
  934. case CRYPTO_BLAKE2S:
  935. /*
  936. * Blake2b and Blake2s support an optional key but do
  937. * not require one.
  938. */
  939. if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
  940. axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
  941. csp->csp_auth_klen);
  942. axf->Init(swa->sw_ictx);
  943. if (csp->csp_mode == CSP_MODE_DIGEST)
  944. ses->swcr_process = swcr_authcompute;
  945. break;
  946. case CRYPTO_AES_CCM_CBC_MAC:
  947. axf->Init(swa->sw_ictx);
  948. axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
  949. csp->csp_auth_klen);
  950. if (csp->csp_mode == CSP_MODE_DIGEST)
  951. ses->swcr_process = swcr_ccm_cbc_mac;
  952. break;
  953. }
  954. return (0);
  955. }
  956. static int
  957. swcr_setup_gcm(struct swcr_session *ses,
  958. const struct crypto_session_params *csp)
  959. {
  960. struct swcr_auth *swa;
  961. struct auth_hash *axf;
  962. if (csp->csp_ivlen != AES_GCM_IV_LEN)
  963. return (EINVAL);
  964. /* First, setup the auth side. */
  965. swa = &ses->swcr_auth;
  966. switch (csp->csp_cipher_klen * 8) {
  967. case 128:
  968. axf = &auth_hash_nist_gmac_aes_128;
  969. break;
  970. case 192:
  971. axf = &auth_hash_nist_gmac_aes_192;
  972. break;
  973. case 256:
  974. axf = &auth_hash_nist_gmac_aes_256;
  975. break;
  976. default:
  977. return (EINVAL);
  978. }
  979. swa->sw_axf = axf;
  980. if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
  981. return (EINVAL);
  982. if (csp->csp_auth_mlen == 0)
  983. swa->sw_mlen = axf->hashsize;
  984. else
  985. swa->sw_mlen = csp->csp_auth_mlen;
  986. swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
  987. if (swa->sw_ictx == NULL)
  988. return (ENOBUFS);
  989. axf->Init(swa->sw_ictx);
  990. if (csp->csp_cipher_key != NULL)
  991. axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
  992. csp->csp_cipher_klen);
  993. /* Second, setup the cipher side. */
  994. return (swcr_setup_cipher(ses, csp));
  995. }
  996. static int
  997. swcr_setup_ccm(struct swcr_session *ses,
  998. const struct crypto_session_params *csp)
  999. {
  1000. struct swcr_auth *swa;
  1001. struct auth_hash *axf;
  1002. if (csp->csp_ivlen != AES_CCM_IV_LEN)
  1003. return (EINVAL);
  1004. /* First, setup the auth side. */
  1005. swa = &ses->swcr_auth;
  1006. switch (csp->csp_cipher_klen * 8) {
  1007. case 128:
  1008. axf = &auth_hash_ccm_cbc_mac_128;
  1009. break;
  1010. case 192:
  1011. axf = &auth_hash_ccm_cbc_mac_192;
  1012. break;
  1013. case 256:
  1014. axf = &auth_hash_ccm_cbc_mac_256;
  1015. break;
  1016. default:
  1017. return (EINVAL);
  1018. }
  1019. swa->sw_axf = axf;
  1020. if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
  1021. return (EINVAL);
  1022. if (csp->csp_auth_mlen == 0)
  1023. swa->sw_mlen = axf->hashsize;
  1024. else
  1025. swa->sw_mlen = csp->csp_auth_mlen;
  1026. swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
  1027. if (swa->sw_ictx == NULL)
  1028. return (ENOBUFS);
  1029. axf->Init(swa->sw_ictx);
  1030. if (csp->csp_cipher_key != NULL)
  1031. axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
  1032. csp->csp_cipher_klen);
  1033. /* Second, setup the cipher side. */
  1034. return (swcr_setup_cipher(ses, csp));
  1035. }
  1036. static bool
  1037. swcr_auth_supported(const struct crypto_session_params *csp)
  1038. {
  1039. struct auth_hash *axf;
  1040. axf = crypto_auth_hash(csp);
  1041. if (axf == NULL)
  1042. return (false);
  1043. switch (csp->csp_auth_alg) {
  1044. case CRYPTO_SHA1_HMAC:
  1045. case CRYPTO_SHA2_224_HMAC:
  1046. case CRYPTO_SHA2_256_HMAC:
  1047. case CRYPTO_SHA2_384_HMAC:
  1048. case CRYPTO_SHA2_512_HMAC:
  1049. case CRYPTO_NULL_HMAC:
  1050. case CRYPTO_RIPEMD160_HMAC:
  1051. break;
  1052. case CRYPTO_AES_NIST_GMAC:
  1053. switch (csp->csp_auth_klen * 8) {
  1054. case 128:
  1055. case 192:
  1056. case 256:
  1057. break;
  1058. default:
  1059. return (false);
  1060. }
  1061. if (csp->csp_auth_key == NULL)
  1062. return (false);
  1063. if (csp->csp_ivlen != AES_GCM_IV_LEN)
  1064. return (false);
  1065. break;
  1066. case CRYPTO_POLY1305:
  1067. if (csp->csp_auth_klen != POLY1305_KEY_LEN)
  1068. return (false);
  1069. break;
  1070. case CRYPTO_AES_CCM_CBC_MAC:
  1071. switch (csp->csp_auth_klen * 8) {
  1072. case 128:
  1073. case 192:
  1074. case 256:
  1075. break;
  1076. default:
  1077. return (false);
  1078. }
  1079. if (csp->csp_auth_key == NULL)
  1080. return (false);
  1081. if (csp->csp_ivlen != AES_CCM_IV_LEN)
  1082. return (false);
  1083. break;
  1084. }
  1085. return (true);
  1086. }
  1087. static bool
  1088. swcr_cipher_supported(const struct crypto_session_params *csp)
  1089. {
  1090. struct enc_xform *txf;
  1091. txf = crypto_cipher(csp);
  1092. if (txf == NULL)
  1093. return (false);
  1094. if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
  1095. txf->ivsize != csp->csp_ivlen)
  1096. return (false);
  1097. return (true);
  1098. }
  1099. #define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)
  1100. static int
  1101. swcr_probesession(device_t dev, const struct crypto_session_params *csp)
  1102. {
  1103. if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
  1104. return (EINVAL);
  1105. switch (csp->csp_mode) {
  1106. case CSP_MODE_COMPRESS:
  1107. switch (csp->csp_cipher_alg) {
  1108. case CRYPTO_DEFLATE_COMP:
  1109. break;
  1110. default:
  1111. return (EINVAL);
  1112. }
  1113. break;
  1114. case CSP_MODE_CIPHER:
  1115. switch (csp->csp_cipher_alg) {
  1116. case CRYPTO_AES_NIST_GCM_16:
  1117. case CRYPTO_AES_CCM_16:
  1118. return (EINVAL);
  1119. default:
  1120. if (!swcr_cipher_supported(csp))
  1121. return (EINVAL);
  1122. break;
  1123. }
  1124. break;
  1125. case CSP_MODE_DIGEST:
  1126. if (!swcr_auth_supported(csp))
  1127. return (EINVAL);
  1128. break;
  1129. case CSP_MODE_AEAD:
  1130. switch (csp->csp_cipher_alg) {
  1131. case CRYPTO_AES_NIST_GCM_16:
  1132. case CRYPTO_AES_CCM_16:
  1133. break;
  1134. default:
  1135. return (EINVAL);
  1136. }
  1137. break;
  1138. case CSP_MODE_ETA:
  1139. /* AEAD algorithms cannot be used for EtA. */
  1140. switch (csp->csp_cipher_alg) {
  1141. case CRYPTO_AES_NIST_GCM_16:
  1142. case CRYPTO_AES_CCM_16:
  1143. return (EINVAL);
  1144. }
  1145. switch (csp->csp_auth_alg) {
  1146. case CRYPTO_AES_NIST_GMAC:
  1147. case CRYPTO_AES_CCM_CBC_MAC:
  1148. return (EINVAL);
  1149. }
  1150. if (!swcr_cipher_supported(csp) ||
  1151. !swcr_auth_supported(csp))
  1152. return (EINVAL);
  1153. break;
  1154. default:
  1155. return (EINVAL);
  1156. }
  1157. return (CRYPTODEV_PROBE_SOFTWARE);
  1158. }
  1159. /*
  1160. * Generate a new software session.
  1161. */
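/*
 * For reference, a consumer reaches this entry point through
 * crypto_newsession() after filling in a crypto_session_params.
 * A rough, illustrative sketch for an encrypt-then-authenticate
 * session (the field values below are examples only, not taken
 * from this file):
 *
 *	struct crypto_session_params csp = {
 *		.csp_mode = CSP_MODE_ETA,
 *		.csp_cipher_alg = CRYPTO_AES_CBC,
 *		.csp_cipher_klen = 32,
 *		.csp_ivlen = 16,
 *		.csp_auth_alg = CRYPTO_SHA2_256_HMAC,
 *		.csp_auth_klen = 32,
 *	};
 */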
  1162. static int
  1163. swcr_newsession(device_t dev, crypto_session_t cses,
  1164. const struct crypto_session_params *csp)
  1165. {
  1166. struct swcr_session *ses;
  1167. struct swcr_encdec *swe;
  1168. struct swcr_auth *swa;
  1169. struct comp_algo *cxf;
  1170. int error;
  1171. ses = crypto_get_driver_session(cses);
  1172. mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);
  1173. error = 0;
  1174. swe = &ses->swcr_encdec;
  1175. swa = &ses->swcr_auth;
  1176. switch (csp->csp_mode) {
  1177. case CSP_MODE_COMPRESS:
  1178. switch (csp->csp_cipher_alg) {
  1179. case CRYPTO_DEFLATE_COMP:
  1180. cxf = &comp_algo_deflate;
  1181. break;
  1182. #ifdef INVARIANTS
  1183. default:
  1184. panic("bad compression algo");
  1185. #endif
  1186. }
  1187. ses->swcr_compdec.sw_cxf = cxf;
  1188. ses->swcr_process = swcr_compdec;
  1189. break;
  1190. case CSP_MODE_CIPHER:
  1191. switch (csp->csp_cipher_alg) {
  1192. case CRYPTO_NULL_CBC:
  1193. ses->swcr_process = swcr_null;
  1194. break;
  1195. #ifdef INVARIANTS
  1196. case CRYPTO_AES_NIST_GCM_16:
  1197. case CRYPTO_AES_CCM_16:
  1198. panic("bad cipher algo");
  1199. #endif
  1200. default:
  1201. error = swcr_setup_cipher(ses, csp);
  1202. if (error == 0)
  1203. ses->swcr_process = swcr_encdec;
  1204. }
  1205. break;
  1206. case CSP_MODE_DIGEST:
  1207. error = swcr_setup_auth(ses, csp);
  1208. break;
  1209. case CSP_MODE_AEAD:
  1210. switch (csp->csp_cipher_alg) {
  1211. case CRYPTO_AES_NIST_GCM_16:
  1212. error = swcr_setup_gcm(ses, csp);
  1213. if (error == 0)
  1214. ses->swcr_process = swcr_gcm;
  1215. break;
  1216. case CRYPTO_AES_CCM_16:
  1217. error = swcr_setup_ccm(ses, csp);
  1218. if (error == 0)
  1219. ses->swcr_process = swcr_ccm;
  1220. break;
  1221. #ifdef INVARIANTS
  1222. default:
  1223. panic("bad aead algo");
  1224. #endif
  1225. }
  1226. break;
  1227. case CSP_MODE_ETA:
  1228. #ifdef INVARIANTS
  1229. switch (csp->csp_cipher_alg) {
  1230. case CRYPTO_AES_NIST_GCM_16:
  1231. case CRYPTO_AES_CCM_16:
  1232. panic("bad eta cipher algo");
  1233. }
  1234. switch (csp->csp_auth_alg) {
  1235. case CRYPTO_AES_NIST_GMAC:
  1236. case CRYPTO_AES_CCM_CBC_MAC:
  1237. panic("bad eta auth algo");
  1238. }
  1239. #endif
  1240. error = swcr_setup_auth(ses, csp);
  1241. if (error)
  1242. break;
  1243. if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
  1244. /* Effectively degrade to digest mode. */
  1245. ses->swcr_process = swcr_authcompute;
  1246. break;
  1247. }
  1248. error = swcr_setup_cipher(ses, csp);
  1249. if (error == 0)
  1250. ses->swcr_process = swcr_eta;
  1251. break;
  1252. default:
  1253. error = EINVAL;
  1254. }
  1255. if (error)
  1256. swcr_freesession(dev, cses);
  1257. return (error);
  1258. }
  1259. static void
  1260. swcr_freesession(device_t dev, crypto_session_t cses)
  1261. {
  1262. struct swcr_session *ses;
  1263. ses = crypto_get_driver_session(cses);
  1264. mtx_destroy(&ses->swcr_lock);
  1265. zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA);
  1266. zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA);
  1267. zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA);
  1268. }
  1269. /*
  1270. * Process a software request.
  1271. */
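/*
 * Requests on a session are serialized with the per-session mutex,
 * since the transform contexts live in the session and are reused
 * across requests.
 */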
  1272. static int
  1273. swcr_process(device_t dev, struct cryptop *crp, int hint)
  1274. {
  1275. struct swcr_session *ses;
  1276. ses = crypto_get_driver_session(crp->crp_session);
  1277. mtx_lock(&ses->swcr_lock);
  1278. crp->crp_etype = ses->swcr_process(ses, crp);
  1279. mtx_unlock(&ses->swcr_lock);
  1280. crypto_done(crp);
  1281. return (0);
  1282. }
  1283. static void
  1284. swcr_identify(driver_t *drv, device_t parent)
  1285. {
  1286. /* NB: order 10 is so we get attached after h/w devices */
  1287. if (device_find_child(parent, "cryptosoft", -1) == NULL &&
  1288. BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
  1289. panic("cryptosoft: could not attach");
  1290. }
  1291. static int
  1292. swcr_probe(device_t dev)
  1293. {
  1294. device_set_desc(dev, "software crypto");
  1295. return (BUS_PROBE_NOWILDCARD);
  1296. }
  1297. static int
  1298. swcr_attach(device_t dev)
  1299. {
  1300. swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
  1301. CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
  1302. if (swcr_id < 0) {
1303. device_printf(dev, "cannot initialize!\n");
  1304. return (ENXIO);
  1305. }
  1306. return (0);
  1307. }
  1308. static int
  1309. swcr_detach(device_t dev)
  1310. {
  1311. crypto_unregister_all(swcr_id);
  1312. return 0;
  1313. }
  1314. static device_method_t swcr_methods[] = {
  1315. DEVMETHOD(device_identify, swcr_identify),
  1316. DEVMETHOD(device_probe, swcr_probe),
  1317. DEVMETHOD(device_attach, swcr_attach),
  1318. DEVMETHOD(device_detach, swcr_detach),
  1319. DEVMETHOD(cryptodev_probesession, swcr_probesession),
  1320. DEVMETHOD(cryptodev_newsession, swcr_newsession),
  1321. DEVMETHOD(cryptodev_freesession,swcr_freesession),
  1322. DEVMETHOD(cryptodev_process, swcr_process),
  1323. {0, 0},
  1324. };
  1325. static driver_t swcr_driver = {
  1326. "cryptosoft",
  1327. swcr_methods,
  1328. 0, /* NB: no softc */
  1329. };
  1330. static devclass_t swcr_devclass;
  1331. /*
  1332. * NB: We explicitly reference the crypto module so we
  1333. * get the necessary ordering when built as a loadable
  1334. * module. This is required because we bundle the crypto
  1335. * module code together with the cryptosoft driver (otherwise
  1336. * normal module dependencies would handle things).
  1337. */
  1338. extern int crypto_modevent(struct module *, int, void *);
  1339. /* XXX where to attach */
1340. DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
  1341. MODULE_VERSION(cryptosoft, 1);
  1342. MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);