sha256_mb.c

/*
 * Multi buffer SHA256 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 *	Megha Dey <megha.dey@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <linux/hardirq.h>
#include <asm/fpu/api.h>

#include "sha256_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */

static struct mcryptd_alg_state sha256_mb_alg_state;

struct sha256_mb_ctx {
        struct mcryptd_ahash *mcryptd_tfm;
};

static inline struct mcryptd_hash_request_ctx
                *cast_hash_to_mcryptd_ctx(struct sha256_hash_ctx *hash_ctx)
{
        struct ahash_request *areq;

        areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
        return container_of(areq, struct mcryptd_hash_request_ctx, areq);
}

static inline struct ahash_request
                *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
        return container_of((void *) ctx, struct ahash_request, __ctx);
}

static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
                         struct ahash_request *areq)
{
        rctx->flag = HASH_UPDATE;
}
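
/*
 * Entry points into the architecture-specific SHA-256 multi-buffer job
 * manager.  These function pointers are bound to the AVX2 assembly
 * implementations (sha256_mb_mgr_*_avx2) in sha256_mb_mod_init() below.
 */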

static asmlinkage void (*sha256_job_mgr_init)(struct sha256_mb_mgr *state);
static asmlinkage struct job_sha256* (*sha256_job_mgr_submit)
                        (struct sha256_mb_mgr *state, struct job_sha256 *job);
static asmlinkage struct job_sha256* (*sha256_job_mgr_flush)
                        (struct sha256_mb_mgr *state);
static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job)
                        (struct sha256_mb_mgr *state);
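
/*
 * sha256_pad - write the standard SHA-256 padding (a 0x80 byte, zeroes and
 * the 64-bit message bit-length) into the extra-block buffer and return the
 * number of extra blocks that still have to be hashed.
 *
 * Worked example, assuming SHA256_PADLENGTHFIELD_SIZE == 8: for
 * total_len = 100 the 36 residual bytes + 0x80 + 19 zero bytes + 8 length
 * bytes fill exactly one extra block; for total_len = 60 the padding no
 * longer fits in the same block, giving two extra blocks
 * (60 + 1 + 59 + 8 = 128 bytes).
 */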

inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
                           uint64_t total_len)
{
        uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1);

        memset(&padblock[i], 0, SHA256_BLOCK_SIZE);
        padblock[i] = 0x80;

        i += ((SHA256_BLOCK_SIZE - 1) &
              (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1)))
             + 1 + SHA256_PADLENGTHFIELD_SIZE;

#if SHA256_PADLENGTHFIELD_SIZE == 16
        *((uint64_t *) &padblock[i - 16]) = 0;
#endif

        *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

        /* Number of extra blocks to hash */
        return i >> SHA256_LOG2_BLOCK_SIZE;
}
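
/*
 * sha256_ctx_mgr_resubmit - drive a context until it can be handed back:
 * a COMPLETE job is returned as is, whole blocks still pending in the
 * user buffer are submitted to the job manager, a context flagged
 * HASH_CTX_STS_LAST gets its padding blocks generated and submitted, and
 * anything else is parked as IDLE until more user data arrives.
 */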

static struct sha256_hash_ctx
                *sha256_ctx_mgr_resubmit(struct sha256_ctx_mgr *mgr,
                                         struct sha256_hash_ctx *ctx)
{
        while (ctx) {
                if (ctx->status & HASH_CTX_STS_COMPLETE) {
                        /* Clear PROCESSING bit */
                        ctx->status = HASH_CTX_STS_COMPLETE;
                        return ctx;
                }

                /*
                 * If the extra blocks are empty, begin hashing what remains
                 * in the user's buffer.
                 */
                if (ctx->partial_block_buffer_length == 0 &&
                    ctx->incoming_buffer_length) {

                        const void *buffer = ctx->incoming_buffer;
                        uint32_t len = ctx->incoming_buffer_length;
                        uint32_t copy_len;

                        /*
                         * Only entire blocks can be hashed.
                         * Copy remainder to extra blocks buffer.
                         */
                        copy_len = len & (SHA256_BLOCK_SIZE-1);

                        if (copy_len) {
                                len -= copy_len;
                                memcpy(ctx->partial_block_buffer,
                                       ((const char *) buffer + len),
                                       copy_len);
                                ctx->partial_block_buffer_length = copy_len;
                        }

                        ctx->incoming_buffer_length = 0;

                        /* len should be a multiple of the block size now */
                        assert((len % SHA256_BLOCK_SIZE) == 0);

                        /* Set len to the number of blocks to be hashed */
                        len >>= SHA256_LOG2_BLOCK_SIZE;

                        if (len) {
                                ctx->job.buffer = (uint8_t *) buffer;
                                ctx->job.len = len;
                                ctx = (struct sha256_hash_ctx *)
                                        sha256_job_mgr_submit(&mgr->mgr,
                                                              &ctx->job);
                                continue;
                        }
                }

                /*
                 * If the extra blocks are not empty, then we are
                 * either on the last block(s) or we need more
                 * user input before continuing.
                 */
                if (ctx->status & HASH_CTX_STS_LAST) {
                        uint8_t *buf = ctx->partial_block_buffer;
                        uint32_t n_extra_blocks =
                                sha256_pad(buf, ctx->total_length);

                        ctx->status = (HASH_CTX_STS_PROCESSING |
                                       HASH_CTX_STS_COMPLETE);
                        ctx->job.buffer = buf;
                        ctx->job.len = (uint32_t) n_extra_blocks;
                        ctx = (struct sha256_hash_ctx *)
                                sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
                        continue;
                }

                ctx->status = HASH_CTX_STS_IDLE;
                return ctx;
        }

        return NULL;
}

static struct sha256_hash_ctx
                *sha256_ctx_mgr_get_comp_ctx(struct sha256_ctx_mgr *mgr)
{
        /*
         * If get_comp_job returns NULL, there are no jobs complete.
         * If get_comp_job returns a job, verify that it is safe to return to
         * the user. If it is not ready, resubmit the job to finish processing.
         * If sha256_ctx_mgr_resubmit returned a job, it is ready to be
         * returned. Otherwise, all jobs currently being managed by the
         * hash_ctx_mgr still need processing.
         */
        struct sha256_hash_ctx *ctx;

        ctx = (struct sha256_hash_ctx *)
                        sha256_job_mgr_get_comp_job(&mgr->mgr);
        return sha256_ctx_mgr_resubmit(mgr, ctx);
}

static void sha256_ctx_mgr_init(struct sha256_ctx_mgr *mgr)
{
        sha256_job_mgr_init(&mgr->mgr);
}
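
/*
 * sha256_ctx_mgr_submit - feed user data into a context, either as an
 * intermediate HASH_UPDATE or as the final HASH_LAST chunk.  Partial
 * blocks are staged in ctx->partial_block_buffer; only whole blocks are
 * handed to the job manager.  Returns a context that the manager is done
 * with for now (completed, idle or carrying an error), or NULL while all
 * lanes are still busy.
 */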

static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
                                        struct sha256_hash_ctx *ctx,
                                        const void *buffer,
                                        uint32_t len,
                                        int flags)
{
        if (flags & ~(HASH_UPDATE | HASH_LAST)) {
                /* User should not pass anything other than UPDATE or LAST */
                ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
                return ctx;
        }

        if (ctx->status & HASH_CTX_STS_PROCESSING) {
                /* Cannot submit to a currently processing job. */
                ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
                return ctx;
        }

        if (ctx->status & HASH_CTX_STS_COMPLETE) {
                /* Cannot update a finished job. */
                ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
                return ctx;
        }

        /* If we made it here, there was no error during this call to submit */
        ctx->error = HASH_CTX_ERROR_NONE;

        /* Store buffer ptr info from user */
        ctx->incoming_buffer = buffer;
        ctx->incoming_buffer_length = len;

        /*
         * Store the user's request flags and mark this ctx as currently
         * being processed.
         */
        ctx->status = (flags & HASH_LAST) ?
                        (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
                        HASH_CTX_STS_PROCESSING;

        /* Advance byte counter */
        ctx->total_length += len;

        /*
         * If there is anything currently buffered in the extra blocks,
         * append to it until it contains a whole block.
         * Or if the user's buffer contains less than a whole block,
         * append as much as possible to the extra block.
         */
        if (ctx->partial_block_buffer_length || len < SHA256_BLOCK_SIZE) {
                /*
                 * Compute how many bytes to copy from user buffer into
                 * extra block
                 */
                uint32_t copy_len = SHA256_BLOCK_SIZE -
                                        ctx->partial_block_buffer_length;
                if (len < copy_len)
                        copy_len = len;

                if (copy_len) {
                        /* Copy and update relevant pointers and counters */
                        memcpy(
                &ctx->partial_block_buffer[ctx->partial_block_buffer_length],
                                buffer, copy_len);

                        ctx->partial_block_buffer_length += copy_len;
                        ctx->incoming_buffer = (const void *)
                                        ((const char *)buffer + copy_len);
                        ctx->incoming_buffer_length = len - copy_len;
                }

                /* The extra block should never contain more than 1 block */
                assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE);

                /*
                 * If the extra block buffer contains exactly 1 block,
                 * it can be hashed.
                 */
                if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) {
                        ctx->partial_block_buffer_length = 0;

                        ctx->job.buffer = ctx->partial_block_buffer;
                        ctx->job.len = 1;
                        ctx = (struct sha256_hash_ctx *)
                                sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
                }
        }

        return sha256_ctx_mgr_resubmit(mgr, ctx);
}

static struct sha256_hash_ctx *sha256_ctx_mgr_flush(struct sha256_ctx_mgr *mgr)
{
        struct sha256_hash_ctx *ctx;

        while (1) {
                ctx = (struct sha256_hash_ctx *)
                                        sha256_job_mgr_flush(&mgr->mgr);

                /* If flush returned 0, there are no more jobs in flight. */
                if (!ctx)
                        return NULL;

                /*
                 * If flush returned a job, resubmit the job to finish
                 * processing.
                 */
                ctx = sha256_ctx_mgr_resubmit(mgr, ctx);

                /*
                 * If sha256_ctx_mgr_resubmit returned a job, it is ready to
                 * be returned. Otherwise, all jobs currently being managed by
                 * the sha256_ctx_mgr still need processing. Loop.
                 */
                if (ctx)
                        return ctx;
        }
}
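
/*
 * ahash entry points of the internal "__sha256-mb" algorithm.  The init
 * handler seeds the job with SHA256_H0..SHA256_H7, the standard SHA-256
 * initial hash values from <crypto/sha.h>.
 */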

static int sha256_mb_init(struct ahash_request *areq)
{
        struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);

        hash_ctx_init(sctx);
        sctx->job.result_digest[0] = SHA256_H0;
        sctx->job.result_digest[1] = SHA256_H1;
        sctx->job.result_digest[2] = SHA256_H2;
        sctx->job.result_digest[3] = SHA256_H3;
        sctx->job.result_digest[4] = SHA256_H4;
        sctx->job.result_digest[5] = SHA256_H5;
        sctx->job.result_digest[6] = SHA256_H6;
        sctx->job.result_digest[7] = SHA256_H7;
        sctx->total_length = 0;
        sctx->partial_block_buffer_length = 0;
        sctx->status = HASH_CTX_STS_IDLE;

        return 0;
}

static int sha256_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
        int i;
        struct sha256_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
        __be32 *dst = (__be32 *) rctx->out;

        for (i = 0; i < 8; ++i)
                dst[i] = cpu_to_be32(sctx->job.result_digest[i]);

        return 0;
}
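
/*
 * sha_finish_walk - continue the scatterlist walk of a request whose
 * current submission has completed: each remaining chunk is resubmitted
 * to the context manager under kernel_fpu_begin()/kernel_fpu_end(), and
 * once the walk is done for a final request the digest is copied out in
 * big-endian form by sha256_mb_set_results().
 */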

static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
                           struct mcryptd_alg_cstate *cstate, bool flush)
{
        int flag = HASH_UPDATE;
        int nbytes, err = 0;
        struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
        struct sha256_hash_ctx *sha_ctx;

        /* more work ? */
        while (!(rctx->flag & HASH_DONE)) {
                nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
                if (nbytes < 0) {
                        err = nbytes;
                        goto out;
                }
                /* check if the walk is done */
                if (crypto_ahash_walk_last(&rctx->walk)) {
                        rctx->flag |= HASH_DONE;
                        if (rctx->flag & HASH_FINAL)
                                flag |= HASH_LAST;
                }

                sha_ctx = (struct sha256_hash_ctx *)
                                        ahash_request_ctx(&rctx->areq);
                kernel_fpu_begin();
                sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx,
                                                rctx->walk.data, nbytes, flag);
                if (!sha_ctx) {
                        if (flush)
                                sha_ctx = sha256_ctx_mgr_flush(cstate->mgr);
                }
                kernel_fpu_end();

                if (sha_ctx)
                        rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                else {
                        rctx = NULL;
                        goto out;
                }
        }

        /* copy the results */
        if (rctx->flag & HASH_FINAL)
                sha256_mb_set_results(rctx);

out:
        *ret_rctx = rctx;
        return err;
}

static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
                            struct mcryptd_alg_cstate *cstate,
                            int err)
{
        struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
        struct sha256_hash_ctx *sha_ctx;
        struct mcryptd_hash_request_ctx *req_ctx;
        int ret;

        /* remove from work list */
        spin_lock(&cstate->work_lock);
        list_del(&rctx->waiter);
        spin_unlock(&cstate->work_lock);

        if (irqs_disabled())
                rctx->complete(&req->base, err);
        else {
                local_bh_disable();
                rctx->complete(&req->base, err);
                local_bh_enable();
        }

        /* check to see if there are other jobs that are done */
        sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
        while (sha_ctx) {
                req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                ret = sha_finish_walk(&req_ctx, cstate, false);
                if (req_ctx) {
                        spin_lock(&cstate->work_lock);
                        list_del(&req_ctx->waiter);
                        spin_unlock(&cstate->work_lock);

                        req = cast_mcryptd_ctx_to_req(req_ctx);
                        if (irqs_disabled())
                                req_ctx->complete(&req->base, ret);
                        else {
                                local_bh_disable();
                                req_ctx->complete(&req->base, ret);
                                local_bh_enable();
                        }
                }
                sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
        }

        return 0;
}
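
/*
 * sha256_mb_add_list - tag a request with its arrival time and sequence
 * number, queue it on the per-cpu work list and arm the flusher, so that
 * a partially filled lane is forced out no later than FLUSH_INTERVAL
 * microseconds after the request arrived.
 */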

static void sha256_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
                               struct mcryptd_alg_cstate *cstate)
{
        unsigned long next_flush;
        unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

        /* initialize tag */
        rctx->tag.arrival = jiffies;    /* tag the arrival time */
        rctx->tag.seq_num = cstate->next_seq_num++;
        next_flush = rctx->tag.arrival + delay;
        rctx->tag.expire = next_flush;

        spin_lock(&cstate->work_lock);
        list_add_tail(&rctx->waiter, &cstate->work_list);
        spin_unlock(&cstate->work_lock);

        mcryptd_arm_flusher(cstate, delay);
}
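
/*
 * update/finup/final follow the same pattern: start the ahash walk,
 * submit the first chunk to the per-cpu context manager inside a
 * kernel_fpu_begin()/kernel_fpu_end() section, and return -EINPROGRESS
 * when the job stays parked in a lane; completion is then signalled
 * later from sha_complete_job() or from the flusher.
 */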

static int sha256_mb_update(struct ahash_request *areq)
{
        struct mcryptd_hash_request_ctx *rctx =
                container_of(areq, struct mcryptd_hash_request_ctx, areq);
        struct mcryptd_alg_cstate *cstate =
                        this_cpu_ptr(sha256_mb_alg_state.alg_cstate);

        struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
        struct sha256_hash_ctx *sha_ctx;
        int ret = 0, nbytes;

        /* sanity check */
        if (rctx->tag.cpu != smp_processor_id()) {
                pr_err("mcryptd error: cpu clash\n");
                goto done;
        }

        /* need to init context */
        req_ctx_init(rctx, areq);

        nbytes = crypto_ahash_walk_first(req, &rctx->walk);
        if (nbytes < 0) {
                ret = nbytes;
                goto done;
        }

        if (crypto_ahash_walk_last(&rctx->walk))
                rctx->flag |= HASH_DONE;

        /* submit */
        sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
        sha256_mb_add_list(rctx, cstate);
        kernel_fpu_begin();
        sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
                                        nbytes, HASH_UPDATE);
        kernel_fpu_end();

        /* check if anything is returned */
        if (!sha_ctx)
                return -EINPROGRESS;

        if (sha_ctx->error) {
                ret = sha_ctx->error;
                rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                goto done;
        }

        rctx = cast_hash_to_mcryptd_ctx(sha_ctx);

        ret = sha_finish_walk(&rctx, cstate, false);
        if (!rctx)
                return -EINPROGRESS;
done:
        sha_complete_job(rctx, cstate, ret);
        return ret;
}

static int sha256_mb_finup(struct ahash_request *areq)
{
        struct mcryptd_hash_request_ctx *rctx =
                container_of(areq, struct mcryptd_hash_request_ctx, areq);
        struct mcryptd_alg_cstate *cstate =
                        this_cpu_ptr(sha256_mb_alg_state.alg_cstate);

        struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
        struct sha256_hash_ctx *sha_ctx;
        int ret = 0, flag = HASH_UPDATE, nbytes;

        /* sanity check */
        if (rctx->tag.cpu != smp_processor_id()) {
                pr_err("mcryptd error: cpu clash\n");
                goto done;
        }

        /* need to init context */
        req_ctx_init(rctx, areq);

        nbytes = crypto_ahash_walk_first(req, &rctx->walk);
        if (nbytes < 0) {
                ret = nbytes;
                goto done;
        }

        if (crypto_ahash_walk_last(&rctx->walk)) {
                rctx->flag |= HASH_DONE;
                flag = HASH_LAST;
        }

        /* submit */
        rctx->flag |= HASH_FINAL;
        sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
        sha256_mb_add_list(rctx, cstate);

        kernel_fpu_begin();
        sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
                                        nbytes, flag);
        kernel_fpu_end();

        /* check if anything is returned */
        if (!sha_ctx)
                return -EINPROGRESS;

        if (sha_ctx->error) {
                ret = sha_ctx->error;
                goto done;
        }

        rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
        ret = sha_finish_walk(&rctx, cstate, false);
        if (!rctx)
                return -EINPROGRESS;
done:
        sha_complete_job(rctx, cstate, ret);
        return ret;
}

static int sha256_mb_final(struct ahash_request *areq)
{
        struct mcryptd_hash_request_ctx *rctx =
                container_of(areq, struct mcryptd_hash_request_ctx, areq);
        struct mcryptd_alg_cstate *cstate =
                        this_cpu_ptr(sha256_mb_alg_state.alg_cstate);

        struct sha256_hash_ctx *sha_ctx;
        int ret = 0;
        u8 data;

        /* sanity check */
        if (rctx->tag.cpu != smp_processor_id()) {
                pr_err("mcryptd error: cpu clash\n");
                goto done;
        }

        /* need to init context */
        req_ctx_init(rctx, areq);

        rctx->flag |= HASH_DONE | HASH_FINAL;

        sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
        /* flag HASH_FINAL and 0 data size */
        sha256_mb_add_list(rctx, cstate);
        kernel_fpu_begin();
        sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
                                        HASH_LAST);
        kernel_fpu_end();

        /* check if anything is returned */
        if (!sha_ctx)
                return -EINPROGRESS;

        if (sha_ctx->error) {
                ret = sha_ctx->error;
                rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                goto done;
        }

        rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
        ret = sha_finish_walk(&rctx, cstate, false);
        if (!rctx)
                return -EINPROGRESS;
done:
        sha_complete_job(rctx, cstate, ret);
        return ret;
}

static int sha256_mb_export(struct ahash_request *areq, void *out)
{
        struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);

        memcpy(out, sctx, sizeof(*sctx));

        return 0;
}

static int sha256_mb_import(struct ahash_request *areq, const void *in)
{
        struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);

        memcpy(sctx, in, sizeof(*sctx));

        return 0;
}

static int sha256_mb_async_init_tfm(struct crypto_tfm *tfm)
{
        struct mcryptd_ahash *mcryptd_tfm;
        struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
        struct mcryptd_hash_ctx *mctx;

        mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha256-mb",
                                          CRYPTO_ALG_INTERNAL,
                                          CRYPTO_ALG_INTERNAL);
        if (IS_ERR(mcryptd_tfm))
                return PTR_ERR(mcryptd_tfm);
        mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
        mctx->alg_state = &sha256_mb_alg_state;
        ctx->mcryptd_tfm = mcryptd_tfm;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct ahash_request) +
                                 crypto_ahash_reqsize(&mcryptd_tfm->base));

        return 0;
}

static void sha256_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
        struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);

        mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static int sha256_mb_areq_init_tfm(struct crypto_tfm *tfm)
{
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct ahash_request) +
                                 sizeof(struct sha256_hash_ctx));

        return 0;
}

static void sha256_mb_areq_exit_tfm(struct crypto_tfm *tfm)
{
        struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);

        mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static struct ahash_alg sha256_mb_areq_alg = {
        .init   = sha256_mb_init,
        .update = sha256_mb_update,
        .final  = sha256_mb_final,
        .finup  = sha256_mb_finup,
        .export = sha256_mb_export,
        .import = sha256_mb_import,
        .halg   = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize  = sizeof(struct sha256_hash_ctx),
                .base       = {
                        .cra_name        = "__sha256-mb",
                        .cra_driver_name = "__intel_sha256-mb",
                        .cra_priority    = 100,
                        /*
                         * use ASYNC flag as some buffers in multi-buffer
                         * algo may not have completed before the hashing
                         * thread sleeps
                         */
                        .cra_flags       = CRYPTO_ALG_ASYNC |
                                           CRYPTO_ALG_INTERNAL,
                        .cra_blocksize   = SHA256_BLOCK_SIZE,
                        .cra_module      = THIS_MODULE,
                        .cra_list        = LIST_HEAD_INIT
                                (sha256_mb_areq_alg.halg.base.cra_list),
                        .cra_init        = sha256_mb_areq_init_tfm,
                        .cra_exit        = sha256_mb_areq_exit_tfm,
                        .cra_ctxsize     = sizeof(struct sha256_hash_ctx),
                }
        }
};
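
/*
 * The externally visible "sha256"/"sha256_mb" algorithm below is a thin
 * shim: every operation copies the request, re-targets it at the mcryptd
 * ahash allocated in sha256_mb_async_init_tfm() and lets mcryptd queue it
 * to the internal "__sha256-mb" algorithm above.
 */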

static int sha256_mb_async_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_init(mcryptd_req);
}

static int sha256_mb_async_update(struct ahash_request *req)
{
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_update(mcryptd_req);
}

static int sha256_mb_async_finup(struct ahash_request *req)
{
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_finup(mcryptd_req);
}

static int sha256_mb_async_final(struct ahash_request *req)
{
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_final(mcryptd_req);
}

static int sha256_mb_async_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_digest(mcryptd_req);
}

static int sha256_mb_async_export(struct ahash_request *req, void *out)
{
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        return crypto_ahash_export(mcryptd_req, out);
}

static int sha256_mb_async_import(struct ahash_request *req, const void *in)
{
        struct ahash_request *mcryptd_req = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
        struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
        struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
        struct mcryptd_hash_request_ctx *rctx;
        struct ahash_request *areq;

        memcpy(mcryptd_req, req, sizeof(*req));
        ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
        rctx = ahash_request_ctx(mcryptd_req);
        areq = &rctx->areq;

        ahash_request_set_tfm(areq, child);
        ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                   rctx->complete, req);

        return crypto_ahash_import(mcryptd_req, in);
}

static struct ahash_alg sha256_mb_async_alg = {
        .init   = sha256_mb_async_init,
        .update = sha256_mb_async_update,
        .final  = sha256_mb_async_final,
        .finup  = sha256_mb_async_finup,
        .export = sha256_mb_async_export,
        .import = sha256_mb_async_import,
        .digest = sha256_mb_async_digest,
        .halg   = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize  = sizeof(struct sha256_hash_ctx),
                .base       = {
                        .cra_name        = "sha256",
                        .cra_driver_name = "sha256_mb",
                        /*
                         * Low priority, since with few concurrent hash requests
                         * this is extremely slow due to the flush delay.  Users
                         * whose workloads would benefit from this can request
                         * it explicitly by driver name, or can increase its
                         * priority at runtime using NETLINK_CRYPTO.
                         */
                        .cra_priority    = 50,
                        .cra_flags       = CRYPTO_ALG_ASYNC,
                        .cra_blocksize   = SHA256_BLOCK_SIZE,
                        .cra_module      = THIS_MODULE,
                        .cra_list        = LIST_HEAD_INIT
                                (sha256_mb_async_alg.halg.base.cra_list),
                        .cra_init        = sha256_mb_async_init_tfm,
                        .cra_exit        = sha256_mb_async_exit_tfm,
                        .cra_ctxsize     = sizeof(struct sha256_mb_ctx),
                        .cra_alignmask   = 0,
                },
        },
};
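
/*
 * sha256_mb_flusher - run from the mcryptd flusher work: any queued
 * request whose expire time has passed is forced through
 * sha256_ctx_mgr_flush() and completed; if requests remain, the flusher
 * is re-armed for the oldest remaining expire time.
 */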

static unsigned long sha256_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
        struct mcryptd_hash_request_ctx *rctx;
        unsigned long cur_time;
        unsigned long next_flush = 0;
        struct sha256_hash_ctx *sha_ctx;

        cur_time = jiffies;

        while (!list_empty(&cstate->work_list)) {
                rctx = list_entry(cstate->work_list.next,
                                  struct mcryptd_hash_request_ctx, waiter);
                if (time_before(cur_time, rctx->tag.expire))
                        break;

                kernel_fpu_begin();
                sha_ctx = (struct sha256_hash_ctx *)
                                        sha256_ctx_mgr_flush(cstate->mgr);
                kernel_fpu_end();
                if (!sha_ctx) {
                        pr_err("sha256_mb error: nothing got"
                               " flushed for non-empty list\n");
                        break;
                }
                rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
                sha_finish_walk(&rctx, cstate, true);
                sha_complete_job(rctx, cstate, 0);
        }

        if (!list_empty(&cstate->work_list)) {
                rctx = list_entry(cstate->work_list.next,
                                  struct mcryptd_hash_request_ctx, waiter);
                /* get the hash context and then flush time */
                next_flush = rctx->tag.expire;
                mcryptd_arm_flusher(cstate, get_delay(next_flush));
        }
        return next_flush;
}
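
/*
 * Module init: the AVX2 job manager requires both AVX2 and BMI2, so the
 * module refuses to load (-ENODEV) without them; otherwise a context
 * manager is allocated and initialized per cpu and the internal and outer
 * algorithms are registered.
 */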

static int __init sha256_mb_mod_init(void)
{
        int cpu;
        int err;
        struct mcryptd_alg_cstate *cpu_state;

        /* check for dependent cpu features */
        if (!boot_cpu_has(X86_FEATURE_AVX2) ||
            !boot_cpu_has(X86_FEATURE_BMI2))
                return -ENODEV;

        /* initialize multibuffer structures */
        sha256_mb_alg_state.alg_cstate = alloc_percpu
                                                (struct mcryptd_alg_cstate);

        sha256_job_mgr_init = sha256_mb_mgr_init_avx2;
        sha256_job_mgr_submit = sha256_mb_mgr_submit_avx2;
        sha256_job_mgr_flush = sha256_mb_mgr_flush_avx2;
        sha256_job_mgr_get_comp_job = sha256_mb_mgr_get_comp_job_avx2;

        if (!sha256_mb_alg_state.alg_cstate)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
                cpu_state->next_flush = 0;
                cpu_state->next_seq_num = 0;
                cpu_state->flusher_engaged = false;
                INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
                cpu_state->cpu = cpu;
                cpu_state->alg_state = &sha256_mb_alg_state;
                cpu_state->mgr = kzalloc(sizeof(struct sha256_ctx_mgr),
                                         GFP_KERNEL);
                if (!cpu_state->mgr)
                        goto err2;
                sha256_ctx_mgr_init(cpu_state->mgr);
                INIT_LIST_HEAD(&cpu_state->work_list);
                spin_lock_init(&cpu_state->work_lock);
        }
        sha256_mb_alg_state.flusher = &sha256_mb_flusher;

        err = crypto_register_ahash(&sha256_mb_areq_alg);
        if (err)
                goto err2;
        err = crypto_register_ahash(&sha256_mb_async_alg);
        if (err)
                goto err1;

        return 0;
err1:
        crypto_unregister_ahash(&sha256_mb_areq_alg);
err2:
        for_each_possible_cpu(cpu) {
                cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
                kfree(cpu_state->mgr);
        }
        free_percpu(sha256_mb_alg_state.alg_cstate);
        return -ENODEV;
}

static void __exit sha256_mb_mod_fini(void)
{
        int cpu;
        struct mcryptd_alg_cstate *cpu_state;

        crypto_unregister_ahash(&sha256_mb_async_alg);
        crypto_unregister_ahash(&sha256_mb_areq_alg);
        for_each_possible_cpu(cpu) {
                cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
                kfree(cpu_state->mgr);
        }
        free_percpu(sha256_mb_alg_state.alg_cstate);
}

module_init(sha256_mb_mod_init);
module_exit(sha256_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS_CRYPTO("sha256");
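
/*
 * Illustrative caller-side sketch (not part of this driver): the
 * multi-buffer implementation is reached either through the generic
 * "sha256" name, if its priority wins, or explicitly by driver name as
 * below.  The crypto_wait_req()/DECLARE_CRYPTO_WAIT() helpers shown are
 * from later kernels; older callers would open-code the completion.
 *
 *   struct crypto_ahash *tfm = crypto_alloc_ahash("sha256_mb", 0, 0);
 *   struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *   struct scatterlist sg;
 *   u8 digest[SHA256_DIGEST_SIZE];
 *   DECLARE_CRYPTO_WAIT(wait);
 *   int err;
 *
 *   sg_init_one(&sg, data, len);
 *   ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                              crypto_req_done, &wait);
 *   ahash_request_set_crypt(req, &sg, digest, len);
 *   err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 */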