sha1_mb.c

/*
 * Multi buffer SHA1 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 *	Tim Chen <tim.c.chen@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <linux/hardirq.h>
#include <asm/fpu/api.h>
#include "sha1_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */
static struct mcryptd_alg_state sha1_mb_alg_state;

struct sha1_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};

static inline struct mcryptd_hash_request_ctx
		*cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
{
	struct ahash_request *areq;

	areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
	return container_of(areq, struct mcryptd_hash_request_ctx, areq);
}

static inline struct ahash_request
		*cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
	return container_of((void *) ctx, struct ahash_request, __ctx);
}

static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
			 struct ahash_request *areq)
{
	rctx->flag = HASH_UPDATE;
}
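
/*
 * The two casts above walk the request nesting in opposite directions:
 * the sha1_hash_ctx lives in the __ctx area of the inner ahash_request,
 * which is the 'areq' member of the surrounding mcryptd_hash_request_ctx,
 * and that request context in turn sits in the __ctx area of the outer
 * ahash_request handed to mcryptd.  container_of() therefore reduces each
 * hop to a constant-offset pointer adjustment.
 */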
static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)
			(struct sha1_mb_mgr *state, struct job_sha1 *job);
static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
			(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
			(struct sha1_mb_mgr *state);

static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
				uint64_t total_len)
{
	uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA1_BLOCK_SIZE);
	padblock[i] = 0x80;

	i += ((SHA1_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA1_PADLENGTHFIELD_SIZE;

#if SHA1_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA1_LOG2_BLOCK_SIZE;
}
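
/*
 * A worked example of the padding arithmetic above (assuming the usual
 * 8-byte SHA-1 length field, i.e. SHA1_PADLENGTHFIELD_SIZE == 8): with
 * total_len == 70, the partial block holds 70 % 64 = 6 bytes, the 0x80
 * terminator lands at offset 6, zeros fill offsets 7..55, the big-endian
 * bit count 70 * 8 = 560 occupies bytes 56..63, and the function reports
 * one extra block to hash.
 */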
static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
						   struct sha1_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {

			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed.
			 * Copy remainder to extra blocks buffer.
			 */
			copy_len = len & (SHA1_BLOCK_SIZE-1);

			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA1_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA1_LOG2_BLOCK_SIZE;

			if (len) {
				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx = (struct sha1_hash_ctx *)
					sha1_job_mgr_submit(&mgr->mgr,
							    &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are
		 * either on the last block(s) or we need more
		 * user input before continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {

			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks =
				sha1_pad(buf, ctx->total_length);

			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha1_hash_ctx *)
				sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}
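
/*
 * sha1_ctx_mgr_resubmit() thus has three possible outcomes: it returns a
 * context marked HASH_CTX_STS_COMPLETE whose digest is ready, it returns
 * a context parked as HASH_CTX_STS_IDLE that needs more user data, or it
 * returns NULL because every job it touched is still being hashed in a
 * SIMD lane.
 */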
static struct sha1_hash_ctx
		*sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
{
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns a job, verify that it is safe to return to
	 * the user.
	 * If it is not ready, resubmit the job to finish processing.
	 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
	 * Otherwise, all jobs currently being managed by the hash_ctx_mgr
	 * still need processing.
	 */
	struct sha1_hash_ctx *ctx;

	ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr);
	return sha1_ctx_mgr_resubmit(mgr, ctx);
}

static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr)
{
	sha1_job_mgr_init(&mgr->mgr);
}

static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
						 struct sha1_hash_ctx *ctx,
						 const void *buffer,
						 uint32_t len,
						 int flags)
{
	if (flags & ~(HASH_UPDATE | HASH_LAST)) {
		/* User should not pass anything other than UPDATE or LAST */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_COMPLETE) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		return ctx;
	}

	/*
	 * If we made it here, there were no errors during this call to
	 * submit
	 */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/*
	 * Store the user's request flags and mark this ctx as currently
	 * being processed.
	 */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/*
	 * If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if (ctx->partial_block_buffer_length || len < SHA1_BLOCK_SIZE) {
		/*
		 * Compute how many bytes to copy from user buffer into
		 * extra block
		 */
		uint32_t copy_len = SHA1_BLOCK_SIZE -
					ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
				buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)
					((const char *)buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/*
		 * The extra block should never contain more than 1 block
		 * here
		 */
		assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);

		/*
		 * If the extra block buffer contains exactly 1 block, it can
		 * be hashed.
		 */
		if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (struct sha1_hash_ctx *)
				sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	return sha1_ctx_mgr_resubmit(mgr, ctx);
}
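
/*
 * Callers in this file always drive the context manager from within a
 * kernel_fpu_begin()/kernel_fpu_end() section, since submit and flush may
 * run the AVX2 job manager.  A condensed sketch of what sha_finish_walk()
 * below does when it is allowed to flush:
 *
 *	kernel_fpu_begin();
 *	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
 *				      rctx->walk.data, nbytes, HASH_UPDATE);
 *	if (!sha_ctx)
 *		sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
 *	kernel_fpu_end();
 *
 * A NULL result means the work is still in flight in some lane; a non-NULL
 * result is a context that either completed or is idle awaiting more data.
 */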
static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
{
	struct sha1_hash_ctx *ctx;

	while (1) {
		ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr);

		/* If flush returned 0, there are no more jobs in flight. */
		if (!ctx)
			return NULL;

		/*
		 * If flush returned a job, resubmit the job to finish
		 * processing.
		 */
		ctx = sha1_ctx_mgr_resubmit(mgr, ctx);

		/*
		 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
		 * returned. Otherwise, all jobs currently being managed by the
		 * sha1_ctx_mgr still need processing. Loop.
		 */
		if (ctx)
			return ctx;
	}
}

static int sha1_mb_init(struct ahash_request *areq)
{
	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);

	hash_ctx_init(sctx);
	sctx->job.result_digest[0] = SHA1_H0;
	sctx->job.result_digest[1] = SHA1_H1;
	sctx->job.result_digest[2] = SHA1_H2;
	sctx->job.result_digest[3] = SHA1_H3;
	sctx->job.result_digest[4] = SHA1_H4;
	sctx->total_length = 0;
	sctx->partial_block_buffer_length = 0;
	sctx->status = HASH_CTX_STS_IDLE;

	return 0;
}

static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
	int i;
	struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
	__be32 *dst = (__be32 *) rctx->out;

	for (i = 0; i < 5; ++i)
		dst[i] = cpu_to_be32(sctx->job.result_digest[i]);

	return 0;
}

static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			   struct mcryptd_alg_cstate *cstate, bool flush)
{
	int flag = HASH_UPDATE;
	int nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha1_hash_ctx *sha_ctx;

	/* more work ? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;
		}
		sha_ctx = (struct sha1_hash_ctx *)
					ahash_request_ctx(&rctx->areq);
		kernel_fpu_begin();
		sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
					      rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
			if (flush)
				sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
		}
		kernel_fpu_end();
		if (sha_ctx)
			rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		else {
			rctx = NULL;
			goto out;
		}
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha1_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}
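
/*
 * Note that sha_finish_walk() can come back with a different request than
 * the one it was called with: whenever the manager hands back a context,
 * it is whichever job happened to complete (or stall for input), so the
 * caller must re-derive rctx from the returned sha1_hash_ctx.  A NULL
 * *ret_rctx simply means the submitted work is still in flight and will
 * complete asynchronously.
 */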
static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
			    struct mcryptd_alg_cstate *cstate,
			    int err)
{
	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret;

	/* remove from work list */
	spin_lock(&cstate->work_lock);
	list_del(&rctx->waiter);
	spin_unlock(&cstate->work_lock);

	if (irqs_disabled())
		rctx->complete(&req->base, err);
	else {
		local_bh_disable();
		rctx->complete(&req->base, err);
		local_bh_enable();
	}

	/* check to see if there are other jobs that are done */
	sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
			spin_lock(&cstate->work_lock);
			list_del(&req_ctx->waiter);
			spin_unlock(&cstate->work_lock);

			req = cast_mcryptd_ctx_to_req(req_ctx);
			if (irqs_disabled())
				req_ctx->complete(&req->base, ret);
			else {
				local_bh_disable();
				req_ctx->complete(&req->base, ret);
				local_bh_enable();
			}
		}
		sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
	}

	return 0;
}

static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
			     struct mcryptd_alg_cstate *cstate)
{
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

	/* initialize tag */
	rctx->tag.arrival = jiffies;	/* tag the arrival time */
	rctx->tag.seq_num = cstate->next_seq_num++;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;

	spin_lock(&cstate->work_lock);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock(&cstate->work_lock);

	mcryptd_arm_flusher(cstate, delay);
}
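
/*
 * Each queued request gets a deadline of arrival + FLUSH_INTERVAL (1000
 * usec).  If no further submissions fill the remaining SIMD lanes by
 * then, the per-CPU flusher (sha1_mb_flusher() below) forces the
 * partially filled job manager through, so a lone request waits at most
 * on the order of one flush interval.
 */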
static int sha1_mb_update(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
	sha1_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
				      nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_finup(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);
	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
	sha1_mb_add_list(rctx, cstate);

	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
				      nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_final(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct sha1_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
	/* flag HASH_FINAL and 0 data size */
	sha1_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
				      HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_export(struct ahash_request *areq, void *out)
{
	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha1_mb_import(struct ahash_request *areq, const void *in)
{
	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_ahash *mcryptd_tfm;
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_hash_ctx *mctx;

	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
					  CRYPTO_ALG_INTERNAL,
					  CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);
	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
	mctx->alg_state = &sha1_mb_alg_state;
	ctx->mcryptd_tfm = mcryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 crypto_ahash_reqsize(&mcryptd_tfm->base));

	return 0;
}

static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 sizeof(struct sha1_hash_ctx));

	return 0;
}

static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}
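
/*
 * Two ahash algorithms are registered below.  "__sha1-mb" is marked
 * CRYPTO_ALG_INTERNAL and does the real per-request work; it is only
 * reachable through mcryptd.  The outer "sha1_mb" algorithm is what users
 * see: each of its callbacks copies the request, retargets it at the
 * per-tfm mcryptd instance of "__intel_sha1-mb", and lets mcryptd queue
 * the work on the submitting CPU.
 */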
static struct ahash_alg sha1_mb_areq_alg = {
	.init		= sha1_mb_init,
	.update		= sha1_mb_update,
	.final		= sha1_mb_final,
	.finup		= sha1_mb_finup,
	.export		= sha1_mb_export,
	.import		= sha1_mb_import,
	.halg		= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct sha1_hash_ctx),
		.base		= {
			.cra_name	 = "__sha1-mb",
			.cra_driver_name = "__intel_sha1-mb",
			.cra_priority	 = 100,
			/*
			 * Use the ASYNC flag, as some buffers in the
			 * multi-buffer algorithm may not have completed
			 * before the hashing thread sleeps.
			 */
			.cra_flags	 = CRYPTO_ALG_ASYNC |
					   CRYPTO_ALG_INTERNAL,
			.cra_blocksize	 = SHA1_BLOCK_SIZE,
			.cra_module	 = THIS_MODULE,
			.cra_list	 = LIST_HEAD_INIT
					(sha1_mb_areq_alg.halg.base.cra_list),
			.cra_init	 = sha1_mb_areq_init_tfm,
			.cra_exit	 = sha1_mb_areq_exit_tfm,
			.cra_ctxsize	 = sizeof(struct sha1_hash_ctx),
		}
	}
};
static int sha1_mb_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_init(mcryptd_req);
}

static int sha1_mb_async_update(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_update(mcryptd_req);
}

static int sha1_mb_async_finup(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_finup(mcryptd_req);
}

static int sha1_mb_async_final(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_final(mcryptd_req);
}

static int sha1_mb_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_digest(mcryptd_req);
}

static int sha1_mb_async_export(struct ahash_request *req, void *out)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_export(mcryptd_req, out);
}

static int sha1_mb_async_import(struct ahash_request *req, const void *in)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
	struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
	struct mcryptd_hash_request_ctx *rctx;
	struct ahash_request *areq;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	rctx = ahash_request_ctx(mcryptd_req);
	areq = &rctx->areq;

	ahash_request_set_tfm(areq, child);
	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req);

	return crypto_ahash_import(mcryptd_req, in);
}
static struct ahash_alg sha1_mb_async_alg = {
	.init		= sha1_mb_async_init,
	.update		= sha1_mb_async_update,
	.final		= sha1_mb_async_final,
	.finup		= sha1_mb_async_finup,
	.digest		= sha1_mb_async_digest,
	.export		= sha1_mb_async_export,
	.import		= sha1_mb_async_import,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct sha1_hash_ctx),
		.base = {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1_mb",
			/*
			 * Low priority, since with few concurrent hash requests
			 * this is extremely slow due to the flush delay.  Users
			 * whose workloads would benefit from this can request
			 * it explicitly by driver name, or can increase its
			 * priority at runtime using NETLINK_CRYPTO.
			 */
			.cra_priority		= 50,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_module		= THIS_MODULE,
			.cra_list		= LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
			.cra_init		= sha1_mb_async_init_tfm,
			.cra_exit		= sha1_mb_async_exit_tfm,
			.cra_ctxsize		= sizeof(struct sha1_mb_ctx),
			.cra_alignmask		= 0,
		},
	},
};
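
/*
 * Because "sha1_mb" is registered at low priority, the usual way to
 * exercise it is to request the driver by name.  A minimal user-space
 * sketch over AF_ALG (illustrative only, not part of this module; error
 * handling omitted):
 *
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_alg.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_alg sa = {
 *			.salg_family = AF_ALG,
 *			.salg_type   = "hash",
 *			.salg_name   = "sha1_mb",	// ask for this driver
 *		};
 *		unsigned char digest[20];
 *		int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *		int opfd;
 *
 *		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *		opfd = accept(tfmfd, NULL, 0);
 *		write(opfd, "abc", 3);
 *		read(opfd, digest, sizeof(digest));	// 20-byte SHA-1 of "abc"
 *		return 0;
 *	}
 */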
static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
	struct mcryptd_hash_request_ctx *rctx;
	unsigned long cur_time;
	unsigned long next_flush = 0;
	struct sha1_hash_ctx *sha_ctx;

	cur_time = jiffies;

	while (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				  struct mcryptd_hash_request_ctx, waiter);
		if (time_before(cur_time, rctx->tag.expire))
			break;
		kernel_fpu_begin();
		sha_ctx = (struct sha1_hash_ctx *)
					sha1_ctx_mgr_flush(cstate->mgr);
		kernel_fpu_end();
		if (!sha_ctx) {
			pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
			break;
		}
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		sha_finish_walk(&rctx, cstate, true);
		sha_complete_job(rctx, cstate, 0);
	}

	if (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				  struct mcryptd_hash_request_ctx, waiter);
		/* get the hash context and then flush time */
		next_flush = rctx->tag.expire;
		mcryptd_arm_flusher(cstate, get_delay(next_flush));
	}
	return next_flush;
}

static int __init sha1_mb_mod_init(void)
{
	int cpu;
	int err;
	struct mcryptd_alg_cstate *cpu_state;

	/* check for dependent cpu features */
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_BMI2))
		return -ENODEV;

	/* initialize multibuffer structures */
	sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);

	sha1_job_mgr_init = sha1_mb_mgr_init_avx2;
	sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2;
	sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2;
	sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2;

	if (!sha1_mb_alg_state.alg_cstate)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		cpu_state->next_flush = 0;
		cpu_state->next_seq_num = 0;
		cpu_state->flusher_engaged = false;
		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
		cpu_state->cpu = cpu;
		cpu_state->alg_state = &sha1_mb_alg_state;
		cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr),
					 GFP_KERNEL);
		if (!cpu_state->mgr)
			goto err2;
		sha1_ctx_mgr_init(cpu_state->mgr);
		INIT_LIST_HEAD(&cpu_state->work_list);
		spin_lock_init(&cpu_state->work_lock);
	}
	sha1_mb_alg_state.flusher = &sha1_mb_flusher;

	err = crypto_register_ahash(&sha1_mb_areq_alg);
	if (err)
		goto err2;
	err = crypto_register_ahash(&sha1_mb_async_alg);
	if (err)
		goto err1;

	return 0;
err1:
	crypto_unregister_ahash(&sha1_mb_areq_alg);
err2:
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha1_mb_alg_state.alg_cstate);
	return -ENODEV;
}

static void __exit sha1_mb_mod_fini(void)
{
	int cpu;
	struct mcryptd_alg_cstate *cpu_state;

	crypto_unregister_ahash(&sha1_mb_async_alg);
	crypto_unregister_ahash(&sha1_mb_areq_alg);
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha1_mb_alg_state.alg_cstate);
}

module_init(sha1_mb_mod_init);
module_exit(sha1_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS_CRYPTO("sha1");