/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == req)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (engine->cur_req_prepared &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err;
		}
		engine->cur_req_prepared = true;
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err;
	}
	ret = enginectx->op.do_one_request(engine, async_req);
	if (ret) {
		dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
		goto req_err;
	}
	return;

req_err:
	crypto_finalize_request(engine, async_req, ret);
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}
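
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * the per-request ops used above are discovered through the tfm context,
 * so a driver places a struct crypto_engine_ctx at the very start of its
 * own context and fills in the ops when the tfm is initialized:
 *
 *	struct my_cipher_ctx {
 *		struct crypto_engine_ctx enginectx;	// must be first
 *		// ... driver-private state ...
 *	};
 *
 *	static int my_cipher_init_tfm(struct crypto_skcipher *tfm)
 *	{
 *		struct my_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		ctx->enginectx.op.prepare_request = my_prepare_req;
 *		ctx->enginectx.op.unprepare_request = my_unprepare_req;
 *		ctx->enginectx.op.do_one_request = my_do_one_req;
 *		return 0;
 *	}
 */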

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 * @need_pump: whether to schedule the request pump after enqueueing
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request to the
 * engine queue and schedule the request pump
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_ablkcipher_request_to_engine - transfer one
 * ablkcipher_request to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 * TODO: Remove this function when skcipher conversion is finished
 */
int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
						 struct ablkcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
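
/*
 * Illustrative sketch (hypothetical driver code): a driver's .encrypt or
 * .decrypt entry point typically just hands the request over; the return
 * value of the enqueue (usually -EINPROGRESS, or -EBUSY for a backlogged
 * request) is propagated to the caller:
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct my_dev *dd = my_get_dev(req);
 *
 *		return crypto_transfer_skcipher_request_to_engine(dd->engine,
 *								  req);
 *	}
 */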

/**
 * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 * TODO: Remove this function when skcipher conversion is finished
 */
void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
					struct ablkcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
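
/*
 * Illustrative sketch (hypothetical driver code): when the hardware
 * signals completion, the driver hands the finished request back so the
 * engine can unprepare it, run its completion callback and pump the next
 * queued request. From a threaded interrupt handler, for example:
 *
 *	static irqreturn_t my_done_thread(int irq, void *data)
 *	{
 *		struct my_dev *dd = data;
 *
 *		crypto_finalize_skcipher_request(dd->engine, dd->req, 0);
 *		return IRQ_HANDLED;
 *	}
 */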

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, we need to
	 * wait for a while so that the pending requests can be pumped out of
	 * the engine queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether the request pump should run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
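
/*
 * Illustrative sketch (hypothetical driver code): a typical probe() pairs
 * the allocation with crypto_engine_start() so the request pump is live
 * before any algorithms are registered:
 *
 *	dd->engine = crypto_engine_alloc_init(dev, true);
 *	if (!dd->engine)
 *		return -ENOMEM;
 *
 *	ret = crypto_engine_start(dd->engine);
 *	if (ret)
 *		return ret;
 */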

/**
 * crypto_engine_exit - free the resources of hardware engine when exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
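
/*
 * Illustrative sketch (hypothetical driver code): remove() mirrors the
 * probe() sequence above; crypto_engine_exit() stops the engine, waiting
 * for queued requests to drain, before destroying the kworker:
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		crypto_unregister_skcipher(&my_alg);
 *		return crypto_engine_exit(dd->engine);
 *	}
 */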

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");