crypto_engine.c

/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include "internal.h"
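
/* Depth of the software request queue passed to crypto_init_queue() below */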
#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct ahash_request *hreq;
	struct ablkcipher_request *breq;
	unsigned long flags;
	bool was_busy = false;
	int ret, rtype;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(&engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			pr_err("failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			pr_err("failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		if (engine->prepare_hash_request) {
			ret = engine->prepare_hash_request(engine, hreq);
			if (ret) {
				pr_err("failed to prepare request: %d\n", ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->hash_one_request(engine, hreq);
		if (ret) {
			pr_err("failed to hash one request from queue\n");
			goto req_err;
		}
		return;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		if (engine->prepare_cipher_request) {
			ret = engine->prepare_cipher_request(engine, breq);
			if (ret) {
				pr_err("failed to prepare request: %d\n", ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->cipher_one_request(engine, breq);
		if (ret) {
			pr_err("failed to cipher one request from queue\n");
			goto req_err;
		}
		return;
	default:
		pr_err("failed to prepare request of unknown type\n");
		return;
	}

req_err:
	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		crypto_finalize_hash_request(engine, hreq, ret);
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		crypto_finalize_cipher_request(engine, breq, ret);
		break;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}
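
/**
 * crypto_pump_work - kthread worker callback that runs the request pump
 * @work: the pump_requests work item embedded in the crypto engine
 *
 * Simply calls crypto_pump_requests() with in_kthread set, so that hardware
 * teardown is permitted from this context.
 */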
static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_cipher_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: if true, kick the request pump after queueing (when the
 * engine is not already busy)
 */
int crypto_transfer_cipher_request(struct crypto_engine *engine,
				   struct ablkcipher_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ablkcipher_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(&engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);

/**
 * crypto_transfer_cipher_request_to_engine - transfer one request into the
 * engine queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
					     struct ablkcipher_request *req)
{
	return crypto_transfer_cipher_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);

/**
 * crypto_transfer_hash_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: if true, kick the request pump after queueing (when the
 * engine is not already busy)
 */
int crypto_transfer_hash_request(struct crypto_engine *engine,
				 struct ahash_request *req, bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ahash_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(&engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);

/**
 * crypto_transfer_hash_request_to_engine - transfer one request into the
 * engine queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_hash_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
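
/*
 * Typical driver flow (illustrative sketch only -- my_dev, my_dev_from_req(),
 * my_aes_encrypt() and my_aes_done_irq() are hypothetical names, not part of
 * this file): the algorithm's encrypt/digest hook queues the request on the
 * engine, the engine pump later invokes the driver's *_one_request() callback
 * from its kthread, and the driver's completion path (e.g. an IRQ handler)
 * hands the request back with crypto_finalize_*_request().
 *
 *	static int my_aes_encrypt(struct ablkcipher_request *req)
 *	{
 *		struct my_dev *dd = my_dev_from_req(req);
 *
 *		return crypto_transfer_cipher_request_to_engine(dd->engine,
 *								req);
 *	}
 *
 *	static irqreturn_t my_aes_done_irq(int irq, void *data)
 *	{
 *		struct my_dev *dd = data;
 *
 *		crypto_finalize_cipher_request(dd->engine, dd->req, 0);
 *		return IRQ_HANDLED;
 *	}
 */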

/**
 * crypto_finalize_cipher_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_cipher_request(struct crypto_engine *engine,
				    struct ablkcipher_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_cipher_request) {
			ret = engine->unprepare_cipher_request(engine, req);
			if (ret)
				pr_err("failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);

/**
 * crypto_finalize_hash_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_hash_request) {
			ret = engine->unprepare_hash_request(engine, req);
			if (ret)
				pr_err("failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(&engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait
	 * (up to 500 * 20ms, i.e. roughly 10 seconds) for the queued
	 * requests to be pumped and completed.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		pr_warn("could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	kthread_init_worker(&engine->kworker);
	engine->kworker_task = kthread_run(kthread_worker_fn,
					   &engine->kworker, "%s",
					   engine->name);
	if (IS_ERR(engine->kworker_task)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
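
/*
 * Typical probe-time setup (illustrative sketch only -- dd and the my_*
 * callbacks are hypothetical names, not defined in this file):
 *
 *	dd->engine = crypto_engine_alloc_init(dev, true);
 *	if (!dd->engine)
 *		return -ENOMEM;
 *
 *	dd->engine->prepare_cipher_request = my_prepare_req;
 *	dd->engine->cipher_one_request = my_run_req;
 *
 *	ret = crypto_engine_start(dd->engine);
 *	if (ret)
 *		return ret;
 *
 * The matching teardown path calls crypto_engine_exit(dd->engine), which
 * stops the engine and its pump kthread (see below).
 */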

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_flush_worker(&engine->kworker);
	kthread_stop(engine->kworker_task);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");