cc_request_mgr.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/nospec.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
#include "cc_pm.h"

#define CC_MAX_POLL_ITER 10
/* The highest descriptor count used by any request sequence */
#define CC_MAX_DESC_SEQ_LEN 23

struct cc_req_mgr_handle {
	/* Request manager resources */
	unsigned int hw_queue_size; /* HW capability */
	unsigned int min_free_hw_slots;
	unsigned int max_used_sw_slots;
	struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
	u32 req_queue_head;
	u32 req_queue_tail;
	u32 axi_completed;
	u32 q_free_slots;
	/* This lock protects access to the HW registers, which must be
	 * touched by a single request at a time
	 */
	spinlock_t hw_lock;
	struct cc_hw_desc compl_desc;
	u8 *dummy_comp_buff;
	dma_addr_t dummy_comp_buff_dma;

	/* backlog queue */
	struct list_head backlog;
	unsigned int bl_len;
	spinlock_t bl_lock; /* protect backlog queue */

#ifdef COMP_IN_WQ
	struct workqueue_struct *workq;
	struct delayed_work compwork;
#else
	struct tasklet_struct comptask;
#endif
};
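
/* A backlog queue entry: holds a copy of the caller's request and its
 * descriptor sequence until there is room for it in the HW queue.
 */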
struct cc_bl_item {
	struct cc_crypto_req creq;
	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
	unsigned int len;
	struct list_head list;
	bool notif;
};

static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
};

static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif
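
/* Map a CPP algorithm/slot pair to the IRR bit that flags an aborted
 * operation for that slot. Indices are sanitized against speculative
 * out-of-bounds access.
 */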
static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
{
	alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
	slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);

	return cc_cpp_int_masks[alg][slot];
}

void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!req_mgr_h)
		return; /* Not allocated */

	if (req_mgr_h->dummy_comp_buff_dma) {
		dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
				  req_mgr_h->dummy_comp_buff_dma);
	}

	dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
						req_mgr_h->min_free_hw_slots));
	dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);

#ifdef COMP_IN_WQ
	flush_workqueue(req_mgr_h->workq);
	destroy_workqueue(req_mgr_h->workq);
#else
	/* Kill tasklet */
	tasklet_kill(&req_mgr_h->comptask);
#endif
	kzfree(req_mgr_h);
	drvdata->request_mgr_handle = NULL;
}
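
/* Allocate and set up the request manager state: locks, the backlog list,
 * the completion workqueue or tasklet, the dummy completion DMA word and
 * the reusable completion descriptor that writes to it.
 */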
int cc_req_mgr_init(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;

	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
	if (!req_mgr_h) {
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	drvdata->request_mgr_handle = req_mgr_h;

	spin_lock_init(&req_mgr_h->hw_lock);
	spin_lock_init(&req_mgr_h->bl_lock);
	INIT_LIST_HEAD(&req_mgr_h->backlog);

#ifdef COMP_IN_WQ
	dev_dbg(dev, "Initializing completion workqueue\n");
	req_mgr_h->workq = create_singlethread_workqueue("ccree");
	if (!req_mgr_h->workq) {
		dev_err(dev, "Failed creating work queue\n");
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
#else
	dev_dbg(dev, "Initializing completion tasklet\n");
	tasklet_init(&req_mgr_h->comptask, comp_handler,
		     (unsigned long)drvdata);
#endif
	req_mgr_h->hw_queue_size = cc_ioread(drvdata,
					     CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
	dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
		dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
			req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
	req_mgr_h->max_used_sw_slots = 0;

	/* Allocate DMA word for "dummy" completion descriptor use */
	req_mgr_h->dummy_comp_buff =
		dma_alloc_coherent(dev, sizeof(u32),
				   &req_mgr_h->dummy_comp_buff_dma,
				   GFP_KERNEL);
	if (!req_mgr_h->dummy_comp_buff) {
		dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
			sizeof(u32));
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	/* Init. "dummy" completion descriptor */
	hw_desc_init(&req_mgr_h->compl_desc);
	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
		      sizeof(u32), NS_BIT, 1);
	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
	set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);

	return 0;

req_mgr_init_err:
	cc_req_mgr_fini(drvdata);
	return rc;
}
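
/* Copy a descriptor sequence into the HW queue by writing all six words of
 * each descriptor to the DSCRPTR_QUEUE_WORD0 register.
 */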
static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
			unsigned int seq_len)
{
	int i, w;
	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
	struct device *dev = drvdata_to_dev(drvdata);

	/*
	 * We do indeed write all 6 command words to the same
	 * register. The HW supports this.
	 */
	for (i = 0; i < seq_len; i++) {
		for (w = 0; w <= 5; w++)
			writel_relaxed(seq[i].word[w], reg);

		if (cc_dump_desc)
			dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
				i, seq[i].word[0], seq[i].word[1],
				seq[i].word[2], seq[i].word[3],
				seq[i].word[4], seq[i].word[5]);
	}
}

/*!
 * Completion will take place if and only if user requested completion
 * by cc_send_sync_request().
 *
 * \param dev
 * \param dx_compl_h The completion event to signal
 */
static void request_mgr_complete(struct device *dev, void *dx_compl_h,
				 int dummy)
{
	struct completion *this_compl = dx_compl_h;

	complete(this_compl);
}
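
/* Check whether there is room for a request: the SW queue must have a free
 * slot, and the HW queue must have at least total_seq_len free slots. The
 * HW queue content register is polled up to CC_MAX_POLL_ITER times before
 * giving up with -ENOSPC.
 */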
static int cc_queues_status(struct cc_drvdata *drvdata,
			    struct cc_req_mgr_handle *req_mgr_h,
			    unsigned int total_seq_len)
{
	unsigned long poll_queue;
	struct device *dev = drvdata_to_dev(drvdata);

	/* SW queue is checked only once as it will not
	 * be changed during the poll because the spinlock_bh
	 * is held by the thread
	 */
	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
	    req_mgr_h->req_queue_tail) {
		dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
			req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
		return -ENOSPC;
	}

	if (req_mgr_h->q_free_slots >= total_seq_len)
		return 0;

	/* Wait for space in HW queue. Poll constant num of iterations. */
	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER ; poll_queue++) {
		req_mgr_h->q_free_slots =
			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
		if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;

		if (req_mgr_h->q_free_slots >= total_seq_len) {
			/* If there is enough room, return */
			return 0;
		}

		dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
			req_mgr_h->q_free_slots, total_seq_len);
	}
	/* No room in the HW queue; try again later */
	dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
		req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
		req_mgr_h->q_free_slots, total_seq_len);
	return -ENOSPC;
}

/*!
 * Enqueue caller request to crypto hardware.
 * Needs to be called with the HW lock held and PM running.
 *
 * \param drvdata
 * \param cc_req The request to enqueue
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 * \param add_comp If "true": add an artificial dout DMA to mark completion
 *
 * \return int Returns -EINPROGRESS or error code
 */
static int cc_do_send_request(struct cc_drvdata *drvdata,
			      struct cc_crypto_req *cc_req,
			      struct cc_hw_desc *desc, unsigned int len,
			      bool add_comp)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int used_sw_slots;
	unsigned int total_seq_len = len; /* initial sequence length */
	struct device *dev = drvdata_to_dev(drvdata);

	used_sw_slots = ((req_mgr_h->req_queue_head -
			  req_mgr_h->req_queue_tail) &
			 (MAX_REQUEST_QUEUE_SIZE - 1));
	if (used_sw_slots > req_mgr_h->max_used_sw_slots)
		req_mgr_h->max_used_sw_slots = used_sw_slots;

	/* Enqueue request - must be locked with HW lock */
	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
				    (MAX_REQUEST_QUEUE_SIZE - 1);
	/* TODO: Use circ_buf.h ? */

	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);

	/*
	 * We are about to push a command to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes.
	 */
	wmb();

	/* STAT_PHASE_4: Push sequence */
	enqueue_seq(drvdata, desc, len);

	if (add_comp) {
		enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
		total_seq_len++;
	}

	if (req_mgr_h->q_free_slots < total_seq_len) {
		/* This situation should never occur. It may indicate a problem
		 * with resuming power. Set the free slot count to 0 and hope
		 * for the best.
		 */
		dev_err(dev, "HW free slot count mismatch.");
		req_mgr_h->q_free_slots = 0;
	} else {
		/* Update the free slots in HW queue */
		req_mgr_h->q_free_slots -= total_seq_len;
	}

	/* Operation still in process */
	return -EINPROGRESS;
}
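
/* Append a backlog item to the backlog list under bl_lock and kick the
 * completion tasklet so cc_proc_backlog() will try to push it to the HW.
 */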
static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
			       struct cc_bl_item *bli)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	spin_lock_bh(&mgr->bl_lock);
	list_add_tail(&bli->list, &mgr->backlog);
	++mgr->bl_len;
	dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
	spin_unlock_bh(&mgr->bl_lock);

	tasklet_schedule(&mgr->comptask);
}
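
/* Drain the backlog: notify each request once with -EINPROGRESS as it
 * leaves the backlog, then push it to the HW queue when room is available.
 * Processing stops as soon as the HW queue is full and resumes on the next
 * completion interrupt.
 */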
static void cc_proc_backlog(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct cc_bl_item *bli;
	struct cc_crypto_req *creq;
	void *req;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc;

	spin_lock(&mgr->bl_lock);

	while (mgr->bl_len) {
		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
		dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);

		spin_unlock(&mgr->bl_lock);

		creq = &bli->creq;
		req = creq->user_arg;

		/*
		 * Notify the request we're moving out of the backlog,
		 * but only if we haven't done so already.
		 */
		if (!bli->notif) {
			creq->user_cb(dev, req, -EINPROGRESS);
			bli->notif = true;
		}

		spin_lock(&mgr->hw_lock);

		rc = cc_queues_status(drvdata, mgr, bli->len);
		if (rc) {
			/*
			 * There is still no room in the FIFO for
			 * this request. Bail out. We'll return here
			 * on the next completion irq.
			 */
			spin_unlock(&mgr->hw_lock);
			return;
		}

		rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
					bli->len, false);
		spin_unlock(&mgr->hw_lock);

		if (rc != -EINPROGRESS) {
			cc_pm_put_suspend(dev);
			creq->user_cb(dev, req, rc);
		}

		/* Remove ourselves from the backlog list */
		spin_lock(&mgr->bl_lock);
		list_del(&bli->list);
		--mgr->bl_len;
		kfree(bli);
	}

	spin_unlock(&mgr->bl_lock);
}
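
/* Asynchronous send path: take a PM reference, check queue status and push
 * the sequence. If the queues are full and the request allows backlogging
 * (CRYPTO_TFM_REQ_MAY_BACKLOG), queue it on the backlog and return -EBUSY;
 * otherwise return -EINPROGRESS or an error code.
 */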
int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
		    struct cc_hw_desc *desc, unsigned int len,
		    struct crypto_async_request *req)
{
	int rc;
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
	gfp_t flags = cc_gfp_flags(req);
	struct cc_bl_item *bli;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
		return rc;
	}

	spin_lock_bh(&mgr->hw_lock);
	rc = cc_queues_status(drvdata, mgr, len);

#ifdef CC_DEBUG_FORCE_BACKLOG
	if (backlog_ok)
		rc = -ENOSPC;
#endif /* CC_DEBUG_FORCE_BACKLOG */

	if (rc == -ENOSPC && backlog_ok) {
		spin_unlock_bh(&mgr->hw_lock);

		bli = kmalloc(sizeof(*bli), flags);
		if (!bli) {
			cc_pm_put_suspend(dev);
			return -ENOMEM;
		}

		memcpy(&bli->creq, cc_req, sizeof(*cc_req));
		memcpy(&bli->desc, desc, len * sizeof(*desc));
		bli->len = len;
		bli->notif = false;
		cc_enqueue_backlog(drvdata, bli);
		return -EBUSY;
	}

	if (!rc)
		rc = cc_do_send_request(drvdata, cc_req, desc, len, false);

	spin_unlock_bh(&mgr->hw_lock);
	return rc;
}
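
/* Synchronous send path: use cc_req->seq_compl as the completion signalled
 * by request_mgr_complete(), wait for room for the sequence plus the dummy
 * completion descriptor, push it and block until the completion fires.
 */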
int cc_send_sync_request(struct cc_drvdata *drvdata,
			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
			 unsigned int len)
{
	int rc;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;

	init_completion(&cc_req->seq_compl);
	cc_req->user_cb = request_mgr_complete;
	cc_req->user_arg = &cc_req->seq_compl;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
		return rc;
	}

	while (true) {
		spin_lock_bh(&mgr->hw_lock);
		rc = cc_queues_status(drvdata, mgr, len + 1);

		if (!rc)
			break;

		spin_unlock_bh(&mgr->hw_lock);
		if (rc != -EAGAIN) {
			cc_pm_put_suspend(dev);
			return rc;
		}
		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
		reinit_completion(&drvdata->hw_queue_avail);
	}

	rc = cc_do_send_request(drvdata, cc_req, desc, len, true);
	spin_unlock_bh(&mgr->hw_lock);

	if (rc != -EINPROGRESS) {
		cc_pm_put_suspend(dev);
		return rc;
	}

	wait_for_completion(&cc_req->seq_compl);
	return 0;
}

/*!
 * Enqueue caller request to crypto hardware during init process.
 * Assumes this function is not called in the middle of a flow,
 * since we set the QUEUE_LAST_IND flag in the last descriptor.
 *
 * \param drvdata
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 *
 * \return int Returns "0" upon success
 */
int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
		      unsigned int len)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int total_seq_len = len; /* initial sequence length */
	int rc = 0;

	/* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT.
	 */
	rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
	if (rc)
		return rc;

	set_queue_last_ind(drvdata, &desc[(len - 1)]);

	/*
	 * We are about to push a command to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes.
	 */
	wmb();
	enqueue_seq(drvdata, desc, len);

	/* Update the free slots in HW queue */
	req_mgr_h->q_free_slots =
		cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));

	return 0;
}
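
/* Wake anyone waiting in cc_send_sync_request() on hw_queue_avail and defer
 * completion processing to the completion workqueue or tasklet.
 */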
void complete_request(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	complete(&drvdata->hw_queue_avail);
#ifdef COMP_IN_WQ
	queue_delayed_work(request_mgr_handle->workq,
			   &request_mgr_handle->compwork, 0);
#else
	tasklet_schedule(&request_mgr_handle->comptask);
#endif
}

#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work)
{
	struct cc_drvdata *drvdata =
		container_of(work, struct cc_drvdata, compwork.work);

	comp_handler((unsigned long)drvdata);
}
#endif
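
/* Dequeue completed requests from the SW queue and invoke their callbacks.
 * For CPP requests, report -EPERM if the matching "operation aborted" IRQ
 * bit is set. Each completion drops the PM reference taken at submit time.
 */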
static void proc_completions(struct cc_drvdata *drvdata)
{
	struct cc_crypto_req *cc_req;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	unsigned int *tail = &request_mgr_handle->req_queue_tail;
	unsigned int *head = &request_mgr_handle->req_queue_head;
	int rc;
	u32 mask;

	while (request_mgr_handle->axi_completed) {
		request_mgr_handle->axi_completed--;

		/* Dequeue request */
		if (*head == *tail) {
			/* We are supposed to handle a completion but our
			 * queue is empty. This is not normal. Return and
			 * hope for the best.
			 */
			dev_err(dev, "Request queue is empty head == tail %u\n",
				*head);
			break;
		}

		cc_req = &request_mgr_handle->req_queue[*tail];

		if (cc_req->cpp.is_cpp) {
			dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
				cc_req->cpp.slot, cc_req->cpp.alg);
			mask = cc_cpp_int_mask(cc_req->cpp.alg,
					       cc_req->cpp.slot);
			rc = (drvdata->irq & mask ? -EPERM : 0);
			dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
				drvdata->irq, rc);
		} else {
			dev_dbg(dev, "Non-CPP request completion\n");
			rc = 0;
		}

		if (cc_req->user_cb)
			cc_req->user_cb(dev, cc_req->user_arg, rc);
		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
		dev_dbg(dev, "Request completed. axi_completed=%d\n",
			request_mgr_handle->axi_completed);
		cc_pm_put_suspend(dev);
	}
}
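
/* Read the completion counter value from the AXI monitor register. */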
static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
{
	return FIELD_GET(AXIM_MON_COMP_VALUE,
			 cc_ioread(drvdata, drvdata->axim_mon_offset));
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void comp_handler(unsigned long devarg)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	u32 irq;

	dev_dbg(dev, "Completion handler called!\n");
	irq = (drvdata->irq & drvdata->comp_mask);

	/* To avoid the interrupt firing again as soon as we unmask it,
	 * clear it now
	 */
	cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);

	/* Avoid race with above clear: Test completion counter once more */
	request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);

	dev_dbg(dev, "AXI completion after update: %d\n",
		request_mgr_handle->axi_completed);

	while (request_mgr_handle->axi_completed) {
		do {
			drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
			irq = (drvdata->irq & drvdata->comp_mask);
			proc_completions(drvdata);

			/* At this point (after proc_completions()),
			 * request_mgr_handle->axi_completed is 0.
			 */
			request_mgr_handle->axi_completed +=
						cc_axi_comp_count(drvdata);
		} while (request_mgr_handle->axi_completed > 0);

		cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);

		request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
	}

	/* After verifying that there is nothing to do,
	 * unmask AXI completion interrupt
	 */
	cc_iowrite(drvdata, CC_REG(HOST_IMR),
		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);

	cc_proc_backlog(drvdata);

	dev_dbg(dev, "Comp. handler done.\n");
}