queue.c

/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
#include <linux/delay.h>
#endif
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <mt-plat/mtk_io_boost.h>

#include "mtk_mmc_block.h"
#include "queue.h"
#include "block.h"
#include "core.h"
#include "crypto.h"
#include "card.h"
#include "mmc_crypto.h"
#include "cqhci-crypto.h"

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}

#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
static void mmc_queue_softirq_done(struct request *req)
{
	blk_end_request_all(req, 0);
}
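
/*
 * Check whether the software command queue has room for @req. Non-RT
 * requests are compared against a reduced depth, which effectively
 * reserves EMMC_MIN_RT_CLASS_TAG_COUNT tags for RT-class I/O.
 */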
int mmc_is_cmdq_full(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host;
	int cnt, rt;
	u8 cmp_depth;

	host = mq->card->host;
	rt = IS_RT_CLASS_REQ(req);

	cnt = atomic_read(&host->areq_cnt);
	cmp_depth = host->card->ext_csd.cmdq_depth;
	if (!rt &&
	    cmp_depth > EMMC_MIN_RT_CLASS_TAG_COUNT)
		cmp_depth -= EMMC_MIN_RT_CLASS_TAG_COUNT;

	if (cnt >= cmp_depth)
		return 1;

	return 0;
}
#endif

#ifdef CONFIG_MTK_EMMC_HW_CQ
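/*
 * Peek at the head of the block queue under queue_lock without starting
 * the request; the result is cached in mq->cmdq_req_peeked for the CMDQ
 * dispatcher thread.
 */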
static struct request *mmc_peek_request(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	mq->cmdq_req_peeked = NULL;

	spin_lock_irq(q->queue_lock);
	if (!blk_queue_stopped(q))
		mq->cmdq_req_peeked = blk_peek_request(q);
	spin_unlock_irq(q->queue_lock);

	return mq->cmdq_req_peeked;
}

static bool mmc_check_blk_queue_start_tag(struct request_queue *q,
					  struct request *req)
{
	int ret;

	spin_lock_irq(q->queue_lock);
	ret = blk_queue_start_tag(q, req);
	spin_unlock_irq(q->queue_lock);

	return !!ret;
}
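
/*
 * Returns true when the peeked request may be dispatched: the CMDQ
 * context is not in an error state and blk_queue_start_tag() succeeded,
 * i.e. a free tag was assigned to the request.
 */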
static bool mmc_check_blk_queue_start(struct mmc_cmdq_context_info *ctx,
				      struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	if (!test_bit(CMDQ_STATE_ERR, &ctx->curr_state)
	    && !mmc_check_blk_queue_start_tag(q, mq->cmdq_req_peeked))
		return true;

	return false;
}

static inline void mmc_cmdq_ready_wait(struct mmc_host *host,
				       struct mmc_queue *mq)
{
	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;

	/*
	 * Wait until all of the following conditions are true:
	 * 1. There is a request pending in the block layer queue
	 *    to be processed.
	 * 2. If the peeked request is flush/discard then there shouldn't
	 *    be any other direct command active.
	 * 3. cmdq state should be unhalted.
	 * 4. cmdq state shouldn't be in error state.
	 * 5. free tag available to process the new request.
	 */
	wait_event(ctx->wait, kthread_should_stop()
		|| (!test_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx->curr_state)
		&& mmc_peek_request(mq)
		&& ((!(!host->card->part_curr && !mmc_card_suspended(host->card)
		&& mmc_host_halt(host))
		&& !(!host->card->part_curr && mmc_host_cq_disable(host) &&
		!mmc_card_suspended(host->card)))
		|| (host->claimed && host->claimer != current))
		&& mmc_check_blk_queue_start(ctx, mq)));
}
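
/*
 * Dispatcher thread for the hardware command queue. It runs as a
 * SCHED_FIFO task, blocks in mmc_cmdq_ready_wait() until a request can
 * be dispatched and then issues it through cmdq_issue_fn() while
 * holding the error rwsem.
 */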
static int mmc_cmdq_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	struct sched_param scheduler_params = {0};

	scheduler_params.sched_priority = 1;
	sched_setscheduler(current, SCHED_FIFO, &scheduler_params);

	current->flags |= PF_MEMALLOC;
	mt_bio_queue_alloc(current, NULL);
	while (1) {
		int ret = 0;

		mmc_cmdq_ready_wait(host, mq);
		if (kthread_should_stop())
			break;

		mt_biolog_cqhci_check();
		ret = mmc_cmdq_down_rwsem(host, mq->cmdq_req_peeked);
		if (ret) {
			mmc_cmdq_up_rwsem(host);
			continue;
		}
		ret = mq->cmdq_issue_fn(mq, mq->cmdq_req_peeked);
		mmc_cmdq_up_rwsem(host);

		/*
		 * Don't requeue if issue_fn fails, just bug on.
		 * We don't expect failure here and there is no recovery other
		 * than fixing the actual issue if there is any.
		 * Also we end the request if there is a partition switch
		 * error, so we should not requeue the request here.
		 */
	} /* loop */

	mt_bio_queue_free(current);

	return 0;
}
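
/*
 * request_fn for the hardware CMDQ path. The block layer calls this with
 * queue_lock held, so it only wakes the dispatcher thread, which then
 * peeks and starts requests on its own.
 */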
static void mmc_cmdq_dispatch_req(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;

	wake_up(&mq->card->host->cmdq_ctx.wait);
}
#endif
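
/*
 * mmcqd: fetches requests from the block layer queue and hands them to
 * mmc_blk_issue_rq(). With software CMDQ enabled it only fetches while
 * the device queue has a free slot, otherwise it backs off for up to one
 * second before trying again.
 */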
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;
	struct sched_param scheduler_params = {0};
#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
	int cmdq_full = 0;
	unsigned int tmo;
#endif
	bool part_cmdq_en = false;

	scheduler_params.sched_priority = 1;
	sched_setscheduler(current, SCHED_FIFO, &scheduler_params);

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	mt_bio_queue_alloc(current, q);
#if defined(CONFIG_MTK_IO_BOOST)
	mtk_iobst_register_tid(current->pid);
#endif
	do {
		struct request *req;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
		req = blk_peek_request(q);
		if (!req)
			goto fetch_done;
		part_cmdq_en = mmc_blk_part_cmdq_en(mq);
		if (part_cmdq_en && mmc_is_cmdq_full(mq, req)) {
			req = NULL;
			cmdq_full = 1;
			goto fetch_done;
		}
#endif
		req = blk_fetch_request(q);
#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
fetch_done:
#endif
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (atomic_read(&mq->qcnt))
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		spin_unlock_irq(q->queue_lock);

		if (req || (!part_cmdq_en && atomic_read(&mq->qcnt))) {
			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
			if (!cmdq_full) {
				/* no request */
				up(&mq->thread_sem);
				schedule();
				down(&mq->thread_sem);
			} else {
				/* queue full */
				cmdq_full = 0;
				/* wait when queue full */
				tmo = schedule_timeout(HZ);
				if (!tmo)
					pr_info("%s:sched_tmo,areq_cnt=%d\n",
						__func__,
						atomic_read(&mq->card->host->areq_cnt));
			}
#else
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
#endif
		}
	} while (1);
	mt_bio_queue_free(current);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, BLK_STS_IOERR);
		}
		return;
	}

#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
	/* just wake up thread for cmdq */
	if (mmc_blk_part_cmdq_en(mq)) {
		wake_up_process(mq->thread);
		return;
	}
#endif

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = SECTOR_SIZE;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

/**
 * mmc_init_request() - initialize the MMC-specific per-request data
 * @q: the request queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int mmc_init_request(struct request_queue *q, struct request *req,
			    gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_queue *mq;
	struct mmc_card *card;
	struct mmc_host *host;

	/* Check queuedata to avoid the following race:
	 * STEP 1: removing the SD card calls mmc_cleanup_queue(), which
	 * takes queue_lock, sets queuedata = NULL and drops queue_lock;
	 * STEP 2: generic_make_request() calls blk_queue_bio(), which
	 * takes queue_lock and then calls get_request(), mempool_alloc(),
	 * alloc_request_size() and mmc_init_request() while queuedata is
	 * already NULL;
	 * STEP 3: NULL pointer dereference.
	 */
	if (q->queuedata)
		mq = q->queuedata;
	else
		return -ENODEV;

	card = mq->card;
	host = card->host;

#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
	/* cmdq uses a preallocated sg buffer */
	if (mmc_blk_part_cmdq_en(mq))
		return 0;
#endif

	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
	/* cmdq uses a preallocated sg buffer */
	if (q->queuedata &&
	    mmc_blk_part_cmdq_en(q->queuedata))
		return;
#endif

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

#ifdef CONFIG_MTK_EMMC_HW_CQ
/*
 * mmc_cmdq_setup_queue
 * @mq: mmc queue
 * @card: card to attach to this queue
 *
 * Setup queue for a CMDQ supporting MMC card
 */
void mmc_cmdq_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	u64 limit = BLK_BOUNCE_HIGH;
	struct mmc_host *host = card->host;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count,
						host->max_req_size / 512));
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
	blk_queue_max_segments(mq->queue, host->max_segs);
}
#endif

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 * @area_type: eMMC area type for cmdq use
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname, int area_type)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret = -ENOMEM;
#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
	int i;
#endif

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;

#if defined(CONFIG_MTK_EMMC_CQ_SUPPORT) || defined(CONFIG_MTK_EMMC_HW_CQ)
	if (card->ext_csd.cmdq_support &&
	    (area_type == MMC_BLK_DATA_AREA_MAIN)) {
#ifdef CONFIG_MTK_EMMC_HW_CQ
		/* for cqe */
		if (host->caps2 & MMC_CAP2_CQE) {
			pr_notice("%s: init cqhci\n", mmc_hostname(host));
			mq->queue = blk_alloc_queue(GFP_KERNEL);
			if (!mq->queue)
				return -ENOMEM;
			mq->queue->queue_lock = lock;
			mq->queue->request_fn = mmc_cmdq_dispatch_req;
			mq->queue->cmd_size = sizeof(struct mmc_queue_req);
			mq->queue->queuedata = mq;
			ret = blk_init_allocated_queue(mq->queue);
			if (ret) {
				blk_cleanup_queue(mq->queue);
				return ret;
			}

			mmc_cmdq_setup_queue(mq, card);
			ret = mmc_cmdq_init(mq, card);
			if (ret) {
				pr_notice("%s: %d: cmdq: unable to set-up\n",
					  mmc_hostname(host), ret);
				blk_cleanup_queue(mq->queue);
			} else {
				sema_init(&mq->thread_sem, 1);
				/* hook for pm qos cmdq init */
				if (card->host->cmdq_ops->init)
					card->host->cmdq_ops->init(host);
				mq->thread = kthread_run(mmc_cmdq_thread, mq,
							 "mmc-cmdqd/%d%s",
							 host->index,
							 subname ? subname : "");
				if (IS_ERR(mq->thread)) {
					pr_notice("%s: %d: cmdq: failed to start mmc-cmdqd thread\n",
						  mmc_hostname(host), ret);
					ret = PTR_ERR(mq->thread);
				}
				/* inline crypto */
				mmc_crypto_setup_queue(host, mq->queue);
				return ret;
			}
		}
#endif
#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
		if (!(host->caps2 & MMC_CAP2_CQE)) {
			pr_notice("%s: init cq\n", mmc_hostname(host));
			atomic_set(&host->cq_rw, false);
			atomic_set(&host->cq_w, false);
			atomic_set(&host->cq_wait_rdy, 0);
			host->wp_error = 0;
			host->task_id_index = 0;
			atomic_set(&host->is_data_dma, 0);
			host->cur_rw_task = CQ_TASK_IDLE;
			atomic_set(&host->cq_tuning_now, 0);
			for (i = 0; i < EMMC_MAX_QUEUE_DEPTH; i++) {
				host->data_mrq_queued[i] = false;
				atomic_set(&mq->mqrq[i].index, 0);
			}

			host->cmdq_thread = kthread_run(mmc_run_queue_thread,
							host,
							"exe_cq/%d", host->index);
			if (IS_ERR(host->cmdq_thread)) {
				pr_notice("%s: cmdq: failed to start exe_cq thread\n",
					  mmc_hostname(host));
			}
		}
#endif
	}
#endif
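
	/*
	 * Software CMDQ and non-CMDQ cases fall through to here: a
	 * conventional request_fn based queue is set up and served by the
	 * mmcqd thread started below.
	 */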
	mq->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mq->queue)
		return -ENOMEM;
	mq->queue->queue_lock = lock;
	mq->queue->request_fn = mmc_request_fn;
	mq->queue->init_rq_fn = mmc_init_request;
	mq->queue->exit_rq_fn = mmc_exit_request;
	mq->queue->cmd_size = sizeof(struct mmc_queue_req);
	mq->queue->queuedata = mq;
	mq->queue->backing_dev_info->ra_pages = 128;
	atomic_set(&mq->qcnt, 0);
	ret = blk_init_allocated_queue(mq->queue);
	if (ret) {
		blk_cleanup_queue(mq->queue);
		return ret;
	}

#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
	if (mmc_card_mmc(card)) {
		for (i = 0; i < card->ext_csd.cmdq_depth; i++)
			atomic_set(&mq->mqrq[i].index, 0);
	}
#endif

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
	blk_queue_softirq_done(mq->queue, mmc_queue_softirq_done);
#endif

	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	blk_queue_max_segments(mq->queue, host->max_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
	if (mmc_card_mmc(card)) {
		for (i = 0; i < card->ext_csd.cmdq_depth; i++) {
			mq->mqrq[i].sg = mmc_alloc_sg(host->max_segs,
						      GFP_KERNEL);
			if (!mq->mqrq[i].sg)
				goto cleanup_queue;
		}
	}
#endif

	sema_init(&mq->thread_sem, 1);

	/* sw-cqhci inline crypto */
	mmc_crypto_setup_queue(host, mq->queue);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
				 host->index, subname ? subname : "");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
#ifdef CONFIG_MTK_EMMC_CQ_SUPPORT
	if (mmc_card_mmc(card)) {
		for (i = 0; i < card->ext_csd.cmdq_depth; i++) {
			kfree(mq->mqrq[i].sg);
			mq->mqrq[i].sg = NULL;
		}
	}
#endif
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (likely(!blk_queue_dead(q)))
		blk_cleanup_queue(q);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

#ifdef CONFIG_MTK_EMMC_HW_CQ
static void mmc_cmdq_softirq_done(struct request *rq)
{
	struct mmc_queue *mq = rq->q->queuedata;

	mq->cmdq_complete_fn(rq);
}

static void mmc_cmdq_error_work(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    cmdq_err_work);

	mq->cmdq_error_fn(mq);
}

enum blk_eh_timer_return mmc_cmdq_rq_timed_out(struct request *req)
{
	struct mmc_queue *mq = req->q->queuedata;

	pr_notice("%s: request with tag: %d flags: 0x%x timed out\n",
		  mmc_hostname(mq->card->host), req->tag, req->cmd_flags);

	return mq->cmdq_req_timed_out(req);
}
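
/*
 * Per-queue CMDQ initialisation: allocate one mmc_queue_req with an sg
 * table for each data slot (queue depth minus the slot reserved for
 * DCMD), set up a FIFO tag map, softirq completion, the error worker and
 * a 120 second request timeout.
 */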
int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card)
{
	int i, ret = 0;
	/* one slot is reserved for dcmd requests */
	int q_depth = card->ext_csd.cmdq_depth - 1;

	card->cqe_init = false;
	if (!(card->host->caps2 & MMC_CAP2_CQE)) {
		ret = -ENOTSUPP;
		goto out;
	}

	init_waitqueue_head(&card->host->cmdq_ctx.queue_empty_wq);
	init_waitqueue_head(&card->host->cmdq_ctx.wait);
	init_rwsem(&card->host->cmdq_ctx.err_rwsem);

	mq->mqrq_cmdq = kcalloc(q_depth,
				sizeof(struct mmc_queue_req), GFP_KERNEL);
	if (!mq->mqrq_cmdq) {
		/* mark for check patch */
		/* pr_notice("%s: unable to alloc mqrq's for q_depth %d\n",
		 *	mmc_card_name(card), q_depth);
		 */
		ret = -ENOMEM;
		goto out;
	}

	/* sg is allocated for data request slots only */
	for (i = 0; i < q_depth; i++) {
		mq->mqrq_cmdq[i].sg =
			mmc_alloc_sg(card->host->max_segs, GFP_KERNEL);
		if (mq->mqrq_cmdq[i].sg == NULL) {
			pr_notice("%s: unable to allocate cmdq sg of size %d\n",
				  mmc_card_name(card),
				  card->host->max_segs);
			goto free_mqrq_sg;
		}
	}

	ret = blk_queue_init_tags(mq->queue, q_depth, NULL, BLK_TAG_ALLOC_FIFO);
	if (ret) {
		pr_notice("%s: unable to allocate cmdq tags %d\n",
			  mmc_card_name(card), q_depth);
		goto free_mqrq_sg;
	}

	blk_queue_softirq_done(mq->queue, mmc_cmdq_softirq_done);
	INIT_WORK(&mq->cmdq_err_work, mmc_cmdq_error_work);
	init_completion(&mq->cmdq_shutdown_complete);
	init_completion(&mq->cmdq_pending_req_done);

	blk_queue_rq_timed_out(mq->queue, mmc_cmdq_rq_timed_out);
	blk_queue_rq_timeout(mq->queue, 120 * HZ);
	card->cqe_init = true;
	goto out;

free_mqrq_sg:
	for (i = 0; i < q_depth; i++)
		kfree(mq->mqrq_cmdq[i].sg);
	kfree(mq->mqrq_cmdq);
	mq->mqrq_cmdq = NULL;
out:
	return ret;
}
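
/*
 * Undo mmc_cmdq_init(): release the block layer tag map and free the
 * per-slot sg tables.
 */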
void mmc_cmdq_clean(struct mmc_queue *mq, struct mmc_card *card)
{
	int i;
	int q_depth = card->ext_csd.cmdq_depth - 1;

	blk_free_tags(mq->queue->queue_tags);
	mq->queue->queue_tags = NULL;
	blk_queue_free_tags(mq->queue);
	for (i = 0; i < q_depth; i++)
		kfree(mq->mqrq_cmdq[i].sg);
	kfree(mq->mqrq_cmdq);
	mq->mqrq_cmdq = NULL;
}
#endif

/*
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 * @wait: Wait till MMC request queue is empty
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
#ifdef CONFIG_MTK_EMMC_HW_CQ
int mmc_queue_suspend(struct mmc_queue *mq, int wait)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	int rc = 0;
	struct mmc_card *card = mq->card;
	struct request *req;

	if (card->cqe_init && blk_queue_tagged(q)) {
		struct mmc_host *host = card->host;

		if (test_and_set_bit(MMC_QUEUE_SUSPENDED, &mq->flags))
			goto out;

		if (wait) {
			/*
			 * After blk_stop_queue is called, wait for all
			 * active_reqs to complete.
			 * Then wait for cmdq thread to exit before calling
			 * cmdq shutdown to avoid race between issuing
			 * requests and shutdown of cmdq.
			 */
			spin_lock_irqsave(q->queue_lock, flags);
			blk_stop_queue(q);
			spin_unlock_irqrestore(q->queue_lock, flags);

			if (host->cmdq_ctx.active_reqs)
				wait_for_completion(
					&mq->cmdq_shutdown_complete);

			kthread_stop(mq->thread);
			mq->cmdq_shutdown(mq);
		} else {
			spin_lock_irqsave(q->queue_lock, flags);
			blk_stop_queue(q);
			wake_up(&host->cmdq_ctx.wait);
			req = blk_peek_request(q);
			if (req || mq->cmdq_req_peeked ||
			    host->cmdq_ctx.active_reqs) {
				clear_bit(MMC_QUEUE_SUSPENDED, &mq->flags);
				blk_start_queue(q);
				rc = -EBUSY;
			}
			spin_unlock_irqrestore(q->queue_lock, flags);
		}

		goto out;
	}

	/* non-cq case */
	if (!(test_and_set_bit(MMC_QUEUE_SUSPENDED, &mq->flags))) {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
		rc = 0;
	}
out:
	return rc;
}

/*
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	struct mmc_card *card = mq->card;
	unsigned long flags;

	if (test_and_clear_bit(MMC_QUEUE_SUSPENDED, &mq->flags)) {
		if (!(card->cqe_init && blk_queue_tagged(q)))
			up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
#else
/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!mq->suspended) {
		mq->suspended |= true;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->suspended) {
		mq->suspended = false;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
#endif

#ifdef CONFIG_MTK_EMMC_HW_CQ
/*
 * Prepare the sg list(s) to be handed off to the cmdq host driver
 */
unsigned int mmc_cmdq_queue_map_sg(struct mmc_queue *mq,
				   struct mmc_queue_req *mqrq)
{
	struct request *req = mqrq->req;

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
#endif

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}