- /*
- * blk-mq scheduling framework
- *
- * Copyright (C) 2016 Jens Axboe
- */
- #include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/blk-mq.h>
- #include <trace/events/block.h>
- #include "blk.h"
- #include "blk-mq.h"
- #include "blk-mq-debugfs.h"
- #include "blk-mq-sched.h"
- #include "blk-mq-tag.h"
- #include "blk-wbt.h"
- void blk_mq_sched_free_hctx_data(struct request_queue *q,
- void (*exit)(struct blk_mq_hw_ctx *))
- {
- struct blk_mq_hw_ctx *hctx;
- int i;
- queue_for_each_hw_ctx(q, hctx, i) {
- if (exit && hctx->sched_data)
- exit(hctx);
- kfree(hctx->sched_data);
- hctx->sched_data = NULL;
- }
- }
- EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);
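- /*
- * Look up (or create) the io_cq tying this task's io_context to the queue,
- * take a reference on the io_context and attach the icq to the request so
- * the elevator can associate it with the submitting process.
- */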
- void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
- {
- struct request_queue *q = rq->q;
- struct io_context *ioc = rq_ioc(bio);
- struct io_cq *icq;
- spin_lock_irq(q->queue_lock);
- icq = ioc_lookup_icq(ioc, q);
- spin_unlock_irq(q->queue_lock);
- if (!icq) {
- icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
- if (!icq)
- return;
- }
- get_io_context(icq->ioc);
- rq->elv.icq = icq;
- }
- /*
- * Mark a hardware queue as needing a restart. Once marked, the queue is
- * re-run from blk_mq_sched_restart() when resources such as driver tags
- * are freed.
- */
- void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
- {
- if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- return;
- set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
- }
- EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
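- /*
- * If the hardware queue was previously marked for restart, clear the flag
- * and re-run the queue asynchronously. This is called when resources such
- * as driver tags are freed, giving requests left behind another chance.
- */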
- void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
- {
- if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- return;
- clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
- blk_mq_run_hw_queue(hctx, true);
- }
- /*
- * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
- * its queue by itself in its completion handler, so we don't need to
- * restart the queue when blk_mq_get_dispatch_budget() fails here.
- */
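- /*
- * Pull requests from the elevator one at a time, grabbing a dispatch budget
- * for each, and feed them to the driver until the scheduler runs dry, no
- * budget is available, or the driver stops taking requests.
- */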
- static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
- {
- struct request_queue *q = hctx->queue;
- struct elevator_queue *e = q->elevator;
- LIST_HEAD(rq_list);
- do {
- struct request *rq;
- if (e->type->ops.mq.has_work &&
- !e->type->ops.mq.has_work(hctx))
- break;
- if (!blk_mq_get_dispatch_budget(hctx))
- break;
- rq = e->type->ops.mq.dispatch_request(hctx);
- if (!rq) {
- blk_mq_put_dispatch_budget(hctx);
- break;
- }
- /*
- * Now this rq owns the budget, which has to be released if the rq
- * doesn't end up queued to the driver via .queue_rq() in
- * blk_mq_dispatch_rq_list().
- */
- list_add(&rq->queuelist, &rq_list);
- } while (blk_mq_dispatch_rq_list(q, &rq_list, true));
- }
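- /*
- * Return the software queue that follows @ctx on this hardware queue,
- * wrapping back to the first one at the end of the map.
- */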
- static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
- struct blk_mq_ctx *ctx)
- {
- unsigned idx = ctx->index_hw;
- if (++idx == hctx->nr_ctx)
- idx = 0;
- return hctx->ctxs[idx];
- }
- /*
- * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
- * its queue by itself in its completion handler, so we don't need to
- * restart the queue when blk_mq_get_dispatch_budget() fails here.
- */
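- /*
- * Dispatch directly from the software queues, one request at a time,
- * remembering in hctx->dispatch_from where to start next time so the
- * sw queues are drained in a fair, round-robin fashion.
- */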
- static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
- {
- struct request_queue *q = hctx->queue;
- LIST_HEAD(rq_list);
- struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
- do {
- struct request *rq;
- if (!sbitmap_any_bit_set(&hctx->ctx_map))
- break;
- if (!blk_mq_get_dispatch_budget(hctx))
- break;
- rq = blk_mq_dequeue_from_ctx(hctx, ctx);
- if (!rq) {
- blk_mq_put_dispatch_budget(hctx);
- break;
- }
- /*
- * Now this rq owns the budget, which has to be released if the rq
- * doesn't end up queued to the driver via .queue_rq() in
- * blk_mq_dispatch_rq_list().
- */
- list_add(&rq->queuelist, &rq_list);
- /* round robin for fair dispatch */
- ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
- } while (blk_mq_dispatch_rq_list(q, &rq_list, true));
- WRITE_ONCE(hctx->dispatch_from, ctx);
- }
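- /*
- * Main dispatch path for a hardware queue: previously deferred requests on
- * hctx->dispatch are issued first, then new work is pulled from the
- * elevator or the software queues depending on whether a scheduler is
- * attached and on how busy the device currently is.
- */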
- void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
- {
- struct request_queue *q = hctx->queue;
- struct elevator_queue *e = q->elevator;
- const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
- LIST_HEAD(rq_list);
- /* RCU or SRCU read lock is needed before checking quiesced flag */
- if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
- return;
- hctx->run++;
- /*
- * If there are previous entries on our dispatch list, grab them first for
- * fairer dispatch.
- */
- if (!list_empty_careful(&hctx->dispatch)) {
- spin_lock(&hctx->lock);
- if (!list_empty(&hctx->dispatch))
- list_splice_init(&hctx->dispatch, &rq_list);
- spin_unlock(&hctx->lock);
- }
- /*
- * Only ask the scheduler for requests if we didn't have residual
- * requests from the dispatch list. This is to avoid the case where
- * we only ever dispatch a fraction of the requests available because
- * of low device queue depth. Once we pull requests out of the IO
- * scheduler, we can no longer merge or sort them. So it's best to
- * leave them there for as long as we can. Mark the hw queue as
- * needing a restart in that case.
- *
- * We want to dispatch from the scheduler if there was nothing
- * on the dispatch list or we were able to dispatch from the
- * dispatch list.
- */
- if (!list_empty(&rq_list)) {
- blk_mq_sched_mark_restart_hctx(hctx);
- if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
- if (has_sched_dispatch)
- blk_mq_do_dispatch_sched(hctx);
- else
- blk_mq_do_dispatch_ctx(hctx);
- }
- } else if (has_sched_dispatch) {
- blk_mq_do_dispatch_sched(hctx);
- } else if (hctx->dispatch_busy) {
- /* dequeue request one by one from sw queue if queue is busy */
- blk_mq_do_dispatch_ctx(hctx);
- } else {
- blk_mq_flush_busy_ctxs(hctx, &rq_list);
- blk_mq_dispatch_rq_list(q, &rq_list, false);
- }
- }
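- /*
- * Ask the elevator whether and where @bio can be merged into an existing
- * request and attempt that merge. If the grown request then also merges
- * with a neighbouring request, the request left empty by that second merge
- * is returned through @merged_request so the caller can free it.
- */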
- bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
- struct request **merged_request)
- {
- struct request *rq;
- switch (elv_merge(q, &rq, bio)) {
- case ELEVATOR_BACK_MERGE:
- if (!blk_mq_sched_allow_merge(q, rq, bio))
- return false;
- if (!bio_attempt_back_merge(q, rq, bio))
- return false;
- *merged_request = attempt_back_merge(q, rq);
- if (!*merged_request)
- elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
- return true;
- case ELEVATOR_FRONT_MERGE:
- if (!blk_mq_sched_allow_merge(q, rq, bio))
- return false;
- if (!bio_attempt_front_merge(q, rq, bio))
- return false;
- *merged_request = attempt_front_merge(q, rq);
- if (!*merged_request)
- elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
- return true;
- case ELEVATOR_DISCARD_MERGE:
- return bio_attempt_discard_merge(q, rq, bio);
- default:
- return false;
- }
- }
- EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
- /*
- * Iterate list of requests and see if we can merge this bio with any
- * of them.
- */
- bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
- struct bio *bio)
- {
- struct request *rq;
- int checked = 8;
- list_for_each_entry_reverse(rq, list, queuelist) {
- bool merged = false;
- if (!checked--)
- break;
- if (!blk_rq_merge_ok(rq, bio))
- continue;
- switch (blk_try_merge(rq, bio)) {
- case ELEVATOR_BACK_MERGE:
- if (blk_mq_sched_allow_merge(q, rq, bio))
- merged = bio_attempt_back_merge(q, rq, bio);
- break;
- case ELEVATOR_FRONT_MERGE:
- if (blk_mq_sched_allow_merge(q, rq, bio))
- merged = bio_attempt_front_merge(q, rq, bio);
- break;
- case ELEVATOR_DISCARD_MERGE:
- merged = bio_attempt_discard_merge(q, rq, bio);
- break;
- default:
- continue;
- }
- return merged;
- }
- return false;
- }
- EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);
- /*
- * Reverse check our software queue for entries that we could potentially
- * merge with. Currently includes a hand-wavy stop count of 8, to not spend
- * too much time checking for merges.
- */
- static bool blk_mq_attempt_merge(struct request_queue *q,
- struct blk_mq_ctx *ctx, struct bio *bio)
- {
- lockdep_assert_held(&ctx->lock);
- if (blk_mq_bio_list_merge(q, &ctx->rq_list, bio)) {
- ctx->rq_merged++;
- return true;
- }
- return false;
- }
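- /*
- * Try to merge @bio into an already queued request: use the elevator's
- * ->bio_merge() hook when a scheduler is attached, otherwise fall back to
- * scanning this CPU's software queue.
- */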
- bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
- {
- struct elevator_queue *e = q->elevator;
- struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
- bool ret = false;
- if (e && e->type->ops.mq.bio_merge) {
- blk_mq_put_ctx(ctx);
- return e->type->ops.mq.bio_merge(hctx, bio);
- }
- if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
- !list_empty_careful(&ctx->rq_list)) {
- /* default per sw-queue merge */
- spin_lock(&ctx->lock);
- ret = blk_mq_attempt_merge(q, ctx, bio);
- spin_unlock(&ctx->lock);
- }
- blk_mq_put_ctx(ctx);
- return ret;
- }
- bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
- {
- return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
- }
- EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
- void blk_mq_sched_request_inserted(struct request *rq)
- {
- trace_block_rq_insert(rq->q, rq);
- }
- EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
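- /*
- * Requests belonging to the flush machinery must not be sent through the
- * elevator: put them straight onto hctx->dispatch and report the insert as
- * handled. Everything else is marked RQF_SORTED when a scheduler is
- * attached and takes the normal insert path.
- */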
- static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
- bool has_sched,
- struct request *rq)
- {
- /* dispatch flush rq directly */
- if (rq->rq_flags & RQF_FLUSH_SEQ) {
- spin_lock(&hctx->lock);
- list_add(&rq->queuelist, &hctx->dispatch);
- spin_unlock(&hctx->lock);
- return true;
- }
- if (has_sched)
- rq->rq_flags |= RQF_SORTED;
- return false;
- }
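- /*
- * Insert a single request: flush/fua requests are routed into the flush
- * machinery, flush-sequence requests bypass the scheduler, and everything
- * else goes through the elevator's ->insert_requests() or, without a
- * scheduler, onto the software queue. The hw queue is then run if asked.
- */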
- void blk_mq_sched_insert_request(struct request *rq, bool at_head,
- bool run_queue, bool async)
- {
- struct request_queue *q = rq->q;
- struct elevator_queue *e = q->elevator;
- struct blk_mq_ctx *ctx = rq->mq_ctx;
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
- /* a flush rq in the flush machinery needs to be dispatched directly */
- if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
- blk_insert_flush(rq);
- goto run;
- }
- WARN_ON(e && (rq->tag != -1));
- if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
- goto run;
- if (e && e->type->ops.mq.insert_requests) {
- LIST_HEAD(list);
- list_add(&rq->queuelist, &list);
- e->type->ops.mq.insert_requests(hctx, &list, at_head);
- } else {
- spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq, at_head);
- spin_unlock(&ctx->lock);
- }
- run:
- if (run_queue)
- blk_mq_run_hw_queue(hctx, async);
- }
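- /*
- * Insert a batch of requests (typically from a plug list) that all map to
- * the same software queue, either through the elevator or, without one, by
- * trying direct issue first and falling back to the software queue.
- */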
- void blk_mq_sched_insert_requests(struct request_queue *q,
- struct blk_mq_ctx *ctx,
- struct list_head *list, bool run_queue_async)
- {
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
- struct elevator_queue *e = hctx->queue->elevator;
- if (e && e->type->ops.mq.insert_requests)
- e->type->ops.mq.insert_requests(hctx, list, false);
- else {
- /*
- * With the 'none' scheduler, try to issue requests directly if the hw
- * queue isn't busy; this may save us an extra enqueue and dequeue on
- * the sw queue.
- */
- if (!hctx->dispatch_busy && !e && !run_queue_async) {
- blk_mq_try_issue_list_directly(hctx, list);
- if (list_empty(list))
- return;
- }
- blk_mq_insert_requests(hctx, ctx, list);
- }
- blk_mq_run_hw_queue(hctx, run_queue_async);
- }
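- /*
- * Each hardware queue gets its own set of scheduler tags, sized to
- * q->nr_requests, backing the requests the elevator is allowed to hold;
- * driver tags are only taken when a request is actually dispatched.
- */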
- static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
- struct blk_mq_hw_ctx *hctx,
- unsigned int hctx_idx)
- {
- if (hctx->sched_tags) {
- blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
- blk_mq_free_rq_map(hctx->sched_tags);
- hctx->sched_tags = NULL;
- }
- }
- static int blk_mq_sched_alloc_tags(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx,
- unsigned int hctx_idx)
- {
- struct blk_mq_tag_set *set = q->tag_set;
- int ret;
- hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
- set->reserved_tags);
- if (!hctx->sched_tags)
- return -ENOMEM;
- ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
- if (ret)
- blk_mq_sched_free_tags(set, hctx, hctx_idx);
- return ret;
- }
- static void blk_mq_sched_tags_teardown(struct request_queue *q)
- {
- struct blk_mq_tag_set *set = q->tag_set;
- struct blk_mq_hw_ctx *hctx;
- int i;
- queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_sched_free_tags(set, hctx, i);
- }
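- /*
- * Attach an elevator to the queue: size nr_requests, allocate per-hctx
- * scheduler tags, call the elevator's init_sched()/init_hctx() hooks and
- * register the debugfs attributes. A NULL elevator type means "none".
- */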
- int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
- {
- struct blk_mq_hw_ctx *hctx;
- struct elevator_queue *eq;
- unsigned int i;
- int ret;
- if (!e) {
- q->elevator = NULL;
- q->nr_requests = q->tag_set->queue_depth;
- return 0;
- }
- /*
- * Default to twice the smaller of the hw queue_depth and 128, since we
- * no longer split into sync/async lists like the old code did. Note
- * that this is a per-hw-queue depth.
- */
- q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
- BLKDEV_MAX_RQ);
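- /*
- * For example (hypothetical depths): a device exposing queue_depth 62
- * ends up with nr_requests == 124, while a deep queue of 1023 is capped
- * at 2 * BLKDEV_MAX_RQ == 256.
- */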
- queue_for_each_hw_ctx(q, hctx, i) {
- ret = blk_mq_sched_alloc_tags(q, hctx, i);
- if (ret)
- goto err;
- }
- ret = e->ops.mq.init_sched(q, e);
- if (ret)
- goto err;
- blk_mq_debugfs_register_sched(q);
- queue_for_each_hw_ctx(q, hctx, i) {
- if (e->ops.mq.init_hctx) {
- ret = e->ops.mq.init_hctx(hctx, i);
- if (ret) {
- eq = q->elevator;
- blk_mq_exit_sched(q, eq);
- kobject_put(&eq->kobj);
- return ret;
- }
- }
- blk_mq_debugfs_register_sched_hctx(q, hctx);
- }
- return 0;
- err:
- blk_mq_sched_tags_teardown(q);
- q->elevator = NULL;
- return ret;
- }
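- /*
- * Tear the elevator down in the reverse order of blk_mq_init_sched():
- * per-hctx state and debugfs entries first, then the elevator itself and
- * finally the scheduler tags.
- */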
- void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
- {
- struct blk_mq_hw_ctx *hctx;
- unsigned int i;
- queue_for_each_hw_ctx(q, hctx, i) {
- blk_mq_debugfs_unregister_sched_hctx(hctx);
- if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
- e->type->ops.mq.exit_hctx(hctx, i);
- hctx->sched_data = NULL;
- }
- }
- blk_mq_debugfs_unregister_sched(q);
- if (e->type->ops.mq.exit_sched)
- e->type->ops.mq.exit_sched(e);
- blk_mq_sched_tags_teardown(q);
- q->elevator = NULL;
- }