
/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}
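
/*
 * Poll statistics are kept per I/O size bucket, with read and write stats
 * interleaved: even indices of q->poll_stat[] hold reads, odd indices hold
 * writes, and bucket i is labelled with an I/O size of 1 << (9 + i) bytes.
 */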
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS / 2; bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start = queue_requeue_list_start,
	.next = queue_requeue_list_next,
	.stop = queue_requeue_list_stop,
	.show = blk_mq_debugfs_rq_show,
};
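
/*
 * Print the name of every bit set in @flags, separated by '|'. Bits without
 * an entry in @flag_name are printed as their bit number instead.
 */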
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(QUEUED),
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(BYPASS),
	QUEUE_FLAG_NAME(BIDI),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(NO_SG_MERGE),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(FLUSH_NQ),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_SHARED),
	HCTX_FLAG_NAME(SG_MERGE),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(ZONE_REPORT),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(NOWAIT),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(QUEUED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(PREEMPT),
	RQF_NAME(COPY_USER),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE] = "idle",
	[MQ_RQ_IN_FLIGHT] = "in_flight",
	[MQ_RQ_COMPLETE] = "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

	seq_printf(m, "%p {.op=", rq);
	if (op < ARRAY_SIZE(op_name) && op_name[op])
		seq_printf(m, "%s", op_name[op]);
	else
		seq_printf(m, "%d", op);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start = hctx_dispatch_start,
	.next = hctx_dispatch_next,
	.stop = hctx_dispatch_stop,
	.show = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file *m;
	struct blk_mq_hw_ctx *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call.
 */
static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
	    blk_mq_rq_state(rq) != MQ_RQ_IDLE)
		__blk_mq_debugfs_rq_show(params->m,
					 list_entry_rq(&rq->queuelist));
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}
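
/*
 * Dump the tag counts and active queue count for a tag set, followed by the
 * sbitmap_queue state of the normal tag pool and, if any tags are reserved,
 * of the reserved pool.
 */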
static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}
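
/*
 * Print hctx->dispatched[] as a histogram: rows are labelled 0, 1, 2, 4, ...
 * (powers of two) and the final row ("<n>+") is open-ended.
 */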
static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_lock(&ctx->lock);
	return seq_list_start(&ctx->rq_list, *pos);
}

static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_ctx *ctx = m->private;

	return seq_list_next(v, &ctx->rq_list, pos);
}

static void ctx_rq_list_stop(struct seq_file *m, void *v)
	__releases(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_unlock(&ctx->lock);
}

static const struct seq_operations ctx_rq_list_seq_ops = {
	.start = ctx_rq_list_start,
	.next = ctx_rq_list_next,
	.stop = ctx_rq_list_stop,
	.show = blk_mq_debugfs_rq_show,
};

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}
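
/*
 * Each attribute file stores its blk_mq_debugfs_attr descriptor in the file
 * inode's i_private, while the object the attribute operates on (queue, hctx
 * or ctx) lives in the i_private of the parent directory's inode; see
 * debugfs_create_files() below.
 */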
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same with 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);
	else
		return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open = blk_mq_debugfs_open,
	.read = seq_read,
	.write = blk_mq_debugfs_write,
	.llseek = seq_lseek,
	.release = blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};
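
/*
 * Create one debugfs file per attribute under @parent. @data (the queue, hctx
 * or ctx the attributes operate on) is stashed in the parent inode's
 * i_private so the show/write handlers above can find it again.
 */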
static bool debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	d_inode(parent)->i_private = data;

	for (; attr->name; attr++) {
		if (!debugfs_create_file(attr->name, attr->mode, parent,
					 (void *)attr, &blk_mq_debugfs_fops))
			return false;
	}
	return true;
}
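
/*
 * Create the per-queue debugfs directory (named after the queue's parent
 * kobject) under blk_debugfs_root and populate it, including any scheduler
 * and hctx directories that could not be registered earlier.
 */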
int blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!blk_debugfs_root)
		return -ENOENT;

	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	if (!q->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->debugfs_dir, q,
				  blk_mq_debugfs_queue_attrs))
		goto err;

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
			goto err;
		if (q->elevator && !hctx->sched_debugfs_dir &&
		    blk_mq_debugfs_register_sched_hctx(q, hctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	debugfs_remove_recursive(q->debugfs_dir);
	q->sched_debugfs_dir = NULL;
	q->debugfs_dir = NULL;
}

static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
				       struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
	if (!ctx_dir)
		return -ENOMEM;

	if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
		return -ENOMEM;

	return 0;
}

int blk_mq_debugfs_register_hctx(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return -ENOENT;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
	if (!hctx->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->debugfs_dir, hctx,
				  blk_mq_debugfs_hctx_attrs))
		goto err;

	hctx_for_each_ctx(hctx, ctx, i) {
		if (blk_mq_debugfs_register_ctx(hctx, ctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister_hctx(hctx);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_debugfs_register_hctx(q, hctx))
			return -ENOMEM;
	}

	return 0;
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

int blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	if (!q->debugfs_dir)
		return -ENOENT;

	if (!e->queue_debugfs_attrs)
		return 0;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
	if (!q->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->sched_debugfs_dir, q,
				  e->queue_debugfs_attrs))
		goto err;

	return 0;

err:
	blk_mq_debugfs_unregister_sched(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!hctx->debugfs_dir)
		return -ENOENT;

	if (!e->hctx_debugfs_attrs)
		return 0;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	if (!hctx->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
				  e->hctx_debugfs_attrs))
		return -ENOMEM;

	return 0;
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}