blk-mq-sysfs.c

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

static void blk_mq_sysfs_release(struct kobject *kobj)
{
}
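
/*
 * Attribute entries pair the generic struct attribute with show/store
 * callbacks typed for the object they belong to: a per-CPU software
 * queue (blk_mq_ctx) or a hardware dispatch queue (blk_mq_hw_ctx).
 */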
struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
        ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};
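
/*
 * Generic show/store wrappers.  Each recovers the typed entry and its
 * containing context via container_of(), then invokes the attribute's
 * handler under q->sysfs_lock; a queue that is dying answers -ENOENT
 * so attribute access cannot race with queue teardown.
 */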
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
                                 char *page)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(ctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *page, size_t length)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(ctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                                     struct attribute *attr, const char *page,
                                     size_t length)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(hctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}
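
/*
 * Per-software-queue statistics.  The two-element rq_dispatched[] and
 * rq_completed[] arrays appear to be indexed by whether the request
 * was sync, so these files print the sync count first and the async
 * count second.
 */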
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
                       ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
                       ctx->rq_completed[0]);
}
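
/*
 * Dump the requests on @list into @page, one "\t<pointer>\n" line
 * each, capped at one page.  Once the next entry would overflow,
 * "\t...\n" is emitted instead; if even those five bytes do not fit,
 * the previous entry is backed out first to make room.
 */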
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
        struct request *rq;
        int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);

        list_for_each_entry(rq, list, queuelist) {
                const int rq_len = 2 * sizeof(rq) + 2;

                /* if the output will be truncated */
                if (PAGE_SIZE - 1 < len + rq_len) {
                        /* backspacing if it can't hold '\t...\n' */
                        if (PAGE_SIZE - 1 < len + 5)
                                len -= rq_len;
                        len += snprintf(page + len, PAGE_SIZE - 1 - len,
                                        "\t...\n");
                        break;
                }
                len += snprintf(page + len, PAGE_SIZE - 1 - len,
                                "\t%p\n", rq);
        }

        return len;
}
static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
        ssize_t ret;

        spin_lock(&ctx->lock);
        ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
        spin_unlock(&ctx->lock);

        return ret;
}
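
/*
 * io_poll statistics: how often completion polling was considered,
 * actually invoked, and successful.  Writing anything to the file
 * resets all three counters.
 */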
static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "considered=%lu, invoked=%lu, success=%lu\n",
                       hctx->poll_considered, hctx->poll_invoked,
                       hctx->poll_success);
}

static ssize_t blk_mq_hw_sysfs_poll_store(struct blk_mq_hw_ctx *hctx,
                                          const char *page, size_t size)
{
        hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;

        return size;
}
static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
                                           char *page)
{
        return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%lu\n", hctx->run);
}
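
/*
 * hctx->dispatched[] is a histogram of dispatch batch sizes.  The show
 * routine labels each bucket with its power-of-two lower bound (0, 1,
 * 2, 4, ...) and marks the final bucket with a trailing '+' as the
 * catch-all for anything larger.
 */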
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
                                               char *page)
{
        char *start_page = page;
        int i;

        page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
                unsigned int d = 1U << (i - 1);

                page += sprintf(page, "%8u\t%lu\n", d, hctx->dispatched[i]);
        }

        page += sprintf(page, "%8u+\t%lu\n", 1U << (i - 1),
                        hctx->dispatched[i]);
        return page - start_page;
}
static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
                                            char *page)
{
        ssize_t ret;

        spin_lock(&hctx->lock);
        ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
        spin_unlock(&hctx->lock);

        return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        unsigned int i, first = 1;
        ssize_t ret = 0;

        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(ret + page, "%u", i);
                else
                        ret += sprintf(ret + page, ", %u", i);

                first = 0;
        }

        ret += sprintf(ret + page, "\n");
        return ret;
}
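
/*
 * Attribute tables.  Everything is read-only (S_IRUGO) except io_poll,
 * which is root-writable so its counters can be reset.  For a
 * hypothetical disk "sda" with one hardware queue mapped to CPUs 0 and
 * 1, the resulting layout is roughly:
 *
 *   /sys/block/sda/mq/0/{queued,run,dispatched,pending,tags,cpu_list,
 *                        active,io_poll}
 *   /sys/block/sda/mq/0/cpu0/{dispatched,merged,completed,rq_list}
 *   /sys/block/sda/mq/0/cpu1/{dispatched,merged,completed,rq_list}
 */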
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_sysfs_dispatched_show,
};

static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
        .attr = {.name = "merged", .mode = S_IRUGO },
        .show = blk_mq_sysfs_merged_show,
};

static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
        .attr = {.name = "completed", .mode = S_IRUGO },
        .show = blk_mq_sysfs_completed_show,
};

static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
        .attr = {.name = "rq_list", .mode = S_IRUGO },
        .show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
        &blk_mq_sysfs_dispatched.attr,
        &blk_mq_sysfs_merged.attr,
        &blk_mq_sysfs_completed.attr,
        &blk_mq_sysfs_rq_list.attr,
        NULL,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
        .attr = {.name = "queued", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_queued_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
        .attr = {.name = "run", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_run_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_dispatched_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
        .attr = {.name = "active", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_active_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
        .attr = {.name = "pending", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_rq_list_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
        .attr = {.name = "tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_tags_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_cpus_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
        .attr = {.name = "io_poll", .mode = S_IWUSR | S_IRUGO },
        .show = blk_mq_hw_sysfs_poll_show,
        .store = blk_mq_hw_sysfs_poll_store,
};

static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_queued.attr,
        &blk_mq_hw_sysfs_run.attr,
        &blk_mq_hw_sysfs_dispatched.attr,
        &blk_mq_hw_sysfs_pending.attr,
        &blk_mq_hw_sysfs_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        &blk_mq_hw_sysfs_active.attr,
        &blk_mq_hw_sysfs_poll.attr,
        NULL,
};
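
/*
 * Glue the wrappers and attribute tables into kobject types.  The bare
 * blk_mq_ktype backs the "mq" directory itself, which carries no
 * attributes of its own.  The empty release hook reflects that the
 * underlying ctx/hctx memory lives and dies with the request queue
 * rather than with these kobjects.
 */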
static const struct sysfs_ops blk_mq_sysfs_ops = {
        .show   = blk_mq_sysfs_show,
        .store  = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show   = blk_mq_hw_sysfs_show,
        .store  = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .default_attrs  = default_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};
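
/*
 * Registration builds the kobject tree one hardware context at a time:
 * q->mq_kobj gets a child named after the hctx queue number, which in
 * turn gets one "cpuN" child per mapped software queue.  A hardware
 * context with no mapped software queues is skipped entirely.
 */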
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        int i;

        if (!hctx->nr_ctx)
                return;

        hctx_for_each_ctx(hctx, ctx, i)
                kobject_del(&ctx->kobj);

        kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        int i, ret;

        if (!hctx->nr_ctx)
                return 0;

        ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
        if (ret)
                return ret;

        hctx_for_each_ctx(hctx, ctx, i) {
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
        }

        return ret;
}
static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        int i, j;

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_unregister_hctx(hctx);

                hctx_for_each_ctx(hctx, ctx, j)
                        kobject_put(&ctx->kobj);

                kobject_put(&hctx->kobj);
        }

        kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
        kobject_del(&q->mq_kobj);
        kobject_put(&q->mq_kobj);

        kobject_put(&dev->kobj);

        q->mq_sysfs_init_done = false;
}

void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
        blk_mq_disable_hotplug();
        __blk_mq_unregister_dev(dev, q);
        blk_mq_enable_hotplug();
}
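
/*
 * blk_mq_sysfs_init() initializes the kobjects for every possible
 * CPU's software queue (and blk_mq_hctx_kobj_init() does the same per
 * hctx) before any of them are added to sysfs, presumably so later
 * kobject_put() calls are well defined even for contexts that never
 * get registered (e.g. CPUs that are possible but not online).
 */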
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
        kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

void blk_mq_sysfs_init(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        kobject_init(&q->mq_kobj, &blk_mq_ktype);

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
        }
}
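
/*
 * Main registration entry point, called from blk_register_queue() when
 * the disk is added.  CPU hotplug is disabled across the walk so the
 * ctx-to-hctx mapping cannot change mid-registration; the
 * kobject_get(&dev->kobj) taken here is balanced by the kobject_put()
 * in __blk_mq_unregister_dev().
 */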
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int ret, i;

        blk_mq_disable_hotplug();

        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                goto out;

        kobject_uevent(&q->mq_kobj, KOBJ_ADD);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        if (ret)
                __blk_mq_unregister_dev(dev, q);
        else
                q->mq_sysfs_init_done = true;
out:
        blk_mq_enable_hotplug();

        return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);
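
/*
 * blk_mq_sysfs_unregister() and blk_mq_sysfs_register() tear down and
 * rebuild only the per-hctx directories, leaving the "mq" kobject in
 * place.  They are meant for hardware-queue remapping (e.g. queue
 * reinit after CPU hotplug) and are no-ops until blk_mq_register_dev()
 * has completed, as tracked by q->mq_sysfs_init_done.
 */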
void blk_mq_sysfs_unregister(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!q->mq_sysfs_init_done)
                return;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;

        if (!q->mq_sysfs_init_done)
                return ret;

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        return ret;
}