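/*
 * blk-mq-sysfs.c - sysfs support for multiqueue block devices: exposes the
 * hardware and software queue context hierarchy under /sys/block/<dev>/mq.
 */
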
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
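
/*
 * The mq kobject and the per-CPU software context kobjects are embedded in
 * structures that are freed together with the queue, so their release
 * handler has nothing to do.
 */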
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}
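
/*
 * Runs when the last reference to a hardware context kobject is dropped:
 * tear down the flush queue, bitmap and cpumask, then free the hctx itself.
 */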
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}
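
/*
 * Typed attribute wrappers: each entry pairs a generic struct attribute
 * with show/store callbacks that take the software or hardware context
 * directly.
 */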
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};
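
/*
 * Generic sysfs ->show()/->store() for software context attributes.  Both
 * recover the typed entry and ctx via container_of(), then invoke the typed
 * callback under q->sysfs_lock, returning -ENOENT once the queue is marked
 * dying.
 */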
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
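
/* The hardware context attributes use the same dispatch scheme. */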
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
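
/*
 * Read-only hctx attributes: the tag counts come straight from the tag set;
 * cpu_list prints the CPUs mapped to this hardware queue as a comma
 * separated list, truncating if it would overflow the sysfs page.
 */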
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}
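
/* Attribute tables, sysfs ops and kobject types tying the above together. */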
static struct attribute *default_ctx_attrs[] = {
	NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_hw_sysfs_release,
};
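
/*
 * Remove a hardware context and its software contexts from sysfs.  The
 * kobjects are only deleted here, not released; the final kobject_put()
 * happens when the queue itself is torn down.
 */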
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}
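
/*
 * Add a hardware context directory (named after its queue number) plus a
 * cpuN directory for each software context mapped to it.
 */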
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}
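
/*
 * Remove the whole mq directory for a queue and drop the device reference
 * taken at registration time.  Caller must hold q->sysfs_lock.
 */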
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}
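
/* Initialise the embedded kobject of a freshly allocated hardware context. */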
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}
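
/*
 * Drop the initial references taken by blk_mq_sysfs_init().  The release
 * handlers are empty, so this is safe even if the kobjects were never
 * added to sysfs.
 */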
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(&q->mq_kobj);
}
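
/*
 * Counterpart to blk_mq_sysfs_deinit(): initialise the mq kobject and every
 * per-CPU software context kobject so they can later be added to sysfs.
 */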
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}
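
/*
 * Add the "mq" directory below the device kobject and register every
 * hardware context underneath it, unwinding all of it on failure.  Caller
 * must hold q->sysfs_lock.
 */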
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_lock);

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}
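
/* Locked wrapper around __blk_mq_register_dev(). */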
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = __blk_mq_register_dev(dev, q);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);
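
/*
 * Used while the set of hardware queues is being updated: remove all hctx
 * directories so they can be re-added afterwards.  A no-op until the
 * initial registration has completed.
 */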
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_lock);
}
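
/* Re-register all hardware contexts after blk_mq_sysfs_unregister(). */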
int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_lock);

	return ret;
}