blk-mq.h

#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
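
/*
 * Illustrative note (editor sketch, not part of the original header): a
 * blk_mq_ctx is a per-cpu software queue, reached through the request
 * queue's per-cpu storage, e.g.:
 *
 *	struct blk_mq_ctx *ctx = per_cpu_ptr(q->queue_ctx, cpu);
 *
 * The ____cacheline_aligned_in_smp annotations keep the submission-side
 * fields (lock, rq_list) and the completion-side counters on separate
 * cachelines, so submitting and completing CPUs do not false-share.
 */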
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);

/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
int blk_mq_map_queues(struct blk_mq_tag_set *set);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
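
/*
 * Usage sketch (illustrative, not part of the original header): q->mq_map[]
 * holds a per-cpu index into q->queue_hw_ctx[], so every CPU resolves to
 * exactly one hardware queue. A caller typically maps a software context to
 * its hardware context via the ctx's CPU:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 *	...
 *	blk_mq_put_ctx(ctx);
 */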
/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that
 * we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
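
/*
 * Usage sketch (illustrative, not part of the original header):
 * blk_mq_get_ctx() pins the caller to the current CPU via get_cpu(), which
 * disables preemption, so it must always be paired with blk_mq_put_ctx().
 * Assuming the caller owns a struct request *rq, queueing it onto the
 * software queue looks roughly like:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	spin_lock(&ctx->lock);
 *	list_add_tail(&rq->queuelist, &ctx->rq_list);
 *	spin_unlock(&ctx->lock);
 *
 *	blk_mq_put_ctx(ctx);
 */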
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, unsigned int flags,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->flags = flags;
	data->ctx = ctx;
	data->hctx = hctx;
}
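
/*
 * Usage sketch (illustrative, not part of the original header): request
 * allocators fill a blk_mq_alloc_data on the stack before obtaining a tag;
 * ctx and hctx may start out NULL and are filled in during allocation:
 *
 *	struct blk_mq_alloc_data alloc_data;
 *
 *	blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, NULL, NULL);
 *
 * BLK_MQ_REQ_NOWAIT here stands in for one of the BLK_MQ_REQ_* allocation
 * flags declared in include/linux/blk-mq.h.
 */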
/* A hw queue is mapped once it has software contexts and a tag set. */
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

#endif