blk-rq-qos.h

#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
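
/*
 * Ids for the policies that can sit on a queue.  In this kernel the two
 * users are writeback throttling (blk-wbt, RQ_QOS_WBT) and the cgroup
 * I/O latency controller (blk-iolatency, RQ_QOS_CGROUP).
 */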
enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_CGROUP,
};
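
/*
 * A counter of requests in flight plus a waitqueue for submitters that
 * had to be throttled; policies use this pair to cap inflight I/O and
 * wake sleepers as completions bring the count back under their limit.
 */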
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};
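
/*
 * One node per policy, linked off request_queue->rq_qos (see rq_qos_add()
 * below).  A policy embeds this in its own state and recovers that state
 * in its ops callbacks; a minimal sketch of the usual pattern, with
 * struct my_qos hypothetical (blk-wbt does essentially this with its
 * struct rq_wb):
 *
 *	struct my_qos {
 *		struct rq_qos rqos;
 *		...
 *	};
 *
 *	static inline struct my_qos *to_my_qos(struct rq_qos *rqos)
 *	{
 *		return container_of(rqos, struct my_qos, rqos);
 *	}
 */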
struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
};
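
/*
 * Hooks a policy can implement.  Roughly: throttle runs at bio submission
 * time and is the only hook expected to sleep (the spinlock_t * is the
 * queue lock on the legacy path); track ties an allocated request back to
 * the policy; issue/requeue/done follow the request's dispatch lifecycle;
 * done_bio runs at bio completion; cleanup undoes throttle accounting for
 * a bio that never became a request; exit tears the policy down.  The
 * I/O-path hooks may be left NULL.
 */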
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *, spinlock_t *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*exit)(struct rq_qos *);
};
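
/*
 * Shared depth-scaling state used by the rq_depth_* helpers declared at
 * the bottom of this file: max_depth is the currently allowed depth,
 * derived from the device's queue_depth (or default_depth when that is
 * unknown) and the current scale_step; larger scale_step values throttle
 * harder, negative steps allow exceeding the base depth, and scaled_max
 * records that the upper bound has been reached.
 */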
struct rq_depth {
	unsigned int max_depth;
	int scale_step;
	bool scaled_max;
	unsigned int queue_depth;
	unsigned int default_depth;
};

static inline struct rq_qos *rq_qos_id(struct request_queue *q,
				       enum rq_qos_id id)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_CGROUP);
}
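
/*
 * Because rq_qos_id() returns NULL when no policy with that id is on the
 * list, these helpers double as "is this policy enabled" tests; an
 * illustrative sketch, not part of this header:
 *
 *	struct rq_qos *rqos = wbt_rq_qos(q);
 *
 *	if (!rqos)
 *		return;		// writeback throttling not enabled
 */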

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}
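
/*
 * Typical use of an rq_wait, sketched under the assumption of a fixed
 * limit (the real policies, e.g. blk-wbt, recompute their limits and use
 * custom wait loops around rq_wait_inc_below(), declared below):
 *
 *	rq_wait_init(&rqw);
 *
 *	// submission side: sleep until we may take an inflight slot
 *	wait_event(rqw.wait, rq_wait_inc_below(&rqw, limit));
 *
 *	// completion side: drop the slot and wake throttled submitters
 *	if (atomic_dec_return(&rqw.inflight) < limit)
 *		wake_up_all(&rqw.wait);
 */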

static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
}

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos **cur;

	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}
}
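
/*
 * Registration is a bare list push with no locking here, so callers must
 * serialize externally (policies attach during queue setup or from sysfs
 * with the queue quiesced).  A minimal sketch, with my_ops and my_exit
 * hypothetical:
 *
 *	static struct rq_qos_ops my_ops = {
 *		.exit = my_exit,
 *	};
 *
 *	rqos->ops = &my_ops;
 *	rqos->q = q;
 *	rqos->id = RQ_QOS_WBT;
 *	rq_qos_add(q, rqos);
 */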

bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);

bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

void rq_qos_cleanup(struct request_queue *, struct bio *);
void rq_qos_done(struct request_queue *, struct request *);
void rq_qos_issue(struct request_queue *, struct request *);
void rq_qos_requeue(struct request_queue *, struct request *);
void rq_qos_done_bio(struct request_queue *q, struct bio *bio);
void rq_qos_throttle(struct request_queue *, struct bio *, spinlock_t *);
void rq_qos_track(struct request_queue *q, struct request *, struct bio *);
void rq_qos_exit(struct request_queue *);
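
/*
 * The rq_qos_* entry points above are implemented in block/blk-rq-qos.c;
 * each walks the q->rq_qos list and calls into every registered policy
 * that provides the corresponding hook.
 */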

#endif