blk-rq-qos.c

#include "blk-rq-qos.h"

/*
 * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
 * false if 'v' + 1 would be bigger than 'below'.
 */
static bool atomic_inc_below(atomic_t *v, unsigned int below)
{
	unsigned int cur = atomic_read(v);

	for (;;) {
		unsigned int old;

		if (cur >= below)
			return false;
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)
			break;
		cur = old;
	}

	return true;
}

/* Bump rq_wait->inflight by one if it is still below 'limit'. */
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
{
	return atomic_inc_below(&rq_wait->inflight, limit);
}
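
/*
 * Usage sketch (not part of this file): a throttling policy such as wbt
 * pairs rq_wait_inc_below() with the rq_wait waitqueue, roughly:
 *
 *	if (!rq_wait_inc_below(rqw, limit))
 *		wait on rqw->wait until completions make room
 *
 * 'rqw' and 'limit' are illustrative names; the actual wait loops live
 * in the individual policies, not here.
 */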

/*
 * Each wrapper below walks the queue's chain of rq_qos policies and
 * invokes the corresponding hook on every policy that implements it.
 */
void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->cleanup)
			rqos->ops->cleanup(rqos, bio);
	}
}

void rq_qos_done(struct request_queue *q, struct request *rq)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->done)
			rqos->ops->done(rqos, rq);
	}
}

void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->issue)
			rqos->ops->issue(rqos, rq);
	}
}

void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->requeue)
			rqos->ops->requeue(rqos, rq);
	}
}

void rq_qos_throttle(struct request_queue *q, struct bio *bio,
		     spinlock_t *lock)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->throttle)
			rqos->ops->throttle(rqos, bio, lock);
	}
}

void rq_qos_track(struct request_queue *q, struct request *rq, struct bio *bio)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->track)
			rqos->ops->track(rqos, rq, bio);
	}
}

void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->done_bio)
			rqos->ops->done_bio(rqos, bio);
	}
}
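
/*
 * Attachment sketch (not from this file): a policy provides a struct
 * rq_qos_ops with whichever hooks it needs and links a struct rq_qos
 * into q->rq_qos, e.g. via the rq_qos_add()/rq_qos_del() helpers in
 * blk-rq-qos.h:
 *
 *	static struct rq_qos_ops my_qos_ops = {
 *		.throttle	= my_throttle,
 *		.done		= my_done,
 *		.exit		= my_exit,
 *	};
 *
 * 'my_*' are placeholder names; wbt (blk-wbt.c) is the in-tree example
 * of a policy wired up this way.
 */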

/*
 * Return true if we can't increase the depth further by scaling.
 */
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
{
	unsigned int depth;
	bool ret = false;

	/*
	 * For QD=1 devices, this is a special case. It's important for those
	 * to have one request ready when one completes, so force a depth of
	 * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
	 * since the device can't have more than that in flight. If we're
	 * scaling down, then keep a setting of 1/1/1.
	 */
	if (rqd->queue_depth == 1) {
		if (rqd->scale_step > 0)
			rqd->max_depth = 1;
		else {
			rqd->max_depth = 2;
			ret = true;
		}
	} else {
		/*
		 * scale_step == 0 is our default state. If we have suffered
		 * latency spikes, step will be > 0, and we shrink the
		 * allowed write depths. If step is < 0, we're only doing
		 * writes, and we allow a temporarily higher depth to
		 * increase performance.
		 */
		depth = min_t(unsigned int, rqd->default_depth,
			      rqd->queue_depth);
		if (rqd->scale_step > 0)
			depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
		else if (rqd->scale_step < 0) {
			unsigned int maxd = 3 * rqd->queue_depth / 4;

			depth = 1 + ((depth - 1) << -rqd->scale_step);
			if (depth > maxd) {
				depth = maxd;
				ret = true;
			}
		}

		rqd->max_depth = depth;
	}

	return ret;
}
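
/*
 * Worked example (illustrative numbers): with default_depth = 64 and
 * queue_depth = 128, the base depth is min(64, 128) = 64. Scaling down
 * with scale_step = 1 gives 1 + (63 >> 1) = 32, and scale_step = 2 gives
 * 1 + (63 >> 2) = 16. Scaling up with scale_step = -1 gives
 * 1 + (63 << 1) = 127, which is clamped to 3 * 128 / 4 = 96 and makes
 * the function return true, i.e. no further scaling up is possible.
 */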

/* Returns true on success and false if scaling up wasn't possible. */
bool rq_depth_scale_up(struct rq_depth *rqd)
{
	/*
	 * Hit max in previous round, stop here
	 */
	if (rqd->scaled_max)
		return false;

	rqd->scale_step--;

	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
	return true;
}

/*
 * Scale the queue depth down. If 'hard_throttle' is set, do it quicker,
 * since we had a latency violation. Returns true on success and false
 * if scaling down wasn't possible.
 */
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
	/*
	 * Stop scaling down when we've hit the limit. This also prevents
	 * ->scale_step from going to crazy values, if the device can't
	 * keep up.
	 */
	if (rqd->max_depth == 1)
		return false;

	if (rqd->scale_step < 0 && hard_throttle)
		rqd->scale_step = 0;
	else
		rqd->scale_step++;

	rqd->scaled_max = false;
	rq_depth_calc_max_depth(rqd);
	return true;
}
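
/*
 * Caller sketch (not from this file): a latency-tracking policy would
 * typically drive the two helpers above from its timer or completion
 * path, roughly:
 *
 *	if (latency_exceeded(stats))
 *		rq_depth_scale_down(&rqd, true);
 *	else if (latency_is_good(stats))
 *		rq_depth_scale_up(&rqd);
 *
 * latency_exceeded()/latency_is_good() are placeholders; wbt's timer
 * callback implements the real heuristic.
 */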

/*
 * Tear down every policy attached to the queue, unlinking each one
 * before calling its exit hook.
 */
void rq_qos_exit(struct request_queue *q)
{
	while (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		q->rq_qos = rqos->next;
		rqos->ops->exit(rqos);
	}
}
  167. }