blk-stat.c

/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */

#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"
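
/*
 * Per-queue stats state: the RCU-protected list of registered callbacks,
 * the lock serializing updates to that list, and a flag that keeps stats
 * accounting enabled even when no callbacks are registered.
 */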
struct blk_queue_stats {
        struct list_head callbacks;
        spinlock_t lock;
        bool enable_accounting;
};
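
/* Reset a stat window; min is primed to -1ULL so the first sample wins. */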
void blk_rq_stat_init(struct blk_rq_stat *stat)
{
        stat->min = -1ULL;
        stat->max = stat->nr_samples = stat->mean = 0;
        stat->batch = 0;
}

/* src is a per-cpu stat, mean isn't initialized */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
        if (!src->nr_samples)
                return;

        dst->min = min(dst->min, src->min);
        dst->max = max(dst->max, src->max);

        dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
                            dst->nr_samples + src->nr_samples);

        dst->nr_samples += src->nr_samples;
}
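
/*
 * Record one sample: min/max are updated immediately, while the value is
 * accumulated in ->batch so the mean can be computed once per window in
 * blk_rq_stat_sum().
 */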
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
        stat->min = min(stat->min, value);
        stat->max = max(stat->max, value);
        stat->batch += value;
        stat->nr_samples++;
}
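
/*
 * Called on request completion: compute the device I/O time and feed it to
 * every active callback's per-cpu bucket for this request.
 */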
void blk_stat_add(struct request *rq, u64 now)
{
        struct request_queue *q = rq->q;
        struct blk_stat_callback *cb;
        struct blk_rq_stat *stat;
        int bucket;
        u64 value;

        value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

        blk_throtl_stat_add(rq, value);

        rcu_read_lock();
        list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
                if (!blk_stat_is_active(cb))
                        continue;

                bucket = cb->bucket_fn(rq);
                if (bucket < 0)
                        continue;

                stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
                blk_rq_stat_add(stat, value);
                put_cpu_ptr(cb->cpu_stat);
        }
        rcu_read_unlock();
}
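
/*
 * Timer handler: fold the per-cpu buckets into cb->stat, reset the per-cpu
 * copies for the next window, and hand the aggregated results to the
 * owner's timer_fn.
 */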
static void blk_stat_timer_fn(struct timer_list *t)
{
        struct blk_stat_callback *cb = from_timer(cb, t, timer);
        unsigned int bucket;
        int cpu;

        for (bucket = 0; bucket < cb->buckets; bucket++)
                blk_rq_stat_init(&cb->stat[bucket]);

        for_each_online_cpu(cpu) {
                struct blk_rq_stat *cpu_stat;

                cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
                for (bucket = 0; bucket < cb->buckets; bucket++) {
                        blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
                        blk_rq_stat_init(&cpu_stat[bucket]);
                }
        }

        cb->timer_fn(cb);
}
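
/*
 * Allocate a callback with @buckets aggregate buckets and a matching per-cpu
 * array. bucket_fn maps a request to a bucket index (negative means skip);
 * timer_fn is invoked with the aggregated stats when the timer fires.
 */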
struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
                        int (*bucket_fn)(const struct request *),
                        unsigned int buckets, void *data)
{
        struct blk_stat_callback *cb;

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (!cb)
                return NULL;

        cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
                                 GFP_KERNEL);
        if (!cb->stat) {
                kfree(cb);
                return NULL;
        }
        cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
                                      __alignof__(struct blk_rq_stat));
        if (!cb->cpu_stat) {
                kfree(cb->stat);
                kfree(cb);
                return NULL;
        }

        cb->timer_fn = timer_fn;
        cb->bucket_fn = bucket_fn;
        cb->data = data;
        cb->buckets = buckets;
        timer_setup(&cb->timer, blk_stat_timer_fn, 0);

        return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);
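
/*
 * Register a callback on a queue: reset its per-cpu buckets, publish it on
 * the RCU-protected list, and enable stats accounting for the queue.
 */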
void blk_stat_add_callback(struct request_queue *q,
                           struct blk_stat_callback *cb)
{
        unsigned int bucket;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct blk_rq_stat *cpu_stat;

                cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
                for (bucket = 0; bucket < cb->buckets; bucket++)
                        blk_rq_stat_init(&cpu_stat[bucket]);
        }

        spin_lock(&q->stats->lock);
        list_add_tail_rcu(&cb->list, &q->stats->callbacks);
        blk_queue_flag_set(QUEUE_FLAG_STATS, q);
        spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);
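
/*
 * Unregister a callback and, if it was the last one and accounting wasn't
 * forced on, clear QUEUE_FLAG_STATS. del_timer_sync() guarantees the timer
 * handler is no longer running when this returns.
 */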
void blk_stat_remove_callback(struct request_queue *q,
                              struct blk_stat_callback *cb)
{
        spin_lock(&q->stats->lock);
        list_del_rcu(&cb->list);
        if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
                blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
        spin_unlock(&q->stats->lock);

        del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);
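
/* RCU callback doing the actual freeing once all readers are done. */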
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
        struct blk_stat_callback *cb;

        cb = container_of(head, struct blk_stat_callback, rcu);
        free_percpu(cb->cpu_stat);
        kfree(cb->stat);
        kfree(cb);
}
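
/*
 * Free a callback. Deferred via call_rcu() because blk_stat_add() may still
 * be traversing the callback list under rcu_read_lock().
 */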
void blk_stat_free_callback(struct blk_stat_callback *cb)
{
        if (cb)
                call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);
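
/*
 * Force stats accounting on for this queue, independent of any registered
 * callbacks; the flag then survives callback removal.
 */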
void blk_stat_enable_accounting(struct request_queue *q)
{
        spin_lock(&q->stats->lock);
        q->stats->enable_accounting = true;
        blk_queue_flag_set(QUEUE_FLAG_STATS, q);
        spin_unlock(&q->stats->lock);
}
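
/* Allocate and initialize the per-queue stats state. */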
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
        struct blk_queue_stats *stats;

        stats = kmalloc(sizeof(*stats), GFP_KERNEL);
        if (!stats)
                return NULL;

        INIT_LIST_HEAD(&stats->callbacks);
        spin_lock_init(&stats->lock);
        stats->enable_accounting = false;

        return stats;
}
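
/*
 * Free the per-queue stats state. All callbacks must have been removed by
 * this point, hence the WARN_ON.
 */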
void blk_free_queue_stats(struct blk_queue_stats *stats)
{
        if (!stats)
                return;

        WARN_ON(!list_empty(&stats->callbacks));

        kfree(stats);
}