/*
 * percpu_counter.c - Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return true;
	default:
		return false;
	}
}

static struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;

		/*
		 * The local delta has reached the batch threshold: fold it
		 * into the global count under the lock and zero the per-cpu
		 * counter.
		 */
		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		/* Fast path: a purely cpu-local update, no locking. */
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(__percpu_counter_add);
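
/*
 * Illustrative sketch: callers normally use the percpu_counter_add()
 * wrapper from <linux/percpu_counter.h>, which passes the global
 * percpu_counter_batch as @batch.  The "count_event" helper and its
 * counter argument are hypothetical.
 */
static void __maybe_unused count_event(struct percpu_counter *events)
{
	/* Cheap per-cpu increment; folded into events->count per batch. */
	percpu_counter_add(events, 1);
}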

/*
 * Add up all the per-cpu counts, return the result.  This is a more
 * accurate but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
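
/*
 * Illustrative sketch of the two read paths; the helper names are
 * hypothetical.  percpu_counter_read() just returns fbc->count and may
 * be off by up to batch * num_online_cpus(), while percpu_counter_sum()
 * (the header wrapper around __percpu_counter_sum()) takes fbc->lock
 * and folds in every online cpu's delta.
 */
static s64 __maybe_unused events_estimate(struct percpu_counter *events)
{
	return percpu_counter_read(events);	/* approximate, lock-free */
}

static s64 __maybe_unused events_exact(struct percpu_counter *events)
{
	return percpu_counter_sum(events);	/* precise, takes the lock */
}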

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
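
/*
 * Illustrative lifecycle sketch; the function and counter names are
 * hypothetical.  percpu_counter_init() is the header wrapper that
 * supplies a lockdep class key to __percpu_counter_init().
 */
static int __maybe_unused example_counter_lifecycle(void)
{
	struct percpu_counter events;
	int err;

	err = percpu_counter_init(&events, 0, GFP_KERNEL);
	if (err)
		return err;		/* -ENOMEM: per-cpu alloc failed */

	percpu_counter_add(&events, 1);
	percpu_counter_destroy(&events);
	return 0;
}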

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static void compute_batch_value(void)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr * 2);
}
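
/*
 * Worked example of the sizing above: with 64 CPUs online the batch is
 * max(32, 64 * 2) = 128, so each cpu may accumulate a local delta in
 * (-128, 128) and percpu_counter_read() can deviate from the precise
 * sum by up to roughly 128 * 64 = 8192.
 */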

static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
					  unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu;
	struct percpu_counter *fbc;

	compute_batch_value();
	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	/*
	 * A cpu has gone away: fold its residual delta from every
	 * registered counter back into the global count.
	 */
	cpu = (unsigned long)hcpu;
	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		raw_spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return NOTIFY_OK;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less.
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	s64 count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);
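
/*
 * Illustrative sketch, assuming a hypothetical free-space counter:
 * percpu_counter_compare() (the header wrapper that passes the default
 * percpu_counter_batch) lets callers avoid the expensive precise sum
 * whenever the rough count is already decisive.
 */
static bool __maybe_unused has_room(struct percpu_counter *free_blocks,
				    s64 want)
{
	return percpu_counter_compare(free_blocks, want) >= 0;
}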

static int __init percpu_counter_startup(void)
{
	compute_batch_value();
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);