vmstat.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT   1
#define DISABLE_NUMA_STAT  0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
extern int sysctl_vm_numa_stat_handler(struct ctl_table *table,
		int write, void __user *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_activate[2];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
};
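/*
 * Sketch of intended use (based on how mm/vmscan.c consumes this struct;
 * the function and flag names below live outside this header): the
 * reclaim path fills a reclaim_stat while processing a batch of pages,
 * then acts on the tallies, e.g.
 *
 *	struct reclaim_stat stat = {};
 *
 *	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc,
 *					TTU_IGNORE_ACCESS, &stat, false);
 *	if (stat.nr_writeback && stat.nr_writeback == nr_taken)
 *		set_bit(PGDAT_WRITEBACK, &pgdat->flags);
 */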
#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);
extern void vm_events_fold_cpu(int cpu);
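/*
 * Usage sketch (illustrative only, not part of this API): from any
 * context the this_cpu-based helpers are safe; where preemption is
 * already disabled, the cheaper raw_cpu-based helpers suffice, since
 * these counters tolerate an occasional lost increment anyway.
 * PGREFILL is a real event item; "order" is a placeholder:
 *
 *	count_vm_event(PGREFILL);			// any context
 *
 *	preempt_disable();
 *	__count_vm_events(PGREFILL, 1 << order);	// non-preemptible
 *	preempt_enable();
 */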
#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif
#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
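/*
 * Worked example (assumes the per-zone event layout from
 * vm_event_item.h, where FOR_ALL_ZONES() defines e.g. PGALLOC_DMA,
 * PGALLOC_NORMAL, ... in zone order):
 *
 *	__count_zid_vm_events(PGALLOC, zone_idx(zone), 1 << order);
 *
 * expands to __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zid, ...),
 * i.e. the zone index offsets from the _NORMAL item to pick the right
 * per-zone counter. This only works because the item enum mirrors the
 * zone enum order.
 */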
/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */
static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
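/*
 * Illustrative comparison (sketch): zone_page_state() reads only the
 * folded atomic counter, so under SMP it can lag by up to the per-cpu
 * stat threshold times the number of online CPUs. The snapshot variant
 * trades a for_each_online_cpu() walk for that staleness, e.g.
 *
 *	nr_free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *
 * is the sort of read preferred in watermark-sensitive paths where a
 * stale value could cause a wrong reclaim decision.
 */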
#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */
#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);
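/*
 * Convention sketch (inferred from the vmstat implementation, not
 * spelled out here): the double-underscore variants use the non-irq-safe
 * per-cpu ops and are meant for callers that already run with interrupts
 * disabled or otherwise cannot race with another updater, e.g.
 *
 *	local_irq_save(flags);
 *	__inc_zone_page_state(page, NR_MLOCK);
 *	local_irq_restore(flags);
 *
 * while the plain variants, e.g. inc_zone_page_state(page, NR_MLOCK),
 * handle interrupt safety themselves and may be called directly from
 * process context.
 */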
void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, long delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
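/*
 * Example (sketch): a free path handing back CMA pages updates both
 * counters at once, e.g.
 *
 *	__mod_zone_freepage_state(zone, 1 << order, MIGRATE_CMA);
 *
 * bumps NR_FREE_PAGES and, because is_migrate_cma() matches, also
 * NR_FREE_CMA_PAGES; for any other migratetype only NR_FREE_PAGES
 * changes. (MIGRATE_CMA only exists with CONFIG_CMA; without it,
 * is_migrate_cma() is compile-time false.)
 */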
extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */