vm_pagequeue.h

/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef	_VM_PAGEQUEUE_
#define	_VM_PAGEQUEUE_

#ifdef _KERNEL
struct vm_pagequeue {
	struct mtx	pq_mutex;
	struct pglist	pq_pl;
	int		pq_cnt;
	const char	* const pq_name;
	uint64_t	pq_pdpages;
} __aligned(CACHE_LINE_SIZE);

#if __SIZEOF_LONG__ == 8
#define	VM_BATCHQUEUE_SIZE	63
#else
#define	VM_BATCHQUEUE_SIZE	15
#endif

struct vm_batchqueue {
	vm_page_t	bq_pa[VM_BATCHQUEUE_SIZE];
	int		bq_cnt;
} __aligned(CACHE_LINE_SIZE);

#include <vm/uma.h>
#include <sys/_blockcount.h>
#include <sys/pidctrl.h>

struct sysctl_oid;

/*
 * One vm_domain per NUMA domain.  Contains pagequeues, free page structures,
 * and accounting.
 *
 * Lock Key:
 * f	vmd_free_mtx
 * p	vmd_pageout_mtx
 * d	vm_domainset_lock
 * a	atomic
 * c	const after boot
 * q	page queue lock
 *
 * A unique page daemon thread manages each vm_domain structure and is
 * responsible for ensuring that some free memory is available by freeing
 * inactive pages and aging active pages.  To decide how many pages to
 * process, it uses thresholds derived from the number of pages in the domain:
 *
 *  vmd_page_count
 *       ---
 *        |
 *        |-> vmd_inactive_target (~3%)
 *        |   - The active queue scan target is given by
 *        |     (vmd_inactive_target + vmd_free_target - vmd_free_count).
 *        |
 *        |
 *        |-> vmd_free_target (~2%)
 *        |   - Target for page reclamation.
 *        |
 *        |-> vmd_pageout_wakeup_thresh (~1.8%)
 *        |   - Threshold for waking up the page daemon.
 *        |
 *        |
 *        |-> vmd_free_min (~0.5%)
 *        |   - First low memory threshold.
 *        |   - Causes per-CPU caching to be lazily disabled in UMA.
 *        |   - vm_wait() sleeps below this threshold.
 *        |
 *        |-> vmd_free_severe (~0.25%)
 *        |   - Second low memory threshold.
 *        |   - Triggers aggressive UMA reclamation, disables delayed buffer
 *        |     writes.
 *        |
 *        |-> vmd_free_reserved (~0.13%)
 *        |   - Minimum for VM_ALLOC_NORMAL page allocations.
 *        |-> vmd_pageout_free_min (32 + 2 pages)
 *        |   - Minimum for waking a page daemon thread sleeping in vm_wait().
 *        |-> vmd_interrupt_free_min (2 pages)
 *        |   - Minimum for VM_ALLOC_SYSTEM page allocations.
 *       ---
 *
 *--
 * Free page count regulation:
 *
 * The page daemon attempts to ensure that the free page count is above the
 * free target.  It wakes up periodically (every 100ms) to input the current
 * free page shortage (free_target - free_count) to a PID controller, which in
 * response outputs the number of pages to attempt to reclaim.  The shortage's
 * current magnitude, rate of change, and cumulative value are together used
 * to determine the controller's output.  The page daemon target thus adapts
 * dynamically to the system's demand for free pages, resulting in less
 * burstiness than a simple hysteresis loop.
 *
 * When the free page count drops below the wakeup threshold,
 * vm_domain_allocate() proactively wakes up the page daemon.  This helps
 * ensure that the system responds promptly to a large instantaneous free page
 * shortage.
 *
 * The page daemon also attempts to ensure that some fraction of the system's
 * memory is present in the inactive (I) and laundry (L) page queues, so that
 * it can respond promptly to a sudden free page shortage.  In particular, the
 * page daemon thread aggressively scans active pages so long as the following
 * condition holds:
 *
 *	len(I) + len(L) + free_target - free_count < inactive_target
 *
 * Otherwise, when the inactive target is met, the page daemon periodically
 * scans a small portion of the active queue in order to maintain up-to-date
 * per-page access history.  Unreferenced pages in the active queue thus
 * eventually migrate to the inactive queue.
 *
 * The per-domain laundry thread periodically launders dirty pages based on
 * the number of clean pages freed by the page daemon since the last
 * laundering.  If the page daemon fails to meet its scan target (i.e., the
 * PID controller output) because of a shortage of clean inactive pages, the
 * laundry thread attempts to launder enough pages to meet the free page
 * target.
 *
 *--
 * Page allocation priorities:
 *
 * The system defines three page allocation priorities: VM_ALLOC_NORMAL,
 * VM_ALLOC_SYSTEM and VM_ALLOC_INTERRUPT.  An interrupt-priority allocation
 * can claim any free page.  This priority is used in the pmap layer when
 * attempting to allocate a page for the kernel page tables; in such cases an
 * allocation failure will usually result in a kernel panic.  The system
 * priority is used for most other kernel memory allocations, for instance by
 * UMA's slab allocator or the buffer cache.  Such allocations will fail if
 * the free count is below interrupt_free_min.  All other allocations occur at
 * the normal priority, which is typically used for allocation of user pages,
 * for instance in the page fault handler or when allocating page table pages
 * or pv_entry structures for user pmaps.  Such allocations fail if the free
 * count is below the free_reserved threshold.
 *
 *--
 * Free memory shortages:
 *
 * The system uses the free_min and free_severe thresholds to apply
 * back-pressure and give the page daemon a chance to recover.  When a page
 * allocation fails due to a shortage and the allocating thread cannot handle
 * failure, it may call vm_wait() to sleep until free pages are available.
 * vm_domain_freecnt_inc() wakes sleeping threads once the free page count
 * rises above the free_min threshold; the page daemon and laundry threads are
 * given priority and will wake up once free_count reaches the (much smaller)
 * pageout_free_min threshold.
 *
 * On NUMA systems, the domainset iterators always prefer NUMA domains where
 * the free page count is above the free_min threshold.  This means that given
 * the choice between two NUMA domains, one above the free_min threshold and
 * one below, the former will be used to satisfy the allocation request
 * regardless of the domain selection policy.
 *
 * In addition to reclaiming memory from the page queues, the vm_lowmem event
 * fires every ten seconds so long as the system is under memory pressure
 * (i.e., vmd_free_count < vmd_free_target).  This allows kernel subsystems to
 * register for notifications of free page shortages, upon which they may
 * shrink their caches.  Following a vm_lowmem event, UMA's caches are pruned
 * to ensure that they do not contain an excess of unused memory.  When a
 * domain is below the free_min threshold, UMA limits the population of
 * per-CPU caches.  When a domain falls below the free_severe threshold, UMA's
 * caches are completely drained.
 *
 * If the system encounters a global memory shortage, it may resort to the
 * out-of-memory (OOM) killer, which selects a process and delivers SIGKILL in
 * a last-ditch attempt to free up some pages.  Either of the two following
 * conditions will activate the OOM killer:
 *
 *  1. The page daemons collectively fail to reclaim any pages during their
 *     inactive queue scans.  After vm_pageout_oom_seq consecutive scans fail,
 *     the page daemon thread votes for an OOM kill, and an OOM kill is
 *     triggered when all page daemons have voted.  This heuristic is strict
 *     and may fail to trigger even when the system is effectively deadlocked.
 *
 *  2. Threads in the user fault handler are repeatedly unable to make
 *     progress while allocating a page to satisfy the fault.  After
 *     vm_pfault_oom_attempts page allocation failures with intervening
 *     vm_wait() calls, the faulting thread will trigger an OOM kill.
 */
struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	struct mtx_padalign vmd_free_mtx;
	struct mtx_padalign vmd_pageout_mtx;
	struct vm_pgcache {
		int domain;
		int pool;
		uma_zone_t zone;
	} vmd_pgcache[VM_NFREEPOOL];
	struct vmem *vmd_kernel_arena;	/* (c) per-domain kva R/W arena. */
	struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
	u_int vmd_domain;		/* (c) Domain number. */
	u_int vmd_page_count;		/* (c) Total page count. */
	long vmd_segs;			/* (c) bitmask of the segments */
	u_int __aligned(CACHE_LINE_SIZE) vmd_free_count; /* (a,f) free page count */
	u_int vmd_pageout_deficit;	/* (a) Estimated number of pages deficit */
	uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];

	/* Paging control variables, used within single threaded page daemon. */
	struct pidctrl vmd_pid;		/* Pageout controller. */
	boolean_t vmd_oom;
	u_int vmd_inactive_threads;
	u_int vmd_inactive_shortage;	/* Per-thread shortage. */
	blockcount_t vmd_inactive_running; /* Number of inactive threads. */
	blockcount_t vmd_inactive_starting; /* Number of threads started. */
	volatile u_int vmd_addl_shortage; /* Shortage accumulator. */
	volatile u_int vmd_inactive_freed; /* Successful inactive frees. */
	volatile u_int vmd_inactive_us;	/* Microseconds for above. */
	u_int vmd_inactive_pps;		/* Exponential decay frees/second. */
	int vmd_oom_seq;
	int vmd_last_active_scan;
	struct vm_page vmd_markers[PQ_COUNT]; /* (q) markers for queue scans */
	struct vm_page vmd_inacthead;	/* marker for LRU-defeating insertions */
	struct vm_page vmd_clock[2];	/* markers for active queue scan */

	int vmd_pageout_wanted;		/* (a, p) pageout daemon wait channel */
	int vmd_pageout_pages_needed;	/* (d) page daemon waiting for pages? */
	bool vmd_minset;		/* (d) Are we in vm_min_domains? */
	bool vmd_severeset;		/* (d) Are we in vm_severe_domains? */
	enum {
		VM_LAUNDRY_IDLE = 0,
		VM_LAUNDRY_BACKGROUND,
		VM_LAUNDRY_SHORTFALL
	} vmd_laundry_request;

	/* Paging thresholds and targets. */
	u_int vmd_clean_pages_freed;	/* (q) accumulator for laundry thread */
	u_int vmd_background_launder_target; /* (c) */
	u_int vmd_free_reserved;	/* (c) pages reserved for deadlock */
	u_int vmd_free_target;		/* (c) pages desired free */
	u_int vmd_free_min;		/* (c) pages desired free */
	u_int vmd_inactive_target;	/* (c) pages desired inactive */
	u_int vmd_pageout_free_min;	/* (c) min pages reserved for kernel */
	u_int vmd_pageout_wakeup_thresh;/* (c) min pages to wake pagedaemon */
	u_int vmd_interrupt_free_min;	/* (c) reserved pages for int code */
	u_int vmd_free_severe;		/* (c) severe page depletion point */

	/* Name for sysctl etc. */
	struct sysctl_oid *vmd_oid;
	char vmd_name[sizeof(__XSTRING(MAXMEMDOM))];
} __aligned(CACHE_LINE_SIZE);
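
/*
 * Illustrative sketch, not part of the original header: the active queue scan
 * target described in the threshold diagram above is derived from the
 * inactive target, the free target and the current free count.  The helper
 * name vm_active_scan_target_sketch() is hypothetical and uses only the
 * vm_domain fields declared above.
 */
static inline int
vm_active_scan_target_sketch(struct vm_domain *vmd)
{

	/* (vmd_inactive_target + vmd_free_target - vmd_free_count) */
	return (vmd->vmd_inactive_target + vmd->vmd_free_target -
	    vmd->vmd_free_count);
}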

extern struct vm_domain vm_dom[MAXMEMDOM];

#define	VM_DOMAIN(n)		(&vm_dom[(n)])
#define	VM_DOMAIN_EMPTY(n)	(vm_dom[(n)].vmd_page_count == 0)

#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
#define	vm_pagequeue_trylock(pq)	mtx_trylock(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)

#define	vm_domain_free_assert_locked(n)					\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_OWNED)
#define	vm_domain_free_assert_unlocked(n)				\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_free_lock(d)						\
	    mtx_lock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_lockptr(d)					\
	    (&(d)->vmd_free_mtx)
#define	vm_domain_free_trylock(d)					\
	    mtx_trylock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_unlock(d)					\
	    mtx_unlock(vm_domain_free_lockptr((d)))

#define	vm_domain_pageout_lockptr(d)					\
	    (&(d)->vmd_pageout_mtx)
#define	vm_domain_pageout_assert_locked(n)				\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_OWNED)
#define	vm_domain_pageout_assert_unlocked(n)				\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_pageout_lock(d)					\
	    mtx_lock(vm_domain_pageout_lockptr((d)))
#define	vm_domain_pageout_unlock(d)					\
	    mtx_unlock(vm_domain_pageout_lockptr((d)))

static __inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

	vm_pagequeue_assert_locked(pq);
	pq->pq_cnt += addend;
}
#define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)

static inline void
vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
{

	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_dec(pq);
}
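
/*
 * Illustrative sketch, not part of the original header: callers must hold the
 * page queue lock across removal, as enforced by the assertion in
 * vm_pagequeue_cnt_add().  The helper name
 * vm_pagequeue_remove_locked_sketch() is hypothetical.
 */
static inline void
vm_pagequeue_remove_locked_sketch(struct vm_pagequeue *pq, vm_page_t m)
{

	vm_pagequeue_lock(pq);
	vm_pagequeue_remove(pq, m);
	vm_pagequeue_unlock(pq);
}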

static inline void
vm_batchqueue_init(struct vm_batchqueue *bq)
{

	bq->bq_cnt = 0;
}

static inline bool
vm_batchqueue_empty(const struct vm_batchqueue *bq)
{

	return (bq->bq_cnt == 0);
}

static inline int
vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
{
	int slots_free;

	slots_free = nitems(bq->bq_pa) - bq->bq_cnt;
	if (slots_free > 0) {
		bq->bq_pa[bq->bq_cnt++] = m;
		return (slots_free);
	}
	return (slots_free);
}

static inline vm_page_t
vm_batchqueue_pop(struct vm_batchqueue *bq)
{

	if (bq->bq_cnt == 0)
		return (NULL);
	return (bq->bq_pa[--bq->bq_cnt]);
}
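
/*
 * Illustrative sketch, not part of the original header: a typical consumer
 * stages pages in a batch queue and drains it in one pass when
 * vm_batchqueue_insert() returns 0, meaning the queue was already full and
 * the page was not inserted.  The names bq_stage_page_sketch() and
 * process_page are hypothetical.
 */
static inline void
bq_stage_page_sketch(struct vm_batchqueue *bq, vm_page_t m,
    void (*process_page)(vm_page_t))
{
	vm_page_t bm;

	if (vm_batchqueue_insert(bq, m) == 0) {
		/* Drain the full queue, then retry the insertion. */
		while ((bm = vm_batchqueue_pop(bq)) != NULL)
			process_page(bm);
		(void)vm_batchqueue_insert(bq, m);
	}
}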

void vm_domain_set(struct vm_domain *vmd);
void vm_domain_clear(struct vm_domain *vmd);
int vm_domain_allocate(struct vm_domain *vmd, int req, int npages);
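
/*
 * Illustrative sketch, not part of the original header: the per-priority free
 * page floors described in the "Page allocation priorities" comment above.
 * The VM_ALLOC_* request classes come from vm/vm_page.h; the helper name
 * vm_alloc_floor_sketch() is hypothetical and only restates the documented
 * thresholds rather than the exact logic of vm_domain_allocate().
 */
static inline u_int
vm_alloc_floor_sketch(struct vm_domain *vmd, int req_class)
{

	switch (req_class) {
	case VM_ALLOC_INTERRUPT:	/* May claim any free page. */
		return (0);
	case VM_ALLOC_SYSTEM:		/* Fails below interrupt_free_min. */
		return (vmd->vmd_interrupt_free_min);
	default:			/* VM_ALLOC_NORMAL: fails below free_reserved. */
		return (vmd->vmd_free_reserved);
	}
}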

/*
 * vm_pagequeue_domain:
 *
 *	Return the memory domain the page belongs to.
 */
static inline struct vm_domain *
vm_pagequeue_domain(vm_page_t m)
{

	return (VM_DOMAIN(vm_page_domain(m)));
}

/*
 * Return the number of pages we need to free-up or cache.
 * A positive number indicates that we do not have enough free pages.
 */
static inline int
vm_paging_target(struct vm_domain *vmd)
{

	return (vmd->vmd_free_target - vmd->vmd_free_count);
}

/*
 * Returns TRUE if the pagedaemon needs to be woken up.
 */
static inline int
vm_paging_needed(struct vm_domain *vmd, u_int free_count)
{

	return (free_count < vmd->vmd_pageout_wakeup_thresh);
}

/*
 * Returns TRUE if the domain is below the min paging target.
 */
static inline int
vm_paging_min(struct vm_domain *vmd)
{

	return (vmd->vmd_free_min > vmd->vmd_free_count);
}

/*
 * Returns TRUE if the domain is below the severe paging target.
 */
static inline int
vm_paging_severe(struct vm_domain *vmd)
{

	return (vmd->vmd_free_severe > vmd->vmd_free_count);
}

/*
 * Return the number of pages we need to launder.
 * A positive number indicates that we have a shortfall of clean pages.
 */
static inline int
vm_laundry_target(struct vm_domain *vmd)
{

	return (vm_paging_target(vmd));
}

void pagedaemon_wakeup(int domain);

static inline void
vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
{
	u_int old, new;

	old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
	new = old + adj;

	/*
	 * Only update bitsets on transitions.  Notice we short-circuit the
	 * rest of the checks if we're above min already.
	 */
	if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min ||
	    (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) ||
	    (old < vmd->vmd_pageout_free_min &&
	    new >= vmd->vmd_pageout_free_min)))
		vm_domain_clear(vmd);
}
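
/*
 * Illustrative sketch, not part of the original header: as described in the
 * "Free page count regulation" comment above, the allocation path wakes the
 * page daemon proactively once the free count drops below the wakeup
 * threshold.  The helper name vm_domain_wakeup_if_needed_sketch() is
 * hypothetical; the real check lives in vm_domain_allocate().
 */
static inline void
vm_domain_wakeup_if_needed_sketch(struct vm_domain *vmd, u_int free_count)
{

	if (vm_paging_needed(vmd, free_count))
		pagedaemon_wakeup(vmd->vmd_domain);
}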

#endif	/* _KERNEL */
#endif	/* !_VM_PAGEQUEUE_ */