vm_page.h

  1. /*-
  2. * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
  3. *
  4. * Copyright (c) 1991, 1993
  5. * The Regents of the University of California. All rights reserved.
  6. *
  7. * This code is derived from software contributed to Berkeley by
  8. * The Mach Operating System project at Carnegie-Mellon University.
  9. *
  10. * Redistribution and use in source and binary forms, with or without
  11. * modification, are permitted provided that the following conditions
  12. * are met:
  13. * 1. Redistributions of source code must retain the above copyright
  14. * notice, this list of conditions and the following disclaimer.
  15. * 2. Redistributions in binary form must reproduce the above copyright
  16. * notice, this list of conditions and the following disclaimer in the
  17. * documentation and/or other materials provided with the distribution.
  18. * 3. Neither the name of the University nor the names of its contributors
  19. * may be used to endorse or promote products derived from this software
  20. * without specific prior written permission.
  21. *
  22. * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  23. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  24. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  25. * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  26. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  27. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  28. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  29. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  30. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  31. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  32. * SUCH DAMAGE.
  33. *
  34. *
  35. * Copyright (c) 1987, 1990 Carnegie-Mellon University.
  36. * All rights reserved.
  37. *
  38. * Authors: Avadis Tevanian, Jr., Michael Wayne Young
  39. *
  40. * Permission to use, copy, modify and distribute this software and
  41. * its documentation is hereby granted, provided that both the copyright
  42. * notice and this permission notice appear in all copies of the
  43. * software, derivative works or modified versions, and any portions
  44. * thereof, and that both notices appear in supporting documentation.
  45. *
  46. * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  47. * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
  48. * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  49. *
  50. * Carnegie Mellon requests users of this software to return to
  51. *
  52. * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
  53. * School of Computer Science
  54. * Carnegie Mellon University
  55. * Pittsburgh PA 15213-3890
  56. *
  57. * any improvements or extensions that they make and grant Carnegie the
  58. * rights to redistribute these changes.
  59. */
  60. /*
  61. * Resident memory system definitions.
  62. */
  63. #ifndef _VM_PAGE_
  64. #define _VM_PAGE_
  65. #include <vm/pmap.h>
  66. #include <vm/_vm_phys.h>
  67. /*
  68. * Management of resident (logical) pages.
  69. *
  70. * A small structure is kept for each resident
  71. * page, indexed by page number. Each structure
  72. * is an element of several collections:
  73. *
  74. * A radix tree used to quickly
  75. * perform object/offset lookups
  76. *
  77. * A list of all pages for a given object,
  78. * so they can be quickly deactivated at
  79. * time of deallocation.
  80. *
  81. * An ordered list of pages due for pageout.
  82. *
  83. * In addition, the structure contains the object
  84. * and offset to which this page belongs (for pageout),
  85. * and sundry status bits.
  86. *
  87. * In general, operations on this structure's mutable fields are
  88. * synchronized using one of, or a combination of, the locks described below. If a
  89. * field is annotated with two of these locks, then holding either is
  90. * sufficient for read access but both are required for write access.
  91. * The queue lock for a page depends on the value of its queue field and is
  92. * described in detail below.
  93. *
  94. * The following annotations are possible:
  95. * (A) the field must be accessed using atomic(9) and may require
  96. * additional synchronization.
  97. * (B) the page busy lock.
  98. * (C) the field is immutable.
  99. * (F) the per-domain lock for the free queues.
  100. * (M) Machine dependent, defined by pmap layer.
  101. * (O) the object that the page belongs to.
  102. * (Q) the page's queue lock.
  103. *
  104. * The busy lock is an embedded reader-writer lock that protects the
  105. * page's contents and identity (i.e., its <object, pindex> tuple) as
  106. * well as certain valid/dirty modifications. To avoid bloating the
  107. * page structure, the busy lock lacks some of the features available in
  108. * the kernel's general-purpose synchronization primitives. As a result,
  109. * busy lock ordering rules are not verified, lock recursion is not
  110. * detected, and an attempt to xbusy a busy page or sbusy an xbusy page
  111. * results in a panic rather than causing the thread to block.
  112. * vm_page_sleep_if_busy() can be used to sleep until the page's busy
  113. * state changes, after which the caller must re-lookup the page and
  114. * re-evaluate its state. vm_page_busy_acquire() will block until
  115. * the lock is acquired.
  116. *
  117. * The valid field is protected by the page busy lock (B) and object
  118. * lock (O). Transitions from invalid to valid are generally done
  119. * via I/O or zero filling and do not require the object lock.
  120. * These must be protected with the busy lock to prevent page-in or
  121. * creation races. Page invalidation generally happens as a result
  122. * of truncate or msync. When invalidated, pages must not be present
  123. * in pmap, and the object lock must be held to prevent concurrent
  124. * speculative read-only mappings that do not require busy. I/O
  125. * routines may check for validity without a lock if they are prepared
  126. * to handle invalidation races with higher level locks (vnode) or are
  127. * unconcerned with races so long as they hold a reference to prevent
  128. * recycling. When a valid bit is set while holding a shared busy
  129. * lock (A) atomic operations are used to protect against concurrent
  130. * modification.
  131. *
  132. * In contrast, the synchronization of accesses to the page's
  133. * dirty field is a mix of machine dependent (M) and busy (B). In
  134. * the machine-independent layer, the page busy must be held to
  135. * operate on the field. However, the pmap layer is permitted to
  136. * set all bits within the field without holding that lock. If the
  137. * underlying architecture does not support atomic read-modify-write
  138. * operations on the field's type, then the machine-independent
  139. * layer uses a 32-bit atomic on the aligned 32-bit word that
  140. * contains the dirty field. In the machine-independent layer,
  141. * the implementation of read-modify-write operations on the
  142. * field is encapsulated in vm_page_clear_dirty_mask(). An
  143. * exclusive busy lock combined with pmap_remove_{write/all}() is the
  144. * only way to ensure a page can not become dirty. I/O generally
  145. * removes the page from pmap to ensure exclusive access and atomic
  146. * writes.
  147. *
  148. * The ref_count field tracks references to the page. References that
  149. * prevent the page from being reclaimable are called wirings and are
  150. * counted in the low bits of ref_count. The containing object's
  151. * reference, if one exists, is counted using the VPRC_OBJREF bit in the
  152. * ref_count field. Additionally, the VPRC_BLOCKED bit is used to
  153. * atomically check for wirings and prevent new wirings via
  154. * pmap_extract_and_hold(). When a page belongs to an object, it may be
  155. * wired only when the object is locked, or the page is busy, or by
  156. * pmap_extract_and_hold(). As a result, if the object is locked and the
  157. * page is not busy (or is exclusively busied by the current thread), and
  158. * the page is unmapped, its wire count will not increase. The ref_count
  159. * field is updated using atomic operations in most cases, except when it
  160. * is known that no other references to the page exist, such as in the page
  161. * allocator. A page may be present in the page queues, or even actively
  162. * scanned by the page daemon, without an explicitly counted reference.
  163. * The page daemon must therefore handle the possibility of a concurrent
  164. * free of the page.
  165. *
  166. * The queue state of a page consists of the queue and act_count fields of
  167. * its atomically updated state, and the subset of atomic flags specified
  168. * by PGA_QUEUE_STATE_MASK. The queue field contains the page's page queue
  169. * index, or PQ_NONE if it does not belong to a page queue. To modify the
  170. * queue field, the page queue lock corresponding to the old value must be
  171. * held, unless that value is PQ_NONE, in which case the queue index must
  172. * be updated using an atomic RMW operation. There is one exception to
  173. * this rule: the page daemon may transition the queue field from
  174. * PQ_INACTIVE to PQ_NONE immediately prior to freeing the page during an
  175. * inactive queue scan. At that point the page is already dequeued and no
  176. * other references to that vm_page structure can exist. The PGA_ENQUEUED
  177. * flag, when set, indicates that the page structure is physically inserted
  178. * into the queue corresponding to the page's queue index, and may only be
  179. * set or cleared with the corresponding page queue lock held.
  180. *
  181. * To avoid contention on page queue locks, page queue operations (enqueue,
  182. * dequeue, requeue) are batched using fixed-size per-CPU queues. A
  183. * deferred operation is requested by setting one of the flags in
  184. * PGA_QUEUE_OP_MASK and inserting an entry into a batch queue. When a
  185. * queue is full, an attempt to insert a new entry will lock the page
  186. * queues and trigger processing of the pending entries. The
  187. * type-stability of vm_page structures is crucial to this scheme since the
  188. * processing of entries in a given batch queue may be deferred
  189. * indefinitely. In particular, a page may be freed with pending batch
  190. * queue entries. The page queue operation flags must be set using atomic
  191. * RMW operations.
  192. */
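/*
 * Illustrative sketch, not part of the original header: a typical pattern
 * for modifying a page's contents under the busy lock.  The object lock
 * macros come from vm/vm_object.h, "obj" and "pindex" are placeholder
 * names, and error handling is omitted.
 *
 *	vm_page_t m;
 *
 *	VM_OBJECT_WLOCK(obj);
 *	m = vm_page_lookup(obj, pindex);
 *	if (m != NULL && vm_page_tryxbusy(m) != 0) {
 *		(modify the page: its identity and contents are now stable)
 *		vm_page_dirty(m);
 *		vm_page_xunbusy(m);
 *	}
 *	VM_OBJECT_WUNLOCK(obj);
 */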
  193. #if PAGE_SIZE == 4096
  194. #define VM_PAGE_BITS_ALL 0xffu
  195. typedef uint8_t vm_page_bits_t;
  196. #elif PAGE_SIZE == 8192
  197. #define VM_PAGE_BITS_ALL 0xffffu
  198. typedef uint16_t vm_page_bits_t;
  199. #elif PAGE_SIZE == 16384
  200. #define VM_PAGE_BITS_ALL 0xffffffffu
  201. typedef uint32_t vm_page_bits_t;
  202. #elif PAGE_SIZE == 32768
  203. #define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
  204. typedef uint64_t vm_page_bits_t;
  205. #endif
  206. typedef union vm_page_astate {
  207. struct {
  208. uint16_t flags;
  209. uint8_t queue;
  210. uint8_t act_count;
  211. };
  212. uint32_t _bits;
  213. } vm_page_astate_t;
  214. struct vm_page {
  215. union {
  216. TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
  217. struct {
  218. SLIST_ENTRY(vm_page) ss; /* private slists */
  219. } s;
  220. struct {
  221. u_long p;
  222. u_long v;
  223. } memguard;
  224. struct {
  225. void *slab;
  226. void *zone;
  227. } uma;
  228. } plinks;
  229. TAILQ_ENTRY(vm_page) listq; /* pages in same object (O) */
  230. vm_object_t object; /* which object am I in (O) */
  231. vm_pindex_t pindex; /* offset into object (O,P) */
  232. vm_paddr_t phys_addr; /* physical address of page (C) */
  233. struct md_page md; /* machine dependent stuff */
  234. u_int ref_count; /* page references (A) */
  235. u_int busy_lock; /* busy owners lock (A) */
  236. union vm_page_astate a; /* state accessed atomically (A) */
  237. uint8_t order; /* index of the buddy queue (F) */
  238. uint8_t pool; /* vm_phys freepool index (F) */
  239. uint8_t flags; /* page PG_* flags (P) */
  240. uint8_t oflags; /* page VPO_* flags (O) */
  241. int8_t psind; /* pagesizes[] index (O) */
  242. int8_t segind; /* vm_phys segment index (C) */
  243. /* NOTE that these must support one bit per DEV_BSIZE in a page */
  244. /* so, on normal X86 kernels, they must be at least 8 bits wide */
  245. vm_page_bits_t valid; /* valid DEV_BSIZE chunk map (O,B) */
  246. vm_page_bits_t dirty; /* dirty DEV_BSIZE chunk map (M,B) */
  247. };
  248. /*
  249. * Special bits used in the ref_count field.
  250. *
  251. * ref_count is normally used to count wirings that prevent the page from being
  252. * reclaimed, but also supports several special types of references that do not
  253. * prevent reclamation. Accesses to the ref_count field must be atomic unless
  254. * the page is unallocated.
  255. *
  256. * VPRC_OBJREF is the reference held by the containing object. It can be set or
  257. * cleared only when the corresponding object's write lock is held.
  258. *
  259. * VPRC_BLOCKED is used to atomically block wirings via pmap lookups while
  260. * attempting to tear down all mappings of a given page. The page busy lock and
  261. * object write lock must both be held in order to set or clear this bit.
  262. */
  263. #define VPRC_BLOCKED 0x40000000u /* mappings are being removed */
  264. #define VPRC_OBJREF 0x80000000u /* object reference, cleared with (O) */
  265. #define VPRC_WIRE_COUNT(c) ((c) & ~(VPRC_BLOCKED | VPRC_OBJREF))
  266. #define VPRC_WIRE_COUNT_MAX (~(VPRC_BLOCKED | VPRC_OBJREF))
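/*
 * Illustrative sketch, not part of the original header: decomposing a
 * ref_count snapshot with the macros above.  "m" is a placeholder page
 * pointer; such a snapshot is inherently racy unless other synchronization
 * (object lock, busy lock) pins the state.
 *
 *	u_int ref;
 *
 *	ref = atomic_load_int(&m->ref_count);
 *	if (VPRC_WIRE_COUNT(ref) > 0)
 *		(at least one wiring prevents reclamation)
 *	if ((ref & VPRC_OBJREF) != 0)
 *		(the containing object still holds its reference)
 */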
  267. /*
  268. * Page flags stored in oflags:
  269. *
  270. * Access to these page flags is synchronized by the lock on the object
  271. * containing the page (O).
  272. *
  273. * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
  274. * indicates that the page is not under PV management but
  275. * otherwise should be treated as a normal page. Pages not
  276. * under PV management cannot be paged out via the
  277. * object/vm_page_t because there is no knowledge of their pte
  278. * mappings, and such pages are also not on any PQ queue.
  279. *
  280. */
  281. #define VPO_KMEM_EXEC 0x01 /* kmem mapping allows execution */
  282. #define VPO_SWAPSLEEP 0x02 /* waiting for swap to finish */
  283. #define VPO_UNMANAGED 0x04 /* no PV management for page */
  284. #define VPO_SWAPINPROG 0x08 /* swap I/O in progress on page */
  285. /*
  286. * Busy page implementation details.
  287. * The algorithm is taken mostly from the rwlock(9) and sx(9) lock
  288. * implementations, although support for owner identity is removed because
  289. * of size constraints. Checks on lock recursion are therefore not possible,
  290. * and the effectiveness of the lock assertions is somewhat reduced.
  291. */
  292. #define VPB_BIT_SHARED 0x01
  293. #define VPB_BIT_EXCLUSIVE 0x02
  294. #define VPB_BIT_WAITERS 0x04
  295. #define VPB_BIT_FLAGMASK \
  296. (VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)
  297. #define VPB_SHARERS_SHIFT 3
  298. #define VPB_SHARERS(x) \
  299. (((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
  300. #define VPB_SHARERS_WORD(x) ((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
  301. #define VPB_ONE_SHARER (1 << VPB_SHARERS_SHIFT)
  302. #define VPB_SINGLE_EXCLUSIVE VPB_BIT_EXCLUSIVE
  303. #ifdef INVARIANTS
  304. #define VPB_CURTHREAD_EXCLUSIVE \
  305. (VPB_BIT_EXCLUSIVE | ((u_int)(uintptr_t)curthread & ~VPB_BIT_FLAGMASK))
  306. #else
  307. #define VPB_CURTHREAD_EXCLUSIVE VPB_SINGLE_EXCLUSIVE
  308. #endif
  309. #define VPB_UNBUSIED VPB_SHARERS_WORD(0)
  310. /* Freed lock blocks both shared and exclusive. */
  311. #define VPB_FREED (0xffffffff - VPB_BIT_SHARED)
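/*
 * Worked example, not part of the original header: how the busy word
 * encodes shared ownership.  VPB_SHARERS_WORD(2) is (2 << 3) | 0x01 == 0x11,
 * VPB_SHARERS(0x11) recovers 2, and adding one more shared owner is an
 * atomic add of VPB_ONE_SHARER (0x08).  VPB_UNBUSIED is simply
 * VPB_SHARERS_WORD(0), i.e. a shared-encoded count of zero.
 */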
  312. #define PQ_NONE 255
  313. #define PQ_INACTIVE 0
  314. #define PQ_ACTIVE 1
  315. #define PQ_LAUNDRY 2
  316. #define PQ_UNSWAPPABLE 3
  317. #define PQ_COUNT 4
  318. #ifndef VM_PAGE_HAVE_PGLIST
  319. TAILQ_HEAD(pglist, vm_page);
  320. #define VM_PAGE_HAVE_PGLIST
  321. #endif
  322. SLIST_HEAD(spglist, vm_page);
  323. #ifdef _KERNEL
  324. extern vm_page_t bogus_page;
  325. #endif /* _KERNEL */
  326. extern struct mtx_padalign pa_lock[];
  327. #if defined(__arm__)
  328. #define PDRSHIFT PDR_SHIFT
  329. #elif !defined(PDRSHIFT)
  330. #define PDRSHIFT 21
  331. #endif
  332. #define pa_index(pa) ((pa) >> PDRSHIFT)
  333. #define PA_LOCKPTR(pa) ((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
  334. #define PA_LOCKOBJPTR(pa) ((struct lock_object *)PA_LOCKPTR((pa)))
  335. #define PA_LOCK(pa) mtx_lock(PA_LOCKPTR(pa))
  336. #define PA_TRYLOCK(pa) mtx_trylock(PA_LOCKPTR(pa))
  337. #define PA_UNLOCK(pa) mtx_unlock(PA_LOCKPTR(pa))
  338. #define PA_UNLOCK_COND(pa) \
  339. do { \
  340. if ((pa) != 0) { \
  341. PA_UNLOCK((pa)); \
  342. (pa) = 0; \
  343. } \
  344. } while (0)
  345. #define PA_LOCK_ASSERT(pa, a) mtx_assert(PA_LOCKPTR(pa), (a))
  346. #if defined(KLD_MODULE) && !defined(KLD_TIED)
  347. #define vm_page_lock(m) vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
  348. #define vm_page_unlock(m) vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
  349. #define vm_page_trylock(m) vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
  350. #else /* !KLD_MODULE */
  351. #define vm_page_lockptr(m) (PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
  352. #define vm_page_lock(m) mtx_lock(vm_page_lockptr((m)))
  353. #define vm_page_unlock(m) mtx_unlock(vm_page_lockptr((m)))
  354. #define vm_page_trylock(m) mtx_trylock(vm_page_lockptr((m)))
  355. #endif
  356. #if defined(INVARIANTS)
  357. #define vm_page_assert_locked(m) \
  358. vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
  359. #define vm_page_lock_assert(m, a) \
  360. vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
  361. #else
  362. #define vm_page_assert_locked(m)
  363. #define vm_page_lock_assert(m, a)
  364. #endif
  365. /*
  366. * The vm_page's aflags are updated using atomic operations. To set or clear
  367. * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
  368. * must be used. Neither these flags nor these functions are part of the KBI.
  369. *
  370. * PGA_REFERENCED may be cleared only if the page is locked. It is set by
  371. * both the MI and MD VM layers. However, kernel loadable modules should not
  372. * directly set this flag. They should call vm_page_reference() instead.
  373. *
  374. * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
  375. * When it does so, the object must be locked, or the page must be
  376. * exclusive busied. The MI VM layer must never access this flag
  377. * directly. Instead, it should call pmap_page_is_write_mapped().
  378. *
  379. * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
  380. * at least one executable mapping. It is not consumed by the MI VM layer.
  381. *
  382. * PGA_NOSYNC must be set and cleared with the page busy lock held.
  383. *
  384. * PGA_ENQUEUED is set and cleared when a page is inserted into or removed
  385. * from a page queue, respectively. It determines whether the plinks.q field
  386. * of the page is valid. To set or clear this flag, the page's "queue" field must
  387. * be a valid queue index, and the corresponding page queue lock must be held.
  388. *
  389. * PGA_DEQUEUE is set when the page is scheduled to be dequeued from a page
  390. * queue, and cleared when the dequeue request is processed. A page may
  391. * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
  392. * is requested after the page is scheduled to be enqueued but before it is
  393. * actually inserted into the page queue.
  394. *
  395. * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
  396. * in its page queue.
  397. *
  398. * PGA_REQUEUE_HEAD is a special flag for enqueuing pages near the head of
  399. * the inactive queue, thus bypassing LRU.
  400. *
  401. * The PGA_DEQUEUE, PGA_REQUEUE and PGA_REQUEUE_HEAD flags must be set using an
  402. * atomic RMW operation to ensure that the "queue" field is a valid queue index,
  403. * and the corresponding page queue lock must be held when clearing any of the
  404. * flags.
  405. *
  406. * PGA_SWAP_FREE is used to defer freeing swap space to the pageout daemon
  407. * when the context that dirties the page does not have the object write lock
  408. * held.
  409. */
  410. #define PGA_WRITEABLE 0x0001 /* page may be mapped writeable */
  411. #define PGA_REFERENCED 0x0002 /* page has been referenced */
  412. #define PGA_EXECUTABLE 0x0004 /* page may be mapped executable */
  413. #define PGA_ENQUEUED 0x0008 /* page is enqueued in a page queue */
  414. #define PGA_DEQUEUE 0x0010 /* page is due to be dequeued */
  415. #define PGA_REQUEUE 0x0020 /* page is due to be requeued */
  416. #define PGA_REQUEUE_HEAD 0x0040 /* page requeue should bypass LRU */
  417. #define PGA_NOSYNC 0x0080 /* do not collect for syncer */
  418. #define PGA_SWAP_FREE 0x0100 /* page with swap space was dirtied */
  419. #define PGA_SWAP_SPACE 0x0200 /* page has allocated swap space */
  420. #define PGA_QUEUE_OP_MASK (PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD)
  421. #define PGA_QUEUE_STATE_MASK (PGA_ENQUEUED | PGA_QUEUE_OP_MASK)
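/*
 * Illustrative sketch, not part of the original header: testing for a
 * pending deferred queue operation with the masks above.  The snapshot is
 * advisory only, since the batch queues may be processed at any time; "m"
 * is a placeholder page pointer.
 *
 *	vm_page_astate_t as;
 *
 *	as = vm_page_astate_load(m);
 *	if ((as.flags & PGA_QUEUE_OP_MASK) != 0)
 *		(an enqueue, dequeue or requeue is still pending)
 *	if ((as.flags & PGA_ENQUEUED) != 0)
 *		(plinks.q is valid and the page sits in the queue as.queue)
 */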
  422. /*
  423. * Page flags. Updates to these flags are not synchronized, and thus they must
  424. * be set during page allocation or free to avoid races.
  425. *
  426. * The PG_PCPU_CACHE flag is set at allocation time if the page was
  427. * allocated from a per-CPU cache. It is cleared the next time that the
  428. * page is allocated from the physical memory allocator.
  429. */
  430. #define PG_PCPU_CACHE 0x01 /* was allocated from per-CPU caches */
  431. #define PG_FICTITIOUS 0x02 /* physical page doesn't exist */
  432. #define PG_ZERO 0x04 /* page is zeroed */
  433. #define PG_MARKER 0x08 /* special queue marker page */
  434. #define PG_NODUMP 0x10 /* don't include this page in a dump */
  435. /*
  436. * Misc constants.
  437. */
  438. #define ACT_DECLINE 1
  439. #define ACT_ADVANCE 3
  440. #define ACT_INIT 5
  441. #define ACT_MAX 64
  442. #ifdef _KERNEL
  443. #include <sys/kassert.h>
  444. #include <machine/atomic.h>
  445. /*
  446. * Each pageable resident page falls into one of five lists:
  447. *
  448. * free
  449. * Available for allocation now.
  450. *
  451. * inactive
  452. * Low activity, candidates for reclamation.
  453. * This list is approximately LRU ordered.
  454. *
  455. * laundry
  456. * This is the list of pages that should be
  457. * paged out next.
  458. *
  459. * unswappable
  460. * Dirty anonymous pages that cannot be paged
  461. * out because no swap device is configured.
  462. *
  463. * active
  464. * Pages that are "active", i.e., they have been
  465. * recently referenced.
  466. *
  467. */
  468. extern vm_page_t vm_page_array; /* First resident page in table */
  469. extern long vm_page_array_size; /* number of vm_page_t's */
  470. extern long first_page; /* first physical page number */
  471. #define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
  472. /*
  473. * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
  474. * page to which the given physical address belongs. The correct vm_page_t
  475. * object is returned for addresses that are not page-aligned.
  476. */
  477. vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
  478. /*
  479. * Page allocation parameters for vm_page for the functions
  480. * vm_page_alloc(), vm_page_grab(), vm_page_alloc_contig() and
  481. * vm_page_alloc_freelist(). Some functions support only a subset
  482. * of the flags and ignore the others; see the legend below.
  483. *
  484. * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
  485. * and the vm_page_grab*() functions. See these functions for details.
  486. *
  487. * Bits 0 - 1 define class.
  488. * Bits 2 - 15 dedicated for flags.
  489. * Legend:
  490. * (a) - vm_page_alloc() supports the flag.
  491. * (c) - vm_page_alloc_contig() supports the flag.
  492. * (g) - vm_page_grab() supports the flag.
  493. * (n) - vm_page_alloc_noobj() and vm_page_alloc_freelist() support the flag.
  494. * (p) - vm_page_grab_pages() supports the flag.
  495. * Bits above 15 define the count of additional pages that the caller
  496. * intends to allocate.
  497. */
  498. #define VM_ALLOC_NORMAL 0
  499. #define VM_ALLOC_INTERRUPT 1
  500. #define VM_ALLOC_SYSTEM 2
  501. #define VM_ALLOC_CLASS_MASK 3
  502. #define VM_ALLOC_WAITOK 0x0008 /* (acn) Sleep and retry */
  503. #define VM_ALLOC_WAITFAIL 0x0010 /* (acn) Sleep and return error */
  504. #define VM_ALLOC_WIRED 0x0020 /* (acgnp) Allocate a wired page */
  505. #define VM_ALLOC_ZERO 0x0040 /* (acgnp) Allocate a zeroed page */
  506. #define VM_ALLOC_NORECLAIM 0x0080 /* (c) Do not reclaim after failure */
  507. #define VM_ALLOC_AVAIL0 0x0100
  508. #define VM_ALLOC_NOBUSY 0x0200 /* (acgp) Do not excl busy the page */
  509. #define VM_ALLOC_NOCREAT 0x0400 /* (gp) Don't create a page */
  510. #define VM_ALLOC_AVAIL1 0x0800
  511. #define VM_ALLOC_IGN_SBUSY 0x1000 /* (gp) Ignore shared busy flag */
  512. #define VM_ALLOC_NODUMP 0x2000 /* (ag) don't include in dump */
  513. #define VM_ALLOC_SBUSY 0x4000 /* (acgp) Shared busy the page */
  514. #define VM_ALLOC_NOWAIT 0x8000 /* (acgnp) Do not sleep */
  515. #define VM_ALLOC_COUNT_MAX 0xffff
  516. #define VM_ALLOC_COUNT_SHIFT 16
  517. #define VM_ALLOC_COUNT_MASK (VM_ALLOC_COUNT(VM_ALLOC_COUNT_MAX))
  518. #define VM_ALLOC_COUNT(count) ({ \
  519. KASSERT((count) <= VM_ALLOC_COUNT_MAX, \
  520. ("%s: invalid VM_ALLOC_COUNT value", __func__)); \
  521. (count) << VM_ALLOC_COUNT_SHIFT; \
  522. })
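/*
 * Illustrative sketch, not part of the original header: composing an
 * allocation request.  "obj" and "pindex" are placeholders and the object
 * is assumed to be write locked; the VM_ALLOC_COUNT() hint merely tells the
 * allocator how many additional allocations are expected to follow.
 *
 *	vm_page_t m;
 *
 *	m = vm_page_alloc(obj, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO |
 *	    VM_ALLOC_COUNT(7));
 *	if (m != NULL && (m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */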
  523. #ifdef M_NOWAIT
  524. static inline int
  525. malloc2vm_flags(int malloc_flags)
  526. {
  527. int pflags;
  528. KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
  529. (malloc_flags & M_NOWAIT) != 0,
  530. ("M_USE_RESERVE requires M_NOWAIT"));
  531. pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
  532. VM_ALLOC_SYSTEM;
  533. if ((malloc_flags & M_ZERO) != 0)
  534. pflags |= VM_ALLOC_ZERO;
  535. if ((malloc_flags & M_NODUMP) != 0)
  536. pflags |= VM_ALLOC_NODUMP;
  537. if ((malloc_flags & M_NOWAIT))
  538. pflags |= VM_ALLOC_NOWAIT;
  539. if ((malloc_flags & M_WAITOK))
  540. pflags |= VM_ALLOC_WAITOK;
  541. if ((malloc_flags & M_NORECLAIM))
  542. pflags |= VM_ALLOC_NORECLAIM;
  543. return (pflags);
  544. }
  545. #endif
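/*
 * Worked example, not part of the original header: malloc2vm_flags(M_NOWAIT |
 * M_ZERO) yields VM_ALLOC_SYSTEM | VM_ALLOC_ZERO | VM_ALLOC_NOWAIT, while a
 * request carrying M_USE_RESERVE (which requires M_NOWAIT) maps to the
 * VM_ALLOC_INTERRUPT class instead.
 */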
  546. /*
  547. * Predicates supported by vm_page_ps_test():
  548. *
  549. * PS_ALL_DIRTY is true only if the entire (super)page is dirty.
  550. * However, it can be spuriously false when the (super)page has become
  551. * dirty in the pmap but that information has not been propagated to the
  552. * machine-independent layer.
  553. */
  554. #define PS_ALL_DIRTY 0x1
  555. #define PS_ALL_VALID 0x2
  556. #define PS_NONE_BUSY 0x4
  557. bool vm_page_busy_acquire(vm_page_t m, int allocflags);
  558. void vm_page_busy_downgrade(vm_page_t m);
  559. int vm_page_busy_tryupgrade(vm_page_t m);
  560. bool vm_page_busy_sleep(vm_page_t m, const char *msg, int allocflags);
  561. void vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m,
  562. vm_pindex_t pindex, const char *wmesg, int allocflags);
  563. void vm_page_free(vm_page_t m);
  564. void vm_page_free_zero(vm_page_t m);
  565. void vm_page_activate (vm_page_t);
  566. void vm_page_advise(vm_page_t m, int advice);
  567. vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
  568. vm_page_t vm_page_alloc_domain(vm_object_t, vm_pindex_t, int, int);
  569. vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
  570. vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
  571. vm_page_t);
  572. vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
  573. u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
  574. vm_paddr_t boundary, vm_memattr_t memattr);
  575. vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
  576. vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
  577. vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
  578. vm_memattr_t memattr);
  579. vm_page_t vm_page_alloc_freelist(int, int);
  580. vm_page_t vm_page_alloc_freelist_domain(int, int, int);
  581. vm_page_t vm_page_alloc_noobj(int);
  582. vm_page_t vm_page_alloc_noobj_domain(int, int);
  583. vm_page_t vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low,
  584. vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
  585. vm_memattr_t memattr);
  586. vm_page_t vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
  587. vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
  588. vm_memattr_t memattr);
  589. void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
  590. bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
  591. vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
  592. vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
  593. int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
  594. vm_page_t *ma, int count);
  595. int vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
  596. int allocflags, vm_page_t *ma, int count);
  597. int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
  598. int allocflags);
  599. int vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
  600. vm_pindex_t pindex, int allocflags);
  601. void vm_page_deactivate(vm_page_t);
  602. void vm_page_deactivate_noreuse(vm_page_t);
  603. void vm_page_dequeue(vm_page_t m);
  604. void vm_page_dequeue_deferred(vm_page_t m);
  605. vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
  606. void vm_page_free_invalid(vm_page_t);
  607. vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
  608. void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
  609. void vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags);
  610. void vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind, int pool);
  611. int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
  612. void vm_page_invalid(vm_page_t m);
  613. void vm_page_launder(vm_page_t m);
  614. vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
  615. vm_page_t vm_page_lookup_unlocked(vm_object_t, vm_pindex_t);
  616. vm_page_t vm_page_next(vm_page_t m);
  617. void vm_page_pqbatch_drain(void);
  618. void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
  619. bool vm_page_pqstate_commit(vm_page_t m, vm_page_astate_t *old,
  620. vm_page_astate_t new);
  621. vm_page_t vm_page_prev(vm_page_t m);
  622. bool vm_page_ps_test(vm_page_t m, int psind, int flags, vm_page_t skip_m);
  623. void vm_page_putfake(vm_page_t m);
  624. void vm_page_readahead_finish(vm_page_t m);
  625. int vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
  626. vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
  627. int vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
  628. vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
  629. int vm_page_reclaim_contig_domain_ext(int domain, int req, u_long npages,
  630. vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
  631. int desired_runs);
  632. void vm_page_reference(vm_page_t m);
  633. #define VPR_TRYFREE 0x01
  634. #define VPR_NOREUSE 0x02
  635. void vm_page_release(vm_page_t m, int flags);
  636. void vm_page_release_locked(vm_page_t m, int flags);
  637. vm_page_t vm_page_relookup(vm_object_t, vm_pindex_t);
  638. bool vm_page_remove(vm_page_t);
  639. bool vm_page_remove_xbusy(vm_page_t);
  640. int vm_page_rename(vm_page_t, vm_object_t, vm_pindex_t);
  641. void vm_page_replace(vm_page_t mnew, vm_object_t object,
  642. vm_pindex_t pindex, vm_page_t mold);
  643. int vm_page_sbusied(vm_page_t m);
  644. vm_page_bits_t vm_page_set_dirty(vm_page_t m);
  645. void vm_page_set_valid_range(vm_page_t m, int base, int size);
  646. vm_offset_t vm_page_startup(vm_offset_t vaddr);
  647. void vm_page_sunbusy(vm_page_t m);
  648. bool vm_page_try_remove_all(vm_page_t m);
  649. bool vm_page_try_remove_write(vm_page_t m);
  650. int vm_page_trysbusy(vm_page_t m);
  651. int vm_page_tryxbusy(vm_page_t m);
  652. void vm_page_unhold_pages(vm_page_t *ma, int count);
  653. void vm_page_unswappable(vm_page_t m);
  654. void vm_page_unwire(vm_page_t m, uint8_t queue);
  655. bool vm_page_unwire_noq(vm_page_t m);
  656. void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
  657. void vm_page_wire(vm_page_t);
  658. bool vm_page_wire_mapped(vm_page_t m);
  659. void vm_page_xunbusy_hard(vm_page_t m);
  660. void vm_page_xunbusy_hard_unchecked(vm_page_t m);
  661. void vm_page_set_validclean (vm_page_t, int, int);
  662. void vm_page_clear_dirty(vm_page_t, int, int);
  663. void vm_page_set_invalid(vm_page_t, int, int);
  664. void vm_page_valid(vm_page_t m);
  665. int vm_page_is_valid(vm_page_t, int, int);
  666. void vm_page_test_dirty(vm_page_t);
  667. vm_page_bits_t vm_page_bits(int base, int size);
  668. void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
  669. void vm_page_free_pages_toq(struct spglist *free, bool update_wire_count);
  670. void vm_page_dirty_KBI(vm_page_t m);
  671. void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
  672. void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
  673. int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
  674. #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
  675. void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
  676. void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
  677. #endif
  678. #define vm_page_busy_fetch(m) atomic_load_int(&(m)->busy_lock)
  679. #define vm_page_assert_busied(m) \
  680. KASSERT(vm_page_busied(m), \
  681. ("vm_page_assert_busied: page %p not busy @ %s:%d", \
  682. (m), __FILE__, __LINE__))
  683. #define vm_page_assert_sbusied(m) \
  684. KASSERT(vm_page_sbusied(m), \
  685. ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
  686. (m), __FILE__, __LINE__))
  687. #define vm_page_assert_unbusied(m) \
  688. KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) != \
  689. VPB_CURTHREAD_EXCLUSIVE, \
  690. ("vm_page_assert_unbusied: page %p busy_lock %#x owned" \
  691. " by me (%p) @ %s:%d", \
  692. (m), (m)->busy_lock, curthread, __FILE__, __LINE__));
  693. #define vm_page_assert_xbusied_unchecked(m) do { \
  694. KASSERT(vm_page_xbusied(m), \
  695. ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
  696. (m), __FILE__, __LINE__)); \
  697. } while (0)
  698. #define vm_page_assert_xbusied(m) do { \
  699. vm_page_assert_xbusied_unchecked(m); \
  700. KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) == \
  701. VPB_CURTHREAD_EXCLUSIVE, \
  702. ("vm_page_assert_xbusied: page %p busy_lock %#x not owned" \
  703. " by me (%p) @ %s:%d", \
  704. (m), (m)->busy_lock, curthread, __FILE__, __LINE__)); \
  705. } while (0)
  706. #define vm_page_busied(m) \
  707. (vm_page_busy_fetch(m) != VPB_UNBUSIED)
  708. #define vm_page_xbusied(m) \
  709. ((vm_page_busy_fetch(m) & VPB_SINGLE_EXCLUSIVE) != 0)
  710. #define vm_page_busy_freed(m) \
  711. (vm_page_busy_fetch(m) == VPB_FREED)
  712. /* Note: page m's lock must not be owned by the caller. */
  713. #define vm_page_xunbusy(m) do { \
  714. if (!atomic_cmpset_rel_int(&(m)->busy_lock, \
  715. VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED)) \
  716. vm_page_xunbusy_hard(m); \
  717. } while (0)
  718. #define vm_page_xunbusy_unchecked(m) do { \
  719. if (!atomic_cmpset_rel_int(&(m)->busy_lock, \
  720. VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED)) \
  721. vm_page_xunbusy_hard_unchecked(m); \
  722. } while (0)
  723. #ifdef INVARIANTS
  724. void vm_page_object_busy_assert(vm_page_t m);
  725. #define VM_PAGE_OBJECT_BUSY_ASSERT(m) vm_page_object_busy_assert(m)
  726. void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
  727. #define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits) \
  728. vm_page_assert_pga_writeable(m, bits)
  729. /*
  730. * Claim ownership of a page's xbusy state. In non-INVARIANTS kernels this
  731. * operation is a no-op since ownership is not tracked. In particular
  732. * this macro does not provide any synchronization with the previous owner.
  733. */
  734. #define vm_page_xbusy_claim(m) do { \
  735. u_int _busy_lock; \
  736. \
  737. vm_page_assert_xbusied_unchecked((m)); \
  738. do { \
  739. _busy_lock = vm_page_busy_fetch(m); \
  740. } while (!atomic_cmpset_int(&(m)->busy_lock, _busy_lock, \
  741. (_busy_lock & VPB_BIT_FLAGMASK) | VPB_CURTHREAD_EXCLUSIVE)); \
  742. } while (0)
  743. #else
  744. #define VM_PAGE_OBJECT_BUSY_ASSERT(m) (void)0
  745. #define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits) (void)0
  746. #define vm_page_xbusy_claim(m)
  747. #endif
  748. #if BYTE_ORDER == BIG_ENDIAN
  749. #define VM_PAGE_AFLAG_SHIFT 16
  750. #else
  751. #define VM_PAGE_AFLAG_SHIFT 0
  752. #endif
  753. /*
  754. * Load a snapshot of a page's 32-bit atomic state.
  755. */
  756. static inline vm_page_astate_t
  757. vm_page_astate_load(vm_page_t m)
  758. {
  759. vm_page_astate_t a;
  760. a._bits = atomic_load_32(&m->a._bits);
  761. return (a);
  762. }
  763. /*
  764. * Atomically compare and set a page's atomic state.
  765. */
  766. static inline bool
  767. vm_page_astate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
  768. {
  769. KASSERT(new.queue == PQ_INACTIVE || (new.flags & PGA_REQUEUE_HEAD) == 0,
  770. ("%s: invalid head requeue request for page %p", __func__, m));
  771. KASSERT((new.flags & PGA_ENQUEUED) == 0 || new.queue != PQ_NONE,
  772. ("%s: setting PGA_ENQUEUED with PQ_NONE in page %p", __func__, m));
  773. KASSERT(new._bits != old->_bits,
  774. ("%s: bits are unchanged", __func__));
  775. return (atomic_fcmpset_32(&m->a._bits, &old->_bits, new._bits) != 0);
  776. }
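/*
 * Illustrative sketch, not part of the original header: the usual retry loop
 * built on the two helpers above, here bumping act_count with saturation.
 * vm_page_astate_fcmpset() refreshes "old" on failure, and the loop must not
 * submit an unchanged state (see the KASSERT above).
 *
 *	vm_page_astate_t old, new;
 *
 *	old = vm_page_astate_load(m);
 *	do {
 *		if (old.act_count == ACT_MAX)
 *			break;
 *		new = old;
 *		new.act_count++;
 *	} while (!vm_page_astate_fcmpset(m, &old, new));
 */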
  777. /*
  778. * Clear the given bits in the specified page.
  779. */
  780. static inline void
  781. vm_page_aflag_clear(vm_page_t m, uint16_t bits)
  782. {
  783. uint32_t *addr, val;
  784. /*
  785. * Access the whole 32-bit word containing the aflags field with an
  786. * atomic update. Parallel non-atomic updates to the other fields
  787. * within this word are handled properly by the atomic update.
  788. */
  789. addr = (void *)&m->a;
  790. val = bits << VM_PAGE_AFLAG_SHIFT;
  791. atomic_clear_32(addr, val);
  792. }
  793. /*
  794. * Set the given bits in the specified page.
  795. */
  796. static inline void
  797. vm_page_aflag_set(vm_page_t m, uint16_t bits)
  798. {
  799. uint32_t *addr, val;
  800. VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);
  801. /*
  802. * Access the whole 32-bit word containing the aflags field with an
  803. * atomic update. Parallel non-atomic updates to the other fields
  804. * within this word are handled properly by the atomic update.
  805. */
  806. addr = (void *)&m->a;
  807. val = bits << VM_PAGE_AFLAG_SHIFT;
  808. atomic_set_32(addr, val);
  809. }
  810. /*
  811. * vm_page_dirty:
  812. *
  813. * Set all bits in the page's dirty field.
  814. *
  815. * The object containing the specified page must be locked if the
  816. * call is made from the machine-independent layer.
  817. *
  818. * See vm_page_clear_dirty_mask().
  819. */
  820. static __inline void
  821. vm_page_dirty(vm_page_t m)
  822. {
  823. /* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
  824. #if (defined(KLD_MODULE) && !defined(KLD_TIED)) || defined(INVARIANTS)
  825. vm_page_dirty_KBI(m);
  826. #else
  827. m->dirty = VM_PAGE_BITS_ALL;
  828. #endif
  829. }
  830. /*
  831. * vm_page_undirty:
  832. *
  833. * Set page to not be dirty. Note: does not clear pmap modify bits
  834. */
  835. static __inline void
  836. vm_page_undirty(vm_page_t m)
  837. {
  838. VM_PAGE_OBJECT_BUSY_ASSERT(m);
  839. m->dirty = 0;
  840. }
  841. static inline uint8_t
  842. _vm_page_queue(vm_page_astate_t as)
  843. {
  844. if ((as.flags & PGA_DEQUEUE) != 0)
  845. return (PQ_NONE);
  846. return (as.queue);
  847. }
  848. /*
  849. * vm_page_queue:
  850. *
  851. * Return the index of the queue containing m.
  852. */
  853. static inline uint8_t
  854. vm_page_queue(vm_page_t m)
  855. {
  856. return (_vm_page_queue(vm_page_astate_load(m)));
  857. }
  858. static inline bool
  859. vm_page_active(vm_page_t m)
  860. {
  861. return (vm_page_queue(m) == PQ_ACTIVE);
  862. }
  863. static inline bool
  864. vm_page_inactive(vm_page_t m)
  865. {
  866. return (vm_page_queue(m) == PQ_INACTIVE);
  867. }
  868. static inline bool
  869. vm_page_in_laundry(vm_page_t m)
  870. {
  871. uint8_t queue;
  872. queue = vm_page_queue(m);
  873. return (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE);
  874. }
  875. static inline void
  876. vm_page_clearref(vm_page_t m)
  877. {
  878. u_int r;
  879. r = m->ref_count;
  880. while (atomic_fcmpset_int(&m->ref_count, &r, r & (VPRC_BLOCKED |
  881. VPRC_OBJREF)) == 0)
  882. ;
  883. }
  884. /*
  885. * vm_page_drop:
  886. *
  887. * Release a reference to a page and return the old reference count.
  888. */
  889. static inline u_int
  890. vm_page_drop(vm_page_t m, u_int val)
  891. {
  892. u_int old;
  893. /*
  894. * Synchronize with vm_page_free_prep(): ensure that all updates to the
  895. * page structure are visible before it is freed.
  896. */
  897. atomic_thread_fence_rel();
  898. old = atomic_fetchadd_int(&m->ref_count, -val);
  899. KASSERT(old != VPRC_BLOCKED,
  900. ("vm_page_drop: page %p has an invalid refcount value", m));
  901. return (old);
  902. }
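/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * caller releasing a single wiring and detecting that it held the last
 * reference.  Real callers must additionally requeue or free the page as
 * appropriate; this only shows how the returned pre-decrement value is
 * interpreted.
 *
 *	u_int old;
 *
 *	old = vm_page_drop(m, 1);
 *	if (VPRC_WIRE_COUNT(old) == 1 && (old & VPRC_OBJREF) == 0)
 *		(the last reference is gone; the page may now be freed)
 */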
  903. /*
  904. * vm_page_wired:
  905. *
  906. * Perform a racy check to determine whether a reference prevents the page
  907. * from being reclaimable. If the page's object is locked, and the page is
  908. * unmapped and exclusively busied by the current thread, no new wirings
  909. * may be created.
  910. */
  911. static inline bool
  912. vm_page_wired(vm_page_t m)
  913. {
  914. return (VPRC_WIRE_COUNT(m->ref_count) > 0);
  915. }
  916. static inline bool
  917. vm_page_all_valid(vm_page_t m)
  918. {
  919. return (m->valid == VM_PAGE_BITS_ALL);
  920. }
  921. static inline bool
  922. vm_page_any_valid(vm_page_t m)
  923. {
  924. return (m->valid != 0);
  925. }
  926. static inline bool
  927. vm_page_none_valid(vm_page_t m)
  928. {
  929. return (m->valid == 0);
  930. }
  931. static inline int
  932. vm_page_domain(vm_page_t m __numa_used)
  933. {
  934. #ifdef NUMA
  935. int domn, segind;
  936. segind = m->segind;
  937. KASSERT(segind < vm_phys_nsegs, ("segind %d m %p", segind, m));
  938. domn = vm_phys_segs[segind].domain;
  939. KASSERT(domn >= 0 && domn < vm_ndomains, ("domain %d m %p", domn, m));
  940. return (domn);
  941. #else
  942. return (0);
  943. #endif
  944. }
  945. #endif /* _KERNEL */
  946. #endif /* !_VM_PAGE_ */