/*
 * Copyright (c) 2010-2017 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * Physical page management.
 *
 * A page is said to be managed if it's linked to a VM object, in which
 * case there is at least one reference to it.
 */
#ifndef VM_VM_PAGE_H
#define VM_VM_PAGE_H

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include <kern/atomic.h>
#include <kern/init.h>
#include <kern/list.h>
#include <kern/log2.h>
#include <kern/macros.h>
#include <kern/slist_types.h>
#include <kern/spinlock_types.h>
#include <kern/stream.h>

#include <machine/page.h>
#include <machine/pmap.h>
#include <machine/pmem.h>
#include <machine/types.h>

#include <vm/object.h>
/*
 * Byte/page conversion and rounding macros (not inline functions to
 * be easily usable on both virtual and physical addresses, which may not
 * have the same type size).
 */
#define vm_page_btop(bytes)     ((bytes) >> PAGE_SHIFT)
#define vm_page_ptob(pages)     ((pages) << PAGE_SHIFT)
#define vm_page_trunc(bytes)    P2ALIGN (bytes, PAGE_SIZE)
#define vm_page_round(bytes)    P2ROUND (bytes, PAGE_SIZE)
#define vm_page_end(bytes)      P2END (bytes, PAGE_SIZE)
#define vm_page_aligned(bytes)  P2ALIGNED (bytes, PAGE_SIZE)
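
/*
 * Illustrative values, assuming 4 KiB pages (PAGE_SIZE == 4096,
 * PAGE_SHIFT == 12):
 *
 *   vm_page_btop (8192)     -> 2      (bytes to pages)
 *   vm_page_ptob (2)        -> 8192   (pages to bytes)
 *   vm_page_trunc (5000)    -> 4096   (round down to a page boundary)
 *   vm_page_round (5000)    -> 8192   (round up to a page boundary)
 *   vm_page_aligned (8192)  -> true
 */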
/*
 * Zone selectors.
 *
 * Selector-to-zone-list translation table:
 *   DMA        DMA
 *   DMA32      DMA32 DMA
 *   DIRECTMAP  DIRECTMAP DMA32 DMA
 *   HIGHMEM    HIGHMEM DIRECTMAP DMA32 DMA
 */
#define VM_PAGE_SEL_DMA         0
#define VM_PAGE_SEL_DMA32       1
#define VM_PAGE_SEL_DIRECTMAP   2
#define VM_PAGE_SEL_HIGHMEM     3
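
/*
 * For example, per the table above, an allocation with
 * VM_PAGE_SEL_DIRECTMAP first tries the DIRECTMAP zone, then falls
 * back to DMA32, then DMA:
 *
 *   vm_page_alloc (0, VM_PAGE_SEL_DIRECTMAP, VM_PAGE_KERNEL, 0);
 */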

// Page usage types.
#define VM_PAGE_FREE       0   // Page unused.
#define VM_PAGE_RESERVED   1   // Page reserved at boot time.
#define VM_PAGE_TABLE      2   // Page is part of the page table.
#define VM_PAGE_PMAP       3   // Page stores pmap-specific data.
#define VM_PAGE_KMEM       4   // Page is a direct-mapped kmem slab.
#define VM_PAGE_OBJECT     5   // Page is part of a VM object.
#define VM_PAGE_KERNEL     6   // Type for generic kernel allocations.

// Flags passed to the (de)allocation functions.
#define VM_PAGE_SLEEP   0x80

// Page 'cleanliness'.
#define VM_PAGE_CLEAN     0
#define VM_PAGE_DIRTY     1
#define VM_PAGE_LAUNDRY   2

struct vm_object;

// Physical page descriptor.
struct vm_page
{
  union
    {
      struct list node;
      struct slist rset;
    };

  phys_addr_t phys_addr;

  /*
   * The dirty state, order, zone index and type are packed into a single
   * 32-bit word so that they can be read and updated atomically as a
   * whole. The member order depends on endianness so that 'dirty' always
   * occupies the low byte of 'whole' (see vm_page_mark_dirty).
   */
  union
    {
      uint32_t whole;
      struct
        {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
          uint8_t type;
          uint8_t zone_index;
          uint8_t order;
          uint8_t dirty;
#else
          uint8_t dirty;
          uint8_t order;
          uint8_t zone_index;
          uint8_t type;
#endif
        };
    };

  uint32_t nr_refs;
  void *priv;

  // VM object back reference.
  struct vm_object *object;
  uint64_t offset;

  // RSET-specific members.
  struct spinlock rset_lock;
};

static inline uint32_t
vm_page_type (const struct vm_page *page)
{
  return (page->type);
}

void vm_page_set_type (struct vm_page *page, uint32_t order, uint16_t type);

// Compute the allocation order needed for a request of SIZE bytes.
static inline uint32_t
vm_page_order (size_t size)
{
  return (log2_order (vm_page_btop (vm_page_round (size))));
}
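
/*
 * Illustrative values, again assuming 4 KiB pages: a 5000-byte request
 * rounds up to 8192 bytes, i.e. 2 pages, giving order 1; a request of
 * exactly PAGE_SIZE gives order 0.
 */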

static inline phys_addr_t
vm_page_to_pa (const struct vm_page *page)
{
  return (page->phys_addr);
}

static inline uintptr_t
vm_page_direct_va (phys_addr_t pa)
{
  assert (pa < PMEM_DIRECTMAP_LIMIT);
  return ((uintptr_t)pa + PMAP_START_DIRECTMAP_ADDRESS);
}

static inline phys_addr_t
vm_page_direct_pa (uintptr_t va)
{
  assert (va >= PMAP_START_DIRECTMAP_ADDRESS);
  assert (va < PMAP_END_DIRECTMAP_ADDRESS);
  return (va - PMAP_START_DIRECTMAP_ADDRESS);
}

static inline void*
vm_page_direct_ptr (const struct vm_page *page)
{
  return ((void *)vm_page_direct_va (vm_page_to_pa (page)));
}
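
/*
 * A minimal sketch of accessing a page through the direct mapping,
 * assuming its physical address lies below PMEM_DIRECTMAP_LIMIT:
 *
 *   struct vm_page *page = vm_page_lookup (pa);
 *   void *ptr = vm_page_direct_ptr (page);   // Kernel VA aliasing the frame.
 *   memset (ptr, 0, PAGE_SIZE);              // Use it like ordinary memory.
 */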

// Associate private data with a page.
static inline void
vm_page_set_priv (struct vm_page *page, void *priv)
{
  page->priv = priv;
}

static inline void*
vm_page_get_priv (const struct vm_page *page)
{
  return (page->priv);
}

// Sever the link between a page and its VM object.
static inline void
vm_page_unlink (struct vm_page *page)
{
  assert (page->object);
  page->object = NULL;
}

/*
 * Load physical memory into the vm_page module at boot time.
 *
 * All addresses must be page-aligned. Zones can be loaded in any order.
 */
void vm_page_load (uint32_t zone_index, phys_addr_t start, phys_addr_t end);

/*
 * Load available physical memory into the vm_page module at boot time.
 *
 * The zone referred to must have been loaded with vm_page_load
 * before loading its heap.
 */
void vm_page_load_heap (uint32_t zone_index, phys_addr_t start,
                        phys_addr_t end);

/*
 * Return true if the vm_page module is completely initialized, false
 * otherwise, in which case only vm_page_bootalloc() can be used for
 * allocations.
 */
int vm_page_ready (void);

/*
 * Make the given page managed by the vm_page module.
 *
 * If additional memory can be made usable after the VM system is initialized,
 * it should be reported through this function.
 */
void vm_page_handle (struct vm_page *page);

// Return the page descriptor for the given physical address.
struct vm_page* vm_page_lookup (phys_addr_t pa);

/*
 * Allocate a block of 2^order physical pages.
 *
 * The selector is used to determine the zones from which allocation can
 * be attempted.
 *
 * If successful, the returned pages have no references.
 */
struct vm_page* vm_page_alloc (uint32_t order, uint32_t selector,
                               uint32_t type, uint32_t flags);

/*
 * Release a block of 2^order physical pages.
 *
 * The pages must have no references.
 */
void vm_page_free (struct vm_page *page, uint32_t order, uint32_t flags);
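
/*
 * A minimal allocation sketch (illustrative): grab a 4-page (order 2)
 * block of direct-mapped memory for generic kernel use, then release it.
 *
 *   struct vm_page *page = vm_page_alloc (2, VM_PAGE_SEL_DIRECTMAP,
 *                                         VM_PAGE_KERNEL, 0);
 *   if (! page)
 *     return (ENOMEM);   // May fail; VM_PAGE_SLEEP was not passed.
 *
 *   void *ptr = vm_page_direct_ptr (page);
 *   // ... use the block through its direct-mapped address ...
 *   vm_page_free (page, 2, 0);
 */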

// Deallocate a list of pages.
void vm_page_list_free (struct list *pages);

// Return the name of the given zone.
const char* vm_page_zone_name (uint32_t zone_index);

// Log information about physical pages.
void vm_page_info (struct stream *stream);

// Clear the contents of a page.
void vm_page_zero (struct vm_page *page);

// Mark a page as being clean.
void vm_page_clean (struct vm_page *page, uint32_t expected);

// Interfaces to manage page cleaning.
void vm_page_wash_begin (struct vm_page *page);
void vm_page_wash_end (struct vm_page *page);

static inline bool
vm_page_referenced (const struct vm_page *page)
{
  return (atomic_load_rlx (&page->nr_refs) != 0);
}

static inline void
vm_page_ref (struct vm_page *page)
{
  uint32_t nr_refs = atomic_add_rlx (&page->nr_refs, 1);
  assert (nr_refs != (uint32_t)-1);   // Catch reference count overflow.
}

/*
 * Drop a reference without freeing the page.
 * Return true if the dropped reference was the last one.
 */
static inline bool
vm_page_unref_nofree (struct vm_page *page)
{
  uint32_t nr_refs = atomic_sub_acq_rel (&page->nr_refs, 1);
  assert (nr_refs != 0);
  return (nr_refs == 1);
}

static inline void
vm_page_detach (struct vm_page *page)
{
  // Declared locally; the definition lives in the VM object module.
  void vm_object_detach (struct vm_object *, struct vm_page *);

  vm_object_detach (page->object, page);
  vm_page_unlink (page);
}

static inline bool
vm_page_can_free (struct vm_page *page)
{
  return (!page->dirty || !page->object ||
          !(page->object->flags & VM_OBJECT_FLUSHES));
}

/*
 * Drop a reference; if it was the last one and the page may be freed,
 * detach the page from its object if needed and release it.
 */
static inline void
vm_page_unref (struct vm_page *page)
{
  if (vm_page_unref_nofree (page) && vm_page_can_free (page))
    {
      int flags = page->type == VM_PAGE_OBJECT ? VM_PAGE_SLEEP : 0;
      if (flags == VM_PAGE_SLEEP && page->object)
        vm_page_detach (page);

      vm_page_free (page, 0, flags);
    }
}
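
/*
 * A typical refcount lifecycle (illustrative):
 *
 *   vm_page_init_refcount (page);   // Count starts at 1 (see below).
 *   vm_page_ref (page);             // Share with another user.
 *   ...
 *   vm_page_unref (page);           // Other user done; count back to 1.
 *   vm_page_unref (page);           // Last reference, page may be freed.
 */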

// Attempt to take a reference; return 0 on success, EAGAIN otherwise.
static inline int
vm_page_tryref (struct vm_page *page)
{
  return (atomic_try_inc (&page->nr_refs, ATOMIC_ACQUIRE) ? 0 : EAGAIN);
}

/*
 * Copy-on-write helpers. The COW flag is stored in the low bit of the
 * page's private data pointer.
 */
static inline void
vm_page_set_cow (struct vm_page *page)
{
  uintptr_t prev = (uintptr_t)vm_page_get_priv (page);
  vm_page_set_priv (page, (void *)(prev | 1));
}

static inline void
vm_page_clr_cow (struct vm_page *page)
{
  uintptr_t prev = (uintptr_t)vm_page_get_priv (page);
  vm_page_set_priv (page, (void *)(prev & ~1));
}

static inline bool
vm_page_is_cow (struct vm_page *page)
{
  return (((uintptr_t)vm_page_get_priv (page)) & 1);
}
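
/*
 * Since the COW flag lives in bit 0 of the private data pointer, any real
 * pointer stored alongside it must be at least 2-byte aligned so that its
 * low bit is always free. For example:
 *
 *   vm_page_set_cow (page);
 *   assert (vm_page_is_cow (page));
 */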

static inline void
vm_page_init_refcount (struct vm_page *page)
{
  page->nr_refs = 1;
}

/*
 * For anonymous pages, the object offset and the mapped virtual address
 * convert trivially into one another.
 */
static inline uintptr_t
vm_page_anon_va (const struct vm_page *page)
{
  return ((uintptr_t)page->offset);
}

static inline uint64_t
vm_page_anon_offset (uintptr_t addr)
{
  return (addr);
}

/*
 * Atomically mark a page as dirty.
 *
 * Return false if the page was already dirty, true once this call has made
 * the transition. The dirty state occupies the low byte of the packed
 * 'whole' word (see struct vm_page), which is why the CAS operates on the
 * full 32-bit value.
 */
static inline bool
vm_page_mark_dirty (struct vm_page *page)
{
  while (1)
    {
      uint32_t tmp = atomic_load_rlx (&page->whole);
      if ((tmp & 0xff) == VM_PAGE_DIRTY)
        return (false);
      else if (atomic_cas_bool_acq (&page->whole, tmp,
                                    (tmp & ~0xff) | VM_PAGE_DIRTY))
        return (true);

      atomic_spin_nop ();
    }
}

/*
 * This init operation provides:
 *  - module fully initialized
 */
INIT_OP_DECLARE (vm_page_setup);

#endif