  1. /*
  2. * Copyright (c) 2010-2017 Richard Braun.
  3. *
  4. * This program is free software: you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation, either version 3 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  16. *
  17. *
  18. * Physical page management.
  19. *
  20. * A page is said to be managed if it's linked to a VM object, in which
  21. * case there is at least one reference to it.
  22. */
  23. #ifndef VM_VM_PAGE_H
  24. #define VM_VM_PAGE_H
  25. #include <assert.h>
  26. #include <errno.h>
  27. #include <stdbool.h>
  28. #include <stddef.h>
  29. #include <stdint.h>
  30. #include <kern/atomic.h>
  31. #include <kern/init.h>
  32. #include <kern/list.h>
  33. #include <kern/log2.h>
  34. #include <kern/macros.h>
  35. #include <machine/page.h>
  36. #include <machine/pmap.h>
  37. #include <machine/pmem.h>
  38. #include <machine/types.h>
  39. #include <vm/vm_object_types.h>
/*
 * Byte/page conversion and rounding macros (not inline functions to
 * be easily usable on both virtual and physical addresses, which may not
 * have the same type size).
 */
#define vm_page_btop(bytes) ((bytes) >> PAGE_SHIFT)        /* byte count -> page frame count */
#define vm_page_ptob(pages) ((pages) << PAGE_SHIFT)        /* page frame count -> byte count */
#define vm_page_trunc(bytes) P2ALIGN(bytes, PAGE_SIZE)     /* round down to a page boundary */
#define vm_page_round(bytes) P2ROUND(bytes, PAGE_SIZE)     /* round up to a page boundary */
#define vm_page_end(bytes) P2END(bytes, PAGE_SIZE)         /* next page boundary — semantics per P2END, see kern/macros.h */
#define vm_page_aligned(bytes) P2ALIGNED(bytes, PAGE_SIZE) /* nonzero if page-aligned */
/*
 * Zone selectors.
 *
 * A selector names the widest zone an allocation may use; all zones
 * below it in the table are acceptable fallbacks.
 *
 * Selector-to-zone-list translation table :
 * DMA          DMA
 * DMA32        DMA32 DMA
 * DIRECTMAP    DIRECTMAP DMA32 DMA
 * HIGHMEM      HIGHMEM DIRECTMAP DMA32 DMA
 */
#define VM_PAGE_SEL_DMA 0
#define VM_PAGE_SEL_DMA32 1
#define VM_PAGE_SEL_DIRECTMAP 2
#define VM_PAGE_SEL_HIGHMEM 3

/*
 * Page usage types.
 *
 * Stored in the type member of struct vm_page, set with vm_page_set_type().
 */
#define VM_PAGE_FREE 0      /* Page unused */
#define VM_PAGE_RESERVED 1  /* Page reserved at boot time */
#define VM_PAGE_TABLE 2     /* Page is part of the page table */
#define VM_PAGE_PMAP 3      /* Page stores pmap-specific data */
#define VM_PAGE_KMEM 4      /* Page is a direct-mapped kmem slab */
#define VM_PAGE_OBJECT 5    /* Page is part of a VM object */
#define VM_PAGE_KERNEL 6    /* Type for generic kernel allocations */
/*
 * Physical page descriptor.
 *
 * One descriptor exists per managed physical page frame. The nr_refs
 * counter is manipulated atomically (see vm_page_ref() and friends);
 * the remaining members' locking rules are defined in vm_page.c.
 */
struct vm_page {
    struct list node;           /* List linkage — presumably the allocator's
                                   free lists; verify against vm_page.c */
    unsigned short type;        /* VM_PAGE_* usage type of this page */
    unsigned short zone_index;  /* Index of the zone owning this page */
    unsigned short order;       /* Block order — assumed log2 of the block
                                   size in pages; TODO confirm */
    phys_addr_t phys_addr;      /* Physical address of the frame */
    void *priv;                 /* Private data, see vm_page_set_priv() */
    unsigned int nr_refs;       /* Reference counter, atomic access only */

    /* VM object back reference, valid while the page is linked
       (see vm_page_link()/vm_page_unlink()) */
    struct vm_object *object;   /* Owning object, NULL when unlinked */
    uint64_t offset;            /* Offset of the page in its object */
};
  89. static inline unsigned short
  90. vm_page_type(const struct vm_page *page)
  91. {
  92. return page->type;
  93. }
  94. void vm_page_set_type(struct vm_page *page, unsigned int order,
  95. unsigned short type);
  96. static inline unsigned int
  97. vm_page_order(size_t size)
  98. {
  99. return log2_order(vm_page_btop(vm_page_round(size)));
  100. }
  101. static inline phys_addr_t
  102. vm_page_to_pa(const struct vm_page *page)
  103. {
  104. return page->phys_addr;
  105. }
  106. static inline uintptr_t
  107. vm_page_direct_va(phys_addr_t pa)
  108. {
  109. assert(pa < PMEM_DIRECTMAP_LIMIT);
  110. return ((uintptr_t)pa + PMAP_START_DIRECTMAP_ADDRESS);
  111. }
  112. static inline phys_addr_t
  113. vm_page_direct_pa(uintptr_t va)
  114. {
  115. assert(va >= PMAP_START_DIRECTMAP_ADDRESS);
  116. assert(va < PMAP_END_DIRECTMAP_ADDRESS);
  117. return (va - PMAP_START_DIRECTMAP_ADDRESS);
  118. }
  119. static inline void *
  120. vm_page_direct_ptr(const struct vm_page *page)
  121. {
  122. return (void *)vm_page_direct_va(vm_page_to_pa(page));
  123. }
  124. /*
  125. * Associate private data with a page.
  126. */
  127. static inline void
  128. vm_page_set_priv(struct vm_page *page, void *priv)
  129. {
  130. page->priv = priv;
  131. }
  132. static inline void *
  133. vm_page_get_priv(const struct vm_page *page)
  134. {
  135. return page->priv;
  136. }
  137. static inline void
  138. vm_page_link(struct vm_page *page, struct vm_object *object, uint64_t offset)
  139. {
  140. assert(object != NULL);
  141. page->object = object;
  142. page->offset = offset;
  143. }
  144. static inline void
  145. vm_page_unlink(struct vm_page *page)
  146. {
  147. assert(page->object != NULL);
  148. page->object = NULL;
  149. }
/*
 * Load physical memory into the vm_page module at boot time.
 *
 * All addresses must be page-aligned. Zones can be loaded in any order.
 */
void vm_page_load(unsigned int zone_index, phys_addr_t start, phys_addr_t end);

/*
 * Load available physical memory into the vm_page module at boot time.
 *
 * The zone referred to must have been loaded with vm_page_load
 * before loading its heap.
 */
void vm_page_load_heap(unsigned int zone_index, phys_addr_t start,
                       phys_addr_t end);

/*
 * Return nonzero (true) if the vm_page module is completely initialized,
 * zero (false) otherwise, in which case only vm_page_bootalloc() can be
 * used for allocations.
 */
int vm_page_ready(void);

/*
 * Make the given page managed by the vm_page module.
 *
 * If additional memory can be made usable after the VM system is initialized,
 * it should be reported through this function.
 */
void vm_page_manage(struct vm_page *page);

/*
 * Return the page descriptor for the given physical address.
 */
struct vm_page * vm_page_lookup(phys_addr_t pa);

/*
 * Allocate a block of 2^order physical pages.
 *
 * The selector (VM_PAGE_SEL_*) is used to determine the zones from which
 * allocation can be attempted.
 *
 * If successful, the returned pages have no references.
 */
struct vm_page * vm_page_alloc(unsigned int order, unsigned int selector,
                               unsigned short type);

/*
 * Release a block of 2^order physical pages.
 *
 * The pages must have no references.
 */
void vm_page_free(struct vm_page *page, unsigned int order);

/*
 * Return the name of the given zone.
 */
const char * vm_page_zone_name(unsigned int zone_index);

/*
 * Log information about physical pages.
 */
void vm_page_log_info(void);
  205. static inline bool
  206. vm_page_referenced(const struct vm_page *page)
  207. {
  208. return atomic_load(&page->nr_refs, ATOMIC_RELAXED) != 0;
  209. }
  210. static inline void
  211. vm_page_ref(struct vm_page *page)
  212. {
  213. unsigned int nr_refs;
  214. nr_refs = atomic_fetch_add(&page->nr_refs, 1, ATOMIC_RELAXED);
  215. assert(nr_refs != (unsigned int)-1);
  216. }
  217. static inline void
  218. vm_page_unref(struct vm_page *page)
  219. {
  220. unsigned int nr_refs;
  221. nr_refs = atomic_fetch_sub(&page->nr_refs, 1, ATOMIC_ACQ_REL);
  222. assert(nr_refs != 0);
  223. if (nr_refs == 1) {
  224. vm_page_free(page, 0);
  225. }
  226. }
  227. static inline int
  228. vm_page_tryref(struct vm_page *page)
  229. {
  230. unsigned int nr_refs, prev;
  231. do {
  232. nr_refs = atomic_load(&page->nr_refs, ATOMIC_RELAXED);
  233. if (nr_refs == 0) {
  234. return EAGAIN;
  235. }
  236. prev = atomic_cas(&page->nr_refs, nr_refs,
  237. nr_refs + 1, ATOMIC_ACQUIRE);
  238. } while (prev != nr_refs);
  239. return 0;
  240. }
  241. /*
  242. * This init operation provides :
  243. * - module fully initialized
  244. */
  245. INIT_OP_DECLARE(vm_page_setup);
  246. #endif /* VM_VM_PAGE_H */