  1. /* $OpenBSD: pmap.h,v 1.59 2015/03/27 20:25:39 miod Exp $ */
  2. /* $NetBSD: pmap.h,v 1.30 1997/08/04 20:00:47 pk Exp $ */
  3. /*
  4. * Copyright (c) 1996
  5. * The President and Fellows of Harvard College. All rights reserved.
  6. * Copyright (c) 1992, 1993
  7. * The Regents of the University of California. All rights reserved.
  8. *
  9. * This software was developed by the Computer Systems Engineering group
  10. * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
  11. * contributed to Berkeley.
  12. *
  13. * All advertising materials mentioning features or use of this software
  14. * must display the following acknowledgement:
  15. * This product includes software developed by Aaron Brown and
  16. * Harvard University.
  17. * This product includes software developed by the University of
  18. * California, Lawrence Berkeley Laboratory.
  19. *
  20. * Redistribution and use in source and binary forms, with or without
  21. * modification, are permitted provided that the following conditions
  22. * are met:
  23. * 1. Redistributions of source code must retain the above copyright
  24. * notice, this list of conditions and the following disclaimer.
  25. * 2. Redistributions in binary form must reproduce the above copyright
  26. * notice, this list of conditions and the following disclaimer in the
  27. * documentation and/or other materials provided with the distribution.
  28. * 3. All advertising materials mentioning features or use of this software
  29. * must display the following acknowledgement:
  30. * This product includes software developed by Aaron Brown and
  31. * Harvard University.
  32. * This product includes software developed by the University of
  33. * California, Berkeley and its contributors.
  34. * 4. Neither the name of the University nor the names of its contributors
  35. * may be used to endorse or promote products derived from this software
  36. * without specific prior written permission.
  37. *
  38. * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  39. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  40. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  41. * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  42. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  43. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  44. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  45. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  46. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  47. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  48. * SUCH DAMAGE.
  49. *
  50. * @(#)pmap.h 8.1 (Berkeley) 6/11/93
  51. */
  52. #ifndef _MACHINE_PMAP_H_
  53. #define _MACHINE_PMAP_H_
  54. #include <machine/pte.h>
  55. /*
  56. * Pmap structure.
  57. *
  58. * The pmap structure really comes in two variants, one---a single
  59. * instance---for kernel virtual memory and the other---up to nproc
  60. * instances---for user virtual memory. Unfortunately, we have to mash
  61. * both into the same structure. Fortunately, they are almost the same.
  62. *
  63. * The kernel begins at 0xf8000000 and runs to 0xffffffff (although
  64. * some of this is not actually used). Kernel space is mapped identically
  65. * into all user contexts.
  66. * There is no point in duplicating this mapping in each user process
  67. * so they do not appear in the user structures.
  68. *
  69. * User space begins at 0x00000000 and runs through 0x1fffffff,
  70. * then has a `hole', then resumes at 0xe0000000 and runs until it
  71. * hits the kernel space at 0xf8000000. This can be mapped
  72. * contiguously by ignoring the top two bits and pretending the
  73. * space goes from 0 to 37ffffff. Typically the lower range is
  74. * used for text+data and the upper for stack, but the code here
  75. * makes no such distinction.
  76. *
  77. * Since each virtual segment covers 256 kbytes, the user space
  78. * requires 3584 segments, while the kernel (including DVMA on 4/4c)
  79. * requires only 512 segments.
  80. *
  81. *
  82. ** FOR THE SUN4/SUN4C
  83. *
  84. * The segment map entry for virtual segment vseg is offset in
  85. * pmap->pm_rsegmap by 0 if pmap is not the kernel pmap, or by
  86. * NUSEG if it is. We keep a pointer called pmap->pm_segmap
  87. * pre-offset by this value. pmap->pm_segmap thus contains the
  88. * values to be loaded into the user portion of the hardware segment
  89. * map so as to reach the proper PMEGs within the MMU. The kernel
  90. * mappings are `set early' and are always valid in every context
  91. * (every change is always propagated immediately).
  92. *
  93. * The PMEGs within the MMU are loaded `on demand'; when a PMEG is
  94. * taken away from context `c', the pmap for context c has its
  95. * corresponding pm_segmap[vseg] entry marked invalid (the MMU segment
  96. * map entry is also made invalid at the same time). Thus
  97. * pm_segmap[vseg] is the `invalid pmeg' number (127 or 511) whenever
  98. * the corresponding PTEs are not actually in the MMU. On the other
  99. * hand, pm_pte[vseg] is NULL only if no pages in that virtual segment
  100. * are in core; otherwise it points to a copy of the 32 or 64 PTEs that
  101. * must be loaded in the MMU in order to reach those pages.
  102. * pm_npte[vseg] counts the number of valid pages in each vseg.
  103. *
  104. * XXX performance: faster to count valid bits?
  105. *
  106. * The kernel pmap cannot malloc() PTEs since malloc() will sometimes
  107. * allocate a new virtual segment. Since kernel mappings are never
  108. * `stolen' out of the MMU, we just keep all its PTEs there, and
  109. * have no software copies. Its mmu entries are nonetheless kept on lists
  110. * so that the code that fiddles with mmu lists has something to fiddle.
  111. *
  112. ** FOR THE SUN4M
  113. *
  114. * On this architecture, the virtual-to-physical translation (page) tables
  115. * are *not* stored within the MMU as they are in the earlier Sun architect-
  116. * ures; instead, they are maintained entirely within physical memory (there
  117. * is a TLB cache to prevent the high performance hit from keeping all page
  118. * tables in core). Thus there is no need to dynamically allocate PMEGs or
  119. * SMEGs; only contexts must be shared.
  120. *
  121. * We maintain two parallel sets of tables: one is the actual MMU-edible
  122. * hierarchy of page tables in allocated kernel memory; these tables refer
  123. * to each other by physical address pointers in SRMMU format (thus they
  124. * are not very useful to the kernel's management routines). The other set
  125. * of tables is similar to those used for the Sun4/400's 3-level MMU; it
  126. * is a hierarchy of regmap and segmap structures which contain kernel virtual
  127. * pointers to each other. These must (unfortunately) be kept in sync.
  128. *
  129. */
  130. #define NKREG_4C \
  131. ((unsigned int)(-VM_MIN_KERNEL_ADDRESS_SUN4 / NBPRG)) /* 16 */
  132. #define NUREG_4C (256 - NKREG_4C) /* 240 */
  133. #define NKREG_4M \
  134. ((unsigned int)(-VM_MIN_KERNEL_ADDRESS_SRMMU / NBPRG)) /* 64 */
  135. #define NUREG_4M (256 - NKREG_4M) /* 192 */
  136. #define NKREG_MAX NKREG_4M
  137. struct regmap {
  138. struct segmap *rg_segmap; /* point to NSGPRG PMEGs */
  139. int *rg_seg_ptps; /* SRMMU-edible segment tables (NULL
  140. * indicates invalid region (4m) */
  141. smeg_t rg_smeg; /* the MMU region number (4c) */
  142. u_char rg_nsegmap; /* number of valid PMEGS */
  143. };
  144. struct segmap {
  145. int *sg_pte; /* points to NPTESG PTEs */
  146. pmeg_t sg_pmeg; /* the MMU segment number (4c) */
  147. u_char sg_npte; /* number of valid PTEs per seg */
  148. };
  149. #ifdef _KERNEL
  150. TAILQ_HEAD(mmuhd,mmuentry);
  151. /*
  152. * data appearing in both user and kernel pmaps
  153. *
  154. * note: if we want the same binaries to work on the 4/4c and 4m, we have to
  155. * include the fields for both to make sure that the struct kproc
  156. * is the same size.
  157. */
  158. struct pmap {
  159. union ctxinfo *pm_ctx; /* current context, if any */
  160. int pm_ctxnum; /* current context's number */
  161. int pm_refcount; /* just what it says */
  162. struct mmuhd pm_reglist; /* MMU regions on this pmap (4/4c) */
  163. struct mmuhd pm_seglist; /* MMU segments on this pmap (4/4c) */
  164. void *pm_regstore;
  165. struct regmap *pm_regmap;
  166. int *pm_reg_ptps; /* SRMMU-edible region table for 4m */
  167. int pm_reg_ptps_pa; /* _Physical_ address of pm_reg_ptps */
  168. int pm_gap_start; /* Starting with this vreg there's */
  169. int pm_gap_end; /* no valid mapping until here */
  170. struct pmap_statistics pm_stats; /* pmap statistics */
  171. };
  172. typedef struct pmap *pmap_t;
  173. #define PMAP_NULL ((pmap_t)0)
  174. extern struct pmap kernel_pmap_store;
  175. /*
  176. * Since PTEs also contain type bits, we have to have some way
  177. * to tell pmap_enter `this is an IO page' or `this is not to
  178. * be cached'. Since physical addresses are always aligned, we
  179. * can do this with the low order bits.
  180. *
  181. * The ordering below is important: PMAP_PGTYPE << PG_TNC must give
  182. * exactly the PG_NC and PG_TYPE bits.
  183. */
  184. #define PMAP_OBIO 1 /* tells pmap_enter to use PG_OBIO */
  185. #define PMAP_VME16 2 /* etc */
  186. #define PMAP_VME32 3 /* etc */
  187. #define PMAP_NC 4 /* tells pmap_enter to set PG_NC */
  188. #define PMAP_TNC_4 7 /* mask to get PG_TYPE & PG_NC */
  189. #define PMAP_T2PTE_4(x) (((x) & PMAP_TNC_4) << PG_TNC_SHIFT)
  190. #define PMAP_IOENC_4(io) (io)
  191. /*
  192. * On a SRMMU machine, the iospace is encoded in bits [3-6] of the
  193. * physical address passed to pmap_enter().
  194. */
  195. #define PMAP_TYPE_SRMMU 0x78 /* mask to get 4m page type */
  196. #define PMAP_PTESHFT_SRMMU 25 /* right shift to put type in pte */
  197. #define PMAP_SHFT_SRMMU 3 /* left shift to extract iospace */
  198. #define PMAP_TNC_SRMMU 127 /* mask to get PG_TYPE & PG_NC */
  199. /*#define PMAP_IOC 0x00800000 -* IO cacheable, NOT shifted */
  200. #define PMAP_T2PTE_SRMMU(x) (((x) & PMAP_TYPE_SRMMU) << PMAP_PTESHFT_SRMMU)
  201. #define PMAP_IOENC_SRMMU(io) ((io) << PMAP_SHFT_SRMMU)
  202. /* Encode IO space for pmap_enter() */
  203. #define PMAP_IOENC(io) (CPU_ISSUN4M ? PMAP_IOENC_SRMMU(io) : PMAP_IOENC_4(io))
  204. int pmap_dumpsize(void);
  205. int pmap_dumpmmu(int (*)(dev_t, daddr_t, caddr_t, size_t), daddr_t);
  206. #define pmap_kernel() (&kernel_pmap_store)
  207. #define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
  208. #define PMAP_PREFER(fo, ap) pmap_prefer((fo), (ap))
  209. extern int cache_alias_dist;
  210. /* pmap prefer alignment */
  211. #define PMAP_PREFER_ALIGN() cache_alias_dist
  212. /* pmap prefer offset in alignment */
  213. #define PMAP_PREFER_OFFSET(of) \
  214. ((of) & (cache_alias_dist ? cache_alias_dist - 1 : 0))
  215. #define PMAP_EXCLUDE_DECLS /* tells MI pmap.h *not* to include decls */
  216. /* FUNCTION DECLARATIONS FOR COMMON PMAP MODULE */
  217. struct proc;
  218. void pmap_activate(struct proc *);
  219. void pmap_bootstrap(int nmmu, int nctx, int nregion);
  220. vaddr_t pmap_prefer(vaddr_t, vaddr_t);
  221. int pmap_pa_exists(paddr_t);
  222. void pmap_unwire(pmap_t, vaddr_t);
  223. void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
  224. pmap_t pmap_create(void);
  225. void pmap_destroy(pmap_t);
  226. void pmap_init(void);
  227. void pmap_kremove(vaddr_t, vsize_t);
  228. vaddr_t pmap_map(vaddr_t, paddr_t, paddr_t, int);
  229. void pmap_reference(pmap_t);
  230. void pmap_release(pmap_t);
  231. void pmap_remove(pmap_t, vaddr_t, vaddr_t);
  232. void pmap_remove_holes(struct vmspace *);
  233. void pmap_virtual_space(vaddr_t *, vaddr_t *);
  234. void pmap_redzone(void);
  235. void kvm_setcache(caddr_t, int, int);
  236. #define kvm_uncache(addr, npages) kvm_setcache(addr, npages, 0)
  237. #define kvm_recache(addr, npages) kvm_setcache(addr, npages, 1)
  238. void pmap_cache_enable(void);
  239. struct user;
  240. void switchexit(struct proc *);
  241. int mmu_pagein(struct pmap *pm, vaddr_t, int);
  242. void pmap_writetext(unsigned char *, int);
  243. #define pmap_collect(pm) do { /* nothing */ } while (0)
  244. #define pmap_copy(DP,SP,D,L,S) do { /* nothing */ } while (0)
  245. #define pmap_deactivate(p) do { /* nothing */ } while (0)
  246. #define pmap_proc_iflush(p,va,len) do { /* nothing */ } while (0)
  247. #define pmap_update(pm) do { /* nothing */ } while (0)
  248. /* SUN4/SUN4C SPECIFIC DECLARATIONS */
  249. #if defined(SUN4) || defined(SUN4C) || defined(SUN4E)
  250. boolean_t pmap_clear_modify4_4c(struct vm_page *);
  251. boolean_t pmap_clear_reference4_4c(struct vm_page *);
  252. int pmap_enter4_4c(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
  253. boolean_t pmap_extract4_4c(pmap_t, vaddr_t, paddr_t *);
  254. boolean_t pmap_is_modified4_4c(struct vm_page *);
  255. boolean_t pmap_is_referenced4_4c(struct vm_page *);
  256. void pmap_kenter_pa4_4c(vaddr_t, paddr_t, vm_prot_t);
  257. void pmap_page_protect4_4c(struct vm_page *, vm_prot_t);
  258. void pmap_protect4_4c(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
  259. void pmap_copy_page4_4c(struct vm_page *, struct vm_page *);
  260. void pmap_zero_page4_4c(struct vm_page *);
  261. void pmap_changeprot4_4c(pmap_t, vaddr_t, vm_prot_t, int);
  262. #endif
  263. /* SIMILAR DECLARATIONS FOR SUN4M MODULE */
  264. #if defined(SUN4M)
  265. boolean_t pmap_clear_modify4m(struct vm_page *);
  266. boolean_t pmap_clear_reference4m(struct vm_page *);
  267. int pmap_enter4m(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
  268. boolean_t pmap_extract4m(pmap_t, vaddr_t, paddr_t *);
  269. boolean_t pmap_is_modified4m(struct vm_page *);
  270. boolean_t pmap_is_referenced4m(struct vm_page *);
  271. void pmap_kenter_pa4m(vaddr_t, paddr_t, vm_prot_t);
  272. void pmap_page_protect4m(struct vm_page *, vm_prot_t);
  273. void pmap_protect4m(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
  274. void pmap_copy_page4m(struct vm_page *, struct vm_page *);
  275. void pmap_zero_page4m(struct vm_page *);
  276. void pmap_changeprot4m(pmap_t, vaddr_t, vm_prot_t, int);
  277. #endif /* defined SUN4M */
  278. #if !(defined(SUN4D) || defined(SUN4M)) && (defined(SUN4) || defined(SUN4C) || defined(SUN4E))
  279. #define pmap_clear_modify pmap_clear_modify4_4c
  280. #define pmap_clear_reference pmap_clear_reference4_4c
  281. #define pmap_copy_page pmap_copy_page4_4c
  282. #define pmap_enter pmap_enter4_4c
  283. #define pmap_extract pmap_extract4_4c
  284. #define pmap_is_modified pmap_is_modified4_4c
  285. #define pmap_is_referenced pmap_is_referenced4_4c
  286. #define pmap_kenter_pa pmap_kenter_pa4_4c
  287. #define pmap_page_protect pmap_page_protect4_4c
  288. #define pmap_protect pmap_protect4_4c
  289. #define pmap_zero_page pmap_zero_page4_4c
  290. #define pmap_changeprot pmap_changeprot4_4c
  291. #elif (defined(SUN4D) || defined(SUN4M)) && !(defined(SUN4) || defined(SUN4C) || defined(SUN4E))
  292. #define pmap_clear_modify pmap_clear_modify4m
  293. #define pmap_clear_reference pmap_clear_reference4m
  294. #define pmap_copy_page pmap_copy_page4m
  295. #define pmap_enter pmap_enter4m
  296. #define pmap_extract pmap_extract4m
  297. #define pmap_is_modified pmap_is_modified4m
  298. #define pmap_is_referenced pmap_is_referenced4m
  299. #define pmap_kenter_pa pmap_kenter_pa4m
  300. #define pmap_page_protect pmap_page_protect4m
  301. #define pmap_protect pmap_protect4m
  302. #define pmap_zero_page pmap_zero_page4m
  303. #define pmap_changeprot pmap_changeprot4m
  304. #else /* must use function pointers */
  305. extern boolean_t (*pmap_clear_modify_p)(struct vm_page *);
  306. extern boolean_t (*pmap_clear_reference_p)(struct vm_page *);
  307. extern int (*pmap_enter_p)(pmap_t, vaddr_t, paddr_t,
  308. vm_prot_t, int);
  309. extern boolean_t (*pmap_extract_p)(pmap_t, vaddr_t, paddr_t *);
  310. extern boolean_t (*pmap_is_modified_p)(struct vm_page *);
  311. extern boolean_t (*pmap_is_referenced_p)(struct vm_page *);
  312. extern void (*pmap_kenter_pa_p)(vaddr_t, paddr_t, vm_prot_t);
  313. extern void (*pmap_page_protect_p)(struct vm_page *,
  314. vm_prot_t);
  315. extern void (*pmap_protect_p)(pmap_t, vaddr_t, vaddr_t,
  316. vm_prot_t);
  317. extern void (*pmap_copy_page_p)(struct vm_page *, struct vm_page *);
  318. extern void (*pmap_zero_page_p)(struct vm_page *);
  319. extern void (*pmap_changeprot_p)(pmap_t, vaddr_t,
  320. vm_prot_t, int);
  321. #define pmap_clear_modify (*pmap_clear_modify_p)
  322. #define pmap_clear_reference (*pmap_clear_reference_p)
  323. #define pmap_copy_page (*pmap_copy_page_p)
  324. #define pmap_enter (*pmap_enter_p)
  325. #define pmap_extract (*pmap_extract_p)
  326. #define pmap_is_modified (*pmap_is_modified_p)
  327. #define pmap_is_referenced (*pmap_is_referenced_p)
  328. #define pmap_kenter_pa (*pmap_kenter_pa_p)
  329. #define pmap_page_protect (*pmap_page_protect_p)
  330. #define pmap_protect (*pmap_protect_p)
  331. #define pmap_zero_page (*pmap_zero_page_p)
  332. #define pmap_changeprot (*pmap_changeprot_p)
  333. #endif
  334. #endif /* _KERNEL */
  335. /*
  336. * For each managed physical page, there is a list of all currently
  337. * valid virtual mappings of that page. Since there is usually one
  338. * (or zero) mapping per page, the table begins with an initial entry,
  339. * rather than a pointer; this head entry is empty iff its pv_pmap
  340. * field is NULL.
  341. *
  342. * Note that these are per machine independent page (so there may be
  343. * only one for every two hardware pages, e.g.). Since the virtual
  344. * address is aligned on a page boundary, the low order bits are free
  345. * for storing flags. Only the head of each list has flags.
  346. */
  347. struct pvlist {
  348. struct pvlist *pv_next; /* next pvlist, if any */
  349. struct pmap *pv_pmap; /* pmap of this va */
  350. vaddr_t pv_va; /* virtual address */
  351. int pv_flags; /* flags (below) */
  352. };
  353. struct vm_page_md {
  354. struct pvlist pv_head;
  355. };
  356. #ifdef _KERNEL
  357. /*
  358. * Flags in pv_flags. Note that PV_MOD must be 1 and PV_REF must be 2
  359. * since they must line up with the bits in the hardware PTEs (see pte.h).
  360. * SUN4M bits are at a slightly different location in the PTE.
  361. * Note: the REF, MOD and ANC flag bits occur only in the head of a pvlist.
  362. * The cacheable bit (either PV_NC or PV_C4M) is meaningful in each
  363. * individual pv entry.
  364. */
  365. #define PV_MOD 1 /* page modified */
  366. #define PV_REF 2 /* page referenced */
  367. #define PV_NC 4 /* page cannot be cached */
  368. #define PV_REF4M 1 /* page referenced (SRMMU) */
  369. #define PV_MOD4M 2 /* page modified (SRMMU) */
  370. #define PV_C4M 4 /* page _can_ be cached (SRMMU) */
  371. #define PV_ANC 0x10 /* page has incongruent aliases */
  372. #define VM_MDPAGE_INIT(pg) do { \
  373. (pg)->mdpage.pv_head.pv_next = NULL; \
  374. (pg)->mdpage.pv_head.pv_pmap = NULL; \
  375. (pg)->mdpage.pv_head.pv_va = 0; \
  376. (pg)->mdpage.pv_head.pv_flags = 0; \
  377. } while (0)
  378. #endif /* _KERNEL */
  379. #endif /* _MACHINE_PMAP_H_ */