
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_PGTABLE_H
#define _ASM_IA64_PGTABLE_H
/*
 * This file contains the functions and defines necessary to modify and use
 * the IA-64 page table tree.
 *
 * This hopefully works with any (fixed) IA-64 page-size, as defined
 * in <asm/page.h>.
 *
 * Copyright (C) 1998-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/types.h>

#define IA64_MAX_PHYS_BITS	50	/* max. number of physical address bits (architected) */

/*
 * First, define the various bits in a PTE.  Note that the PTE format
 * matches the VHPT short format, the first doubleword of the VHPT long
 * format, and the first doubleword of the TLB insertion format.
 */
#define _PAGE_P_BIT		0
#define _PAGE_A_BIT		5
#define _PAGE_D_BIT		6

#define _PAGE_P			(1 << _PAGE_P_BIT)	/* page present bit */
#define _PAGE_MA_WB		(0x0 <<  2)	/* write back memory attribute */
#define _PAGE_MA_UC		(0x4 <<  2)	/* uncacheable memory attribute */
#define _PAGE_MA_UCE		(0x5 <<  2)	/* UC exported attribute */
#define _PAGE_MA_WC		(0x6 <<  2)	/* write coalescing memory attribute */
#define _PAGE_MA_NAT		(0x7 <<  2)	/* not-a-thing attribute */
#define _PAGE_MA_MASK		(0x7 <<  2)
#define _PAGE_PL_0		(0 <<  7)	/* privilege level 0 (kernel) */
#define _PAGE_PL_1		(1 <<  7)	/* privilege level 1 (unused) */
#define _PAGE_PL_2		(2 <<  7)	/* privilege level 2 (unused) */
#define _PAGE_PL_3		(3 <<  7)	/* privilege level 3 (user) */
#define _PAGE_PL_MASK		(3 <<  7)
#define _PAGE_AR_R		(0 <<  9)	/* read only */
#define _PAGE_AR_RX		(1 <<  9)	/* read & execute */
#define _PAGE_AR_RW		(2 <<  9)	/* read & write */
#define _PAGE_AR_RWX		(3 <<  9)	/* read, write & execute */
#define _PAGE_AR_R_RW		(4 <<  9)	/* read / read & write */
#define _PAGE_AR_RX_RWX		(5 <<  9)	/* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW		(6 <<  9)	/* read, write & exec / read & write */
#define _PAGE_AR_X_RX		(7 <<  9)	/* exec & promote / read & exec */
#define _PAGE_AR_MASK		(7 <<  9)
#define _PAGE_AR_SHIFT		9
#define _PAGE_A			(1 << _PAGE_A_BIT)	/* page accessed bit */
#define _PAGE_D			(1 << _PAGE_D_BIT)	/* page dirty bit */
#define _PAGE_PPN_MASK		(((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED		(__IA64_UL(1) << 52)	/* exception deferral */
#define _PAGE_PROTNONE		(__IA64_UL(1) << 63)

#define _PFN_MASK		_PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK	(_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)

#define _PAGE_SIZE_4K	12
#define _PAGE_SIZE_8K	13
#define _PAGE_SIZE_16K	14
#define _PAGE_SIZE_64K	16
#define _PAGE_SIZE_256K	18
#define _PAGE_SIZE_1M	20
#define _PAGE_SIZE_4M	22
#define _PAGE_SIZE_16M	24
#define _PAGE_SIZE_64M	26
#define _PAGE_SIZE_256M	28
#define _PAGE_SIZE_1G	30
#define _PAGE_SIZE_4G	32

#define __ACCESS_BITS		_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
#define __DIRTY_BITS_NO_ED	_PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
#define __DIRTY_BITS		_PAGE_ED | __DIRTY_BITS_NO_ED
/*
 * How many pointers a page-table level holds, expressed as a shift:
 */
#define PTRS_PER_PTD_SHIFT	(PAGE_SHIFT-3)

/*
 * Definitions for fourth level:
 */
#define PTRS_PER_PTE	(__IA64_UL(1) << (PTRS_PER_PTD_SHIFT))

/*
 * Definitions for third level:
 *
 * PMD_SHIFT determines the size of the area a third-level page table
 * can map.
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PTRS_PER_PTD_SHIFT))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PTRS_PER_PMD	(1UL << (PTRS_PER_PTD_SHIFT))

#if CONFIG_PGTABLE_LEVELS == 4
/*
 * Definitions for second level:
 *
 * PUD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PUD_SHIFT	(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PTRS_PER_PUD	(1UL << (PTRS_PER_PTD_SHIFT))
#endif

/*
 * Definitions for first level:
 *
 * PGDIR_SHIFT determines what a first-level page table entry can map.
 */
#if CONFIG_PGTABLE_LEVELS == 4
#define PGDIR_SHIFT		(PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
#else
#define PGDIR_SHIFT		(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
#endif
#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD_SHIFT	PTRS_PER_PTD_SHIFT
#define PTRS_PER_PGD		(1UL << PTRS_PER_PGD_SHIFT)
#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
#define FIRST_USER_ADDRESS	0UL
/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed.  They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_A)
#define PAGE_SHARED	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
#define PAGE_READONLY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY_EXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define PAGE_GATE	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
#define PAGE_KERNEL_UC	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX | \
				 _PAGE_MA_UC)
# ifndef __ASSEMBLY__

#include <linux/sched/mm.h>	/* for mm_struct */
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/*
 * Next come the mappings that determine how mmap() protection bits
 * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented.  The
 * _P version gets used for a private shared memory segment, the _S
 * version gets used for a shared memory segment with MAP_SHARED on.
 * In a private shared memory segment, we do a copy-on-write if a task
 * attempts to write to the page.
 */
	/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_READONLY	/* write to priv pg -> copy & make writable */
#define __P011	PAGE_READONLY	/* ditto */
#define __P100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __P101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED	/* we don't have (and don't need) write-only */
#define __S011	PAGE_SHARED
#define __S100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __S101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __S110	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define __S111	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
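
/*
 * Editor's sketch (not part of the original header): the __Pxwr/__Sxwr
 * entries are indexed by the execute/write/read bits of the mmap()
 * protection value; generic code does this via the protection_map[]
 * table.  A hypothetical helper spelling out the private-mapping lookup:
 */
static inline pgprot_t __example_private_prot (unsigned long prot)
{
	/* PROT_READ|PROT_WRITE selects __P011, which is PAGE_READONLY here:
	   the first write faults and is resolved by copy-on-write. */
	switch (prot & (PROT_EXEC | PROT_WRITE | PROT_READ)) {
	case PROT_NONE:			return __P000;
	case PROT_READ:			return __P001;
	case PROT_WRITE:		return __P010;
	case PROT_WRITE | PROT_READ:	return __P011;
	case PROT_EXEC:			return __P100;
	case PROT_EXEC | PROT_READ:	return __P101;
	case PROT_EXEC | PROT_WRITE:	return __P110;
	default:			return __P111;
	}
}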
#define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#if CONFIG_PGTABLE_LEVELS == 4
#define pud_ERROR(e)	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e)	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
/*
 * Some definitions to translate between mem_map, PTEs, and page addresses:
 */

/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
ia64_phys_addr_valid (unsigned long addr)
{
	return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for IA-64.
 */
#define kern_addr_valid(addr)	(1)
/*
 * Now come the defines and routines to manage and access the three-level
 * page table.
 */

#define VMALLOC_START		(RGN_BASE(RGN_GATE) + 0x200000000UL)
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
extern unsigned long VMALLOC_END;
#else
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10)))
# define vmemmap		((struct page *)VMALLOC_END)
#else
# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
#endif
#endif

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v)	((v) - RGN_BASE(RGN_GATE))
#define kc_offset_to_vaddr(o)	((o) + RGN_BASE(RGN_GATE))

#define RGN_MAP_SHIFT	(PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
#define RGN_MAP_LIMIT	((1UL << RGN_MAP_SHIFT) - PAGE_SIZE)	/* per region addr limit */
/*
 * Conversion functions: convert a page frame number (pfn) and a protection value to
 * a page table entry (pte).
 */
#define pfn_pte(pfn, pgprot) \
({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })

/* Extract pfn from pte. */
#define pte_pfn(_pte)		((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
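
/*
 * Editor's example (not part of the original header): the pfn and the
 * protection bits occupy disjoint bit ranges, so pfn_pte()/pte_pfn()
 * round-trip any frame number that fits under IA64_MAX_PHYS_BITS:
 */
static inline int __example_pfn_roundtrip (unsigned long pfn)
{
	pte_t pte = pfn_pte(pfn, PAGE_READONLY);
	return pte_pfn(pte) == pfn;	/* true for any valid pfn */
}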
#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))
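
/*
 * Editor's note (illustration, not original): pte_modify() keeps every bit
 * outside _PAGE_CHG_MASK (the pfn and the A/D and memory-attribute bits)
 * and takes present/PROTNONE, privilege, access rights and ED from the
 * new protection, e.g.:
 *
 *	pte = pte_modify(pte, PAGE_SHARED);	(pfn unchanged, AR becomes RW)
 */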
#define pte_none(pte)			(!pte_val(pte))
#define pte_present(pte)		(pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(mm,addr,pte)		(pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
#define pte_page(pte)			virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))

#define pmd_none(pmd)			(!pmd_val(pmd))
#define pmd_bad(pmd)			(!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd)		(pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pmd_page_vaddr(pmd)		((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
#define pmd_page(pmd)			virt_to_page((pmd_val(pmd) + PAGE_OFFSET))

#define pud_none(pud)			(!pud_val(pud))
#define pud_bad(pud)			(!ia64_phys_addr_valid(pud_val(pud)))
#define pud_present(pud)		(pud_val(pud) != 0UL)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
#define pud_page_vaddr(pud)		((unsigned long) __va(pud_val(pud) & _PFN_MASK))
#define pud_page(pud)			virt_to_page((pud_val(pud) + PAGE_OFFSET))

#if CONFIG_PGTABLE_LEVELS == 4
#define pgd_none(pgd)			(!pgd_val(pgd))
#define pgd_bad(pgd)			(!ia64_phys_addr_valid(pgd_val(pgd)))
#define pgd_present(pgd)		(pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)
#define pgd_page_vaddr(pgd)		((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
#define pgd_page(pgd)			virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
#endif
/*
 * The following have defined behavior only if pte_present() is true.
 */
#define pte_write(pte)	((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte)		((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte)		((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte)		((pte_val(pte) & _PAGE_A) != 0)
#define pte_special(pte)	0
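
/*
 * Editor's note (illustration, not original): pte_write() relies on the AR
 * encoding above.  The writable access rights are exactly the contiguous
 * values 2 (RW), 3 (RWX), 4 (R_RW), 5 (RX_RWX) and 6 (RWX_RW), so
 * subtracting 2 and doing an unsigned compare against 4 tests "ar is one
 * of the five writable encodings" in two operations; values 0, 1 and 7
 * wrap around to large unsigned numbers and fail the test.
 */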
/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
 * access rights:
 */
#define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkold(pte)		(__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))
#define pte_mkhuge(pte)		(__pte(pte_val(pte)))
#define pte_mkspecial(pte)	(pte)
/*
 * Because ia64's Icache and Dcache are not coherent (on a cpu), we need to
 * sync the icache and dcache when we insert a *new* executable page.
 * __ia64_sync_icache_dcache() checks the PG_arch_1 bit and flushes the
 * icache if necessary.
 *
 * set_pte() is also called by the kernel, but we can expect that the kernel
 * flushes the icache explicitly if necessary.
 */
#define pte_present_exec_user(pte)\
	((pte_val(pte) & (_PAGE_P | _PAGE_PL_MASK | _PAGE_AR_RX)) == \
		(_PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX))

extern void __ia64_sync_icache_dcache(pte_t pteval);

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	/* page is present && page is user && page is executable
	 * && (page swapin or new page or page migration
	 *	|| copy_on_write with page copying.)
	 */
	if (pte_present_exec_user(pteval) &&
	    (!pte_present(*ptep) ||
	     pte_pfn(*ptep) != pte_pfn(pteval)))
		/* load_module() calls flush_icache_range() explicitly */
		__ia64_sync_icache_dcache(pteval);
	*ptep = pteval;
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/*
 * Make page protection values cacheable, uncacheable, or write-
 * combining.  Note that "protection" is really a misnomer here as the
 * protection value contains the memory attribute bits, dirty bits, and
 * various other bits as well.
 */
#define pgprot_cacheable(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WB)
#define pgprot_noncached(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
#define pgprot_writecombine(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT
static inline unsigned long
pgd_index (unsigned long address)
{
	unsigned long region = address >> 61;
	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

	return (region << (PAGE_SHIFT - 6)) | l1index;
}

/* The offset in the 1-level directory is given by the 3 region bits
   (61..63) and the level-1 bits.  */
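
/*
 * Editor's example (assuming the default 16KB page size, PAGE_SHIFT = 14):
 * PTRS_PER_PGD is then 1 << 11 = 2048 entries.  pgd_index() spends the
 * top 3 index bits on the region number (address bits 61-63) and the
 * remaining PAGE_SHIFT - 6 = 8 bits on the level-1 index within the
 * region, so an address in region 5 whose level-1 index is 7 maps to
 * entry (5 << 8) | 7 = 1287.
 */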
static inline pgd_t*
pgd_offset (const struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* In the kernel's mapped region we completely ignore the region number
   (since we know it's in region number 5). */
#define pgd_offset_k(addr) \
	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

/* Look up a pgd entry in the gate area.  On IA-64, the gate-area
   resides in the kernel-mapped segment, hence we use pgd_offset_k()
   here.  */
#define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)

#if CONFIG_PGTABLE_LEVELS == 4
/* Find an entry in the second-level page table.. */
#define pud_offset(dir,addr) \
	((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
#endif

/* Find an entry in the third-level page table.. */
#define pmd_offset(dir,addr) \
	((pmd_t *) pud_page_vaddr(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
/*
 * Find an entry in the last-level (PTE) page table.  This looks more
 * complicated than it should be because some platforms place page tables
 * in high memory.
 */
#define pte_index(addr)			(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr)	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_unmap(pte)			do { } while (0)
/* atomic versions of some of the PTE manipulations: */

static inline int
ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
#endif
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	return __pte(xchg((long *) ptep, 0));
#else
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
#endif
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte (old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;

	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}

static inline int
pte_same (pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}
#define update_mmu_cache(vma, address, ptep) do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);

/*
 * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
 *	 bits in the swap-type field of the swap pte.  It would be nice to
 *	 enforce that, but we can't easily include <linux/swap.h> here.
 *	 (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
 *
 * Format of swap pte:
 *	bit   0   : present bit (must be zero)
 *	bits  1- 7: swap-type
 *	bits  8-62: swap offset
 *	bit  63   : _PAGE_PROTNONE bit
 */
#define __swp_type(entry)		(((entry).val >> 1) & 0x7f)
#define __swp_offset(entry)		(((entry).val << 1) >> 9)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 1) | ((long) (offset) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
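
/*
 * Editor's example (not part of the original header): encoding then
 * decoding a swap entry round-trips both fields, e.g. for type 3 and
 * offset 0x1234:
 *
 *	swp_entry_t e = __swp_entry(3, 0x1234);
 *	e.val == 0x123406	(== (3 << 1) | (0x1234UL << 8); bit 0 clear)
 *	__swp_type(e) == 3 and __swp_offset(e) == 0x1234
 *
 * Note how __swp_offset() shifts left first: that discards bit 63
 * (_PAGE_PROTNONE) before the offset is extracted from bits 8-62.
 */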
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
#endif
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update PTEP with ENTRY, which is guaranteed to be a less
 * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
 * WRITABLE bits turned on, when the value at PTEP did not.  The
 * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
 *
 * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
 * having to worry about races.  On SMP machines, there are only two
 * cases where this is true:
 *
 *	(1) *PTEP has the PRESENT bit turned OFF
 *	(2) ENTRY has the DIRTY bit turned ON
 *
 * On ia64, we could implement this routine with a cmpxchg()-loop
 * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
 * However, like on x86, we can get a more streamlined version by
 * observing that it is OK to drop ACCESSED bit updates when
 * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
 * result in an extra Access-bit fault, which would then turn on the
 * ACCESSED bit in the low-level fault handler (iaccess_bit or
 * daccess_bit in ivt.S).
 */
#ifdef CONFIG_SMP
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed && __safely_writable) {				\
		set_pte(__ptep, __entry);				\
		flush_tlb_page(__vma, __addr);				\
	}								\
	__changed;							\
})
#else
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry);	\
		flush_tlb_page(__vma, __addr);				\
	}								\
	__changed;							\
})
#endif
# ifdef CONFIG_VIRTUAL_MEM_MAP
  /* arch mem_map init routine is needed due to holes in a virtual mem_map */
# define __HAVE_ARCH_MEMMAP_INIT
  extern void memmap_init (unsigned long size, int nid, unsigned long zone,
			   unsigned long start_pfn);
# endif /* CONFIG_VIRTUAL_MEM_MAP */
# endif /* !__ASSEMBLY__ */

/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE	(1 << IA64_GRANULE_SHIFT)
/*
 * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
 */
#define KERNEL_TR_PAGE_SHIFT	_PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

/* These tell get_user_pages() that the first gate page is accessible from user-level.  */
#define FIXADDR_USER_START	GATE_ADDR
#ifdef HAVE_BUGGY_SEGREL
# define FIXADDR_USER_END	(GATE_ADDR + 2*PAGE_SIZE)
#else
# define FIXADDR_USER_END	(GATE_ADDR + 2*PERCPU_PAGE_SIZE)
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PGD_OFFSET_GATE

#if CONFIG_PGTABLE_LEVELS == 3
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#endif
#include <asm-generic/5level-fixup.h>
#include <asm-generic/pgtable.h>

#endif /* _ASM_IA64_PGTABLE_H */