// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */
void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}
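
/*
 * For reference, the p?d_none_or_clear_bad() callers live in
 * asm-generic/pgtable.h and look roughly like the following
 * (simplified sketch of the pmd variant):
 *
 *	static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 *	{
 *		if (pmd_none(*pmd))
 *			return 1;
 *		if (unlikely(pmd_bad(*pmd))) {
 *			pmd_clear_bad(pmd);
 *			return 1;
 *		}
 *		return 0;
 *	}
 */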

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission.  Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this.  We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache.  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif
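
/*
 * Typical caller pattern (a simplified, hypothetical sketch of a
 * minor-fault path, not the exact mm/memory.c code): build the more
 * permissive entry, then let the return value gate update_mmu_cache().
 *
 *	entry = pte_mkyoung(entry);
 *	if (write_fault)
 *		entry = pte_mkdirty(entry);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, write_fault))
 *		update_mmu_cache(vma, address, ptep);
 */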

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	/* Only flush when the cleared pte could be cached in a TLB. */
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif
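
/*
 * Note: flush_pmd_tlb_range() is itself generic.  Architectures without
 * a dedicated huge-page TLB flush get the asm-generic/pgtable.h
 * fallback, roughly:
 *
 *	#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
 */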

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/*
	 * A present pmd here must be trans-huge or devmap; non-present
	 * entries (e.g. under migration) are allowed through.
	 */
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;
	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif
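
/*
 * Deposit and withdraw pair up around huge pmd lifetime.  A hedged
 * sketch (simplified, not the exact THP code): a pte page is deposited
 * when a huge pmd is installed, and withdrawn again when the mapping is
 * split or zapped, so the range can always be remapped at pte level
 * without allocating.
 *
 *	pgtable = pte_alloc_one(mm);
 *	ptl = pmd_lock(mm, pmd);
 *	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 *	set_pmd_at(mm, haddr, pmd, huge_entry);
 *	spin_unlock(ptl);
 *	...
 *	(later, at split or zap time, under the same lock)
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 */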

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mknotpresent(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif
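
/*
 * A hedged sketch of why callers want the old value back (simplified
 * from the THP split path): the pmd is made non-present so no CPU can
 * race in new dirty/accessed bits while it is being rewritten, and the
 * returned entry preserves the bits set so far.
 *
 *	old = pmdp_invalidate(vma, haddr, pmdp);
 *	if (pmd_dirty(old))
 *		SetPageDirty(page);
 */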

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and hugepage pte formats are the same, so we can
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes not pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
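
/*
 * Hypothetical caller sketch (khugepaged-style collapse, simplified):
 * the pte page is detached and its TLB entries shot down before the
 * collapsed huge pmd is installed, so no CPU ever sees the old ptes
 * and the new pmd at the same time.
 *
 *	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
 *	... migrate the small pages into one huge page ...
 *	set_pmd_at(mm, haddr, pmd, huge_entry);
 */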

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */