// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
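
/*
 * Flush every virtual address queued in this CPU's batch: the TSB is
 * updated first, then the TLB entries are shot down (via cross-call on
 * SMP) if the mm still holds a valid hardware context.
 */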
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}
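
/*
 * Lazy MMU mode brackets a region in which PTE invalidations may be
 * batched; addresses queued while active are flushed on leave (or
 * earlier, if the batch fills up or switches to a different mm).
 */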
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}
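
/*
 * Queue one virtual address for a deferred TLB/TSB flush.  The low bit
 * of the queued address records whether the mapping was executable, so
 * the flush can cover the I-TLB as well.  The batch is flushed
 * immediately if it is inactive, changes mm, changes page size, or
 * becomes full.
 */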
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}
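
/*
 * Called when a previously valid user PTE is replaced.  Dirty pages
 * backed by a real file may alias in the virtually-indexed D-cache
 * when the kernel and user addresses differ in bit 13 (the first index
 * bit above sparc64's 8K page offset), so flush the D-cache before the
 * translation disappears.
 */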
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
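/*
 * A PMD that is not a hugepage still covers HPAGE_SIZE worth of base
 * pages; walk its PTEs and queue each valid mapping individually.
 */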
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}
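
/*
 * Keep the per-mm huge-mapping counters in sync when a PMD changes
 * between huge and non-huge, and queue TLB flushes for whatever mapping
 * is being replaced.  One THP is mapped by two REAL_HPAGE_SIZE TLB
 * entries on sparc64, hence the pair of flushes below.
 */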
static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
			   pmd_t orig, pmd_t pmd)
{
	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}
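
/* Install a PMD entry; accounting and flushing happen after the store. */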
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;
	__set_pmd_acct(mm, addr, orig, pmd);
}
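
/*
 * Atomically swap in the new PMD with cmpxchg64(), retrying until no
 * racing update intervenes, then account against the old value that
 * was actually observed.
 */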
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
	__set_pmd_acct(vma->vm_mm, address, old, pmd);

	return old;
}

/*
 * This routine is only called when splitting a THP
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old, entry;

	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
	old = pmdp_establish(vma, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_page(pmd_page(entry)))
		(vma->vm_mm)->context.thp_pte_count--;

	return old;
}
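
/*
 * The deposit/withdraw pair stashes a preallocated page table for a
 * huge PMD so it can be reused when the huge mapping is torn down or
 * split.  sparc64 threads the stashed tables through a list_head that
 * is overlaid on the (currently unused) page table memory itself.
 */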
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}

	/* Clear the two PTE slots that held the list_head linkage. */
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */