tlb.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __UM_TLB_H
#define __UM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
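
/*
 * UML keeps no per-vma TLB state, so the vma hooks above are no-ops and
 * tlb_flush() falls back to flushing the whole mm via flush_tlb_mm().
 */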

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		need_flush; /* Really unmapped some ptes? */
	unsigned long		start;
	unsigned long		end;
	unsigned int		fullmm; /* non-zero means full mm flush */
};
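
/*
 * Rough sketch of how the generic mm code is expected to drive this
 * (illustrative only, the real callers live in the core mm code):
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	for each pte being unmapped:
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);
 *		tlb_remove_page(&tlb, page);
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * __tlb_remove_tlb_entry() below only grows [start, end) to cover every
 * address that was actually unmapped, so the final flush can be limited
 * to that range.
 */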
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}
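
/*
 * Reset the gather state: an empty range when only part of the address
 * space is being unmapped, or all of TASK_SIZE when tearing down the
 * whole mm.
 */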
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}
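
/*
 * Record the mm and the range being torn down; start == 0 together with
 * end == ~0UL is treated as a full-mm flush.
 */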
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
	tlb->fullmm = !(start | (end + 1));

	init_tlb_gather(tlb);
}

extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);

static inline void
tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
}

static inline void
tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	init_tlb_gather(tlb);
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;

	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/* arch_tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->start = start;
		tlb->end = end;
		tlb->need_flush = 1;
	}
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

/* tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}
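
/*
 * There is no page batching here: pages are freed immediately above, so
 * the wrappers below only exist to satisfy the generic mmu_gather API and
 * the *_size variants can ignore page_size entirely.
 */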
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	tlb_remove_page(tlb, page);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)
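
/*
 * The eventual flush covers the whole gathered mm range regardless of page
 * size, so a page size change in the middle of a gather needs no action.
 */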
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)

#define tlb_migrate_finish(mm) do {} while (0)

#endif