tlbflush.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
        asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
        unsigned long opt;

        opt = IDTE_PTOA;
        if (MACHINE_HAS_TLB_GUEST)
                opt |= IDTE_GUEST_ASCE;
        /* Global TLB flush for the mm */
        asm volatile(
                "       .insn   rrf,0xb98e0000,0,%0,%1,0"
                : : "a" (opt), "a" (asce) : "cc");
}
#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
        unsigned int dummy = 0;

        csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (if gmap is used
 * this implies multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
        unsigned long gmap_asce;

        /*
         * If the machine has IDTE we prefer to do a per mm flush
         * on all CPUs instead of doing a local flush if the mm
         * only ran on the local CPU.
         */
        preempt_disable();
        atomic_inc(&mm->context.flush_count);
        /* Reset TLB flush mask */
        cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
        barrier();
        gmap_asce = READ_ONCE(mm->context.gmap_asce);
        if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
                if (gmap_asce)
                        __tlb_flush_idte(gmap_asce);
                __tlb_flush_idte(mm->context.asce);
        } else {
                /* Global TLB flush */
                __tlb_flush_global();
        }
        atomic_dec(&mm->context.flush_count);
        preempt_enable();
}
static inline void __tlb_flush_kernel(void)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte(init_mm.context.asce);
        else
                __tlb_flush_global();
}
#else
#define __tlb_flush_global()    __tlb_flush_local()

/*
 * Without SMP a local flush of all TLB entries is sufficient for
 * a specific mm.
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
        __tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
        __tlb_flush_local();
}
#endif

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
        spin_lock(&mm->context.lock);
        if (mm->context.flush_mm) {
                mm->context.flush_mm = 0;
                __tlb_flush_mm(mm);
        }
        spin_unlock(&mm->context.lock);
}
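
/*
 * Illustrative sketch, not part of the original header: how a deferred
 * flush could be requested. Setting mm->context.flush_mm records that the
 * mm needs flushing; a later flush_tlb_mm() then performs it through
 * __tlb_flush_mm_lazy() above. The function name request_lazy_mm_flush()
 * is hypothetical; the real setters live in the s390 page table code and
 * may use different locking.
 */
static inline void request_lazy_mm_flush(struct mm_struct *mm)
{
        spin_lock(&mm->context.lock);
        /* Flushed later by __tlb_flush_mm_lazy(). */
        mm->context.flush_mm = 1;
        spin_unlock(&mm->context.lock);
}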
/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 * An illustrative sketch of this pattern follows the definitions below.
 */
#define flush_tlb()                     do { } while (0)
#define flush_tlb_all()                 do { } while (0)
#define flush_tlb_page(vma, addr)       do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
        __tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        __tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
{
        __tlb_flush_kernel();
}
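
/*
 * Illustrative sketch, not part of the original header: the deferred
 * flush pattern described in the comment above. ptep_set_wrprotect() may
 * skip the direct TLB flush when the mm has only one user, so the caller
 * issues a single flush_tlb_range() after updating every PTE in the
 * range. The function name wrprotect_range_example() is hypothetical.
 */
static inline void wrprotect_range_example(struct vm_area_struct *vma,
                                           unsigned long start,
                                           unsigned long end)
{
        unsigned long addr;
        spinlock_t *ptl;
        pte_t *ptep;

        for (addr = start; addr < end; addr += PAGE_SIZE) {
                ptep = get_locked_pte(vma->vm_mm, addr, &ptl);
                if (!ptep)
                        continue;
                if (pte_present(*ptep))
                        ptep_set_wrprotect(vma->vm_mm, addr, ptep);
                pte_unmap_unlock(ptep, ptl);
        }
        /* One flush for the whole range instead of one per PTE. */
        flush_tlb_range(vma, start, end);
}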

#endif /* _S390_TLBFLUSH_H */