/* arch/alpha/include/asm/tlbflush.h -- Alpha TLB flushing primitives */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _ALPHA_TLBFLUSH_H
  3. #define _ALPHA_TLBFLUSH_H
  4. #include <linux/mm.h>
  5. #include <linux/sched.h>
  6. #include <asm/compiler.h>
  7. #include <asm/pgalloc.h>
  8. #ifndef __EXTERN_INLINE
  9. #define __EXTERN_INLINE extern inline
  10. #define __MMU_EXTERN_INLINE
  11. #endif
  12. extern void __load_new_mm_context(struct mm_struct *);
  13. /* Use a few helper functions to hide the ugly broken ASN
  14. numbers on early Alphas (ev4 and ev45). */
/*
 * Flush all user TLB entries of the currently running mm on EV4/EV45.
 *
 * These early CPUs have the "ugly broken ASN" problem noted above, so
 * we cannot rely on address-space numbers to retire stale entries:
 * reload the mm context, then explicitly invalidate all process
 * (non-ASM) translation-buffer entries with TBIAP.  The order matters:
 * the context must be in place before the invalidate.
 */
__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
	tbiap();
}
/*
 * Flush all user TLB entries of the currently running mm on EV5+.
 *
 * EV5 has working ASNs, so simply loading a new mm context (which
 * presumably assigns a fresh ASN -- see __load_new_mm_context) is
 * enough; no explicit translation-buffer invalidate is required.
 */
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
}
  26. /* Flush just one page in the current TLB set. We need to be very
  27. careful about the icache here, there is no way to invalidate a
  28. specific icache page. */
  29. __EXTERN_INLINE void
  30. ev4_flush_tlb_current_page(struct mm_struct * mm,
  31. struct vm_area_struct *vma,
  32. unsigned long addr)
  33. {
  34. int tbi_flag = 2;
  35. if (vma->vm_flags & VM_EXEC) {
  36. __load_new_mm_context(mm);
  37. tbi_flag = 3;
  38. }
  39. tbi(tbi_flag, addr);
  40. }
  41. __EXTERN_INLINE void
  42. ev5_flush_tlb_current_page(struct mm_struct * mm,
  43. struct vm_area_struct *vma,
  44. unsigned long addr)
  45. {
  46. if (vma->vm_flags & VM_EXEC)
  47. __load_new_mm_context(mm);
  48. else
  49. tbi(2, addr);
  50. }
/*
 * Select the flush implementation.  Generic kernels dispatch at run
 * time through the machine vector; CPU-specific kernels bind the ev4
 * or ev5 variant directly at compile time.
 */
#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current		ev4_flush_tlb_current
#  define flush_tlb_current_page	ev4_flush_tlb_current_page
# else
#  define flush_tlb_current		ev5_flush_tlb_current
#  define flush_tlb_current_page	ev5_flush_tlb_current_page
# endif
#endif

/* Tear down the extern-inline machinery if we set it up above. */
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif
/* Flush the current user mapping, i.e. the mm that is active on this
   CPU (current->active_mm, which also covers lazy-mm kernel threads). */
static inline void
flush_tlb(void)
{
	flush_tlb_current(current->active_mm);
}
  73. /* Flush someone else's user mapping. */
  74. static inline void
  75. flush_tlb_other(struct mm_struct *mm)
  76. {
  77. unsigned long *mmc = &mm->context[smp_processor_id()];
  78. /* Check it's not zero first to avoid cacheline ping pong
  79. when possible. */
  80. if (*mmc) *mmc = 0;
  81. }
#ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree): TBIA invalidates the entire
   translation buffer on this (only) CPU. */
static inline void flush_tlb_all(void)
{
	tbia();
}
  89. /* Flush a specified user mapping. */
  90. static inline void
  91. flush_tlb_mm(struct mm_struct *mm)
  92. {
  93. if (mm == current->active_mm)
  94. flush_tlb_current(mm);
  95. else
  96. flush_tlb_other(mm);
  97. }
  98. /* Page-granular tlb flush. */
  99. static inline void
  100. flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
  101. {
  102. struct mm_struct *mm = vma->vm_mm;
  103. if (mm == current->active_mm)
  104. flush_tlb_current_page(mm, vma, addr);
  105. else
  106. flush_tlb_other(mm);
  107. }
/* Flush a specified range of user mapping.  On the Alpha we flush
   the whole user tlb -- start and end are deliberately unused, since
   there is no cheaper ranged invalidate than flushing the mm. */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
  116. #else /* CONFIG_SMP */
  117. extern void flush_tlb_all(void);
  118. extern void flush_tlb_mm(struct mm_struct *);
  119. extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
  120. extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
  121. unsigned long);
  122. #endif /* CONFIG_SMP */
/* Flush a range of kernel mappings (e.g. after vmalloc/vfree).  The
   range arguments are unused: we simply flush everything. */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#endif /* _ALPHA_TLBFLUSH_H */