/* tlbflush.h (3.3 KB) */
  1. #ifndef _ALPHA_TLBFLUSH_H
  2. #define _ALPHA_TLBFLUSH_H
  3. #include <linux/mm.h>
  4. #include <linux/sched.h>
  5. #include <asm/compiler.h>
  6. #include <asm/pgalloc.h>
  7. #ifndef __EXTERN_INLINE
  8. #define __EXTERN_INLINE extern inline
  9. #define __MMU_EXTERN_INLINE
  10. #endif
  11. extern void __load_new_mm_context(struct mm_struct *);
/* Use a few helper functions to hide the ugly broken ASN
   numbers on early Alphas (ev4 and ev45). */

__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
	/* EV4 has no usable ASNs, so switching to a fresh mm context is
	   not enough by itself: load the new context, then explicitly
	   invalidate all per-process TB entries (tbiap).  Order matters —
	   the invalidate must follow the context load. */
	__load_new_mm_context(mm);
	tbiap();
}
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
	/* EV5 has working ASNs: allocating a new mm context (new ASN)
	   is sufficient to retire the old translations, so no explicit
	   TB invalidate is needed here. */
	__load_new_mm_context(mm);
}
  25. /* Flush just one page in the current TLB set. We need to be very
  26. careful about the icache here, there is no way to invalidate a
  27. specific icache page. */
  28. __EXTERN_INLINE void
  29. ev4_flush_tlb_current_page(struct mm_struct * mm,
  30. struct vm_area_struct *vma,
  31. unsigned long addr)
  32. {
  33. int tbi_flag = 2;
  34. if (vma->vm_flags & VM_EXEC) {
  35. __load_new_mm_context(mm);
  36. tbi_flag = 3;
  37. }
  38. tbi(tbi_flag, addr);
  39. }
  40. __EXTERN_INLINE void
  41. ev5_flush_tlb_current_page(struct mm_struct * mm,
  42. struct vm_area_struct *vma,
  43. unsigned long addr)
  44. {
  45. if (vma->vm_flags & VM_EXEC)
  46. __load_new_mm_context(mm);
  47. else
  48. tbi(2, addr);
  49. }
/* Bind flush_tlb_current{,_page} to the implementation matching the
   CPU(s) this kernel is built for: a generic kernel dispatches at run
   time through the machine vector, a CPU-specific build binds the
   ev4 or ev5 helper directly. */
#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page	alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
# define flush_tlb_current		ev4_flush_tlb_current
# define flush_tlb_current_page	ev4_flush_tlb_current_page
# else
# define flush_tlb_current		ev5_flush_tlb_current
# define flush_tlb_current_page	ev5_flush_tlb_current_page
# endif
#endif

/* If this header owned the __EXTERN_INLINE definition (see the top of
   the file), retract it so later code is unaffected. */
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif
/* Flush current user mapping. */
static inline void
flush_tlb(void)
{
	/* active_mm rather than mm: the mm whose translations are live
	   on this CPU (they differ for lazy-mm kernel threads —
	   NOTE(review): standard kernel convention, confirm). */
	flush_tlb_current(current->active_mm);
}
  72. /* Flush someone else's user mapping. */
  73. static inline void
  74. flush_tlb_other(struct mm_struct *mm)
  75. {
  76. unsigned long *mmc = &mm->context[smp_processor_id()];
  77. /* Check it's not zero first to avoid cacheline ping pong
  78. when possible. */
  79. if (*mmc) *mmc = 0;
  80. }
  81. #ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree). */
static inline void flush_tlb_all(void)
{
	/* tbia: invalidate the entire translation buffer on this CPU. */
	tbia();
}
  88. /* Flush a specified user mapping. */
  89. static inline void
  90. flush_tlb_mm(struct mm_struct *mm)
  91. {
  92. if (mm == current->active_mm)
  93. flush_tlb_current(mm);
  94. else
  95. flush_tlb_other(mm);
  96. }
  97. /* Page-granular tlb flush. */
  98. static inline void
  99. flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
  100. {
  101. struct mm_struct *mm = vma->vm_mm;
  102. if (mm == current->active_mm)
  103. flush_tlb_current_page(mm, vma, addr);
  104. else
  105. flush_tlb_other(mm);
  106. }
/* Flush a specified range of user mapping.  On the Alpha we flush
   the whole user tlb. */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	/* No ranged TB invalidate exists here: start/end are ignored
	   and the mm's entire user mapping is flushed instead. */
	flush_tlb_mm(vma->vm_mm);
}
  115. #else /* CONFIG_SMP */
  116. extern void flush_tlb_all(void);
  117. extern void flush_tlb_mm(struct mm_struct *);
  118. extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
  119. extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
  120. unsigned long);
  121. #endif /* CONFIG_SMP */
/* Flush a range of kernel mappings (e.g. after vmalloc/vfree).
   There is no per-range primitive, so start/end are ignored and the
   whole TLB is flushed via flush_tlb_all (UP: tbia above; SMP: the
   extern cross-CPU version). */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
  127. #endif /* _ALPHA_TLBFLUSH_H */