/* arch/m68k include: TLB flushing primitives (tlbflush.h) */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _M68K_TLBFLUSH_H
  3. #define _M68K_TLBFLUSH_H
  4. #ifdef CONFIG_MMU
  5. #ifndef CONFIG_SUN3
  6. #include <asm/current.h>
  7. #include <asm/mcfmmu.h>
/*
 * Flush the ATC (TLB) entry covering a single kernel-space page.
 *
 * ColdFire has no per-page invalidate here: MMUOR_CNL clears all
 * non-locked TLB entries instead.
 */
static inline void flush_tlb_kernel_page(void *addr)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		/*
		 * NOTE(review): pflush on '040/'060 appears to match the
		 * address space selected by the DFC register, hence the
		 * temporary switch to KERNEL_DS so the supervisor entry
		 * is the one invalidated -- confirm against the 68040 UM.
		 */
		mm_segment_t old_fs = get_fs();
		set_fs(KERNEL_DS);
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflush (%0)\n\t"
				     ".chip 68k"
				     : : "a" (addr));
		set_fs(old_fs);
	} else if (CPU_IS_020_OR_030)
		/* FC mask 4, value 4: match supervisor-space accesses only. */
		__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}
/*
 * flush all user-space atc entries.
 */
static inline void __flush_tlb(void)
{
	if (CPU_IS_COLDFIRE) {
		/* Clears all non-locked entries; no user-only variant. */
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		/* pflushan: flush all non-global entries (user mappings). */
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflushan\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		/* FC mask 4, value 0: match user-space accesses only. */
		__asm__ __volatile__("pflush #0,#4");
	}
}
/*
 * Flush the '040/'060 ATC entry for @addr in the address space
 * currently selected by the DFC (callers set USER_DS/KERNEL_DS
 * around this as needed).
 */
static inline void __flush_tlb040_one(unsigned long addr)
{
	__asm__ __volatile__(".chip 68040\n\t"
			     "pflush (%0)\n\t"
			     ".chip 68k"
			     : : "a" (addr));
}
/* Flush the ATC entry for a single user-space page at @addr. */
static inline void __flush_tlb_one(unsigned long addr)
{
	if (CPU_IS_COLDFIRE)
		/* ColdFire: no per-page flush; clear all non-locked entries. */
		mmu_write(MMUOR, MMUOR_CNL);
	else if (CPU_IS_040_OR_060)
		__flush_tlb040_one(addr);
	else if (CPU_IS_020_OR_030)
		/* FC mask 4, value 0: match user-space accesses only. */
		__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}
  54. #define flush_tlb() __flush_tlb()
/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		/* pflusha: flush every entry, including globals. */
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflusha\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflusha");
	}
}
  70. static inline void flush_tlb_mm(struct mm_struct *mm)
  71. {
  72. if (mm == current->active_mm)
  73. __flush_tlb();
  74. }
/* Flush the ATC entry for one user page in @vma, if its mm is live here. */
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	if (vma->vm_mm == current->active_mm) {
		/*
		 * NOTE(review): __flush_tlb_one() on '040/'060 matches the
		 * DFC-selected address space; switch to USER_DS so the
		 * user-mode entry for @addr is the one invalidated --
		 * confirm against the 68040 UM.
		 */
		mm_segment_t old_fs = get_fs();
		set_fs(USER_DS);
		__flush_tlb_one(addr);
		set_fs(old_fs);
	}
}
  84. static inline void flush_tlb_range(struct vm_area_struct *vma,
  85. unsigned long start, unsigned long end)
  86. {
  87. if (vma->vm_mm == current->active_mm)
  88. __flush_tlb();
  89. }
  90. static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
  91. {
  92. flush_tlb_all();
  93. }
  94. #else
/* Reserved PMEGs. */
extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
/*
 * Per-PMEG bookkeeping, indexed by PMEG number (defined in sun3 mm code).
 * Roles as used by the flush routines below -- confirm in the definitions:
 *   pmeg_vaddr - virtual address the PMEG currently maps
 *   pmeg_alloc - nonzero while the PMEG is allocated to a mapping
 *   pmeg_ctx   - MMU context the PMEG belongs to
 */
extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];
/* Flush all userspace mappings one by one...  (why no flush command,
   sun?) */
static inline void flush_tlb_all(void)
{
	unsigned long addr;
	unsigned char ctx, oldctx;

	/* Walk every segment of every context and invalidate its segmap
	 * entry; the loop over 8 contexts covers all hardware contexts.
	 * Save/restore the active context around the walk. */
	oldctx = sun3_get_context();
	for(addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
		for(ctx = 0; ctx < 8; ctx++) {
			sun3_put_context(ctx);
			sun3_put_segmap(addr, SUN3_INVALID_PMEG);
		}
	}
	sun3_put_context(oldctx);
	/* erase all of the userspace pmeg maps, we've clobbered them
	   all anyway */
	for(addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
		if(pmeg_alloc[addr] == 1) {
			pmeg_alloc[addr] = 0;
			pmeg_ctx[addr] = 0;
			pmeg_vaddr[addr] = 0;
		}
	}
}
/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm (struct mm_struct *mm)
{
	unsigned char oldctx;
	unsigned char seg;
	unsigned long i;

	/* Switch to the mm's hardware context, invalidate every mapped
	 * segment in it, and release the backing PMEG bookkeeping. */
	oldctx = sun3_get_context();
	sun3_put_context(mm->context);
	for(i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
		seg = sun3_get_segmap(i);
		if(seg == SUN3_INVALID_PMEG)
			continue;

		sun3_put_segmap(i, SUN3_INVALID_PMEG);
		pmeg_alloc[seg] = 0;
		pmeg_ctx[seg] = 0;
		pmeg_vaddr[seg] = 0;
	}
	/* Restore whatever context was active before. */
	sun3_put_context(oldctx);
}
/* Flush a single TLB page.  In this case, we're limited to flushing a
   single PMEG */
static inline void flush_tlb_page (struct vm_area_struct *vma,
				   unsigned long addr)
{
	unsigned char oldctx;
	unsigned char i;

	oldctx = sun3_get_context();
	sun3_put_context(vma->vm_mm->context);
	/* Round down to the PMEG (segment) boundary containing @addr. */
	addr &= ~SUN3_PMEG_MASK;
	if((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG)
	{
		/* Release the PMEG's bookkeeping and unmap the segment. */
		pmeg_alloc[i] = 0;
		pmeg_ctx[i] = 0;
		pmeg_vaddr[i] = 0;
		sun3_put_segmap (addr,  SUN3_INVALID_PMEG);
	}
	sun3_put_context(oldctx);
}
/* Flush a range of pages from TLB. */
static inline void flush_tlb_range (struct vm_area_struct *vma,
		      unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned char seg, oldctx;

	/* Work at PMEG granularity: round @start down to a segment. */
	start &= ~SUN3_PMEG_MASK;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	while(start < end)
	{
		if((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
		     goto next;
		/* Free the PMEG bookkeeping only when it is still owned
		 * by this mm's context; the segmap entry is invalidated
		 * either way. */
		if(pmeg_ctx[seg] == mm->context) {
			pmeg_alloc[seg] = 0;
			pmeg_ctx[seg] = 0;
			pmeg_vaddr[seg] = 0;
		}
		sun3_put_segmap(start, SUN3_INVALID_PMEG);
	next:
		start += SUN3_PMEG_SIZE;
	}
}
  185. static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
  186. {
  187. flush_tlb_all();
  188. }
  189. /* Flush kernel page from TLB. */
  190. static inline void flush_tlb_kernel_page (unsigned long addr)
  191. {
  192. sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
  193. }
  194. #endif
  195. #else /* !CONFIG_MMU */
/*
 * flush all user-space atc entries.
 *
 * Without an MMU there is no TLB; reaching any of these stubs is a
 * kernel bug.
 */
static inline void __flush_tlb(void)
{
	BUG();
}
/* No MMU: flushing a single entry is meaningless. */
static inline void __flush_tlb_one(unsigned long addr)
{
	BUG();
}
  207. #define flush_tlb() __flush_tlb()
/*
 * flush all atc entries (both kernel and user-space entries).
 *
 * No MMU: must never be called.
 */
static inline void flush_tlb_all(void)
{
	BUG();
}
/* No MMU: must never be called. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG();
}
/* No MMU: must never be called. */
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	BUG();
}
  223. static inline void flush_tlb_range(struct mm_struct *mm,
  224. unsigned long start, unsigned long end)
  225. {
  226. BUG();
  227. }
/* No MMU: must never be called. */
static inline void flush_tlb_kernel_page(unsigned long addr)
{
	BUG();
}
  232. #endif /* CONFIG_MMU */
  233. #endif /* _M68K_TLBFLUSH_H */