/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_CACHEFLUSH_H
#define _M68K_CACHEFLUSH_H

#include <linux/mm.h>
#ifdef CONFIG_COLDFIRE
#include <asm/mcfsim.h>
#endif

/* cache code */
#define FLUSH_I_AND_D	(0x00000808)
#define FLUSH_I		(0x00000008)

#ifndef ICACHE_MAX_ADDR
#define ICACHE_MAX_ADDR	0
#define ICACHE_SET_MASK	0
#define DCACHE_MAX_ADDR	0
#define DCACHE_SETMASK	0
#endif
#ifndef CACHE_MODE
#define CACHE_MODE	0
#define CACR_ICINVA	0
#define CACR_DCINVA	0
#define CACR_BCINVA	0
#endif

/*
 * ColdFire architecture has no way to clear individual cache lines, so we
 * are stuck invalidating all the cache entries when we want a clear operation.
 */
static inline void clear_cf_icache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec %0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA));
}

static inline void clear_cf_dcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec %0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_DCINVA));
}

static inline void clear_cf_bcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec %0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA | CACR_DCINVA));
}
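
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * since the clear_cf_*() helpers above write CACR to invalidate the
 * entire cache, their start/end arguments are ignored; they exist only
 * so the signatures match the range-based flush_cf_*() helpers below.
 * A caller therefore gets a full invalidation no matter what range it
 * passes:
 */
static inline void example_clear_whole_dcache(void)
{
	clear_cf_dcache(0, 0);	/* arguments unused; all entries invalidated */
}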

/*
 * Use the ColdFire cpushl instruction to push (and invalidate) cache lines.
 * The start and end addresses are cache line numbers not memory addresses.
 */
static inline void flush_cf_icache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_dcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_bcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}
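
/*
 * Illustrative sketch (hypothetical helper, nothing here is kernel API):
 * in the flush_cf_*() loops above each asm block issues four cpushl's,
 * stepping the address by 1 between them, which is assumed to walk the
 * four ways of one set; the C-level increment of (0x10 - 3) then lands
 * on the next set, 0x10 further on.  The loop bookkeeping is therefore
 * equivalent to:
 */
static inline unsigned long example_count_cpushl_ops(unsigned long start,
						     unsigned long end)
{
	unsigned long set, ops = 0;

	for (set = start; set <= end; set += 0x10)
		ops += 4;	/* one cpushl per way, assumed four ways per set */
	return ops;
}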

/*
 * Cache handling functions
 */

static inline void flush_icache(void)
{
	if (CPU_IS_COLDFIRE) {
		flush_cf_icache(0, ICACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		asm volatile (	"nop\n"
			"	.chip	68040\n"
			"	cpusha	%bc\n"
			"	.chip	68k");
	} else {
		unsigned long tmp;
		asm volatile (	"movec	%%cacr,%0\n"
			"	or.w	%1,%0\n"
			"	movec	%0,%%cacr"
			: "=&d" (tmp)
			: "id" (FLUSH_I));
	}
}
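
/*
 * Illustrative sketch (hypothetical scenario, not kernel code):
 * flush_icache() discards the whole instruction cache, so a caller
 * that has just written new instructions into memory can force the
 * CPU to refetch them:
 */
static inline void example_after_code_write(void)
{
	/* ... new instructions have been written to RAM here ... */
	flush_icache();		/* make the CPU refetch the fresh code */
}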

/*
 * Invalidate the cache for the specified memory range.
 * The range starts at the given physical address and
 * covers the given number of bytes.
 */
extern void cache_clear(unsigned long paddr, int len);

/*
 * Push any dirty cache lines in the specified memory range.
 * The range starts at the given physical address and
 * covers the given number of bytes.
 */
extern void cache_push(unsigned long paddr, int len);

/*
 * Push and invalidate pages in the specified user virtual
 * memory range.
 */
extern void cache_push_v(unsigned long vaddr, int len);
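
/*
 * Illustrative sketch (hypothetical driver-style helper, not defined
 * anywhere in the tree): cache_push() makes RAM reflect CPU writes
 * before a device reads the buffer, while cache_clear() drops stale
 * lines before the CPU reads data a device has written.  Both take a
 * *physical* address; cache_push_v() is the variant for user virtual
 * addresses.
 */
static inline void example_dma_sync(unsigned long paddr, int len,
				    int to_device)
{
	if (to_device)
		cache_push(paddr, len);		/* device will read RAM */
	else
		cache_clear(paddr, len);	/* device has written RAM */
}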

/* This is needed whenever the virtual mapping of the current
   process changes.  */
#define __flush_cache_all()					\
({								\
	if (CPU_IS_COLDFIRE) {					\
		flush_cf_dcache(0, DCACHE_MAX_ADDR);		\
	} else if (CPU_IS_040_OR_060) {				\
		__asm__ __volatile__("nop\n\t"			\
				     ".chip 68040\n\t"		\
				     "cpusha %dc\n\t"		\
				     ".chip 68k");		\
	} else {						\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

#define __flush_cache_030()					\
({								\
	if (CPU_IS_020_OR_030) {				\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

#define flush_cache_all() __flush_cache_all()

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_cache_030();
}

#define flush_cache_dup_mm(mm)			flush_cache_mm(mm)

/* Historically flush_cache_range/flush_cache_page had to be macros to
   avoid a dependency on linux/mm.h; now that this file includes it,
   they can be inline functions. */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

/* Push the page at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
static inline void __flush_page_to_ram(void *vaddr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long addr, start, end;
		addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
		start = addr & ICACHE_SET_MASK;
		end = (addr + PAGE_SIZE - 1) & ICACHE_SET_MASK;
		if (start > end) {
			flush_cf_bcache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_bcache(start, end);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__("nop\n\t"
				     ".chip 68040\n\t"
				     "cpushp %%bc,(%0)\n\t"
				     ".chip 68k"
				     : : "a" (__pa(vaddr)));
	} else {
		unsigned long _tmp;
		__asm__ __volatile__("movec %%cacr,%0\n\t"
				     "orw %1,%0\n\t"
				     "movec %0,%%cacr"
				     : "=&d" (_tmp)
				     : "di" (FLUSH_I));
	}
}
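
/*
 * Illustrative sketch (hypothetical helper): the ColdFire branch of
 * __flush_page_to_ram() maps a page onto cache set indices with
 * ICACHE_SET_MASK, and a page whose index range wraps past the top
 * set must be flushed as two pieces.  The range-splitting logic in
 * isolation:
 */
static inline void example_flush_wrapped_sets(unsigned long start,
					      unsigned long end)
{
	if (start > end) {			/* range wraps past the top set */
		flush_cf_bcache(0, end);	/* low piece: sets 0..end */
		end = ICACHE_MAX_ADDR;		/* high piece runs to the top */
	}
	flush_cf_bcache(start, end);
}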

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
#define flush_dcache_page(page)		__flush_page_to_ram(page_address(page))
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))

extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
				    unsigned long addr, int len);
extern void flush_icache_range(unsigned long address, unsigned long endaddr);

static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_range(vma, page, vaddr, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}
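
/*
 * Illustrative sketch (hypothetical caller, in the style of access
 * helpers such as those used by ptrace): copy_to_user_page() wraps the
 * memcpy() with the cache maintenance needed when writing instructions
 * into another process's page, so the CPU never executes stale icache
 * contents.  kaddr is assumed to be the kernel mapping of 'page' at
 * user address 'vaddr'.
 */
static inline void example_poke_user_text(struct vm_area_struct *vma,
					  struct page *page,
					  unsigned long vaddr,
					  void *kaddr, void *insn, int len)
{
	copy_to_user_page(vma, page, vaddr, kaddr, insn, len);
}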

#endif	/* _M68K_CACHEFLUSH_H */