cacheflush.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>

#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/shmparam.h>
#include <asm/cache_info.h>

extern struct cache_info L1_cache_info[2];
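
/*
 * Write back and invalidate the D-cache, and invalidate the I-cache,
 * over [start, end), rounded out to L1 cache-line boundaries.  IRQs
 * are disabled so the flush is not torn by an interrupt.
 */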
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long line_size, flags;

	line_size = L1_cache_info[DCACHE].line_size;
	start = start & ~(line_size - 1);
	end = (end + line_size - 1) & ~(line_size - 1);

	local_irq_save(flags);
	cpu_cache_wbinval_range(start, end, 1);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_range);
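
/*
 * Flush a whole page through a temporary kernel mapping; the I-cache
 * is flushed as well when the VMA is executable.
 */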
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long flags;
	unsigned long kaddr;

	local_irq_save(flags);
	kaddr = (unsigned long)kmap_atomic(page);
	cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
	kunmap_atomic((void *)kaddr);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_page);
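
/*
 * Flush just the [addr, addr + len) window of a user page, e.g. after
 * instructions have been written into it.  The user address's page
 * offset is preserved in the temporary kernel mapping so exactly the
 * written bytes are flushed.
 */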
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long kaddr;

	kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK);
	flush_icache_range(kaddr, kaddr + len);
	kunmap_atomic((void *)kaddr);
}
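
/*
 * Called after a PTE is installed: preload the TLB entry when the
 * fault is for the live mm, then write back and invalidate the page's
 * kernel alias if it was marked PG_dcache_dirty or the mapping is
 * executable, so the user mapping sees consistent data and
 * instructions.
 */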
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t *pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(*pte);
	unsigned long flags;

	if (!pfn_valid(pfn))
		return;

	if (vma->vm_mm == current->active_mm) {
		local_irq_save(flags);
		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
		__nds32__tlbop_rwr(*pte);
		__nds32__isb();
		local_irq_restore(flags);
	}

	page = pfn_to_page(pfn);
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
	    (vma->vm_flags & VM_EXEC)) {
		unsigned long kaddr;

		local_irq_save(flags);
		kaddr = (unsigned long)kmap_atomic(page);
		cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
		kunmap_atomic((void *)kaddr);
		local_irq_restore(flags);
	}
}
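
/*
 * Everything below deals with VIPT cache aliasing: virtual addresses
 * that differ within SHMLBA can index different cache sets for the
 * same physical page.  kremap0()/kremap1() install temporary, locked
 * TLB entries whose cache colour matches the user address, so kernel
 * accesses hit the same cache lines the user mapping uses.
 */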
#ifdef CONFIG_CPU_CACHE_ALIASING
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);

static inline unsigned long aliasing(unsigned long addr, unsigned long page)
{
	return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
}

static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR0 0xffffc000
	kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);
	__nds32__isb();
	return kaddr;
}

static inline void kunmap01(unsigned long kaddr)
{
	__nds32__tlbop_unlk(kaddr);
	__nds32__tlbop_inv(kaddr);
	__nds32__isb();
}

static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR1 0xffff8000
	kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);
	__nds32__isb();
	return kaddr;
}
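
/*
 * There is no per-mm flush primitive, so flushing a whole mm means
 * writing back and invalidating the entire D-cache and invalidating
 * the entire I-cache.  Duplicating an mm at fork() needs no extra
 * work here.
 */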
void flush_cache_mm(struct mm_struct *mm)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
	local_irq_restore(flags);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
}
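
/*
 * Above eight pages a full flush is cheaper than walking the range;
 * otherwise flush page by page, skipping addresses with no PTE
 * present.
 */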
void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	unsigned long flags;

	if ((end - start) > 8 * PAGE_SIZE) {
		cpu_dcache_wbinval_all();
		if (vma->vm_flags & VM_EXEC)
			cpu_icache_inval_all();
		return;
	}

	local_irq_save(flags);
	while (start < end) {
		if (va_present(vma->vm_mm, start))
			cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
		start += PAGE_SIZE;
	}
	local_irq_restore(flags);
}
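
/*
 * Flush a single user page given its address and pfn by remapping the
 * physical page at the user address's cache colour.
 */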
void flush_cache_page(struct vm_area_struct *vma,
		      unsigned long addr, unsigned long pfn)
{
	unsigned long vto, flags;

	local_irq_save(flags);
	vto = kremap0(addr, pfn << PAGE_SHIFT);
	cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
	kunmap01(vto);
	local_irq_restore(flags);
}
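
/*
 * vmalloc mappings may sit at any cache colour relative to the pages'
 * kernel aliases, so setting them up or tearing them down flushes
 * everything.
 */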
void flush_cache_vmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}

void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}
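
/*
 * copy_user_page()/clear_user_page() work on already-mapped kernel
 * addresses: flush the user alias first, do the copy or clear, then
 * flush the destination so no stale lines survive.
 */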
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	cpu_dcache_wbinval_page((unsigned long)vaddr);
	cpu_icache_inval_page((unsigned long)vaddr);
	copy_page(vto, vfrom);
	cpu_dcache_wbinval_page((unsigned long)vto);
	cpu_icache_inval_page((unsigned long)vto);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	cpu_dcache_wbinval_page((unsigned long)vaddr);
	cpu_icache_inval_page((unsigned long)vaddr);
	clear_page(addr);
	cpu_dcache_wbinval_page((unsigned long)addr);
	cpu_icache_inval_page((unsigned long)addr);
}
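
/*
 * Copy a highmem page: write back the source's kernel alias if its
 * colour conflicts with the user address, then map both pages at the
 * user colour (kremap0/kremap1) and copy, so the data lands in the
 * cache lines the user mapping will actually read.
 */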
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;

	kto = ((unsigned long)page_address(to) & PAGE_MASK);
	kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
	pto = page_to_phys(to);
	pfrom = page_to_phys(from);

	local_irq_save(flags);
	if (aliasing(vaddr, (unsigned long)kfrom))
		cpu_dcache_wb_page((unsigned long)kfrom);
	vto = kremap0(vaddr, pto);
	vfrom = kremap1(vaddr, pfrom);
	copy_page((void *)vto, (void *)vfrom);
	kunmap01(vfrom);
	kunmap01(vto);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(copy_user_highpage);
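
/*
 * Zero a highmem page through a mapping with the user's cache colour,
 * invalidating any conflicting kernel alias first.
 */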
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long vto, flags, kto;

	kto = ((unsigned long)page_address(page) & PAGE_MASK);

	local_irq_save(flags);
	if (aliasing(kto, vaddr) && kto != 0) {
		cpu_dcache_inval_page(kto);
		cpu_icache_inval_page(kto);
	}
	vto = kremap0(vaddr, page_to_phys(page));
	clear_page((void *)vto);
	kunmap01(vto);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(clear_user_highpage);
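
/*
 * For a page-cache page with no user mappings, just mark it
 * PG_dcache_dirty and let update_mmu_cache() flush it when it is
 * finally mapped.  Otherwise flush the kernel alias now, plus the
 * user-colour alias when the two conflict.
 */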
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		unsigned long kaddr, flags;

		kaddr = (unsigned long)page_address(page);
		local_irq_save(flags);
		cpu_dcache_wbinval_page(kaddr);
		if (mapping) {
			unsigned long vaddr, kto;

			vaddr = page->index << PAGE_SHIFT;
			if (aliasing(vaddr, kaddr)) {
				kto = kremap0(vaddr, page_to_phys(page));
				cpu_dcache_wbinval_page(kto);
				kunmap01(kto);
			}
		}
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
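
/*
 * copy_to_user_page()/copy_from_user_page() move data between the
 * kernel and another process's page (the access_process_vm() path)
 * through a mapping with the user's cache colour.  A write into an
 * executable VMA also flushes the written lines out to the I-cache.
 */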
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, void *src, int len)
{
	unsigned long line_size, start, end, vto, flags;

	local_irq_save(flags);
	vto = kremap0(vaddr, page_to_phys(page));
	dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC) {
		line_size = L1_cache_info[DCACHE].line_size;
		start = (unsigned long)dst & ~(line_size - 1);
		end = ((unsigned long)dst + len + line_size - 1) &
		      ~(line_size - 1);
		cpu_cache_wbinval_range(start, end, 1);
	}
	kunmap01(vto);
	local_irq_restore(flags);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, void *src, int len)
{
	unsigned long vto, flags;

	local_irq_save(flags);
	vto = kremap0(vaddr, page_to_phys(page));
	src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
	memcpy(dst, src, len);
	kunmap01(vto);
	local_irq_restore(flags);
}
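
/*
 * Anonymous pages have no address_space, so flush_dcache_page() cannot
 * defer their flushes; flush the user-colour alias (and the I-cache
 * for executable VMAs) directly, but only for the mm currently live on
 * this CPU.
 */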
void flush_anon_page(struct vm_area_struct *vma,
		     struct page *page, unsigned long vaddr)
{
	unsigned long kaddr, flags, ktmp;

	if (!PageAnon(page))
		return;

	if (vma->vm_mm != current->active_mm)
		return;

	local_irq_save(flags);
	if (vma->vm_flags & VM_EXEC)
		cpu_icache_inval_page(vaddr & PAGE_MASK);
	kaddr = (unsigned long)page_address(page);
	if (aliasing(vaddr, kaddr)) {
		ktmp = kremap0(vaddr, page_to_phys(page));
		cpu_dcache_wbinval_page(ktmp);
		kunmap01(ktmp);
	}
	local_irq_restore(flags);
}
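
/*
 * Kernel-side I/O helpers: flush_kernel_dcache_page() cleans a page's
 * kernel alias, and flush/invalidate_kernel_vmap_range() write back or
 * discard an arbitrary kernel virtual range (e.g. around PIO to a
 * vmapped buffer).
 */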
void flush_kernel_dcache_page(struct page *page)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_wbinval_page((unsigned long)page_address(page));
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

void flush_kernel_vmap_range(void *addr, int size)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *addr, int size)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

#endif