#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Internal implementation */
void flush_data_cache_local(void *);	/* flushes local data-cache only */
void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
void flush_data_cache(void);		/* flushes data-cache only (all processors) */
void flush_instruction_cache(void);	/* flushes i-cache only (all processors) */
#else
#define flush_data_cache()		flush_data_cache_local(NULL)
#define flush_instruction_cache()	flush_instruction_cache_local(NULL)
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
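
/* Low-level flush primitives.  The *_asm variants are implemented in
 * assembly; see the note at flush_dcache_page_asm() below, which points
 * at arch/parisc/kernel/pacache.S. */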
void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);

/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);

static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}

#define flush_kernel_dcache_range(start, size) \
	flush_kernel_dcache_range_asm((start), (start) + (size))

/* vmap range flushes and invalidates.  Architecturally, we don't need
 * the invalidate, because the CPU should refuse to speculate once an
 * area has been flushed.  Invalidate is therefore implemented as a
 * write-back: any page dirtied through its page cache mapping
 * (PG_dcache_dirty) is flushed there first, then the vmap alias is
 * flushed as well. */
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	flush_kernel_dcache_range_asm(start, start + size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	void *cursor = vaddr;

	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(cursor);

		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
			flush_kernel_dcache_page(page);
	}
	flush_kernel_dcache_range_asm(start, start + size);
}
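
/*
 * A minimal usage sketch (hypothetical caller, not part of this header):
 * a driver doing I/O on a vmap()/vmalloc() area brackets the operation
 * so the vmap alias and the underlying physical pages stay coherent:
 *
 *	flush_kernel_vmap_range(vaddr, size);	   // write back before the I/O
 *	// ... device reads/writes the underlying pages ...
 *	invalidate_kernel_vmap_range(vaddr, size); // before reading the results
 */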

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_page(vma, page)	do {		\
	flush_kernel_dcache_page(page);			\
	flush_kernel_icache_page(page_address(page));	\
} while (0)
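
/* flush_icache_range() is used after the kernel writes instructions into
 * memory (e.g. when loading a module).  The caches are not coherent, so
 * the new code must be written back from the dcache before the icache
 * for the same range is flushed. */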
#define flush_icache_range(s, e)	do {		\
	flush_kernel_dcache_range_asm(s, e);		\
	flush_kernel_icache_range_asm(s, e);		\
} while (0)
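
/* copy_to_user_page()/copy_from_user_page() are used when the kernel
 * accesses a user page through its kernel mapping, e.g. from
 * access_process_vm() for ptrace.  The user alias is flushed first so
 * the copy does not work on stale lines; copy_to_user_page() must also
 * write back the data it stored. */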
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

/* Defined in pacache.S, exported from cache.c, used by flush_anon_page(). */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);

#define ARCH_HAS_FLUSH_ANON_PAGE
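/* flush_anon_page() is called before the kernel reads or modifies an
 * anonymous page through its kernel mapping: the user mapping at
 * @vmaddr may hold dirty cache lines, so drop the stale TLB entry and
 * flush the page by physical address at that virtual alias. */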
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page)) {
		flush_tlb_page(vma, vmaddr);
		preempt_disable();
		flush_dcache_page_asm(page_to_phys(page), vmaddr);
		preempt_enable();
	}
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#include <asm/kmap_types.h>

#define ARCH_HAS_KMAP
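
/* PA-RISC has no highmem, so kmap()/kmap_atomic() can simply return the
 * page's linear address.  The kunmap side still flushes the kernel
 * dcache page: with a virtually indexed cache, data written through the
 * kernel mapping could otherwise remain invisible to a differently
 * colored user alias of the same page. */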
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}

static inline void __kunmap_atomic(void *addr)
{
	flush_kernel_dcache_page_addr(addr);
	pagefault_enable();
	preempt_enable();
}
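
/*
 * A minimal usage sketch (hypothetical caller, not part of this header);
 * kunmap_atomic() is the generic linux/highmem.h wrapper that ends up in
 * __kunmap_atomic():
 *
 *	void *p = kmap_atomic(page);
 *	memcpy(p, buf, len);	// operate on the kernel mapping
 *	kunmap_atomic(p);	// writes back the kernel alias
 */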

#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
#define kmap_atomic_pfn(pfn)		kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#endif /* _PARISC_CACHEFLUSH_H */