cacheflush.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>
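
/*
 * Advertises to generic code that the flush_dcache_page() below is a
 * no-op; an architecture that implements a real flush_dcache_page()
 * defines this to 1 in its own asm/cacheflush.h instead.
 */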
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0

/*
 * The cache does not need to be flushed when TLB entries change if the
 * cache is mapped to physical memory rather than virtual memory; all of
 * the no-op stubs below assume such a physically mapped cache.
 */
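
/* Flush the entire CPU cache hierarchy; the heaviest-weight flush hook. */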
#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
#endif
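
/*
 * Flush all cached user data belonging to the address space @mm, called
 * before the whole address space changes or is torn down (exit, exec).
 */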
#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif
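
/*
 * As flush_cache_mm(), but called when @mm is duplicated by fork(); a
 * separate hook so virtually indexed caches can optimize the fork case.
 */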
#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif
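
/*
 * Flush the user address range @start..@end of @vma from the cache,
 * before the page table entries covering that range are changed or
 * removed.
 */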
#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
                                     unsigned long start,
                                     unsigned long end)
{
}
#endif
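
/*
 * Single-page version of flush_cache_range(): flush the user page at
 * @vmaddr in @vma. @pfn identifies the backing physical page so that
 * virtually indexed caches can locate every alias of it.
 */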
#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long vmaddr,
                                    unsigned long pfn)
{
}
#endif
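
/*
 * Keep the kernel's view of a page-cache page coherent with any user
 * mappings when the kernel reads or writes the page; a no-op here, as
 * advertised by ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE above.
 */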
#ifndef flush_dcache_page
static inline void flush_dcache_page(struct page *page)
{
}
#endif
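
/*
 * Serialize against updates to @mapping's list of user mappings while a
 * real flush_dcache_page() implementation walks them; nothing to protect
 * in the no-op case.
 */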
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif

#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif
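
/*
 * Make instructions the kernel has written to the kernel virtual range
 * @start..@end visible to instruction fetch, e.g. after loading a module.
 */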
#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif
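
/*
 * Flush the instruction cache for @page as it is mapped into user space;
 * architectures can usually do this work in flush_dcache_page() and
 * update_mmu_cache() instead.
 */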
#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
                                     struct page *page)
{
}
#endif
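
/*
 * Flush @len bytes just written into the user page @page at @addr; used
 * by copy_to_user_page() below after modifying potentially executable
 * memory, e.g. a debugger planting a breakpoint via ptrace.
 */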
#ifndef flush_icache_user_range
static inline void flush_icache_user_range(struct vm_area_struct *vma,
                                           struct page *page,
                                           unsigned long addr, int len)
{
}
#endif
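
/*
 * Called after vmap()/vmalloc() install new kernel virtual mappings and
 * before vunmap() tears them down, respectively.
 */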
#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif
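
/*
 * Copy data to or from a user page through its kernel mapping while
 * keeping the caches coherent, as done by access_process_vm() for
 * ptrace; the to-user direction flushes the icache in case the copied
 * bytes are instructions.
 */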
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#endif

#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
#endif
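
/*
 * Override sketch (an assumption drawn from the #ifndef guards above,
 * not something this file spells out): an architecture supplies its own
 * hook and defines the matching macro before including this header,
 * e.g. in its asm/cacheflush.h:
 *
 *	extern void flush_icache_range(unsigned long start, unsigned long end);
 *	#define flush_icache_range flush_icache_range
 *	#include <asm-generic/cacheflush.h>
 *
 * Every hook left undefined falls back to the no-op stub above.
 */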

#endif /* __ASM_CACHEFLUSH_H */