cacheflush.h

/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_AVR32_CACHEFLUSH_H
#define __ASM_AVR32_CACHEFLUSH_H
/* Keep includes the same across arches. */
#include <linux/mm.h>
#include <linux/string.h>	/* for memcpy() in copy_from_user_page() */

#define CACHE_OP_ICACHE_INVALIDATE	0x01
#define CACHE_OP_DCACHE_INVALIDATE	0x0b
#define CACHE_OP_DCACHE_CLEAN		0x0c
#define CACHE_OP_DCACHE_CLEAN_INVAL	0x0d
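
/*
 * The values above are operand encodings for the AVR32 "cache"
 * instruction used by the helpers below; 0x01 operates on the I-cache
 * and 0x0b-0x0d on the D-cache (see the AVR32 Architecture Manual for
 * the full list of cache operations).
 */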

/*
 * Invalidate any cacheline containing virtual address vaddr without
 * writing anything back to memory.
 *
 * Note that this function may corrupt unrelated data structures when
 * applied to buffers that are not cacheline-aligned at both ends.
 */
static inline void invalidate_dcache_line(void *vaddr)
{
	asm volatile("cache %0[0], %1"
		     :
		     : "r"(vaddr), "n"(CACHE_OP_DCACHE_INVALIDATE)
		     : "memory");
}
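
/*
 * A note on the asm used in this and the following helpers: "r"(vaddr)
 * passes the address in a register, the "n" constraint forces the
 * cache operation code to be a compile-time constant encoded directly
 * into the instruction, and the "memory" clobber prevents the compiler
 * from reordering memory accesses across the cache operation.
 */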

/*
 * Make sure any cacheline containing virtual address vaddr is written
 * to memory.
 */
static inline void clean_dcache_line(void *vaddr)
{
	asm volatile("cache %0[0], %1"
		     :
		     : "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN)
		     : "memory");
}

/*
 * Make sure any cacheline containing virtual address vaddr is written
 * to memory and then invalidate it.
 */
static inline void flush_dcache_line(void *vaddr)
{
	asm volatile("cache %0[0], %1"
		     :
		     : "r"(vaddr), "n"(CACHE_OP_DCACHE_CLEAN_INVAL)
		     : "memory");
}

/*
 * Invalidate any instruction cacheline containing virtual address
 * vaddr.
 */
static inline void invalidate_icache_line(void *vaddr)
{
	asm volatile("cache %0[0], %1"
		     :
		     : "r"(vaddr), "n"(CACHE_OP_ICACHE_INVALIDATE)
		     : "memory");
}
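
/*
 * Note that invalidation is the only operation the I-cache needs: the
 * instruction cache is never written to by the CPU, so there is no
 * dirty data to clean back to memory.
 */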

/*
 * Apply the above operations to every cacheline touched by the
 * specified virtual address range. A sketch of how such a helper can
 * be structured follows the declarations below.
 */
void invalidate_dcache_region(void *start, size_t len);
void clean_dcache_region(void *start, size_t len);
void flush_dcache_region(void *start, size_t len);
void invalidate_icache_region(void *start, size_t len);
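
/*
 * A minimal sketch of one of these helpers, assuming the line size is
 * available as L1_CACHE_BYTES (the real implementations may instead
 * read the line size from CPU configuration at boot):
 *
 *	void clean_dcache_region(void *start, size_t len)
 *	{
 *		unsigned long v = (unsigned long)start
 *				  & ~(L1_CACHE_BYTES - 1);
 *		unsigned long end = (unsigned long)start + len;
 *
 *		while (v < end) {
 *			clean_dcache_line((void *)v);
 *			v += L1_CACHE_BYTES;
 *		}
 *		flush_write_buffer();
 *	}
 */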

/*
 * Make sure any pending writes are completed before continuing.
 */
#define flush_write_buffer() asm volatile("sync 0" : : : "memory")
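
/*
 * Typical usage of the region helpers around non-coherent DMA (the
 * driver calls below are hypothetical, shown only for illustration):
 * clean before the device reads a buffer from memory, and invalidate
 * before the CPU reads data the device has written.
 *
 *	clean_dcache_region(tx_buf, tx_len);       (CPU -> device)
 *	start_dma(tx_buf, rx_buf);                 (hypothetical)
 *	... wait for DMA completion ...
 *	invalidate_dcache_region(rx_buf, rx_len);  (device -> CPU)
 */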

/*
 * The following functions are called when a virtual mapping changes.
 * We do not need to flush anything in this case.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

/*
 * I think we need to implement this one to be able to reliably
 * execute pages from RAMDISK. However, if we implement the
 * flush_dcache_*() functions, it might not be needed anymore.
 *
 * #define flush_icache_page(vma, page) do { } while (0)
 */
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);

/*
 * These are (I think) related to D-cache aliasing. We might need to
 * do something here, but only for certain configurations. No such
 * configurations exist at this time.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(page)		do { } while (0)
#define flush_dcache_mmap_unlock(page)		do { } while (0)

/*
 * These are for I/D cache coherency. In this case, we do need to
 * flush with all configurations.
 */
extern void flush_icache_range(unsigned long start, unsigned long end);
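
/*
 * Example use of flush_icache_range(), e.g. after writing instructions
 * into memory that is about to be executed (buffer and size names are
 * illustrative):
 *
 *	memcpy(code, image, size);
 *	flush_icache_range((unsigned long)code,
 *			   (unsigned long)code + size);
 */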

extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
			      unsigned long vaddr, void *dst, const void *src,
			      unsigned long len);
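
/*
 * Reading from a user page cannot create I/D-cache incoherency, so a
 * plain memcpy() is sufficient here; only the write side
 * (copy_to_user_page above) needs to flush.
 */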
static inline void copy_from_user_page(struct vm_area_struct *vma,
		struct page *page, unsigned long vaddr, void *dst,
		const void *src, unsigned long len)
{
	memcpy(dst, src, len);
}

#endif /* __ASM_AVR32_CACHEFLUSH_H */