cache-tauros2.c 7.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307
  1. /*
  2. * arch/arm/mm/cache-tauros2.c - Tauros2 L2 cache controller support
  3. *
  4. * Copyright (C) 2008 Marvell Semiconductor
  5. *
  6. * This file is licensed under the terms of the GNU General Public
  7. * License version 2. This program is licensed "as is" without any
  8. * warranty of any kind, whether express or implied.
  9. *
  10. * References:
  11. * - PJ1 CPU Core Datasheet,
  12. * Document ID MV-S104837-01, Rev 0.7, January 24 2008.
  13. * - PJ4 CPU Core Datasheet,
  14. * Document ID MV-S105190-00, Rev 0.7, March 14 2008.
  15. */
  16. #include <linux/init.h>
  17. #include <linux/of.h>
  18. #include <linux/of_address.h>
  19. #include <asm/cacheflush.h>
  20. #include <asm/cp15.h>
  21. #include <asm/cputype.h>
  22. #include <asm/hardware/cache-tauros2.h>
  23. /* CP15 PJ4 Control configuration register */
  24. #define CCR_L2C_PREFETCH_DISABLE BIT(24)
  25. #define CCR_L2C_ECC_ENABLE BIT(23)
  26. #define CCR_L2C_WAY7_4_DISABLE BIT(21)
  27. #define CCR_L2C_BURST8_ENABLE BIT(20)
  28. /*
  29. * When Tauros2 is used on a CPU that supports the v7 hierarchical
  30. * cache operations, the cache handling code in proc-v7.S takes care
  31. * of everything, including handling DMA coherency.
  32. *
  33. * So, we only need to register outer cache operations here if we're
  34. * being used on a pre-v7 CPU, and we only need to build support for
  35. * outer cache operations into the kernel image if the kernel has been
  36. * configured to support a pre-v7 CPU.
  37. */
  38. #ifdef CONFIG_CPU_32v5
  39. /*
  40. * Low-level cache maintenance operations.
  41. */
/*
 * Clean one L2 cache line by physical address: write the line's dirty
 * data back to memory, leaving the line valid (CP15 c7, c11, 3 —
 * Tauros2 "Clean L2 Cache Line by PA").
 */
static inline void tauros2_clean_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c11, 3" : : "r" (addr));
}
/*
 * Clean and invalidate one L2 cache line by physical address: write
 * back any dirty data, then mark the line invalid (CP15 c7, c15, 3 —
 * Tauros2 "Clean and Invalidate L2 Cache Line by PA").
 */
static inline void tauros2_clean_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c15, 3" : : "r" (addr));
}
/*
 * Invalidate one L2 cache line by physical address WITHOUT writing it
 * back — any dirty data in the line is discarded (CP15 c7, c7, 3 —
 * Tauros2 "Invalidate L2 Cache Line by PA"). Only safe on lines fully
 * inside the range being invalidated.
 */
static inline void tauros2_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c7, 3" : : "r" (addr));
}
  54. /*
  55. * Linux primitives.
  56. *
  57. * Note that the end addresses passed to Linux primitives are
  58. * noninclusive.
  59. */
  60. #define CACHE_LINE_SIZE 32
  61. static void tauros2_inv_range(unsigned long start, unsigned long end)
  62. {
  63. /*
  64. * Clean and invalidate partial first cache line.
  65. */
  66. if (start & (CACHE_LINE_SIZE - 1)) {
  67. tauros2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
  68. start = (start | (CACHE_LINE_SIZE - 1)) + 1;
  69. }
  70. /*
  71. * Clean and invalidate partial last cache line.
  72. */
  73. if (end & (CACHE_LINE_SIZE - 1)) {
  74. tauros2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
  75. end &= ~(CACHE_LINE_SIZE - 1);
  76. }
  77. /*
  78. * Invalidate all full cache lines between 'start' and 'end'.
  79. */
  80. while (start < end) {
  81. tauros2_inv_pa(start);
  82. start += CACHE_LINE_SIZE;
  83. }
  84. dsb();
  85. }
  86. static void tauros2_clean_range(unsigned long start, unsigned long end)
  87. {
  88. start &= ~(CACHE_LINE_SIZE - 1);
  89. while (start < end) {
  90. tauros2_clean_pa(start);
  91. start += CACHE_LINE_SIZE;
  92. }
  93. dsb();
  94. }
  95. static void tauros2_flush_range(unsigned long start, unsigned long end)
  96. {
  97. start &= ~(CACHE_LINE_SIZE - 1);
  98. while (start < end) {
  99. tauros2_clean_inv_pa(start);
  100. start += CACHE_LINE_SIZE;
  101. }
  102. dsb();
  103. }
/*
 * Shut the L2 down cleanly for suspend: write back every dirty line
 * ("L2 Cache Clean All", CP15 c7, c11, 0), then clear the L2 enable
 * bit (bit 26) in the System Control register. The clean must come
 * first or dirty data would be stranded in a disabled cache.
 */
static void tauros2_disable(void)
{
	__asm__ __volatile__ (
	"mcr p15, 1, %0, c7, c11, 0 @L2 Cache Clean All\n\t"
	"mrc p15, 0, %0, c1, c0, 0\n\t"
	"bic %0, %0, #(1 << 26)\n\t"
	"mcr p15, 0, %0, c1, c0, 0 @Disable L2 Cache\n\t"
	: : "r" (0x0));
}
/*
 * Bring the L2 back after resume: invalidate the whole cache first
 * ("L2 Cache Invalidate All", CP15 c7, c7, 0) so no stale pre-suspend
 * contents survive, then set the L2 enable bit (bit 26) in the System
 * Control register.
 */
static void tauros2_resume(void)
{
	__asm__ __volatile__ (
	"mcr p15, 1, %0, c7, c7, 0 @L2 Cache Invalidate All\n\t"
	"mrc p15, 0, %0, c1, c0, 0\n\t"
	"orr %0, %0, #(1 << 26)\n\t"
	"mcr p15, 0, %0, c1, c0, 0 @Enable L2 Cache\n\t"
	: : "r" (0x0));
}
  122. #endif
/*
 * Read the Marvell CPU Extra Features register (CP15 c15, c1, 0),
 * which holds the Tauros2 prefetch/burst8 controls and, on v5 parts,
 * the L2 enable bit.
 */
static inline u32 __init read_extra_features(void)
{
	u32 u;
	__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));
	return u;
}
/*
 * Write back the Marvell CPU Extra Features register
 * (CP15 c15, c1, 0). Counterpart of read_extra_features().
 */
static inline void __init write_extra_features(u32 u)
{
	__asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
}
  133. static inline int __init cpuid_scheme(void)
  134. {
  135. return !!((processor_id & 0x000f0000) == 0x000f0000);
  136. }
/*
 * Read the Memory Model Feature Register 3 (CP15 c0, c1, 7); its low
 * nibble tells us whether the v7 hierarchical cache ops are supported.
 */
static inline u32 __init read_mmfr3(void)
{
	u32 mmfr3;
	__asm__("mrc p15, 0, %0, c0, c1, 7\n" : "=r" (mmfr3));
	return mmfr3;
}
/*
 * Read the Auxiliary Control Register (CP15 c1, c0, 1), which on
 * ARMv7 parts carries the L2 cache enable bit.
 */
static inline u32 __init read_actlr(void)
{
	u32 actlr;
	__asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
	return actlr;
}
/*
 * Write the Auxiliary Control Register (CP15 c1, c0, 1).
 * Counterpart of read_actlr().
 */
static inline void __init write_actlr(u32 actlr)
{
	__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
}
  153. static void enable_extra_feature(unsigned int features)
  154. {
  155. u32 u;
  156. u = read_extra_features();
  157. if (features & CACHE_TAUROS2_PREFETCH_ON)
  158. u &= ~CCR_L2C_PREFETCH_DISABLE;
  159. else
  160. u |= CCR_L2C_PREFETCH_DISABLE;
  161. pr_info("Tauros2: %s L2 prefetch.\n",
  162. (features & CACHE_TAUROS2_PREFETCH_ON)
  163. ? "Enabling" : "Disabling");
  164. if (features & CACHE_TAUROS2_LINEFILL_BURST8)
  165. u |= CCR_L2C_BURST8_ENABLE;
  166. else
  167. u &= ~CCR_L2C_BURST8_ENABLE;
  168. pr_info("Tauros2: %s burst8 line fill.\n",
  169. (features & CACHE_TAUROS2_LINEFILL_BURST8)
  170. ? "Enabling" : "Disabling");
  171. write_extra_features(u);
  172. }
/*
 * Common Tauros2 bring-up: program the extra features, make sure the
 * L2 enable bit is set for the detected CPU generation, and on pre-v7
 * CPUs hook the outer-cache maintenance callbacks.
 */
static void __init tauros2_internal_init(unsigned int features)
{
	char *mode = NULL;

	enable_extra_feature(features);

#ifdef CONFIG_CPU_32v5
	/* Marvell-implemented (0x56) v5 part — presumably PJ1; TODO confirm. */
	if ((processor_id & 0xff0f0000) == 0x56050000) {
		u32 feat;

		/*
		 * v5 CPUs with Tauros2 have the L2 cache enable bit
		 * located in the CPU Extra Features register.
		 */
		feat = read_extra_features();
		if (!(feat & 0x00400000)) {
			pr_info("Tauros2: Enabling L2 cache.\n");
			write_extra_features(feat | 0x00400000);
		}

		mode = "ARMv5";
		/* Pre-v7: Linux drives the L2 through outer_cache ops. */
		outer_cache.inv_range = tauros2_inv_range;
		outer_cache.clean_range = tauros2_clean_range;
		outer_cache.flush_range = tauros2_flush_range;
		outer_cache.disable = tauros2_disable;
		outer_cache.resume = tauros2_resume;
	}
#endif

#ifdef CONFIG_CPU_32v7
	/*
	 * Check whether this CPU has support for the v7 hierarchical
	 * cache ops. (PJ4 is in its v7 personality mode if the MMFR3
	 * register indicates support for the v7 hierarchical cache
	 * ops.)
	 *
	 * (Although strictly speaking there may exist CPUs that
	 * implement the v7 cache ops but are only ARMv6 CPUs (due to
	 * not complying with all of the other ARMv7 requirements),
	 * there are no real-life examples of Tauros2 being used on
	 * such CPUs as of yet.)
	 */
	if (cpuid_scheme() && (read_mmfr3() & 0xf) == 1) {
		u32 actlr;

		/*
		 * When Tauros2 is used in an ARMv7 system, the L2
		 * enable bit is located in the Auxiliary System Control
		 * Register (which is the only register allowed by the
		 * ARMv7 spec to contain fine-grained cache control bits).
		 */
		actlr = read_actlr();
		if (!(actlr & 0x00000002)) {
			pr_info("Tauros2: Enabling L2 cache.\n");
			write_actlr(actlr | 0x00000002);
		}

		mode = "ARMv7";
		/* v7: proc-v7.S handles the L2; no outer_cache ops needed. */
	}
#endif

	if (mode == NULL) {
		pr_crit("Tauros2: Unable to detect CPU mode.\n");
		return;
	}

	pr_info("Tauros2: L2 cache support initialised "
			"in %s mode.\n", mode);
}
#ifdef CONFIG_OF
/* Device-tree match table: the Tauros2 cache controller node. */
static const struct of_device_id tauros2_ids[] __initconst = {
	{ .compatible = "marvell,tauros2-cache"},
	{}
};
#endif
  239. void __init tauros2_init(unsigned int features)
  240. {
  241. #ifdef CONFIG_OF
  242. struct device_node *node;
  243. int ret;
  244. unsigned int f;
  245. node = of_find_matching_node(NULL, tauros2_ids);
  246. if (!node) {
  247. pr_info("Not found marvell,tauros2-cache, disable it\n");
  248. } else {
  249. ret = of_property_read_u32(node, "marvell,tauros2-cache-features", &f);
  250. if (ret) {
  251. pr_info("Not found marvell,tauros-cache-features property, "
  252. "disable extra features\n");
  253. features = 0;
  254. } else
  255. features = f;
  256. }
  257. #endif
  258. tauros2_internal_init(features);
  259. }