cache-b15-rac.c

/*
 * Broadcom Brahma-B15 CPU read-ahead cache management functions
 *
 * Copyright (C) 2015-2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/of_address.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-b15-rac.h>

extern void v7_flush_kern_cache_all(void);
/* RAC register offsets, relative to the HIF_CPU_BIUCTRL register base */
#define RAC_CONFIG0_REG		(0x78)
#define  RACENPREF_MASK		(0x3)
#define  RACPREFINST_SHIFT	(0)
#define  RACENINST_SHIFT	(2)
#define  RACPREFDATA_SHIFT	(4)
#define  RACENDATA_SHIFT	(6)
#define  RAC_CPU_SHIFT		(8)
#define  RACCFG_MASK		(0xff)
#define RAC_CONFIG1_REG		(0x7c)
/* Brahma-B15 is a quad-core only design */
#define B15_RAC_FLUSH_REG	(0x80)
/* Brahma-B53 is an octo-core design */
#define B53_RAC_FLUSH_REG	(0x84)
#define  FLUSH_RAC		(1 << 0)

/* Bitmask to enable instruction and data prefetching with a 256-byte stride */
#define RAC_DATA_INST_EN_MASK	(1 << RACPREFINST_SHIFT | \
				 RACENPREF_MASK << RACENINST_SHIFT | \
				 1 << RACPREFDATA_SHIFT | \
				 RACENPREF_MASK << RACENDATA_SHIFT)
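/* Each possible CPU gets its own copy of this mask in RAC_CONFIG0_REG,
 * shifted by cpu * RAC_CPU_SHIFT (see b15_rac_enable() below).
 */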
#define RAC_ENABLED		0
/* Special state where we want to bypass the spinlock and call directly
 * into the v7 cache maintenance operations during suspend/resume
 */
#define RAC_SUSPENDED		1
static void __iomem *b15_rac_base;
static DEFINE_SPINLOCK(rac_lock);
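/* Saved copy of RAC_CONFIG0_REG (restored after CPU hotplug or resume) and
 * the SoC-specific flush register offset (B15 vs. B53), set during init.
 */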
static u32 rac_config0_reg;
static u32 rac_flush_offset;

/* Initialization flag to avoid checking for b15_rac_base, and to prevent
 * multi-platform kernels from crashing here as well.
 */
static unsigned long b15_rac_flags;
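/* Disable the read-ahead cache by clearing RAC_CONFIG0_REG and return the
 * previous value so the caller can restore it later.
 */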
static inline u32 __b15_rac_disable(void)
{
	u32 val = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);

	__raw_writel(0, b15_rac_base + RAC_CONFIG0_REG);
	dmb();
	return val;
}
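/* Start a flush of the read-ahead cache and poll until the hardware clears
 * the FLUSH_RAC bit, indicating completion.
 */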
static inline void __b15_rac_flush(void)
{
	u32 reg;

	__raw_writel(FLUSH_RAC, b15_rac_base + rac_flush_offset);
	do {
		/* This dmb() is required to force the Bus Interface Unit
		 * to clean outstanding writes, and forces an idle cycle
		 * to be inserted.
		 */
		dmb();
		reg = __raw_readl(b15_rac_base + rac_flush_offset);
	} while (reg & FLUSH_RAC);
}
static inline u32 b15_rac_disable_and_flush(void)
{
	u32 reg;

	reg = __b15_rac_disable();
	__b15_rac_flush();
	return reg;
}
static inline void __b15_rac_enable(u32 val)
{
	__raw_writel(val, b15_rac_base + RAC_CONFIG0_REG);
	/* dsb() is required here to be consistent with __flush_icache_all() */
	dsb();
}
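/* Generate a cache maintenance wrapper: when the RAC is enabled, disable and
 * flush it around the corresponding v7 operation, then re-enable it;
 * otherwise fall through to the plain v7 call followed by the optional
 * barrier.
 */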
#define BUILD_RAC_CACHE_OP(name, bar)				\
void b15_flush_##name(void)					\
{								\
	unsigned int do_flush;					\
	u32 val = 0;						\
								\
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags)) {		\
		v7_flush_##name();				\
		bar;						\
		return;						\
	}							\
								\
	spin_lock(&rac_lock);					\
	do_flush = test_bit(RAC_ENABLED, &b15_rac_flags);	\
	if (do_flush)						\
		val = b15_rac_disable_and_flush();		\
	v7_flush_##name();					\
	if (!do_flush)						\
		bar;						\
	else							\
		__b15_rac_enable(val);				\
	spin_unlock(&rac_lock);					\
}
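/* Expands to nothing: used as the "bar" argument when the generated flush
 * routine does not need a trailing barrier.
 */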
#define nobarrier
/* The readahead cache present in the Brahma-B15 CPU is a special piece of
 * hardware after the integrated L2 cache of the B15 CPU complex whose purpose
 * is to prefetch instructions and/or data with a line size of either 64 bytes
 * or 256 bytes. The rationale is that the data-bus of the CPU interface is
 * optimized for 256-byte transactions, and enabling the readahead cache
 * provides a significant performance boost, so we want it enabled (typically
 * twice the performance for a memcpy benchmark application).
 *
 * The readahead cache is transparent for Modified Virtual Addresses
 * cache maintenance operations: ICIMVAU, DCIMVAC, DCCMVAC, DCCMVAU and
 * DCCIMVAC.
 *
 * It is however not transparent for the following cache maintenance
 * operations: DCISW, DCCSW, DCCISW, ICIALLUIS and ICIALLU, which is precisely
 * what we are patching here with BUILD_RAC_CACHE_OP.
 */
BUILD_RAC_CACHE_OP(kern_cache_all, nobarrier);
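/* Program RAC_CONFIG0_REG with the instruction/data prefetch enable bits for
 * every possible CPU, after first disabling and flushing the read-ahead
 * cache.
 */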
static void b15_rac_enable(void)
{
	unsigned int cpu;
	u32 enable = 0;

	for_each_possible_cpu(cpu)
		enable |= (RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT));

	b15_rac_disable_and_flush();
	__b15_rac_enable(enable);
}
static int b15_rac_reboot_notifier(struct notifier_block *nb,
				   unsigned long action,
				   void *data)
{
	/* During kexec, we are not yet migrated to the boot CPU, so we need to
	 * make sure we are SMP safe here. Once the RAC is disabled, flag it as
	 * suspended such that the hotplug notifier returns early.
	 */
	if (action == SYS_RESTART) {
		spin_lock(&rac_lock);
		b15_rac_disable_and_flush();
		clear_bit(RAC_ENABLED, &b15_rac_flags);
		set_bit(RAC_SUSPENDED, &b15_rac_flags);
		spin_unlock(&rac_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block b15_rac_reboot_nb = {
	.notifier_call	= b15_rac_reboot_notifier,
};
/* The CPU hotplug case is the most interesting one, we basically need to make
 * sure that the RAC is disabled for the entire system prior to having a CPU
 * die, in particular prior to this dying CPU having exited the coherency
 * domain.
 *
 * Once this CPU is marked dead, we can safely re-enable the RAC for the
 * remaining CPUs in the system which are still online.
 *
 * Offlining a CPU is the problematic case, onlining a CPU is not much of an
 * issue since the CPU and its cache-level hierarchy will start filling with
 * the RAC disabled, so L1 and L2 only.
 *
 * In this function, we should NOT have to verify any unsafe setting/condition:
 *
 * b15_rac_base:
 *
 *   It is protected by the RAC_ENABLED flag, which is clear by default and
 *   only set once the initialization procedure is done; b15_rac_base has been
 *   set by that time.
 *
 * RAC_ENABLED:
 *   There is a small timing window, in b15_rac_init(), between
 *      cpuhp_setup_state_*()
 *      ...
 *      set RAC_ENABLED
 *   However, there is no hotplug activity based on the Linux booting
 *   procedure.
 *
 * Since we have to disable the RAC for all cores, we keep it on as long as
 * possible (disable it as late as possible) to gain the cache benefit.
 *
 * Thus, the dying/dead states are chosen here.
 *
 * We choose not to disable the RAC on a per-CPU basis here; if we did, we
 * would want to consider disabling it as early as possible to benefit the
 * other active CPUs.
 */
/* Running on the dying CPU */
static int b15_rac_dying_cpu(unsigned int cpu)
{
	/* During kexec/reboot, the RAC is disabled via the reboot notifier;
	 * return early here.
	 */
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
		return 0;

	spin_lock(&rac_lock);

	/* Indicate that we are starting a hotplug procedure */
	__clear_bit(RAC_ENABLED, &b15_rac_flags);

	/* Disable the readahead cache and save its value to a global */
	rac_config0_reg = b15_rac_disable_and_flush();

	spin_unlock(&rac_lock);

	return 0;
}
/* Running on a non-dying CPU */
static int b15_rac_dead_cpu(unsigned int cpu)
{
	/* During kexec/reboot, the RAC is disabled via the reboot notifier;
	 * return early here.
	 */
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
		return 0;

	spin_lock(&rac_lock);

	/* And enable it */
	__b15_rac_enable(rac_config0_reg);
	__set_bit(RAC_ENABLED, &b15_rac_flags);

	spin_unlock(&rac_lock);

	return 0;
}
static int b15_rac_suspend(void)
{
	/* Suspend the read-ahead cache operations, forcing our cache
	 * implementation to fall back to the regular ARMv7 calls.
	 *
	 * We are guaranteed to be running on the boot CPU at this point and
	 * with every other CPU quiesced, so setting RAC_SUSPENDED is not racy
	 * here.
	 */
	rac_config0_reg = b15_rac_disable_and_flush();
	set_bit(RAC_SUSPENDED, &b15_rac_flags);

	return 0;
}
static void b15_rac_resume(void)
{
	/* Coming out of a S3 suspend/resume cycle, the read-ahead cache
	 * register RAC_CONFIG0_REG will be back at its default value, so make
	 * sure we re-enable it and set the enable flag. We are also guaranteed
	 * to run on the boot CPU, so this is not racy either.
	 */
	__b15_rac_enable(rac_config0_reg);
	clear_bit(RAC_SUSPENDED, &b15_rac_flags);
}
static struct syscore_ops b15_rac_syscore_ops = {
	.suspend	= b15_rac_suspend,
	.resume		= b15_rac_resume,
};
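/* Locate and map the brcmstb CPU BIU control block, pick the flush register
 * offset based on the CPU compatible string, register the reboot, CPU hotplug
 * and syscore hooks, then enable the read-ahead cache for all CPUs.
 */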
static int __init b15_rac_init(void)
{
	struct device_node *dn, *cpu_dn;
	int ret = 0, cpu;
	u32 reg, en_mask = 0;

	dn = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
	if (!dn)
		return -ENODEV;

	if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
		goto out;

	b15_rac_base = of_iomap(dn, 0);
	if (!b15_rac_base) {
		pr_err("failed to remap BIU control base\n");
		ret = -ENOMEM;
		goto out;
	}

	cpu_dn = of_get_cpu_node(0, NULL);
	if (!cpu_dn) {
		ret = -ENODEV;
		goto out;
	}

	if (of_device_is_compatible(cpu_dn, "brcm,brahma-b15"))
		rac_flush_offset = B15_RAC_FLUSH_REG;
	else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
		rac_flush_offset = B53_RAC_FLUSH_REG;
	else {
		pr_err("Unsupported CPU\n");
		of_node_put(cpu_dn);
		ret = -EINVAL;
		goto out;
	}
	of_node_put(cpu_dn);

	ret = register_reboot_notifier(&b15_rac_reboot_nb);
	if (ret) {
		pr_err("failed to register reboot notifier\n");
		iounmap(b15_rac_base);
		goto out;
	}

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
						"arm/cache-b15-rac:dead",
						NULL, b15_rac_dead_cpu);
		if (ret)
			goto out_unmap;

		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
						"arm/cache-b15-rac:dying",
						NULL, b15_rac_dying_cpu);
		if (ret)
			goto out_cpu_dead;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP))
		register_syscore_ops(&b15_rac_syscore_ops);

	spin_lock(&rac_lock);
	reg = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
	for_each_possible_cpu(cpu)
		en_mask |= ((1 << RACPREFDATA_SHIFT) << (cpu * RAC_CPU_SHIFT));
	WARN(reg & en_mask, "Read-ahead cache not previously disabled\n");

	b15_rac_enable();
	set_bit(RAC_ENABLED, &b15_rac_flags);
	spin_unlock(&rac_lock);

	pr_info("Broadcom Brahma-B15 readahead cache at: 0x%p\n",
		b15_rac_base + RAC_CONFIG0_REG);

	goto out;

out_cpu_dead:
	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING);
out_unmap:
	unregister_reboot_notifier(&b15_rac_reboot_nb);
	iounmap(b15_rac_base);
out:
	of_node_put(dn);
	return ret;
}
arch_initcall(b15_rac_init);