cache-uniphier.c

/*
 * Copyright (C) 2015-2016 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)             "uniphier: " fmt

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/slab.h>

#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>

/* control registers */
#define UNIPHIER_SSCC                   0x0     /* Control Register */
#define    UNIPHIER_SSCC_BST            BIT(20) /* UCWG burst read */
#define    UNIPHIER_SSCC_ACT            BIT(19) /* Inst-Data separate */
#define    UNIPHIER_SSCC_WTG            BIT(18) /* WT gathering on */
#define    UNIPHIER_SSCC_PRD            BIT(17) /* enable pre-fetch */
#define    UNIPHIER_SSCC_ON             BIT(0)  /* enable cache */
#define UNIPHIER_SSCLPDAWCR             0x30    /* Unified/Data Active Way Control */
#define UNIPHIER_SSCLPIAWCR             0x34    /* Instruction Active Way Control */

/* revision registers */
#define UNIPHIER_SSCID                  0x0     /* ID Register */

/* operation registers */
#define UNIPHIER_SSCOPE                 0x244   /* Cache Operation Primitive Entry */
#define    UNIPHIER_SSCOPE_CM_INV       0x0     /* invalidate */
#define    UNIPHIER_SSCOPE_CM_CLEAN     0x1     /* clean */
#define    UNIPHIER_SSCOPE_CM_FLUSH     0x2     /* flush */
#define    UNIPHIER_SSCOPE_CM_SYNC      0x8     /* sync (drain bufs) */
#define    UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH 0x9 /* flush p-fetch buf */
#define UNIPHIER_SSCOQM                 0x248   /* Cache Operation Queue Mode */
#define    UNIPHIER_SSCOQM_S_MASK       (0x3 << 17)
#define    UNIPHIER_SSCOQM_S_RANGE      (0x0 << 17)
#define    UNIPHIER_SSCOQM_S_ALL        (0x1 << 17)
#define    UNIPHIER_SSCOQM_CE           BIT(15) /* notify completion */
#define    UNIPHIER_SSCOQM_CM_INV       0x0     /* invalidate */
#define    UNIPHIER_SSCOQM_CM_CLEAN     0x1     /* clean */
#define    UNIPHIER_SSCOQM_CM_FLUSH     0x2     /* flush */
#define UNIPHIER_SSCOQAD                0x24c   /* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ                0x250   /* Cache Operation Queue Size */
#define UNIPHIER_SSCOPPQSEF             0x25c   /* Cache Operation Queue Set Complete */
#define    UNIPHIER_SSCOPPQSEF_FE       BIT(1)
#define    UNIPHIER_SSCOPPQSEF_OE       BIT(0)
#define UNIPHIER_SSCOLPQS               0x260   /* Cache Operation Queue Status */
#define    UNIPHIER_SSCOLPQS_EF         BIT(2)
#define    UNIPHIER_SSCOLPQS_EST        BIT(1)
#define    UNIPHIER_SSCOLPQS_QST        BIT(0)

/* Is the operation region specified by address range? */
#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
                ((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)

/**
 * uniphier_cache_data - UniPhier outer cache specific data
 *
 * @ctrl_base: virtual base address of control registers
 * @rev_base: virtual base address of revision registers
 * @op_base: virtual base address of operation registers
 * @way_ctrl_base: virtual base address of active way control registers
 * @way_mask: each bit specifies if the way is present
 * @nsets: number of associativity sets
 * @line_size: line size in bytes
 * @range_op_max_size: max size that can be handled by a single range operation
 * @list: list node to include this level in the whole cache hierarchy
 */
struct uniphier_cache_data {
        void __iomem *ctrl_base;
        void __iomem *rev_base;
        void __iomem *op_base;
        void __iomem *way_ctrl_base;
        u32 way_mask;
        u32 nsets;
        u32 line_size;
        u32 range_op_max_size;
        struct list_head list;
};

/*
 * List of the whole outer cache hierarchy.  This list is only modified during
 * the early boot stage, so no mutex is taken for the access to the list.
 */
static LIST_HEAD(uniphier_cache_list);

/**
 * __uniphier_cache_sync - perform a sync point for a particular cache level
 *
 * @data: cache controller specific data
 */
static void __uniphier_cache_sync(struct uniphier_cache_data *data)
{
        /* This sequence need not be atomic.  Do not disable IRQ. */
        writel_relaxed(UNIPHIER_SSCOPE_CM_SYNC,
                       data->op_base + UNIPHIER_SSCOPE);
        /* need a read back to confirm */
        readl_relaxed(data->op_base + UNIPHIER_SSCOPE);
}

/**
 * __uniphier_cache_maint_common - run a queue operation for a particular level
 *
 * @data: cache controller specific data
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @operation: flags to specify the desired cache operation
 */
static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
                                          unsigned long start,
                                          unsigned long size,
                                          u32 operation)
{
        unsigned long flags;

        /*
         * No spin lock is necessary here because:
         *
         * [1] This outer cache controller is able to accept maintenance
         * operations from multiple CPUs at a time in an SMP system; if a
         * maintenance operation is under way and another operation is issued,
         * the new one is stored in the queue.  The controller performs one
         * operation after another.  If the queue is full, the status register,
         * UNIPHIER_SSCOPPQSEF, indicates that the queue registration has
         * failed.  The status registers, UNIPHIER_{SSCOPPQSEF, SSCOLPQS}, have
         * different instances for each CPU, i.e. each CPU can track the status
         * of the maintenance operations triggered by itself.
         *
         * [2] The cache command registers, UNIPHIER_{SSCOQM, SSCOQAD, SSCOQSZ,
         * SSCOQWN}, are shared between multiple CPUs, but the hardware still
         * guarantees the registration sequence is atomic; the write accesses
         * to them are arbitrated by the hardware.  The first accessor to the
         * register, UNIPHIER_SSCOQM, holds the access right and it is released
         * by reading the status register, UNIPHIER_SSCOPPQSEF.  While one CPU
         * is holding the access right, other CPUs fail to register operations.
         * One CPU should not hold the access right for a long time, so local
         * IRQs should be disabled during the following sequence.
         */
        local_irq_save(flags);

        /* clear the complete notification flag */
        writel_relaxed(UNIPHIER_SSCOLPQS_EF, data->op_base + UNIPHIER_SSCOLPQS);

        do {
                /* set cache operation */
                writel_relaxed(UNIPHIER_SSCOQM_CE | operation,
                               data->op_base + UNIPHIER_SSCOQM);

                /* set address range if needed */
                if (likely(UNIPHIER_SSCOQM_S_IS_RANGE(operation))) {
                        writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
                        writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
                }
        } while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
                          (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

        /* wait until the operation is completed */
        while (likely(readl_relaxed(data->op_base + UNIPHIER_SSCOLPQS) !=
                      UNIPHIER_SSCOLPQS_EF))
                cpu_relax();

        local_irq_restore(flags);
}

static void __uniphier_cache_maint_all(struct uniphier_cache_data *data,
                                       u32 operation)
{
        __uniphier_cache_maint_common(data, 0, 0,
                                      UNIPHIER_SSCOQM_S_ALL | operation);

        __uniphier_cache_sync(data);
}

static void __uniphier_cache_maint_range(struct uniphier_cache_data *data,
                                         unsigned long start, unsigned long end,
                                         u32 operation)
{
        unsigned long size;

        /*
         * If the start address is not aligned,
         * perform a cache operation for the first cache-line
         */
        start = start & ~(data->line_size - 1);

        size = end - start;

        if (unlikely(size >= (unsigned long)(-data->line_size))) {
                /* this means cache operation for all range */
                __uniphier_cache_maint_all(data, operation);
                return;
        }

        /*
         * If the end address is not aligned,
         * perform a cache operation for the last cache-line
         */
        size = ALIGN(size, data->line_size);
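
        /* issue the operation in chunks no larger than range_op_max_size */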
        while (size) {
                unsigned long chunk_size = min_t(unsigned long, size,
                                                 data->range_op_max_size);

                __uniphier_cache_maint_common(data, start, chunk_size,
                                              UNIPHIER_SSCOQM_S_RANGE | operation);

                start += chunk_size;
                size -= chunk_size;
        }

        __uniphier_cache_sync(data);
}

static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
{
        u32 val = 0;

        if (on)
                val = UNIPHIER_SSCC_WTG | UNIPHIER_SSCC_PRD | UNIPHIER_SSCC_ON;

        writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
}

static void __init __uniphier_cache_set_active_ways(
                                        struct uniphier_cache_data *data)
{
        unsigned int cpu;
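
        /* each CPU has its own active way control register, 4 bytes apart */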
        for_each_possible_cpu(cpu)
                writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu);
}

static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
                                       u32 operation)
{
        struct uniphier_cache_data *data;

        list_for_each_entry(data, &uniphier_cache_list, list)
                __uniphier_cache_maint_range(data, start, end, operation);
}

static void uniphier_cache_maint_all(u32 operation)
{
        struct uniphier_cache_data *data;

        list_for_each_entry(data, &uniphier_cache_list, list)
                __uniphier_cache_maint_all(data, operation);
}

static void uniphier_cache_inv_range(unsigned long start, unsigned long end)
{
        uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_clean_range(unsigned long start, unsigned long end)
{
        uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_CLEAN);
}

static void uniphier_cache_flush_range(unsigned long start, unsigned long end)
{
        uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH);
}

static void __init uniphier_cache_inv_all(void)
{
        uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_flush_all(void)
{
        uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}

static void uniphier_cache_disable(void)
{
        struct uniphier_cache_data *data;
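
        /* turn off the levels from the outermost one, then flush out dirty data */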
        list_for_each_entry_reverse(data, &uniphier_cache_list, list)
                __uniphier_cache_enable(data, false);

        uniphier_cache_flush_all();
}

static void __init uniphier_cache_enable(void)
{
        struct uniphier_cache_data *data;

        uniphier_cache_inv_all();

        list_for_each_entry(data, &uniphier_cache_list, list) {
                __uniphier_cache_enable(data, true);
                __uniphier_cache_set_active_ways(data);
        }
}

static void uniphier_cache_sync(void)
{
        struct uniphier_cache_data *data;

        list_for_each_entry(data, &uniphier_cache_list, list)
                __uniphier_cache_sync(data);
}

static const struct of_device_id uniphier_cache_match[] __initconst = {
        { .compatible = "socionext,uniphier-system-cache" },
        { /* sentinel */ }
};

static int __init __uniphier_cache_init(struct device_node *np,
                                        unsigned int *cache_level)
{
        struct uniphier_cache_data *data;
        u32 level, cache_size;
        struct device_node *next_np;
        int ret = 0;

        if (!of_match_node(uniphier_cache_match, np)) {
                pr_err("L%d: not compatible with uniphier cache\n",
                       *cache_level);
                return -EINVAL;
        }

        if (of_property_read_u32(np, "cache-level", &level)) {
                pr_err("L%d: cache-level is not specified\n", *cache_level);
                return -EINVAL;
        }

        if (level != *cache_level) {
                pr_err("L%d: cache-level is unexpected value %d\n",
                       *cache_level, level);
                return -EINVAL;
        }

        if (!of_property_read_bool(np, "cache-unified")) {
                pr_err("L%d: cache-unified is not specified\n", *cache_level);
                return -EINVAL;
        }

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        if (of_property_read_u32(np, "cache-line-size", &data->line_size) ||
            !is_power_of_2(data->line_size)) {
                pr_err("L%d: cache-line-size is unspecified or invalid\n",
                       *cache_level);
                ret = -EINVAL;
                goto err;
        }

        if (of_property_read_u32(np, "cache-sets", &data->nsets) ||
            !is_power_of_2(data->nsets)) {
                pr_err("L%d: cache-sets is unspecified or invalid\n",
                       *cache_level);
                ret = -EINVAL;
                goto err;
        }

        if (of_property_read_u32(np, "cache-size", &cache_size) ||
            cache_size == 0 || cache_size % (data->nsets * data->line_size)) {
                pr_err("L%d: cache-size is unspecified or invalid\n",
                       *cache_level);
                ret = -EINVAL;
                goto err;
        }
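
        /*
         * number of ways = cache-size / (cache-sets * cache-line-size);
         * set one bit in way_mask for each way that is present
         */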
        data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1,
                                 0);

        data->ctrl_base = of_iomap(np, 0);
        if (!data->ctrl_base) {
                pr_err("L%d: failed to map control register\n", *cache_level);
                ret = -ENOMEM;
                goto err;
        }

        data->rev_base = of_iomap(np, 1);
        if (!data->rev_base) {
                pr_err("L%d: failed to map revision register\n", *cache_level);
                ret = -ENOMEM;
                goto err;
        }

        data->op_base = of_iomap(np, 2);
        if (!data->op_base) {
                pr_err("L%d: failed to map operation register\n", *cache_level);
                ret = -ENOMEM;
                goto err;
        }
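
        /*
         * default offset of the active way control registers;
         * overridden below for some older SoCs
         */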
        data->way_ctrl_base = data->ctrl_base + 0xc00;

        if (*cache_level == 2) {
                u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
                /*
                 * The size of range operation is limited to (1 << 22) or less
                 * for PH-sLD8 or older SoCs.
                 */
                if (revision <= 0x16)
                        data->range_op_max_size = (u32)1 << 22;

                /*
                 * Unfortunately, the offset address of active way control base
                 * varies from SoC to SoC.
                 */
                switch (revision) {
                case 0x11:      /* sLD3 */
                        data->way_ctrl_base = data->ctrl_base + 0x870;
                        break;
                case 0x12:      /* LD4 */
                case 0x16:      /* sld8 */
                        data->way_ctrl_base = data->ctrl_base + 0x840;
                        break;
                default:
                        break;
                }
        }
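
        /*
         * If no SoC-specific limit was set above, range_op_max_size is still 0
         * and the unsigned subtraction wraps it around to a value close to
         * U32_MAX, effectively leaving range operations unchunked.
         */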
        data->range_op_max_size -= data->line_size;

        INIT_LIST_HEAD(&data->list);
        list_add_tail(&data->list, &uniphier_cache_list); /* no mutex */

        /*
         * OK, this level has been successfully initialized.  Look for the next
         * level cache.  Do not roll back even if the initialization of the
         * next level cache fails because we want to continue with available
         * cache levels.
         */
        next_np = of_find_next_cache_node(np);
        if (next_np) {
                (*cache_level)++;
                ret = __uniphier_cache_init(next_np, cache_level);
        }
        of_node_put(next_np);

        return ret;
err:
        iounmap(data->op_base);
        iounmap(data->rev_base);
        iounmap(data->ctrl_base);
        kfree(data);

        return ret;
}

int __init uniphier_cache_init(void)
{
        struct device_node *np = NULL;
        unsigned int cache_level;
        int ret = 0;

        /* look for level 2 cache */
        while ((np = of_find_matching_node(np, uniphier_cache_match)))
                if (!of_property_read_u32(np, "cache-level", &cache_level) &&
                    cache_level == 2)
                        break;

        if (!np)
                return -ENODEV;

        ret = __uniphier_cache_init(np, &cache_level);
        of_node_put(np);
        if (ret) {
                /*
                 * Error out only if L2 initialization fails.  Continue with
                 * any error on L3 or outer because they are optional.
                 */
                if (cache_level == 2) {
                        pr_err("failed to initialize L2 cache\n");
                        return ret;
                }

                cache_level--;
                ret = 0;
        }
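
        /* hook the maintenance routines into the ARM outer cache framework */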
        outer_cache.inv_range = uniphier_cache_inv_range;
        outer_cache.clean_range = uniphier_cache_clean_range;
        outer_cache.flush_range = uniphier_cache_flush_range;
        outer_cache.flush_all = uniphier_cache_flush_all;
        outer_cache.disable = uniphier_cache_disable;
        outer_cache.sync = uniphier_cache_sync;

        uniphier_cache_enable();

        pr_info("enabled outer cache (cache level: %d)\n", cache_level);

        return ret;
}