/* arch/mips/mm/sc-mips.c */
  1. /*
  2. * Copyright (C) 2006 Chris Dearman (chris@mips.com),
  3. */
  4. #include <linux/init.h>
  5. #include <linux/kernel.h>
  6. #include <linux/sched.h>
  7. #include <linux/mm.h>
  8. #include <asm/cpu-type.h>
  9. #include <asm/mipsregs.h>
  10. #include <asm/bcache.h>
  11. #include <asm/cacheops.h>
  12. #include <asm/page.h>
  13. #include <asm/pgtable.h>
  14. #include <asm/mmu_context.h>
  15. #include <asm/r4kcache.h>
  16. #include <asm/mips-cm.h>
  17. /*
  18. * MIPS32/MIPS64 L2 cache handling
  19. */
  20. /*
  21. * Writeback and invalidate the secondary cache before DMA.
  22. */
  23. static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
  24. {
  25. blast_scache_range(addr, addr + size);
  26. }
/*
 * Invalidate the secondary cache before DMA.
 *
 * If addr or addr + size is not aligned to a full cache line, the
 * partial first and last lines are written back (and invalidated)
 * first, so that valid data sharing a line with the DMA buffer is not
 * lost when the whole range is invalidated below.
 */
static void mips_sc_inv(unsigned long addr, unsigned long size)
{
	unsigned long lsize = cpu_scache_line_size();
	unsigned long almask = ~(lsize - 1);

	/* Flush the (possibly partial) first and last lines of the range */
	cache_op(Hit_Writeback_Inv_SD, addr & almask);
	cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask);
	/* Now invalidate every line in [addr, addr + size) */
	blast_inv_scache_range(addr, addr + size);
}
/* No-op: on these cores the L2 cannot be switched on or off by software. */
static void mips_sc_enable(void)
{
	/* L2 cache is permanently enabled */
}
/* No-op: on these cores the L2 cannot be switched on or off by software. */
static void mips_sc_disable(void)
{
	/* L2 cache is permanently enabled */
}
/*
 * Enable L2 prefetching via the Coherence Manager GCRs.
 *
 * The L2 prefetch control registers only exist from CM 2.5 onwards;
 * on older CMs this is a no-op.
 */
static void mips_sc_prefetch_enable(void)
{
	unsigned long pftctl;

	if (mips_cm_revision() < CM_REV_CM2_5)
		return;

	/*
	 * If there is one or more L2 prefetch unit present then enable
	 * prefetching for both code & data, for all ports.
	 */
	pftctl = read_gcr_l2_pft_control();
	if (pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK) {
		/* Program the page mask and set the prefetch enable bit */
		pftctl &= ~CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK;
		pftctl |= PAGE_MASK & CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK;
		pftctl |= CM_GCR_L2_PFT_CONTROL_PFTEN_MSK;
		write_gcr_l2_pft_control(pftctl);

		/* Enable code prefetching on all ports via control register B */
		pftctl = read_gcr_l2_pft_control_b();
		pftctl |= CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK;
		pftctl |= CM_GCR_L2_PFT_CONTROL_B_CEN_MSK;
		write_gcr_l2_pft_control_b(pftctl);
	}
}
  67. static void mips_sc_prefetch_disable(void)
  68. {
  69. unsigned long pftctl;
  70. if (mips_cm_revision() < CM_REV_CM2_5)
  71. return;
  72. pftctl = read_gcr_l2_pft_control();
  73. pftctl &= ~CM_GCR_L2_PFT_CONTROL_PFTEN_MSK;
  74. write_gcr_l2_pft_control(pftctl);
  75. pftctl = read_gcr_l2_pft_control_b();
  76. pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK;
  77. pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_CEN_MSK;
  78. write_gcr_l2_pft_control_b(pftctl);
  79. }
  80. static bool mips_sc_prefetch_is_enabled(void)
  81. {
  82. unsigned long pftctl;
  83. if (mips_cm_revision() < CM_REV_CM2_5)
  84. return false;
  85. pftctl = read_gcr_l2_pft_control();
  86. if (!(pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK))
  87. return false;
  88. return !!(pftctl & CM_GCR_L2_PFT_CONTROL_PFTEN_MSK);
  89. }
/* Board-cache operations for the MIPS32/MIPS64 L2, installed via bcops */
static struct bcache_ops mips_sc_ops = {
	.bc_enable = mips_sc_enable,
	.bc_disable = mips_sc_disable,
	.bc_wback_inv = mips_sc_wback_inv,
	.bc_inv = mips_sc_inv,
	.bc_prefetch_enable = mips_sc_prefetch_enable,
	.bc_prefetch_disable = mips_sc_prefetch_disable,
	.bc_prefetch_is_enabled = mips_sc_prefetch_is_enabled,
};
  99. /*
  100. * Check if the L2 cache controller is activated on a particular platform.
  101. * MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS
  102. * cores both use c0_config2's bit 12 as "L2 Bypass" bit, that is the
  103. * cache being disabled. However there is no guarantee for this to be
  104. * true on all platforms. In an act of stupidity the spec defined bits
  105. * 12..15 as implementation defined so below function will eventually have
  106. * to be replaced by a platform specific probe.
  107. */
  108. static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
  109. {
  110. unsigned int config2 = read_c0_config2();
  111. unsigned int tmp;
  112. /* Check the bypass bit (L2B) */
  113. switch (current_cpu_type()) {
  114. case CPU_34K:
  115. case CPU_74K:
  116. case CPU_1004K:
  117. case CPU_1074K:
  118. case CPU_INTERAPTIV:
  119. case CPU_PROAPTIV:
  120. case CPU_P5600:
  121. case CPU_BMIPS5000:
  122. case CPU_QEMU_GENERIC:
  123. case CPU_P6600:
  124. if (config2 & (1 << 12))
  125. return 0;
  126. }
  127. tmp = (config2 >> 4) & 0x0f;
  128. if (0 < tmp && tmp <= 7)
  129. c->scache.linesz = 2 << tmp;
  130. else
  131. return 0;
  132. return 1;
  133. }
  134. static int __init mips_sc_probe_cm3(void)
  135. {
  136. struct cpuinfo_mips *c = &current_cpu_data;
  137. unsigned long cfg = read_gcr_l2_config();
  138. unsigned long sets, line_sz, assoc;
  139. if (cfg & CM_GCR_L2_CONFIG_BYPASS_MSK)
  140. return 0;
  141. sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
  142. sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
  143. if (sets)
  144. c->scache.sets = 64 << sets;
  145. line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
  146. line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
  147. if (line_sz)
  148. c->scache.linesz = 2 << line_sz;
  149. assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
  150. assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
  151. c->scache.ways = assoc + 1;
  152. c->scache.waysize = c->scache.sets * c->scache.linesz;
  153. c->scache.waybit = __ffs(c->scache.waysize);
  154. if (c->scache.linesz) {
  155. c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
  156. return 1;
  157. }
  158. return 0;
  159. }
/*
 * Probe for an L2 cache and record its geometry in current_cpu_data.
 *
 * Returns 1 (and clears MIPS_CACHE_NOT_PRESENT) when an L2 cache is
 * found, 0 otherwise.  On CM 3+ systems the geometry comes from the
 * CM GCRs; otherwise it is decoded from the c0_config2 register.
 */
static inline int __init mips_sc_probe(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config1, config2;
	unsigned int tmp;

	/* Mark as not present until probe completed */
	c->scache.flags |= MIPS_CACHE_NOT_PRESENT;

	/* From CM 3 the L2 geometry is reported through the GCRs instead */
	if (mips_cm_revision() >= CM_REV_CM3)
		return mips_sc_probe_cm3();

	/* Ignore anything but MIPSxx processors */
	if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
			      MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
			      MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)))
		return 0;

	/* Does this MIPS32/MIPS64 CPU have a config2 register? */
	config1 = read_c0_config1();
	if (!(config1 & MIPS_CONF_M))
		return 0;

	config2 = read_c0_config2();

	/* Sets c->scache.linesz from Config2.SL on success */
	if (!mips_sc_is_activated(c))
		return 0;

	/* Config2.SS: encoded sets per way, 64 << SS; values > 7 reserved */
	tmp = (config2 >> 8) & 0x0f;
	if (tmp <= 7)
		c->scache.sets = 64 << tmp;
	else
		return 0;

	/* Config2.SA: associativity, ways = SA + 1; values > 7 reserved */
	tmp = (config2 >> 0) & 0x0f;
	if (tmp <= 7)
		c->scache.ways = tmp + 1;
	else
		return 0;

	/* linesz is non-zero here (guaranteed by mips_sc_is_activated) */
	c->scache.waysize = c->scache.sets * c->scache.linesz;
	c->scache.waybit = __ffs(c->scache.waysize);
	c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;

	return 1;
}
  196. int mips_sc_init(void)
  197. {
  198. int found = mips_sc_probe();
  199. if (found) {
  200. mips_sc_enable();
  201. mips_sc_prefetch_enable();
  202. bcops = &mips_sc_ops;
  203. }
  204. return found;
  205. }