pmsa-v7.c

/*
 * Based on linux/arch/arm/mm/nommu.c
 *
 * ARM PMSAv7 supporting functions.
 */

#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/string.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/sections.h>

#include "mm.h"
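
/*
 * Describes one candidate PMSAv7 region: a power-of-two sized window
 * ("base"/"size") together with a bitmask of its eight subregions to
 * disable ("subreg"), used to trim the window down to the memory that
 * was actually requested.
 */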
struct region {
        phys_addr_t base;
        phys_addr_t size;
        unsigned long subreg;
};

static struct region __initdata mem[MPU_MAX_REGIONS];
#ifdef CONFIG_XIP_KERNEL
static struct region __initdata xip[MPU_MAX_REGIONS];
#endif

static unsigned int __initdata mpu_min_region_order;
static unsigned int __initdata mpu_max_regions;

static int __init __mpu_min_region_order(void);
static int __init __mpu_max_regions(void);
#ifndef CONFIG_CPU_V7M

#define DRBAR   __ACCESS_CP15(c6, 0, c1, 0)
#define IRBAR   __ACCESS_CP15(c6, 0, c1, 1)
#define DRSR    __ACCESS_CP15(c6, 0, c1, 2)
#define IRSR    __ACCESS_CP15(c6, 0, c1, 3)
#define DRACR   __ACCESS_CP15(c6, 0, c1, 4)
#define IRACR   __ACCESS_CP15(c6, 0, c1, 5)
#define RNGNR   __ACCESS_CP15(c6, 0, c2, 0)

/* Region number */
static inline void rgnr_write(u32 v)
{
        write_sysreg(v, RNGNR);
}

/* Data-side / unified region attributes */

/* Region access control register */
static inline void dracr_write(u32 v)
{
        write_sysreg(v, DRACR);
}

/* Region size register */
static inline void drsr_write(u32 v)
{
        write_sysreg(v, DRSR);
}

/* Region base address register */
static inline void drbar_write(u32 v)
{
        write_sysreg(v, DRBAR);
}

static inline u32 drbar_read(void)
{
        return read_sysreg(DRBAR);
}

/* Optional instruction-side region attributes */

/* I-side Region access control register */
static inline void iracr_write(u32 v)
{
        write_sysreg(v, IRACR);
}

/* I-side Region size register */
static inline void irsr_write(u32 v)
{
        write_sysreg(v, IRSR);
}

/* I-side Region base address register */
static inline void irbar_write(u32 v)
{
        write_sysreg(v, IRBAR);
}

static inline u32 irbar_read(void)
{
        return read_sysreg(IRBAR);
}

#else

static inline void rgnr_write(u32 v)
{
        writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RNR);
}
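
/*
 * ARMv7-M has no separate DRACR/DRSR: the region attributes live in the
 * upper half of the single RASR register and the size/enable/subregion
 * bits in the lower half. The two helpers below therefore read-modify-write
 * RASR so that updating one half preserves the other.
 */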
/* Data-side / unified region attributes */

/* Region access control register */
static inline void dracr_write(u32 v)
{
        u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(15, 0);

        writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + PMSAv7_RASR);
}

/* Region size register */
static inline void drsr_write(u32 v)
{
        u32 racr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(31, 16);

        writel_relaxed(v | racr, BASEADDR_V7M_SCB + PMSAv7_RASR);
}

/* Region base address register */
static inline void drbar_write(u32 v)
{
        writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RBAR);
}

static inline u32 drbar_read(void)
{
        return readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RBAR);
}

/* ARMv7-M only supports a unified MPU, so I-side operations are nop */
static inline void iracr_write(u32 v) {}
static inline void irsr_write(u32 v) {}
static inline void irbar_write(u32 v) {}
static inline unsigned long irbar_read(void) { return 0; }

#endif
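
/*
 * A PMSAv7 region is a power-of-two sized window split into eight equal
 * subregions, each of which can be disabled individually. try_split_region()
 * tries to cover [base, base + size) with a single such window, disabling
 * the leading and trailing subregions that fall outside the requested range.
 *
 * For example (illustrative numbers only): 192MiB at 0x80000000 rounds up to
 * a 256MiB window whose 32MiB subregions 6 and 7 are disabled, leaving
 * exactly the requested 192MiB accessible.
 */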
static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region)
{
        unsigned long subreg, bslots, sslots;
        phys_addr_t abase = base & ~(size - 1);
        phys_addr_t asize = base + size - abase;
        phys_addr_t p2size = 1 << __fls(asize);
        phys_addr_t bdiff, sdiff;

        if (p2size != asize)
                p2size *= 2;

        bdiff = base - abase;
        sdiff = p2size - asize;
        subreg = p2size / PMSAv7_NR_SUBREGS;

        if ((bdiff % subreg) || (sdiff % subreg))
                return false;

        bslots = bdiff / subreg;
        sslots = sdiff / subreg;

        if (bslots || sslots) {
                int i;

                if (subreg < PMSAv7_MIN_SUBREG_SIZE)
                        return false;

                if (bslots + sslots > PMSAv7_NR_SUBREGS)
                        return false;

                for (i = 0; i < bslots; i++)
                        _set_bit(i, &region->subreg);

                for (i = 1; i <= sslots; i++)
                        _set_bit(PMSAv7_NR_SUBREGS - i, &region->subreg);
        }

        region->base = abase;
        region->size = p2size;

        return true;
}
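
/*
 * Greedily cover [base, base + size) with at most "limit" regions. Each pass
 * either covers the whole remainder with one (possibly subregion-trimmed)
 * region, or falls back to the largest naturally aligned power-of-two chunk
 * that starts at "base" and retries. Returns the number of regions used.
 */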
static int __init allocate_region(phys_addr_t base, phys_addr_t size,
                                  unsigned int limit, struct region *regions)
{
        int count = 0;
        phys_addr_t diff = size;
        int attempts = MPU_MAX_REGIONS;

        while (diff) {
                /* Try to cover the region as is (maybe with help of subregions) */
                if (try_split_region(base, size, &regions[count])) {
                        count++;
                        base += size;
                        diff -= size;
                        size = diff;
                } else {
                        /*
                         * Maximum aligned region might overflow phys_addr_t
                         * if "base" is 0. Hence we keep everything below 4G
                         * until we take the smaller of the aligned region
                         * size ("asize") and rounded region size ("p2size"),
                         * one of which is guaranteed to be smaller than the
                         * maximum physical address.
                         */
                        phys_addr_t asize = (base - 1) ^ base;
                        phys_addr_t p2size = (1 << __fls(diff)) - 1;

                        size = asize < p2size ? asize + 1 : p2size + 1;
                }

                if (count > limit)
                        break;

                if (!attempts)
                        break;

                attempts--;
        }

        return count;
}

/* MPU initialisation functions */
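
/*
 * Probe the MPU limits, reserve slots for the background, vectors and
 * (for XIP) ROM regions, then trim memblock so that the remaining lowmem
 * can be covered by the region slots that are left over.
 */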
void __init pmsav7_adjust_lowmem_bounds(void)
{
        phys_addr_t specified_mem_size = 0, total_mem_size = 0;
        struct memblock_region *reg;
        bool first = true;
        phys_addr_t mem_start;
        phys_addr_t mem_end;
        unsigned int mem_max_regions;
        int num, i;

        /* Free up PMSAv7_PROBE_REGION */
        mpu_min_region_order = __mpu_min_region_order();

        /* How many regions are supported */
        mpu_max_regions = __mpu_max_regions();

        mem_max_regions = min((unsigned int)MPU_MAX_REGIONS, mpu_max_regions);

        /* We need to keep one slot for background region */
        mem_max_regions--;

#ifndef CONFIG_CPU_V7M
        /* ... and one for vectors */
        mem_max_regions--;
#endif

#ifdef CONFIG_XIP_KERNEL
        /* plus some regions to cover XIP ROM */
        num = allocate_region(CONFIG_XIP_PHYS_ADDR, __pa(_exiprom) - CONFIG_XIP_PHYS_ADDR,
                              mem_max_regions, xip);

        mem_max_regions -= num;
#endif

        for_each_memblock(memory, reg) {
                if (first) {
                        phys_addr_t phys_offset = PHYS_OFFSET;

                        /*
                         * Initially only use memory contiguous from
                         * PHYS_OFFSET
                         */
                        if (reg->base != phys_offset)
                                panic("First memory bank must be contiguous from PHYS_OFFSET");

                        mem_start = reg->base;
                        mem_end = reg->base + reg->size;
                        specified_mem_size = reg->size;
                        first = false;
                } else {
                        /*
                         * memblock auto merges contiguous blocks, remove
                         * all blocks afterwards in one go (we can't remove
                         * blocks separately while iterating)
                         */
                        pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
                                  &mem_end, &reg->base);
                        memblock_remove(reg->base, 0 - reg->base);
                        break;
                }
        }

        memset(mem, 0, sizeof(mem));
        num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);

        for (i = 0; i < num; i++) {
                unsigned long subreg = mem[i].size / PMSAv7_NR_SUBREGS;

                total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg);

                pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n",
                         &mem[i].base, &mem[i].size, PMSAv7_NR_SUBREGS, &mem[i].subreg);
        }

        if (total_mem_size != specified_mem_size) {
                pr_warn("Truncating memory from %pa to %pa (MPU region constraints)\n",
                        &specified_mem_size, &total_mem_size);
                memblock_remove(mem_start + total_mem_size,
                                specified_mem_size - total_mem_size);
        }
}

static int __init __mpu_max_regions(void)
{
        /*
         * We don't support a different number of I/D side regions so if we
         * have separate instruction and data memory maps then return
         * whichever side has a smaller number of supported regions.
         */
        u32 dregions, iregions, mpuir;

        mpuir = read_cpuid_mputype();

        dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;

        /* Check for separate d-side and i-side memory maps */
        if (mpuir & MPUIR_nU)
                iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION;

        /* Use the smallest of the two maxima */
        return min(dregions, iregions);
}

static int __init mpu_iside_independent(void)
{
        /* MPUIR.nU specifies whether there is *not* a unified memory map */
        return read_cpuid_mputype() & MPUIR_nU;
}
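
/*
 * Writing all-ones to a region base address register and reading it back
 * leaves only the implemented base address bits set, so the position of the
 * lowest set bit gives the smallest supported region order. For example
 * (hypothetical value): reading back 0xFFFFFFE0 would mean 32-byte (order 5)
 * regions are the minimum.
 */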
static int __init __mpu_min_region_order(void)
{
        u32 drbar_result, irbar_result;

        /* We've kept a region free for this probing */
        rgnr_write(PMSAv7_PROBE_REGION);
        isb();
        /*
         * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
         * region order
         */
        drbar_write(0xFFFFFFFC);
        drbar_result = irbar_result = drbar_read();
        drbar_write(0x0);
        /* If the MPU is non-unified, we use the larger of the two minima */
        if (mpu_iside_independent()) {
                irbar_write(0xFFFFFFFC);
                irbar_result = irbar_read();
                irbar_write(0x0);
        }
        isb();  /* Ensure that MPU region operations have completed */
        /* Return whichever result is larger */

        return __ffs(max(drbar_result, irbar_result));
}
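
/*
 * Program region "number": select it via RGNR, then write base, attributes
 * and size/enable (mirrored to the I-side registers when the MPU is not
 * unified), with barriers so accesses under the old mapping complete before
 * the new one takes effect.
 */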
static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
                                   unsigned int size_order, unsigned int properties,
                                   unsigned int subregions, bool need_flush)
{
        u32 size_data;

        /* We kept a region free for probing resolution of MPU regions */
        if (number > mpu_max_regions
            || number >= MPU_MAX_REGIONS)
                return -ENOENT;

        if (size_order > 32)
                return -ENOMEM;

        if (size_order < mpu_min_region_order)
                return -ENOMEM;

        /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^(N+1) */
        size_data = ((size_order - 1) << PMSAv7_RSR_SZ) | 1 << PMSAv7_RSR_EN;
        size_data |= subregions << PMSAv7_RSR_SD;
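        /*
         * E.g. a 1MiB region (size_order == 20) with no disabled subregions
         * yields SZ == 19, EN == 1, SD == 0, i.e. size_data == 0x27.
         */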
        if (need_flush)
                flush_cache_all();

        dsb();  /* Ensure all previous data accesses occur with old mappings */
        rgnr_write(number);
        isb();
        drbar_write(start);
        dracr_write(properties);
        isb();  /* Propagate properties before enabling region */
        drsr_write(size_data);

        /* Check for independent I-side registers */
        if (mpu_iside_independent()) {
                irbar_write(start);
                iracr_write(properties);
                isb();
                irsr_write(size_data);
        }
        isb();

        /* Store region info (we treat i/d side the same, so only store d) */
        mpu_rgn_info.rgns[number].dracr = properties;
        mpu_rgn_info.rgns[number].drbar = start;
        mpu_rgn_info.rgns[number].drsr = size_data;

        mpu_rgn_info.used++;

        return 0;
}

/*
 * Set up default MPU regions, doing nothing if there is no MPU
 */
void __init pmsav7_setup(void)
{
        int i, region = 0, err = 0;

        /* Setup MPU (order is important) */
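        /*
         * Region numbering as programmed below: 0 is the background region,
         * followed by the XIP ROM regions (if any), then the RAM regions,
         * and finally the vectors region on non-V7M parts.
         */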
        /* Background */
        err |= mpu_setup_region(region++, 0, 32,
                                PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0RW,
                                0, false);

#ifdef CONFIG_XIP_KERNEL
        /* ROM */
        for (i = 0; i < ARRAY_SIZE(xip); i++) {
                /*
                 * In case we overwrite the RAM region we set up earlier in
                 * head-nommu.S (which is cacheable), all subsequent data
                 * accesses until we set up RAM below would be done with the
                 * BG region (which is uncacheable), so we need to clean and
                 * invalidate the cache.
                 */
                bool need_flush = region == PMSAv7_RAM_REGION;

                if (!xip[i].size)
                        continue;

                err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size),
                                        PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL,
                                        xip[i].subreg, need_flush);
        }
#endif
        /* RAM */
        for (i = 0; i < ARRAY_SIZE(mem); i++) {
                if (!mem[i].size)
                        continue;

                err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size),
                                        PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL,
                                        mem[i].subreg, false);
        }

        /* Vectors */
#ifndef CONFIG_CPU_V7M
        err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
                                PMSAv7_AP_PL1RW_PL0NA | PMSAv7_RGN_NORMAL,
                                0, false);
#endif

        if (err) {
                panic("MPU region initialization failure! %d", err);
        } else {
                pr_info("Using ARMv7 PMSA Compliant MPU. Region independence: %s, Used %d of %d regions\n",
                        mpu_iside_independent() ? "Yes" : "No",
                        mpu_rgn_info.used, mpu_max_regions);
        }
}