/* arch/blackfin/kernel/cplb-mpu/cplbmgr.c */
  1. /*
  2. * Blackfin CPLB exception handling for when MPU in on
  3. *
  4. * Copyright 2008-2009 Analog Devices Inc.
  5. *
  6. * Licensed under the GPL-2 or later.
  7. */
  8. #include <linux/module.h>
  9. #include <linux/mm.h>
  10. #include <asm/blackfin.h>
  11. #include <asm/cacheflush.h>
  12. #include <asm/cplb.h>
  13. #include <asm/cplbinit.h>
  14. #include <asm/mmu_context.h>
  15. /*
  16. * WARNING
  17. *
  18. * This file is compiled with certain -ffixed-reg options. We have to
  19. * make sure not to call any functions here that could clobber these
  20. * registers.
  21. */
/* Size (in longs) and allocation order of one per-process permission
 * bitmap; initialized elsewhere at boot — TODO confirm against cplbinit. */
int page_mask_nelts;
int page_mask_order;
/* Per-CPU pointer to the current process's permission bitmaps.  The
 * miss handlers below read it as three consecutive bitmaps of
 * page_mask_nelts longs each: read, write (at +page_mask_nelts) and
 * execute (at +2*page_mask_nelts). */
unsigned long *current_rwx_mask[NR_CPUS];
/* Per-CPU event counters incremented by the handlers below;
 * presumably reported elsewhere (e.g. /proc) — not visible here. */
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];
/* When exception/IRQ code is configured to live in L1 instruction
 * SRAM, tag the CPLB handlers with the l1_text section attribute;
 * otherwise the attribute expands to nothing. */
#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
#define MGR_ATTR __attribute__((l1_text))
#else
#define MGR_ATTR
#endif
  33. /*
  34. * Given the contents of the status register, return the index of the
  35. * CPLB that caused the fault.
  36. */
  37. static inline int faulting_cplb_index(int status)
  38. {
  39. int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
  40. return 30 - signbits;
  41. }
  42. /*
  43. * Given the contents of the status register and the DCPLB_DATA contents,
  44. * return true if a write access should be permitted.
  45. */
  46. static inline int write_permitted(int status, unsigned long data)
  47. {
  48. if (status & FAULT_USERSUPV)
  49. return !!(data & CPLB_SUPV_WR);
  50. else
  51. return !!(data & CPLB_USER_WR);
  52. }
/* Per-CPU counters implementing round-robin replacement among the
 * switched (dynamically managed) CPLB slots. */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
  55. /*
  56. * Find an ICPLB entry to be evicted and return its index.
  57. */
  58. MGR_ATTR static int evict_one_icplb(unsigned int cpu)
  59. {
  60. int i;
  61. for (i = first_switched_icplb; i < MAX_CPLBS; i++)
  62. if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
  63. return i;
  64. i = first_switched_icplb + icplb_rr_index[cpu];
  65. if (i >= MAX_CPLBS) {
  66. i -= MAX_CPLBS - first_switched_icplb;
  67. icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
  68. }
  69. icplb_rr_index[cpu]++;
  70. return i;
  71. }
  72. MGR_ATTR static int evict_one_dcplb(unsigned int cpu)
  73. {
  74. int i;
  75. for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
  76. if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
  77. return i;
  78. i = first_switched_dcplb + dcplb_rr_index[cpu];
  79. if (i >= MAX_CPLBS) {
  80. i -= MAX_CPLBS - first_switched_dcplb;
  81. dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
  82. }
  83. dcplb_rr_index[cpu]++;
  84. return i;
  85. }
/*
 * Handle a DCPLB miss: build a protection descriptor for the faulting
 * address, evict one switched entry, and install the new mapping in
 * both the shadow table and the hardware MMRs.
 * Returns 0 when serviced, CPLB_PROT_VIOL for an illegal access.
 */
MGR_ATTR static noinline int dcplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
	int status = bfin_read_DCPLB_STATUS();
	unsigned long *mask;
	int idx;
	unsigned long d_data;

	nr_dcplb_miss[cpu]++;

	/* Default descriptor: supervisor-writable, valid, dirty 4K page. */
	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
	if (bfin_addr_dcacheable(addr)) {
		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
	}
#endif
	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* L2 SRAM is covered by one fixed descriptor. */
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			/* Async memory banks: only grant user reads where the
			 * process's read bitmap allows it (ROMFS-on-MTD case). */
#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
			mask = current_rwx_mask[cpu];
			if (mask) {
				int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				if (mask[idx] & bit)
					d_data |= CPLB_USER_RD;
			}
#endif
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
			/* Supervisor read of boot ROM: map the whole 1MB
			 * region with a single large page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		/* Memory between _ramend and physical_mem_end (reserved
		 * region): user readable and writable. */
		d_data |= CPLB_USER_RD | CPLB_USER_WR;
		if (reserved_mem_dcache_on)
			d_data |= CPLB_L1_CHBL;
	} else {
		/* Normal RAM: consult the process's read bitmap, then its
		 * write bitmap (laid out page_mask_nelts longs later). */
		mask = current_rwx_mask[cpu];
		if (mask) {
			int page = addr >> PAGE_SHIFT;
			int idx = page >> 5;
			int bit = 1 << (page & 31);

			if (mask[idx] & bit)
				d_data |= CPLB_USER_RD;

			mask += page_mask_nelts;
			if (mask[idx] & bit)
				d_data |= CPLB_USER_WR;
		}
	}

	/* Install in the shadow table, then in hardware; the DCPLBs must
	 * be disabled while the MMRs are rewritten. */
	idx = evict_one_dcplb(cpu);
	addr &= PAGE_MASK;
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = d_data;

	_disable_dcplb();
	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
	_enable_dcplb();

	return 0;
}
/*
 * Handle an ICPLB miss: build an instruction descriptor for the
 * faulting address, evict one switched entry, and install the new
 * mapping in both the shadow table and the hardware MMRs.
 * Returns 0 when serviced, CPLB_PROT_VIOL for an illegal access.
 */
MGR_ATTR static noinline int icplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
	int status = bfin_read_ICPLB_STATUS();
	int idx;
	unsigned long i_data;

	nr_icplb_miss[cpu]++;

	/* If inside the uncached DMA region, fault. */
	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
		return CPLB_PROT_VIOL;

	if (status & FAULT_USERSUPV)
		nr_icplb_supv_miss[cpu]++;

	/*
	 * First, try to find a CPLB that matches this address.  If we
	 * find one, then the fact that we're in the miss handler means
	 * that the instruction crosses a page boundary.
	 */
	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
				/* Map the following page so both halves of
				 * the straddling instruction are covered. */
				addr += PAGE_SIZE;
				break;
			}
		}
	}

	/* Default descriptor: valid 4K page with port priority set. */
	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	/*
	 * Normal RAM, and possibly the reserved memory area, are
	 * cacheable.
	 */
	if (addr < _ramend ||
	    (addr < physical_mem_end && reserved_mem_icache_on))
		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* L2 SRAM is covered by one fixed descriptor. */
		addr = L2_START;
		i_data = L2_IMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			/* Async banks: user execution allowed only where the
			 * process's execute bitmap (third mask) grants it. */
			if (!(status & FAULT_USERSUPV)) {
				unsigned long *mask = current_rwx_mask[cpu];

				if (mask) {
					int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
					int idx = page >> 5;
					int bit = 1 << (page & 31);

					mask += 2 * page_mask_nelts;
					if (mask[idx] & bit)
						i_data |= CPLB_USER_RD;
				}
			}
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & FAULT_USERSUPV)) {
			/* Supervisor execution from boot ROM: map the whole
			 * 1MB region with a single large page. */
			addr &= ~(1 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		/* Reserved memory between _ramend and physical_mem_end. */
		i_data |= CPLB_USER_RD;
		if (reserved_mem_icache_on)
			i_data |= CPLB_L1_CHBL;
	} else {
		/*
		 * Two cases to distinguish - a supervisor access must
		 * necessarily be for a module page; we grant it
		 * unconditionally (could do better here in the future).
		 * Otherwise, check the x bitmap of the current process.
		 */
		if (!(status & FAULT_USERSUPV)) {
			unsigned long *mask = current_rwx_mask[cpu];

			if (mask) {
				int page = addr >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				/* Execute mask is the third bitmap in the set. */
				mask += 2 * page_mask_nelts;
				if (mask[idx] & bit)
					i_data |= CPLB_USER_RD;
			}
		}
	}

	/* Install in the shadow table, then in hardware; the ICPLBs must
	 * be disabled while the MMRs are rewritten. */
	idx = evict_one_icplb(cpu);
	addr &= PAGE_MASK;
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = i_data;

	_disable_icplb();
	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
	_enable_icplb();

	return 0;
}
  244. MGR_ATTR static noinline int dcplb_protection_fault(unsigned int cpu)
  245. {
  246. int status = bfin_read_DCPLB_STATUS();
  247. nr_dcplb_prot[cpu]++;
  248. if (status & FAULT_RW) {
  249. int idx = faulting_cplb_index(status);
  250. unsigned long data = dcplb_tbl[cpu][idx].data;
  251. if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
  252. write_permitted(status, data)) {
  253. data |= CPLB_DIRTY;
  254. dcplb_tbl[cpu][idx].data = data;
  255. bfin_write32(DCPLB_DATA0 + idx * 4, data);
  256. return 0;
  257. }
  258. }
  259. return CPLB_PROT_VIOL;
  260. }
  261. MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
  262. {
  263. int cause = seqstat & 0x3f;
  264. unsigned int cpu = raw_smp_processor_id();
  265. switch (cause) {
  266. case 0x23:
  267. return dcplb_protection_fault(cpu);
  268. case 0x2C:
  269. return icplb_miss(cpu);
  270. case 0x26:
  271. return dcplb_miss(cpu);
  272. default:
  273. return 1;
  274. }
  275. }
  276. void flush_switched_cplbs(unsigned int cpu)
  277. {
  278. int i;
  279. unsigned long flags;
  280. nr_cplb_flush[cpu]++;
  281. flags = hard_local_irq_save();
  282. _disable_icplb();
  283. for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
  284. icplb_tbl[cpu][i].data = 0;
  285. bfin_write32(ICPLB_DATA0 + i * 4, 0);
  286. }
  287. _enable_icplb();
  288. _disable_dcplb();
  289. for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
  290. dcplb_tbl[cpu][i].data = 0;
  291. bfin_write32(DCPLB_DATA0 + i * 4, 0);
  292. }
  293. _enable_dcplb();
  294. hard_local_irq_restore(flags);
  295. }
/*
 * Install the "mask" DCPLB entries so they cover @masks, the new
 * process's permission bitmaps, and record the pointer in
 * current_rwx_mask[cpu] for the miss handlers.  A NULL @masks only
 * clears the per-CPU pointer.
 */
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
	int i;
	unsigned long addr = (unsigned long)masks;
	unsigned long d_data;
	unsigned long flags;

	if (!masks) {
		current_rwx_mask[cpu] = masks;
		return;
	}

	flags = hard_local_irq_save();
	current_rwx_mask[cpu] = masks;

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* Bitmaps living in L2 SRAM use the fixed L2 descriptor. */
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else {
		/* Supervisor-writable dirty 4K pages; cacheable when the
		 * external-memory dcache is configured in. */
		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
		d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
#endif
	}

	/* Rewrite the reserved mask slots, one page per slot; the DCPLBs
	 * must be disabled while the MMRs are rewritten. */
	_disable_dcplb();
	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
		dcplb_tbl[cpu][i].addr = addr;
		dcplb_tbl[cpu][i].data = d_data;
		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
		addr += PAGE_SIZE;
	}
	_enable_dcplb();

	hard_local_irq_restore(flags);
}