/*
 * Blackfin CPLB exception handling for when MPU is on
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
#include <linux/module.h>
#include <linux/mm.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>

/*
 * WARNING
 *
 * This file is compiled with certain -ffixed-reg options.  We have to
 * make sure not to call any functions here that could clobber these
 * registers.
 */

int page_mask_nelts;
int page_mask_order;
unsigned long *current_rwx_mask[NR_CPUS];

int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];

#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
#define MGR_ATTR __attribute__((l1_text))
#else
#define MGR_ATTR
#endif

/*
 * Given the contents of the status register, return the index of the
 * CPLB that caused the fault.
 */
static inline int faulting_cplb_index(int status)
{
	int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
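	/*
	 * The low 16 bits of the status register are a one-hot bitmap of
	 * the faulting CPLB.  NORM returns the number of redundant sign
	 * bits, i.e. 30 minus the position of the highest set bit: e.g.
	 * with bit 5 set, norm = 25 and the index is 30 - 25 = 5.
	 */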
	return 30 - signbits;
}

/*
 * Given the contents of the status register and the DCPLB_DATA contents,
 * return true if a write access should be permitted.
 */
static inline int write_permitted(int status, unsigned long data)
{
	if (status & FAULT_USERSUPV)
		return !!(data & CPLB_SUPV_WR);
	else
		return !!(data & CPLB_USER_WR);
}

/* Counters to implement round-robin replacement. */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];

/*
 * Find an ICPLB entry to be evicted and return its index.
 */
MGR_ATTR static int evict_one_icplb(unsigned int cpu)
{
	int i;
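	/*
	 * Prefer a switched entry that is currently invalid; failing
	 * that, evict round-robin among the switched entries, wrapping
	 * both the scan index and the per-CPU counter when they run
	 * past MAX_CPLBS.
	 */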
	for (i = first_switched_icplb; i < MAX_CPLBS; i++)
		if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
			return i;

	i = first_switched_icplb + icplb_rr_index[cpu];
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_icplb;
		icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
	}
	icplb_rr_index[cpu]++;
	return i;
}

MGR_ATTR static int evict_one_dcplb(unsigned int cpu)
{
	int i;

	for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
		if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
			return i;

	i = first_switched_dcplb + dcplb_rr_index[cpu];
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_dcplb;
		dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
	}
	dcplb_rr_index[cpu]++;
	return i;
}

MGR_ATTR static noinline int dcplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
	int status = bfin_read_DCPLB_STATUS();
	unsigned long *mask;
	int idx;
	unsigned long d_data;

	nr_dcplb_miss[cpu]++;

	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
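	/* Default: a supervisor-writable, dirty 4 KB page. */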
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
	if (bfin_addr_dcacheable(addr)) {
		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
	}
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			mask = current_rwx_mask[cpu];
			if (mask) {
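				/*
				 * The async banks are tracked in the rwx
				 * bitmaps directly after the pages covering
				 * RAM, hence the (ASYNC_BANK0_BASE - _ramend)
				 * offset.
				 */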
				int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				if (mask[idx] & bit)
					d_data |= CPLB_USER_RD;
			}
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
			   && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
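			/* Supervisor reads of the boot ROM get a single 1 MB page. */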
			addr &= ~(1 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		d_data |= CPLB_USER_RD | CPLB_USER_WR;
		if (reserved_mem_dcache_on)
			d_data |= CPLB_L1_CHBL;
	} else {
		mask = current_rwx_mask[cpu];
		if (mask) {
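			/*
			 * The rwx masks are bitmaps with 32 pages per word:
			 * page_mask_nelts words of read bits followed by the
			 * same number of write bits (the execute bits form a
			 * third group, used by icplb_miss() below).
			 */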
			int page = addr >> PAGE_SHIFT;
			int idx = page >> 5;
			int bit = 1 << (page & 31);

			if (mask[idx] & bit)
				d_data |= CPLB_USER_RD;

			mask += page_mask_nelts;
			if (mask[idx] & bit)
				d_data |= CPLB_USER_WR;
		}
	}

	idx = evict_one_dcplb(cpu);
	addr &= PAGE_MASK;
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = d_data;
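	/* The CPLBs must be disabled while their MMRs are being rewritten. */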
	_disable_dcplb();
	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
	_enable_dcplb();

	return 0;
}

MGR_ATTR static noinline int icplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
	int status = bfin_read_ICPLB_STATUS();
	int idx;
	unsigned long i_data;

	nr_icplb_miss[cpu]++;

	/* If inside the uncached DMA region, fault. */
	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
		return CPLB_PROT_VIOL;

	if (status & FAULT_USERSUPV)
		nr_icplb_supv_miss[cpu]++;

	/*
	 * First, try to find a CPLB that matches this address.  If we
	 * find one, then the fact that we're in the miss handler means
	 * that the instruction crosses a page boundary.
	 */
	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
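				/* The fetch straddles this page; map the next one instead. */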
				addr += PAGE_SIZE;
				break;
			}
		}
	}

	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	/*
	 * Normal RAM, and possibly the reserved memory area, are
	 * cacheable.
	 */
	if (addr < _ramend ||
	    (addr < physical_mem_end && reserved_mem_icache_on))
		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		i_data = L2_IMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			if (!(status & FAULT_USERSUPV)) {
				unsigned long *mask = current_rwx_mask[cpu];

				if (mask) {
					int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
					int idx = page >> 5;
					int bit = 1 << (page & 31);
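					/* Skip the read and write bitmaps to reach the execute bitmap. */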
					mask += 2 * page_mask_nelts;
					if (mask[idx] & bit)
						i_data |= CPLB_USER_RD;
				}
			}
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
			   && (status & FAULT_USERSUPV)) {
			addr &= ~(1 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		i_data |= CPLB_USER_RD;
		if (reserved_mem_icache_on)
			i_data |= CPLB_L1_CHBL;
	} else {
		/*
		 * Two cases to distinguish - a supervisor access must
		 * necessarily be for a module page; we grant it
		 * unconditionally (could do better here in the future).
		 * Otherwise, check the x bitmap of the current process.
		 */
		if (!(status & FAULT_USERSUPV)) {
			unsigned long *mask = current_rwx_mask[cpu];

			if (mask) {
				int page = addr >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				mask += 2 * page_mask_nelts;
				if (mask[idx] & bit)
					i_data |= CPLB_USER_RD;
			}
		}
	}

	idx = evict_one_icplb(cpu);
	addr &= PAGE_MASK;
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = i_data;

	_disable_icplb();
	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
	_enable_icplb();

	return 0;
}
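/*
 * A write to a clean, write-back page that is in fact write-permitted
 * only needs CPLB_DIRTY set; anything else is a genuine violation.
 */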
MGR_ATTR static noinline int dcplb_protection_fault(unsigned int cpu)
{
	int status = bfin_read_DCPLB_STATUS();

	nr_dcplb_prot[cpu]++;

	if (status & FAULT_RW) {
		int idx = faulting_cplb_index(status);
		unsigned long data = dcplb_tbl[cpu][idx].data;
		if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
		    write_permitted(status, data)) {
			data |= CPLB_DIRTY;
			dcplb_tbl[cpu][idx].data = data;
			bfin_write32(DCPLB_DATA0 + idx * 4, data);
			return 0;
		}
	}
	return CPLB_PROT_VIOL;
}

MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
{
	int cause = seqstat & 0x3f;
	unsigned int cpu = raw_smp_processor_id();
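	/*
	 * EXCAUSE values: 0x23 is a data access CPLB protection violation,
	 * 0x26 a data CPLB miss, 0x2C an instruction fetch CPLB miss.
	 */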
	switch (cause) {
	case 0x23:
		return dcplb_protection_fault(cpu);
	case 0x2C:
		return icplb_miss(cpu);
	case 0x26:
		return dcplb_miss(cpu);
	default:
		return 1;
	}
}
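/*
 * Invalidate all switched CPLB entries for this CPU, in both the shadow
 * tables and the hardware MMRs, with interrupts disabled throughout.
 */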
void flush_switched_cplbs(unsigned int cpu)
{
	int i;
	unsigned long flags;

	nr_cplb_flush[cpu]++;

	flags = hard_local_irq_save();
	_disable_icplb();
	for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
		icplb_tbl[cpu][i].data = 0;
		bfin_write32(ICPLB_DATA0 + i * 4, 0);
	}
	_enable_icplb();

	_disable_dcplb();
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
		dcplb_tbl[cpu][i].data = 0;
		bfin_write32(DCPLB_DATA0 + i * 4, 0);
	}
	_enable_dcplb();
	hard_local_irq_restore(flags);
}
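/*
 * Cover the pages holding the new process's rwx masks with supervisor
 * DCPLBs, so the miss handlers above can read the masks without taking
 * a CPLB miss of their own.
 */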
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
	int i;
	unsigned long addr = (unsigned long)masks;
	unsigned long d_data;
	unsigned long flags;

	if (!masks) {
		current_rwx_mask[cpu] = masks;
		return;
	}

	flags = hard_local_irq_save();
	current_rwx_mask[cpu] = masks;

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else {
		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
		d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
#endif
	}

	_disable_dcplb();
	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
		dcplb_tbl[cpu][i].addr = addr;
		dcplb_tbl[cpu][i].data = d_data;
		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
		addr += PAGE_SIZE;
	}
	_enable_dcplb();
	hard_local_irq_restore(flags);
}