/* dv-bfin_mmu.c */
  1. /* Blackfin Memory Management Unit (MMU) model.
  2. Copyright (C) 2010-2015 Free Software Foundation, Inc.
  3. Contributed by Analog Devices, Inc.
  4. This file is part of simulators.
  5. This program is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License as published by
  7. the Free Software Foundation; either version 3 of the License, or
  8. (at your option) any later version.
  9. This program is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  15. #include "config.h"
  16. #include "sim-main.h"
  17. #include "sim-options.h"
  18. #include "devices.h"
  19. #include "dv-bfin_mmu.h"
  20. #include "dv-bfin_cec.h"
/* XXX: Should this really be two blocks of registers ?  PRM describes
   these as two Content Addressable Memory (CAM) blocks.  */
struct bfin_mmu
{
  /* Base address the MMR block is attached at; set in attach_bfin_mmu_regs.  */
  bu32 base;

  /* Order after here is important -- matches hardware MMR layout.
     The _pad arrays keep every register at its hardware offset so that
     (struct offset - mmr_base()) equals the MMR offset; the I/O handlers
     rely on this to map an address straight onto a struct member.  */
  bu32 sram_base_address;

  /* Data-side (DCPLB) bank.  */
  bu32 dmem_control, dcplb_fault_status, dcplb_fault_addr;
  char _dpad0[0x100 - 0x0 - (4 * 4)];
  bu32 dcplb_addr[16];
  char _dpad1[0x200 - 0x100 - (4 * 16)];
  bu32 dcplb_data[16];
  char _dpad2[0x300 - 0x200 - (4 * 16)];
  bu32 dtest_command;
  char _dpad3[0x400 - 0x300 - (4 * 1)];
  bu32 dtest_data[2];
  char _dpad4[0x1000 - 0x400 - (4 * 2)];

  bu32 idk;  /* Filler MMR; hardware simply ignores.  */

  /* Instruction-side (ICPLB) bank; mirrors the data-side layout.  */
  bu32 imem_control, icplb_fault_status, icplb_fault_addr;
  char _ipad0[0x100 - 0x0 - (4 * 4)];
  bu32 icplb_addr[16];
  char _ipad1[0x200 - 0x100 - (4 * 16)];
  bu32 icplb_data[16];
  char _ipad2[0x300 - 0x200 - (4 * 16)];
  bu32 itest_command;
  char _ipad3[0x400 - 0x300 - (4 * 1)];
  bu32 itest_data[2];
};
/* Offset of the first real MMR within the struct; everything before it
   (the "base" field) is simulator-private state.  */
#define mmr_base() offsetof(struct bfin_mmu, sram_base_address)
/* MMR offset (in bytes) of a struct member relative to the first MMR.  */
#define mmr_offset(mmr) (offsetof(struct bfin_mmu, mmr) - mmr_base())
/* Index into the name table: one slot per 32-bit register.  */
#define mmr_idx(mmr) (mmr_offset (mmr) / 4)

/* Human-readable register names for tracing, indexed by MMR offset / 4.
   The designated initializers re-anchor the index at each register bank,
   so the table stays in sync with the struct layout above.  */
static const char * const mmr_names[BFIN_COREMMR_MMU_SIZE / 4] =
{
  "SRAM_BASE_ADDRESS", "DMEM_CONTROL", "DCPLB_FAULT_STATUS", "DCPLB_FAULT_ADDR",
  [mmr_idx (dcplb_addr[0])] = "DCPLB_ADDR0",
  "DCPLB_ADDR1", "DCPLB_ADDR2", "DCPLB_ADDR3", "DCPLB_ADDR4", "DCPLB_ADDR5",
  "DCPLB_ADDR6", "DCPLB_ADDR7", "DCPLB_ADDR8", "DCPLB_ADDR9", "DCPLB_ADDR10",
  "DCPLB_ADDR11", "DCPLB_ADDR12", "DCPLB_ADDR13", "DCPLB_ADDR14", "DCPLB_ADDR15",
  [mmr_idx (dcplb_data[0])] = "DCPLB_DATA0",
  "DCPLB_DATA1", "DCPLB_DATA2", "DCPLB_DATA3", "DCPLB_DATA4", "DCPLB_DATA5",
  "DCPLB_DATA6", "DCPLB_DATA7", "DCPLB_DATA8", "DCPLB_DATA9", "DCPLB_DATA10",
  "DCPLB_DATA11", "DCPLB_DATA12", "DCPLB_DATA13", "DCPLB_DATA14", "DCPLB_DATA15",
  [mmr_idx (dtest_command)] = "DTEST_COMMAND",
  [mmr_idx (dtest_data[0])] = "DTEST_DATA0", "DTEST_DATA1",
  [mmr_idx (imem_control)] = "IMEM_CONTROL", "ICPLB_FAULT_STATUS", "ICPLB_FAULT_ADDR",
  [mmr_idx (icplb_addr[0])] = "ICPLB_ADDR0",
  "ICPLB_ADDR1", "ICPLB_ADDR2", "ICPLB_ADDR3", "ICPLB_ADDR4", "ICPLB_ADDR5",
  "ICPLB_ADDR6", "ICPLB_ADDR7", "ICPLB_ADDR8", "ICPLB_ADDR9", "ICPLB_ADDR10",
  "ICPLB_ADDR11", "ICPLB_ADDR12", "ICPLB_ADDR13", "ICPLB_ADDR14", "ICPLB_ADDR15",
  [mmr_idx (icplb_data[0])] = "ICPLB_DATA0",
  "ICPLB_DATA1", "ICPLB_DATA2", "ICPLB_DATA3", "ICPLB_DATA4", "ICPLB_DATA5",
  "ICPLB_DATA6", "ICPLB_DATA7", "ICPLB_DATA8", "ICPLB_DATA9", "ICPLB_DATA10",
  "ICPLB_DATA11", "ICPLB_DATA12", "ICPLB_DATA13", "ICPLB_DATA14", "ICPLB_DATA15",
  [mmr_idx (itest_command)] = "ITEST_COMMAND",
  [mmr_idx (itest_data[0])] = "ITEST_DATA0", "ITEST_DATA1",
};
/* Gaps in the table are NULL (padding offsets); report those as invalid.
   Uses the GNU "?:" elvis extension.  */
#define mmr_name(off) (mmr_names[(off) / 4] ? : "<INV>")

/* Set by the --mmu-skip-cplbs option: skip CPLB table walks entirely.  */
static bool bfin_mmu_skip_cplbs = false;
/* Handle a write to the MMU MMR block.  Writable registers are stored
   straight into the matching struct member; read-only/fault registers
   silently discard the write; the test-command registers trigger the
   indirect L1 access machinery.  Returns NR_BYTES on success.  */
static unsigned
bfin_mmu_io_write_buffer (struct hw *me, const void *source,
			  int space, address_word addr, unsigned nr_bytes)
{
  struct bfin_mmu *mmu = hw_data (me);
  bu32 mmr_off;
  bu32 value;
  bu32 *valuep;

  value = dv_load_4 (source);
  mmr_off = addr - mmu->base;
  /* Map the MMR offset onto the corresponding struct member; relies on
     the struct layout matching hardware offsets.  */
  valuep = (void *)((unsigned long)mmu + mmr_base() + mmr_off);

  HW_TRACE_WRITE ();

  switch (mmr_off)
    {
    case mmr_offset(dmem_control):
    case mmr_offset(imem_control):
      /* XXX: IMC/DMC bit should add/remove L1 cache regions ...  */
    case mmr_offset(dtest_data[0]) ... mmr_offset(dtest_data[1]):
    case mmr_offset(itest_data[0]) ... mmr_offset(itest_data[1]):
    case mmr_offset(dcplb_addr[0]) ... mmr_offset(dcplb_addr[15]):
    case mmr_offset(dcplb_data[0]) ... mmr_offset(dcplb_data[15]):
    case mmr_offset(icplb_addr[0]) ... mmr_offset(icplb_addr[15]):
    case mmr_offset(icplb_data[0]) ... mmr_offset(icplb_data[15]):
      *valuep = value;
      break;
    case mmr_offset(sram_base_address):
    case mmr_offset(dcplb_fault_status):
    case mmr_offset(dcplb_fault_addr):
    case mmr_offset(idk):
    case mmr_offset(icplb_fault_status):
    case mmr_offset(icplb_fault_addr):
      /* Discard writes to these.  */
      break;
    case mmr_offset(itest_command):
      /* XXX: Not supported atm.  */
      if (value)
	hw_abort (me, "ITEST_COMMAND unimplemented");
      break;
    case mmr_offset(dtest_command):
      /* Access L1 memory indirectly.  Scatter the command bits back
	 into an L1 address per the DTEST_COMMAND encoding.  */
      *valuep = value;
      if (value)
	{
	  bu32 addr = mmu->sram_base_address |
	    ((value >> (26 - 11)) & (1 << 11)) | /* addr bit 11 (Way0/Way1) */
	    ((value >> (24 - 21)) & (1 << 21)) | /* addr bit 21 (Data/Inst) */
	    ((value >> (23 - 15)) & (1 << 15)) | /* addr bit 15 (Data Bank) */
	    ((value >> (16 - 12)) & (3 << 12)) | /* addr bits 13:12 (Subbank) */
	    (value & 0x47F8);                    /* addr bits 14 & 10:3 */

	  if (!(value & TEST_DATA_ARRAY))
	    hw_abort (me, "DTEST_COMMAND tag array unimplemented");
	  if (value & 0xfa7cb801)
	    hw_abort (me, "DTEST_COMMAND bits undefined");

	  /* Move 64 bits between DTEST_DATA0/1 and L1 memory.  */
	  if (value & TEST_WRITE)
	    sim_write (hw_system (me), addr, (void *)mmu->dtest_data, 8);
	  else
	    sim_read (hw_system (me), addr, (void *)mmu->dtest_data, 8);
	}
      break;
    default:
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
      break;
    }

  return nr_bytes;
}
  144. static unsigned
  145. bfin_mmu_io_read_buffer (struct hw *me, void *dest,
  146. int space, address_word addr, unsigned nr_bytes)
  147. {
  148. struct bfin_mmu *mmu = hw_data (me);
  149. bu32 mmr_off;
  150. bu32 *valuep;
  151. mmr_off = addr - mmu->base;
  152. valuep = (void *)((unsigned long)mmu + mmr_base() + mmr_off);
  153. HW_TRACE_READ ();
  154. switch (mmr_off)
  155. {
  156. case mmr_offset(dmem_control):
  157. case mmr_offset(imem_control):
  158. case mmr_offset(dtest_command):
  159. case mmr_offset(dtest_data[0]) ... mmr_offset(dtest_data[2]):
  160. case mmr_offset(itest_command):
  161. case mmr_offset(itest_data[0]) ... mmr_offset(itest_data[2]):
  162. /* XXX: should do something here. */
  163. case mmr_offset(dcplb_addr[0]) ... mmr_offset(dcplb_addr[15]):
  164. case mmr_offset(dcplb_data[0]) ... mmr_offset(dcplb_data[15]):
  165. case mmr_offset(icplb_addr[0]) ... mmr_offset(icplb_addr[15]):
  166. case mmr_offset(icplb_data[0]) ... mmr_offset(icplb_data[15]):
  167. case mmr_offset(sram_base_address):
  168. case mmr_offset(dcplb_fault_status):
  169. case mmr_offset(dcplb_fault_addr):
  170. case mmr_offset(idk):
  171. case mmr_offset(icplb_fault_status):
  172. case mmr_offset(icplb_fault_addr):
  173. dv_store_4 (dest, *valuep);
  174. break;
  175. default:
  176. while (1) /* Core MMRs -> exception -> doesn't return. */
  177. dv_bfin_mmr_invalid (me, addr, nr_bytes, false);
  178. break;
  179. }
  180. return nr_bytes;
  181. }
  182. static void
  183. attach_bfin_mmu_regs (struct hw *me, struct bfin_mmu *mmu)
  184. {
  185. address_word attach_address;
  186. int attach_space;
  187. unsigned attach_size;
  188. reg_property_spec reg;
  189. if (hw_find_property (me, "reg") == NULL)
  190. hw_abort (me, "Missing \"reg\" property");
  191. if (!hw_find_reg_array_property (me, "reg", 0, &reg))
  192. hw_abort (me, "\"reg\" property must contain three addr/size entries");
  193. hw_unit_address_to_attach_address (hw_parent (me),
  194. &reg.address,
  195. &attach_space, &attach_address, me);
  196. hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);
  197. if (attach_size != BFIN_COREMMR_MMU_SIZE)
  198. hw_abort (me, "\"reg\" size must be %#x", BFIN_COREMMR_MMU_SIZE);
  199. hw_attach_address (hw_parent (me),
  200. 0, attach_space, attach_address, attach_size, me);
  201. mmu->base = attach_address;
  202. }
  203. static void
  204. bfin_mmu_finish (struct hw *me)
  205. {
  206. struct bfin_mmu *mmu;
  207. mmu = HW_ZALLOC (me, struct bfin_mmu);
  208. set_hw_data (me, mmu);
  209. set_hw_io_read_buffer (me, bfin_mmu_io_read_buffer);
  210. set_hw_io_write_buffer (me, bfin_mmu_io_write_buffer);
  211. attach_bfin_mmu_regs (me, mmu);
  212. /* Initialize the MMU. */
  213. mmu->sram_base_address = 0xff800000 - 0;
  214. /*(4 * 1024 * 1024 * CPU_INDEX (hw_system_cpu (me)));*/
  215. mmu->dmem_control = 0x00000001;
  216. mmu->imem_control = 0x00000001;
  217. }
/* Device descriptor table: registers the "bfin_mmu" device model with
   the sim hardware framework; NULL entry terminates the list.  */
const struct hw_descriptor dv_bfin_mmu_descriptor[] =
{
  {"bfin_mmu", bfin_mmu_finish,},
  {NULL, NULL},
};
/* Device option parsing.  */

static DECLARE_OPTION_HANDLER (bfin_mmu_option_handler);

enum {
  OPTION_MMU_SKIP_TABLES = OPTION_START,
};

/* Command-line options exposed by this device model.  */
const OPTION bfin_mmu_options[] =
{
  { {"mmu-skip-cplbs", no_argument, NULL, OPTION_MMU_SKIP_TABLES },
    '\0', NULL, "Skip parsing of CPLB tables (big speed increase)",
    bfin_mmu_option_handler, NULL },

  { {NULL, no_argument, NULL, 0}, '\0', NULL, NULL, NULL, NULL }
};
  235. static SIM_RC
  236. bfin_mmu_option_handler (SIM_DESC sd, sim_cpu *current_cpu, int opt,
  237. char *arg, int is_command)
  238. {
  239. switch (opt)
  240. {
  241. case OPTION_MMU_SKIP_TABLES:
  242. bfin_mmu_skip_cplbs = true;
  243. return SIM_RC_OK;
  244. default:
  245. sim_io_eprintf (sd, "Unknown Blackfin MMU option %d\n", opt);
  246. return SIM_RC_FAIL;
  247. }
  248. }
/* Fetch the per-cpu MMU device state (cached lookup).  */
#define MMU_STATE(cpu) DV_STATE_CACHED (cpu, mmu)

/* Record an instruction-side fault: latch the faulting PC into
   ICPLB_FAULT_ADDR and the supervisor flag into bit 17 of
   ICPLB_FAULT_STATUS (all other status bits cleared).  */
static void
_mmu_log_ifault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 pc, bool supv)
{
  mmu->icplb_fault_addr = pc;
  mmu->icplb_fault_status = supv << 17;
}
  256. void
  257. mmu_log_ifault (SIM_CPU *cpu)
  258. {
  259. _mmu_log_ifault (cpu, MMU_STATE (cpu), PCREG, cec_get_ivg (cpu) >= 0);
  260. }
  261. static void
  262. _mmu_log_fault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 addr, bool write,
  263. bool inst, bool miss, bool supv, bool dag1, bu32 faults)
  264. {
  265. bu32 *fault_status, *fault_addr;
  266. /* No logging in non-OS mode. */
  267. if (!mmu)
  268. return;
  269. fault_status = inst ? &mmu->icplb_fault_status : &mmu->dcplb_fault_status;
  270. fault_addr = inst ? &mmu->icplb_fault_addr : &mmu->dcplb_fault_addr;
  271. /* ICPLB regs always get updated. */
  272. if (!inst)
  273. _mmu_log_ifault (cpu, mmu, PCREG, supv);
  274. *fault_addr = addr;
  275. *fault_status =
  276. (miss << 19) |
  277. (dag1 << 18) |
  278. (supv << 17) |
  279. (write << 16) |
  280. faults;
  281. }
/* Turn a detected fault into the proper CEC exception (or hardware
   error).  The if/else ordering encodes exception priority and must
   match the order used in mmu_check_addr().  A NULL MMU means non-OS
   mode (no CPLB state).  */
static void
_mmu_process_fault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 addr, bool write,
		    bool inst, bool unaligned, bool miss, bool supv, bool dag1)
{
  int excp;

  /* See order in mmu_check_addr() */
  if (unaligned)
    excp = inst ? VEC_MISALI_I : VEC_MISALI_D;
  else if (addr >= BFIN_SYSTEM_MMR_BASE)
    excp = VEC_ILL_RES;
  else if (!mmu)
    excp = inst ? VEC_CPLB_I_M : VEC_CPLB_M;
  else
    {
      /* Misses are hardware errors.  */
      cec_hwerr (cpu, HWERR_EXTERN_ADDR);
      return;
    }

  /* Log the fault (no-op when mmu is NULL), then raise the exception.  */
  _mmu_log_fault (cpu, mmu, addr, write, inst, miss, supv, dag1, 0);
  cec_exception (cpu, excp);
}
  303. void
  304. mmu_process_fault (SIM_CPU *cpu, bu32 addr, bool write, bool inst,
  305. bool unaligned, bool miss)
  306. {
  307. SIM_DESC sd = CPU_STATE (cpu);
  308. struct bfin_mmu *mmu;
  309. if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT)
  310. mmu = NULL;
  311. else
  312. mmu = MMU_STATE (cpu);
  313. _mmu_process_fault (cpu, mmu, addr, write, inst, unaligned, miss,
  314. cec_is_supervisor_mode (cpu),
  315. BFIN_CPU_STATE.multi_pc == PCREG + 6);
  316. }
  317. /* Return values:
  318. -2: no known problems
  319. -1: valid
  320. 0: miss
  321. 1: protection violation
  322. 2: multiple hits
  323. 3: unaligned
  324. 4: miss; hwerr */
  325. static int
  326. mmu_check_implicit_addr (SIM_CPU *cpu, bu32 addr, bool inst, int size,
  327. bool supv, bool dag1)
  328. {
  329. bool l1 = ((addr & 0xFF000000) == 0xFF000000);
  330. bu32 amask = (addr & 0xFFF00000);
  331. if (addr & (size - 1))
  332. return 3;
  333. /* MMRs may never be executable or accessed from usermode. */
  334. if (addr >= BFIN_SYSTEM_MMR_BASE)
  335. {
  336. if (inst)
  337. return 0;
  338. else if (!supv || dag1)
  339. return 1;
  340. else
  341. return -1;
  342. }
  343. else if (inst)
  344. {
  345. /* Some regions are not executable. */
  346. /* XXX: Should this be in the model data ? Core B 561 ? */
  347. if (l1)
  348. return (amask == 0xFFA00000) ? -1 : 1;
  349. }
  350. else
  351. {
  352. /* Some regions are not readable. */
  353. /* XXX: Should this be in the model data ? Core B 561 ? */
  354. if (l1)
  355. return (amask != 0xFFA00000) ? -1 : 4;
  356. }
  357. return -2;
  358. }
/* Exception order per the PRM (first has highest):
     Inst Multiple CPLB Hits
     Inst Misaligned Access
     Inst Protection Violation
     Inst CPLB Miss
   Only the alignment matters in non-OS mode though.
   Returns 0 when the access is OK, otherwise the exception vector to
   raise.  */
static int
_mmu_check_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst, int size)
{
  SIM_DESC sd = CPU_STATE (cpu);
  struct bfin_mmu *mmu;
  /* NOTE(review): fault_status/fault_addr are selected but not used in
     this function (logging goes through _mmu_log_fault).  */
  bu32 *fault_status, *fault_addr, *mem_control, *cplb_addr, *cplb_data;
  bu32 faults;
  bool supv, do_excp, dag1;
  int i, hits;

  supv = cec_is_supervisor_mode (cpu);
  dag1 = (BFIN_CPU_STATE.multi_pc == PCREG + 6);

  /* Non-OS environments (or explicit skip) only get the implicit checks.  */
  if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT || bfin_mmu_skip_cplbs)
    {
      int ret = mmu_check_implicit_addr (cpu, addr, inst, size, supv, dag1);
      /* Valid hits and misses are OK in non-OS envs.  */
      if (ret < 0)
	return 0;
      /* NOTE(review): no return after this call; presumably
	 _mmu_process_fault's cec_exception() never returns here --
	 confirm before relying on fallthrough behavior.  */
      _mmu_process_fault (cpu, NULL, addr, write, inst, (ret == 3), false, supv, dag1);
    }

  mmu = MMU_STATE (cpu);
  /* Select the instruction- or data-side register bank.  */
  fault_status = inst ? &mmu->icplb_fault_status : &mmu->dcplb_fault_status;
  fault_addr = inst ? &mmu->icplb_fault_addr : &mmu->dcplb_fault_addr;
  mem_control = inst ? &mmu->imem_control : &mmu->dmem_control;
  cplb_addr = inst ? &mmu->icplb_addr[0] : &mmu->dcplb_addr[0];
  cplb_data = inst ? &mmu->icplb_data[0] : &mmu->dcplb_data[0];

  faults = 0;
  hits = 0;
  do_excp = false;

  /* CPLBs disabled -> little to do.  */
  if (!(*mem_control & ENCPLB))
    {
      hits = 1;
      goto implicit_check;
    }

  /* Check all the CPLBs first.  */
  for (i = 0; i < 16; ++i)
    {
      /* Page sizes selected by the PAGE_SIZE field.  */
      const bu32 pages[4] = { 0x400, 0x1000, 0x100000, 0x400000 };
      bu32 addr_lo, addr_hi;

      /* Skip invalid entries.  */
      if (!(cplb_data[i] & CPLB_VALID))
	continue;

      /* See if this entry covers this address.  */
      addr_lo = cplb_addr[i];
      addr_hi = cplb_addr[i] + pages[(cplb_data[i] & PAGE_SIZE) >> 16];
      if (addr < addr_lo || addr >= addr_hi)
	continue;

      ++hits;
      /* Each matching entry contributes a bit to the fault bitmap.  */
      faults |= (1 << i);
      if (write)
	{
	  if (!supv && !(cplb_data[i] & CPLB_USER_WR))
	    do_excp = true;
	  if (supv && !(cplb_data[i] & CPLB_SUPV_WR))
	    do_excp = true;
	  /* Writing a clean L1-cacheable writeback page raises a
	     violation (so software can set the dirty bit).  */
	  if ((cplb_data[i] & (CPLB_WT | CPLB_L1_CHBL | CPLB_DIRTY)) == CPLB_L1_CHBL)
	    do_excp = true;
	}
      else
	{
	  if (!supv && !(cplb_data[i] & CPLB_USER_RD))
	    do_excp = true;
	}
    }

  /* Handle default/implicit CPLBs.  Note: the label below is also
     reached via goto from the CPLBs-disabled path above.  */
  if (!do_excp && hits < 2)
    {
      int ihits;
 implicit_check:
      ihits = mmu_check_implicit_addr (cpu, addr, inst, size, supv, dag1);
      switch (ihits)
	{
	/* No faults and one match -> good to go.  */
	case -1: return 0;
	case -2:
	  if (hits == 1)
	    return 0;
	  break;
	case 4:
	  cec_hwerr (cpu, HWERR_EXTERN_ADDR);
	  return 0;
	default:
	  hits = ihits;
	}
    }
  else
    /* Normalize hit count so hits==2 is always multiple hit exception.  */
    hits = MIN (2, hits);

  _mmu_log_fault (cpu, mmu, addr, write, inst, hits == 0, supv, dag1, faults);

  /* Index: 0 = miss, 1 = protection violation, 2 = multiple hit,
     3 = misaligned.  */
  if (inst)
    {
      int iexcps[] = { VEC_CPLB_I_M, VEC_CPLB_I_VL, VEC_CPLB_I_MHIT, VEC_MISALI_I };
      return iexcps[hits];
    }
  else
    {
      int dexcps[] = { VEC_CPLB_M, VEC_CPLB_VL, VEC_CPLB_MHIT, VEC_MISALI_D };
      return dexcps[hits];
    }
}
  465. void
  466. mmu_check_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst, int size)
  467. {
  468. int excp = _mmu_check_addr (cpu, addr, write, inst, size);
  469. if (excp)
  470. cec_exception (cpu, excp);
  471. }
  472. void
  473. mmu_check_cache_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst)
  474. {
  475. bu32 cacheaddr;
  476. int excp;
  477. cacheaddr = addr & ~(BFIN_L1_CACHE_BYTES - 1);
  478. excp = _mmu_check_addr (cpu, cacheaddr, write, inst, BFIN_L1_CACHE_BYTES);
  479. if (excp == 0)
  480. return;
  481. /* Most exceptions are ignored with cache funcs. */
  482. /* XXX: Not sure if we should be ignoring CPLB misses. */
  483. if (inst)
  484. {
  485. if (excp == VEC_CPLB_I_VL)
  486. return;
  487. }
  488. else
  489. {
  490. if (excp == VEC_CPLB_VL)
  491. return;
  492. }
  493. cec_exception (cpu, excp);
  494. }