/* frv cache model.
   Copyright (C) 1999-2015 Free Software Foundation, Inc.
   Contributed by Red Hat.

   This file is part of the GNU simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"
#include "sim-main.h"
#include "cache.h"
#include "bfd.h"
void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (cache->configured_sets == 0)
        cache->configured_sets = 512;
      if (cache->configured_ways == 0)
        cache->configured_ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    case bfd_mach_fr550:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->configured_sets == 0)
        cache->configured_sets = 64;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }

  frv_cache_reconfigure (cpu, cache);

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}
void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}
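/* A minimal lifecycle sketch (illustrative only, not part of the simulator;
   "cpu" stands for an already-initialized SIM_CPU).  Zeroing the structure
   first leaves configured_sets/configured_ways/line_size/memory_latency at 0,
   so frv_cache_init picks the per-mach defaults above:

     FRV_CACHE cache;
     memset (&cache, 0, sizeof (cache));
     frv_cache_init (cpu, &cache);   -- allocates tag and data storage
     ... frv_cache_read / frv_cache_write / frv_cache_run ...
     frv_cache_term (&cache);        -- frees storage and return buffers  */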
/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      /* fall through */
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}
/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}
/* Determine whether the given address is a RAM access, assuming that
   HSR0.RME is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.DCWE or IHSR8.ICWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}
/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache-access */
}
/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.  */
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into.  Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);

  /* Now search the set for a valid tag which matches this address.  At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}
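/* Worked example (assuming the fr550-style geometry above: 64-byte lines,
   128 sets): for address 0x1234 the six offset bits are shifted out and the
   result is masked to the set count, giving
   set = (0x1234 >> 6) & (128 - 1) = 0x48 = 72,
   after which the tag comparison scans the ways of set 72.  */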
/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index = 0;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}

/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}

static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}
/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  PCADDR pc;
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  pc = CPU_PC_GET (current_cpu);
  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}
/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}
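/* Illustration of the scheme (assuming 4 ways): once all ways of a set have
   been touched, the lru fields hold roughly a permutation of 1..4, with the
   most recently used line at lru == 4.  get_tag's victim search then picks
   the unlocked line with the smallest lru value.  */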
/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag could be NULL if all ways in the set were in use and
         locked.  */
      if (tag == NULL)
        return tag;
      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}
static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}
/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);
  if (tag == NULL)
    return 0; /* Indicate non-cache-access.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown */
}
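/* Usage sketch (illustrative only; "cache" and "addr" are hypothetical):
   after frv_cache_read (cache, LS, addr) returns non-zero, the whole line is
   in cache->pipeline[LS].status.return_buffer.data and the requested bytes
   sit at offset (addr & (cache->line_size - 1)) within that buffer.  */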
/* Writes data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown */
}
/* Preload the cache line containing the given address.  Lock the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* Preload at least 1 line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;

  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);
  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown */
}
static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}
/* Invalidate the cache line containing the given address.  Flush the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it.  If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}
/* Invalidate the entire cache.  Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}
/* ---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
   -------------------------------------------------------------------------  */

/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_C:
      return LS;
    case UNIT_I1:
      return LD;
    default:
      abort ();
    }
  return 0;
}
/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE 16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;

static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}

/* Return the next free request in the queue for the given cache pipeline.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}

/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}
/* Search the free chain for an existing store request with a buffer that's
   large enough.  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;

  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}
/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;

  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;

  if (next != NULL)
    next->prev = prev;
}

/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order.  0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}
/* Requeue the request in the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}
/* Return a priority lower than the lowest one in this cache pipeline.
   0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i, j;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR.  These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found.  This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
  return lowest + 2;
}
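/* For example, if the numerically largest priority in flight is 5, the new
   request gets priority 7; a WAR request it later spawns gets 7 - 1 = 6
   (see wait_in_WAR), slotting neatly between the two.  */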
static void
add_WAR_request (FRV_CACHE_PIPELINE *pipeline, FRV_CACHE_WAR *war)
{
  /* Add the load request to the indexed pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}

/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}

/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}

static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}
/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}
void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}
/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}
/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}
/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}
/* Check whether this address interferes with a pending request of
   higher priority.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline or to a higher priority request in the same
         pipeline.  */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL)
                {
                  if (other_req->kind == req_WAR)
                    return 1;
                  if (i == pipe
                      && (address == (other_req->address & line_mask)
                          || address == all_address)
                      && priority > other_req->priority)
                    return 1;
                }
            }
        }
    }

  /* Check for a collision with load requests waiting in ARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}
/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_WAR war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}
/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    }
  pipeline->WAR[war].valid = 1;
}
static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access.  We need to
     wait for the memory unit to fetch it.  Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}
static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  FRV_CACHE_WAR war;
  FRV_CACHE_TAG *tag;
  int length;
  int lock;
  int offset;
  int lines;
  int line;
  SI address = req->address;
  SI cur_address;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* Preload at least 1 line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  /* Careful with this loop -- length is unsigned.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache.  We need to wait for the memory
             unit to fetch it.  Store this request in the WAR in the
             meantime.  */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}
static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a new
             access and a hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}
static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now.  This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}

static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line.  This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}
static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  char *buffer;
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
         miss have already been recorded, so save and restore the stats before
         and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}
/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;

  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        default:
          abort ();
        }
    }
}
/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR.  Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }

  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}
/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;

  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to the
                 pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}
/* Run the cache for the given number of cycles.  */
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}
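/* Cycle-accurate usage sketch (illustrative only; the real callers live
   elsewhere in the simulator, and "cache", "addr" and the reqno handling are
   hypothetical).  A load is queued on the LS pipeline via slot UNIT_I0 and
   the cache is stepped one cycle at a time until the data lands in the
   return buffer:

     unsigned reqno = 1;
     frv_cache_request_load (cache, reqno, addr, UNIT_I0);
     do
       frv_cache_run (cache, 1);
     while (! frv_cache_data_in_buffer (cache, LS, addr, reqno));  */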
int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Indicate non-cache-access.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}
/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE *cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}

/* Check to see if the requested data has been flushed.  */
int
frv_cache_data_flushed (FRV_CACHE *cache, int pipe, SI address, unsigned reqno)
{
  return cache->pipeline[pipe].status.flush.valid
    && cache->pipeline[pipe].status.flush.reqno == reqno
    && cache->pipeline[pipe].status.flush.address <= address
    && cache->pipeline[pipe].status.flush.address + cache->line_size
       > address;
}