profile.c 54 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082
  1. /* frv simulator machine independent profiling code.
  2. Copyright (C) 1998-2015 Free Software Foundation, Inc.
  3. Contributed by Red Hat
  4. This file is part of the GNU simulators.
  5. This program is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License as published by
  7. the Free Software Foundation; either version 3 of the License, or
  8. (at your option) any later version.
  9. This program is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program. If not, see <http://www.gnu.org/licenses/>.
  15. */
  16. #define WANT_CPU
  17. #define WANT_CPU_FRVBF
  18. #include "sim-main.h"
  19. #include "bfd.h"
  20. #if WITH_PROFILE_MODEL_P
  21. #include "profile.h"
  22. #include "profile-fr400.h"
  23. #include "profile-fr500.h"
  24. #include "profile-fr550.h"
  25. static void
  26. reset_gr_flags (SIM_CPU *cpu, INT gr)
  27. {
  28. SIM_DESC sd = CPU_STATE (cpu);
  29. if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
  30. || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
  31. fr400_reset_gr_flags (cpu, gr);
  32. /* Other machines have no gr flags right now. */
  33. }
  34. static void
  35. reset_fr_flags (SIM_CPU *cpu, INT fr)
  36. {
  37. SIM_DESC sd = CPU_STATE (cpu);
  38. if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
  39. || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
  40. fr400_reset_fr_flags (cpu, fr);
  41. else if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
  42. fr500_reset_fr_flags (cpu, fr);
  43. }
  44. static void
  45. reset_acc_flags (SIM_CPU *cpu, INT acc)
  46. {
  47. SIM_DESC sd = CPU_STATE (cpu);
  48. if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
  49. || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
  50. fr400_reset_acc_flags (cpu, acc);
  51. /* Other machines have no acc flags right now. */
  52. }
  53. static void
  54. reset_cc_flags (SIM_CPU *cpu, INT cc)
  55. {
  56. SIM_DESC sd = CPU_STATE (cpu);
  57. if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
  58. fr500_reset_cc_flags (cpu, cc);
  59. /* Other machines have no cc flags. */
  60. }
  61. void
  62. set_use_is_gr_complex (SIM_CPU *cpu, INT gr)
  63. {
  64. if (gr != -1)
  65. {
  66. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  67. reset_gr_flags (cpu, gr);
  68. ps->cur_gr_complex |= (((DI)1) << gr);
  69. }
  70. }
  71. void
  72. set_use_not_gr_complex (SIM_CPU *cpu, INT gr)
  73. {
  74. if (gr != -1)
  75. {
  76. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  77. ps->cur_gr_complex &= ~(((DI)1) << gr);
  78. }
  79. }
  80. int
  81. use_is_gr_complex (SIM_CPU *cpu, INT gr)
  82. {
  83. if (gr != -1)
  84. {
  85. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  86. return ps->cur_gr_complex & (((DI)1) << gr);
  87. }
  88. return 0;
  89. }
/* Global flag indicating whether the current insn is being modeled.  */
enum FRV_INSN_MODELING model_insn = FRV_INSN_NO_MODELING;

/* static buffer for the name of the currently most restrictive hazard.  */
static char hazard_name[100] = "";

/* Per-pipeline insn prefetch state: address and request number of the
   outstanding fetch, if any (NO_REQNO means idle).  */
FRV_INSN_FETCH_BUFFER frv_insn_fetch_buffer[]
= {
  {1, NO_REQNO}, {1, NO_REQNO} /* init with impossible address.  */
};

/* The kinds of operations which can be queued against a cache.  */
enum cache_request
{
  cache_load,
  cache_invalidate,
  cache_flush,
  cache_preload,
  cache_unlock
};

/* A queue of load requests from the data cache. Use to keep track of loads
   which are still pending.  */
/* TODO -- some of these are mutually exclusive and can use a union.  */
typedef struct
{
  FRV_CACHE *cache;		/* cache which the request targets.  */
  unsigned reqno;		/* unique request number.  */
  SI address;			/* target address of the request.  */
  int length;			/* byte count (loads and preloads).  */
  int is_signed;		/* sign-extend the loaded value?  */
  int regnum;			/* destination register of a load.  */
  int cycles;			/* cycles until the request is submitted.  */
  int regtype;			/* REGTYPE_* of the destination register.  */
  int lock;			/* lock the line after a preload?  */
  int all;			/* operate on all cache entries?  */
  int slot;			/* VLIW slot which issued the request.  */
  int active;			/* has the request been activated yet?  */
  enum cache_request request;	/* which operation is queued.  */
} CACHE_QUEUE_ELEMENT;

#define CACHE_QUEUE_SIZE 64 /* TODO -- make queue dynamic */
struct
{
  unsigned reqno;	/* next request number to hand out.  */
  int ix;		/* number of elements currently queued.  */
  CACHE_QUEUE_ELEMENT q[CACHE_QUEUE_SIZE];
} cache_queue = {0, 0};
  133. /* Queue a request for a load from the cache. The load will be queued as
  134. 'inactive' and will be requested after the given number
  135. of cycles have passed from the point the load is activated. */
  136. void
  137. request_cache_load (SIM_CPU *cpu, INT regnum, int regtype, int cycles)
  138. {
  139. CACHE_QUEUE_ELEMENT *q;
  140. FRV_VLIW *vliw;
  141. int slot;
  142. /* For a conditional load which was not executed, CPU_LOAD_LENGTH will be
  143. zero. */
  144. if (CPU_LOAD_LENGTH (cpu) == 0)
  145. return;
  146. if (cache_queue.ix >= CACHE_QUEUE_SIZE)
  147. abort (); /* TODO: Make the queue dynamic */
  148. q = & cache_queue.q[cache_queue.ix];
  149. ++cache_queue.ix;
  150. q->reqno = cache_queue.reqno++;
  151. q->request = cache_load;
  152. q->cache = CPU_DATA_CACHE (cpu);
  153. q->address = CPU_LOAD_ADDRESS (cpu);
  154. q->length = CPU_LOAD_LENGTH (cpu);
  155. q->is_signed = CPU_LOAD_SIGNED (cpu);
  156. q->regnum = regnum;
  157. q->regtype = regtype;
  158. q->cycles = cycles;
  159. q->active = 0;
  160. vliw = CPU_VLIW (cpu);
  161. slot = vliw->next_slot - 1;
  162. q->slot = (*vliw->current_vliw)[slot];
  163. CPU_LOAD_LENGTH (cpu) = 0;
  164. }
  165. /* Queue a request to flush the cache. The request will be queued as
  166. 'inactive' and will be requested after the given number
  167. of cycles have passed from the point the request is activated. */
  168. void
  169. request_cache_flush (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
  170. {
  171. CACHE_QUEUE_ELEMENT *q;
  172. FRV_VLIW *vliw;
  173. int slot;
  174. if (cache_queue.ix >= CACHE_QUEUE_SIZE)
  175. abort (); /* TODO: Make the queue dynamic */
  176. q = & cache_queue.q[cache_queue.ix];
  177. ++cache_queue.ix;
  178. q->reqno = cache_queue.reqno++;
  179. q->request = cache_flush;
  180. q->cache = cache;
  181. q->address = CPU_LOAD_ADDRESS (cpu);
  182. q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
  183. q->cycles = cycles;
  184. q->active = 0;
  185. vliw = CPU_VLIW (cpu);
  186. slot = vliw->next_slot - 1;
  187. q->slot = (*vliw->current_vliw)[slot];
  188. }
  189. /* Queue a request to invalidate the cache. The request will be queued as
  190. 'inactive' and will be requested after the given number
  191. of cycles have passed from the point the request is activated. */
  192. void
  193. request_cache_invalidate (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
  194. {
  195. CACHE_QUEUE_ELEMENT *q;
  196. FRV_VLIW *vliw;
  197. int slot;
  198. if (cache_queue.ix >= CACHE_QUEUE_SIZE)
  199. abort (); /* TODO: Make the queue dynamic */
  200. q = & cache_queue.q[cache_queue.ix];
  201. ++cache_queue.ix;
  202. q->reqno = cache_queue.reqno++;
  203. q->request = cache_invalidate;
  204. q->cache = cache;
  205. q->address = CPU_LOAD_ADDRESS (cpu);
  206. q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
  207. q->cycles = cycles;
  208. q->active = 0;
  209. vliw = CPU_VLIW (cpu);
  210. slot = vliw->next_slot - 1;
  211. q->slot = (*vliw->current_vliw)[slot];
  212. }
  213. /* Queue a request to preload the cache. The request will be queued as
  214. 'inactive' and will be requested after the given number
  215. of cycles have passed from the point the request is activated. */
  216. void
  217. request_cache_preload (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
  218. {
  219. CACHE_QUEUE_ELEMENT *q;
  220. FRV_VLIW *vliw;
  221. int slot;
  222. if (cache_queue.ix >= CACHE_QUEUE_SIZE)
  223. abort (); /* TODO: Make the queue dynamic */
  224. q = & cache_queue.q[cache_queue.ix];
  225. ++cache_queue.ix;
  226. q->reqno = cache_queue.reqno++;
  227. q->request = cache_preload;
  228. q->cache = cache;
  229. q->address = CPU_LOAD_ADDRESS (cpu);
  230. q->length = CPU_LOAD_LENGTH (cpu);
  231. q->lock = CPU_LOAD_LOCK (cpu);
  232. q->cycles = cycles;
  233. q->active = 0;
  234. vliw = CPU_VLIW (cpu);
  235. slot = vliw->next_slot - 1;
  236. q->slot = (*vliw->current_vliw)[slot];
  237. CPU_LOAD_LENGTH (cpu) = 0;
  238. }
  239. /* Queue a request to unlock the cache. The request will be queued as
  240. 'inactive' and will be requested after the given number
  241. of cycles have passed from the point the request is activated. */
  242. void
  243. request_cache_unlock (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
  244. {
  245. CACHE_QUEUE_ELEMENT *q;
  246. FRV_VLIW *vliw;
  247. int slot;
  248. if (cache_queue.ix >= CACHE_QUEUE_SIZE)
  249. abort (); /* TODO: Make the queue dynamic */
  250. q = & cache_queue.q[cache_queue.ix];
  251. ++cache_queue.ix;
  252. q->reqno = cache_queue.reqno++;
  253. q->request = cache_unlock;
  254. q->cache = cache;
  255. q->address = CPU_LOAD_ADDRESS (cpu);
  256. q->cycles = cycles;
  257. q->active = 0;
  258. vliw = CPU_VLIW (cpu);
  259. slot = vliw->next_slot - 1;
  260. q->slot = (*vliw->current_vliw)[slot];
  261. }
/* Submit the given queued request to its target cache.  Dispatches 1:1
   onto the frv_cache_request_* API according to the request kind.  */
static void
submit_cache_request (CACHE_QUEUE_ELEMENT *q)
{
  switch (q->request)
    {
    case cache_load:
      frv_cache_request_load (q->cache, q->reqno, q->address, q->slot);
      break;
    case cache_flush:
      frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
				    q->all, 1/*flush*/);
      break;
    case cache_invalidate:
      frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
				    q->all, 0/*flush*/);
      break;
    case cache_preload:
      frv_cache_request_preload (q->cache, q->address, q->slot,
				 q->length, q->lock);
      break;
    case cache_unlock:
      frv_cache_request_unlock (q->cache, q->address, q->slot);
      break;
    default:
      /* Unknown request kind -- internal inconsistency.  */
      abort ();
    }
}
  289. /* Activate all inactive load requests. */
  290. static void
  291. activate_cache_requests (SIM_CPU *cpu)
  292. {
  293. int i;
  294. for (i = 0; i < cache_queue.ix; ++i)
  295. {
  296. CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
  297. if (! q->active)
  298. {
  299. q->active = 1;
  300. /* Submit the request now if the cycle count is zero. */
  301. if (q->cycles == 0)
  302. submit_cache_request (q);
  303. }
  304. }
  305. }
/* Check to see if a load is pending which affects the given register(s).
   REGNUM/WORDS describe a (possibly multi-word) register operand of kind
   REGTYPE.  Returns non-zero if any active queued load overlaps it.  */
int
load_pending_for_register (SIM_CPU *cpu, int regnum, int words, int regtype)
{
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];

      /* Must be the same kind of register.  */
      if (! q->active || q->request != cache_load || q->regtype != regtype)
	continue;

      /* If the registers numbers are equal, then we have a match.  */
      if (q->regnum == regnum)
	return 1; /* load pending */

      /* Check for overlap of a load with a multi-word register.  */
      if (regnum < q->regnum)
	{
	  if (regnum + words > q->regnum)
	    return 1;
	}
      /* Check for overlap of a multi-word load with the register.  */
      else
	{
	  /* Number of SI-sized words the queued load will write.  */
	  int data_words = (q->length + sizeof (SI) - 1) / sizeof (SI);
	  if (q->regnum + data_words > regnum)
	    return 1;
	}
    }

  return 0; /* no load pending */
}
  337. /* Check to see if a cache flush pending which affects the given address. */
  338. static int
  339. flush_pending_for_address (SIM_CPU *cpu, SI address)
  340. {
  341. int line_mask = ~(CPU_DATA_CACHE (cpu)->line_size - 1);
  342. int i;
  343. for (i = 0; i < cache_queue.ix; ++i)
  344. {
  345. CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
  346. /* Must be the same kind of request and active. */
  347. if (! q->active || q->request != cache_flush)
  348. continue;
  349. /* If the addresses are equal, then we have a match. */
  350. if ((q->address & line_mask) == (address & line_mask))
  351. return 1; /* flush pending */
  352. }
  353. return 0; /* no flush pending */
  354. }
/* Remove element I from the cache queue, shifting later elements down.
   If the removed element was a register load, fold the register's queued
   post-processing time, plus 1 extra cycle for being a floating-point
   load, into its busy latency -- but only once no other load of that
   register remains queued.  */
static void
remove_cache_queue_element (SIM_CPU *cpu, int i)
{
  /* If we are removing the load of a FR register, then remember which one(s).
   */
  CACHE_QUEUE_ELEMENT q = cache_queue.q[i];

  /* Compact the queue over the removed element.  */
  for (--cache_queue.ix; i < cache_queue.ix; ++i)
    cache_queue.q[i] = cache_queue.q[i + 1];

  /* If we removed a load of a FR register, check to see if any other loads
     of that register is still queued. If not, then apply the queued post
     processing time of that register to its latency. Also apply
     1 extra cycle of latency to the register since it was a floating point
     load.  */
  if (q.request == cache_load && q.regtype != REGTYPE_NONE)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* Number of SI-sized words the load wrote, i.e. registers touched.  */
      int data_words = (q.length + sizeof (SI) - 1) / sizeof (SI);
      int j;
      for (j = 0; j < data_words; ++j)
	{
	  int regnum = q.regnum + j;
	  if (! load_pending_for_register (cpu, regnum, 1, q.regtype))
	    {
	      if (q.regtype == REGTYPE_FR)
		{
		  int *fr = ps->fr_busy;
		  fr[regnum] += 1 + ps->fr_ptime[regnum];
		  ps->fr_ptime[regnum] = 0;
		}
	    }
	}
    }
}
/* Copy data from the cache buffer to the target register(s).  SLOT is the
   cache pipeline whose return buffer holds the data; Q describes the load
   (length, signedness, destination register and its kind).  Lengths of
   1, 2, 4, 8 and 16 bytes are supported; anything else aborts.  */
static void
copy_load_data (SIM_CPU *current_cpu, FRV_CACHE *cache, int slot,
		CACHE_QUEUE_ELEMENT *q)
{
  switch (q->length)
    {
    case 1:
      /* Byte load, sign- or zero-extended into an FR or GR register.  */
      if (q->regtype == REGTYPE_FR)
	{
	  if (q->is_signed)
	    {
	      QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
	      SET_H_FR (q->regnum, value);
	    }
	  else
	    {
	      UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
	      SET_H_FR (q->regnum, value);
	    }
	}
      else
	{
	  if (q->is_signed)
	    {
	      QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
	      SET_H_GR (q->regnum, value);
	    }
	  else
	    {
	      UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
	      SET_H_GR (q->regnum, value);
	    }
	}
      break;
    case 2:
      /* Halfword load, sign- or zero-extended into an FR or GR register.  */
      if (q->regtype == REGTYPE_FR)
	{
	  if (q->is_signed)
	    {
	      HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
	      SET_H_FR (q->regnum, value);
	    }
	  else
	    {
	      UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
	      SET_H_FR (q->regnum, value);
	    }
	}
      else
	{
	  if (q->is_signed)
	    {
	      HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
	      SET_H_GR (q->regnum, value);
	    }
	  else
	    {
	      UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
	      SET_H_GR (q->regnum, value);
	    }
	}
      break;
    case 4:
      /* Word load; signedness is irrelevant for a full-width register.  */
      if (q->regtype == REGTYPE_FR)
	{
	  SET_H_FR (q->regnum,
		    CACHE_RETURN_DATA (cache, slot, q->address, SF, 4));
	}
      else
	{
	  SET_H_GR (q->regnum,
		    CACHE_RETURN_DATA (cache, slot, q->address, SI, 4));
	}
      break;
    case 8:
      /* Doubleword load into a register pair.  */
      if (q->regtype == REGTYPE_FR)
	{
	  SET_H_FR_DOUBLE (q->regnum,
			   CACHE_RETURN_DATA (cache, slot, q->address, DF, 8));
	}
      else
	{
	  SET_H_GR_DOUBLE (q->regnum,
			   CACHE_RETURN_DATA (cache, slot, q->address, DI, 8));
	}
      break;
    case 16:
      /* Quadword load; handled via the quad-set handlers which take the
	 address of the data in the cache return buffer.  */
      if (q->regtype == REGTYPE_FR)
	frvbf_h_fr_quad_set_handler (current_cpu, q->regnum,
				     CACHE_RETURN_DATA_ADDRESS (cache, slot,
								q->address,
								16));
      else
	frvbf_h_gr_quad_set_handler (current_cpu, q->regnum,
				     CACHE_RETURN_DATA_ADDRESS (cache, slot,
								q->address,
								16));
      break;
    default:
      abort ();
    }
}
  491. static int
  492. request_complete (SIM_CPU *cpu, CACHE_QUEUE_ELEMENT *q)
  493. {
  494. FRV_CACHE* cache;
  495. if (! q->active || q->cycles > 0)
  496. return 0;
  497. cache = CPU_DATA_CACHE (cpu);
  498. switch (q->request)
  499. {
  500. case cache_load:
  501. /* For loads, we must wait until the data is returned from the cache. */
  502. if (frv_cache_data_in_buffer (cache, 0, q->address, q->reqno))
  503. {
  504. copy_load_data (cpu, cache, 0, q);
  505. return 1;
  506. }
  507. if (frv_cache_data_in_buffer (cache, 1, q->address, q->reqno))
  508. {
  509. copy_load_data (cpu, cache, 1, q);
  510. return 1;
  511. }
  512. break;
  513. case cache_flush:
  514. /* We must wait until the data is flushed. */
  515. if (frv_cache_data_flushed (cache, 0, q->address, q->reqno))
  516. return 1;
  517. if (frv_cache_data_flushed (cache, 1, q->address, q->reqno))
  518. return 1;
  519. break;
  520. default:
  521. /* All other requests are complete once they've been made. */
  522. return 1;
  523. }
  524. return 0;
  525. }
/* Run the insn and data caches through the given number of cycles, taking
   note of load requests which are fullfilled as a result.  */
static void
run_caches (SIM_CPU *cpu, int cycles)
{
  FRV_CACHE* data_cache = CPU_DATA_CACHE (cpu);
  FRV_CACHE* insn_cache = CPU_INSN_CACHE (cpu);
  int i;
  /* For each cycle, run the caches, noting which requests have been fullfilled
     and submitting new requests on their designated cycles.  */
  for (i = 0; i < cycles; ++i)
    {
      int j;
      /* Run the caches through 1 cycle.  */
      frv_cache_run (data_cache, 1);
      frv_cache_run (insn_cache, 1);

      /* Note whether prefetched insn data has been loaded yet.
	 A fetch buffer whose data has arrived is marked idle by resetting
	 its reqno to NO_REQNO.  */
      for (j = LS; j < FRV_CACHE_PIPELINES; ++j)
	{
	  if (frv_insn_fetch_buffer[j].reqno != NO_REQNO
	      && frv_cache_data_in_buffer (insn_cache, j,
					   frv_insn_fetch_buffer[j].address,
					   frv_insn_fetch_buffer[j].reqno))
	    frv_insn_fetch_buffer[j].reqno = NO_REQNO;
	}

      /* Check to see which requests have been satisfied and which should
	 be submitted now.  */
      for (j = 0; j < cache_queue.ix; ++j)
	{
	  CACHE_QUEUE_ELEMENT *q = & cache_queue.q[j];
	  if (! q->active)
	    continue;

	  /* If a load has been satisfied, complete the operation and remove it
	     from the queue.  Removal shifts later elements down, so back up
	     J to revisit the slot just vacated.  */
	  if (request_complete (cpu, q))
	    {
	      remove_cache_queue_element (cpu, j);
	      --j;
	      continue;
	    }

	  /* Decrease the cycle count of each queued request.
	     Submit a request for each queued request whose cycle count has
	     become zero.  */
	  --q->cycles;
	  if (q->cycles == 0)
	    submit_cache_request (q);
	}
    }
}
  575. static void
  576. apply_latency_adjustments (SIM_CPU *cpu)
  577. {
  578. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  579. int i;
  580. /* update the latencies of the registers. */
  581. int *fr = ps->fr_busy;
  582. int *acc = ps->acc_busy;
  583. for (i = 0; i < 64; ++i)
  584. {
  585. if (ps->fr_busy_adjust[i] > 0)
  586. *fr -= ps->fr_busy_adjust[i]; /* OK if it goes negative. */
  587. if (ps->acc_busy_adjust[i] > 0)
  588. *acc -= ps->acc_busy_adjust[i]; /* OK if it goes negative. */
  589. ++fr;
  590. ++acc;
  591. }
  592. }
/* Account for the number of cycles which have just passed in the latency of
   various system elements. Works for negative cycles too so that latency
   can be extended in the case of insn fetch latency.
   If negative or zero, then no adjustment is necessary.  */
static void
update_latencies (SIM_CPU *cpu, int cycles)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* update the latencies of the registers.  */
  int *fdiv;
  int *fsqrt;
  int *idiv;
  int *flt;
  int *media;
  int *ccr;
  int *gr = ps->gr_busy;
  int *fr = ps->fr_busy;
  int *acc = ps->acc_busy;
  int *spr;
  /* This loop handles GR, FR and ACC registers.  */
  for (i = 0; i < 64; ++i)
    {
      /* If the busy count drops to 0, mark the GR "not in use".  */
      if (*gr <= cycles)
	{
	  *gr = 0;
	  reset_gr_flags (cpu, i);
	}
      else
	*gr -= cycles;
      /* If the busy drops to 0, then mark the register as
	 "not in use".  */
      if (*fr <= cycles)
	{
	  int *fr_lat = ps->fr_latency + i;
	  *fr = 0;
	  ps->fr_busy_adjust[i] = 0;
	  /* Only clear flags if this register has no target latency.  */
	  if (*fr_lat == 0)
	    reset_fr_flags (cpu, i);
	}
      else
	*fr -= cycles;
      /* If the busy drops to 0, then mark the register as
	 "not in use".  */
      if (*acc <= cycles)
	{
	  int *acc_lat = ps->acc_latency + i;
	  *acc = 0;
	  ps->acc_busy_adjust[i] = 0;
	  /* Only clear flags if this register has no target latency.  */
	  if (*acc_lat == 0)
	    reset_acc_flags (cpu, i);
	}
      else
	*acc -= cycles;
      ++gr;
      ++fr;
      ++acc;
    }
  /* This loop handles CCR registers.  */
  ccr = ps->ccr_busy;
  for (i = 0; i < 8; ++i)
    {
      if (*ccr <= cycles)
	{
	  *ccr = 0;
	  reset_cc_flags (cpu, i);
	}
      else
	*ccr -= cycles;
      ++ccr;
    }
  /* This loop handles SPR registers.  */
  spr = ps->spr_busy;
  for (i = 0; i < 4096; ++i)
    {
      if (*spr <= cycles)
	*spr = 0;
      else
	*spr -= cycles;
      ++spr;
    }
  /* This loop handles resources.  The divide and sqrt units exist in
     2 slots.  */
  idiv = ps->idiv_busy;
  fdiv = ps->fdiv_busy;
  fsqrt = ps->fsqrt_busy;
  for (i = 0; i < 2; ++i)
    {
      *idiv = (*idiv <= cycles) ? 0 : (*idiv - cycles);
      *fdiv = (*fdiv <= cycles) ? 0 : (*fdiv - cycles);
      *fsqrt = (*fsqrt <= cycles) ? 0 : (*fsqrt - cycles);
      ++idiv;
      ++fdiv;
      ++fsqrt;
    }
  /* Float and media units can occur in 4 slots on some machines.  */
  flt = ps->float_busy;
  media = ps->media_busy;
  for (i = 0; i < 4; ++i)
    {
      *flt = (*flt <= cycles) ? 0 : (*flt - cycles);
      *media = (*media <= cycles) ? 0 : (*media - cycles);
      ++flt;
      ++media;
    }
}
  700. /* Print information about the wait for the given number of cycles. */
  701. void
  702. frv_model_trace_wait_cycles (SIM_CPU *cpu, int cycles, const char *hazard_name)
  703. {
  704. if (TRACE_INSN_P (cpu) && cycles > 0)
  705. {
  706. SIM_DESC sd = CPU_STATE (cpu);
  707. trace_printf (sd, cpu, "**** %s wait %d cycles ***\n",
  708. hazard_name, cycles);
  709. }
  710. }
  711. void
  712. trace_vliw_wait_cycles (SIM_CPU *cpu)
  713. {
  714. if (TRACE_INSN_P (cpu))
  715. {
  716. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  717. frv_model_trace_wait_cycles (cpu, ps->vliw_wait, hazard_name);
  718. }
  719. }
  720. /* Wait for the given number of cycles. */
  721. void
  722. frv_model_advance_cycles (SIM_CPU *cpu, int cycles)
  723. {
  724. PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  725. update_latencies (cpu, cycles);
  726. run_caches (cpu, cycles);
  727. PROFILE_MODEL_TOTAL_CYCLES (p) += cycles;
  728. }
  729. void
  730. handle_resource_wait (SIM_CPU *cpu)
  731. {
  732. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  733. if (ps->vliw_wait != 0)
  734. frv_model_advance_cycles (cpu, ps->vliw_wait);
  735. if (ps->vliw_load_stall > ps->vliw_wait)
  736. ps->vliw_load_stall -= ps->vliw_wait;
  737. else
  738. ps->vliw_load_stall = 0;
  739. }
  740. /* Account for the number of cycles until these resources will be available
  741. again. */
  742. static void
  743. update_target_latencies (SIM_CPU *cpu)
  744. {
  745. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  746. int i;
  747. /* update the latencies of the registers. */
  748. int *ccr_lat;
  749. int *gr_lat = ps->gr_latency;
  750. int *fr_lat = ps->fr_latency;
  751. int *acc_lat = ps->acc_latency;
  752. int *spr_lat;
  753. int *ccr;
  754. int *gr = ps->gr_busy;
  755. int *fr = ps->fr_busy;
  756. int *acc = ps->acc_busy;
  757. int *spr;
  758. /* This loop handles GR, FR and ACC registers. */
  759. for (i = 0; i < 64; ++i)
  760. {
  761. if (*gr_lat)
  762. {
  763. *gr = *gr_lat;
  764. *gr_lat = 0;
  765. }
  766. if (*fr_lat)
  767. {
  768. *fr = *fr_lat;
  769. *fr_lat = 0;
  770. }
  771. if (*acc_lat)
  772. {
  773. *acc = *acc_lat;
  774. *acc_lat = 0;
  775. }
  776. ++gr; ++gr_lat;
  777. ++fr; ++fr_lat;
  778. ++acc; ++acc_lat;
  779. }
  780. /* This loop handles CCR registers. */
  781. ccr = ps->ccr_busy;
  782. ccr_lat = ps->ccr_latency;
  783. for (i = 0; i < 8; ++i)
  784. {
  785. if (*ccr_lat)
  786. {
  787. *ccr = *ccr_lat;
  788. *ccr_lat = 0;
  789. }
  790. ++ccr; ++ccr_lat;
  791. }
  792. /* This loop handles SPR registers. */
  793. spr = ps->spr_busy;
  794. spr_lat = ps->spr_latency;
  795. for (i = 0; i < 4096; ++i)
  796. {
  797. if (*spr_lat)
  798. {
  799. *spr = *spr_lat;
  800. *spr_lat = 0;
  801. }
  802. ++spr; ++spr_lat;
  803. }
  804. }
  805. /* Run the caches until all pending cache flushes are complete. */
  806. static void
  807. wait_for_flush (SIM_CPU *cpu)
  808. {
  809. SI address = CPU_LOAD_ADDRESS (cpu);
  810. int wait = 0;
  811. while (flush_pending_for_address (cpu, address))
  812. {
  813. frv_model_advance_cycles (cpu, 1);
  814. ++wait;
  815. }
  816. if (TRACE_INSN_P (cpu) && wait)
  817. {
  818. sprintf (hazard_name, "Data cache flush address %p:", address);
  819. frv_model_trace_wait_cycles (cpu, wait, hazard_name);
  820. }
  821. }
/* Initialize cycle counting for an insn.
   FIRST_P is non-zero if this is the first insn in a set of parallel
   insns.  */
void
frvbf_model_insn_before (SIM_CPU *cpu, int first_p)
{
  SIM_DESC sd = CPU_STATE (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  /* Reset the per-insn wait counters and busy adjustments.  */
  ps->vliw_wait = 0;
  ps->post_wait = 0;
  memset (ps->fr_busy_adjust, 0, sizeof (ps->fr_busy_adjust));
  memset (ps->acc_busy_adjust, 0, sizeof (ps->acc_busy_adjust));
  if (first_p)
    {
      /* Starting a new VLIW insn: count it and reset its aggregate
	 cycle, branch and load-stall state.  */
      ps->vliw_insns++;
      ps->vliw_cycles = 0;
      ps->vliw_branch_taken = 0;
      ps->vliw_load_stall = 0;
    }
  /* Dispatch to the machine-specific model hook.  */
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      fr400_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_before (cpu, first_p);
      break;
    default:
      break;
    }
  /* Any pending data cache flush must complete before the new VLIW
     insn begins.  */
  if (first_p)
    wait_for_flush (cpu);
}
/* Record the cycles computed for an insn.
   LAST_P is non-zero if this is the last insn in a set of parallel insns,
   and we update the total cycle count.
   CYCLES is the cycle count of the insn.  */
void
frvbf_model_insn_after (SIM_CPU *cpu, int last_p, int cycles)
{
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  SIM_DESC sd = CPU_STATE (cpu);
  PROFILE_MODEL_CUR_INSN_CYCLES (p) = cycles;
  /* The number of cycles for a VLIW insn is the maximum number of cycles
     used by any individual insn within it.  */
  if (cycles > ps->vliw_cycles)
    ps->vliw_cycles = cycles;
  if (last_p)
    {
      /* This is the last insn in a VLIW insn.  */
      struct frv_interrupt_timer *timer = & frv_interrupt_state.timer;
      /* NOTE: the order of the next four calls is significant.  */
      activate_cache_requests (cpu); /* before advancing cycles.  */
      apply_latency_adjustments (cpu); /* must go first.  */
      update_target_latencies (cpu); /* must go next.  */
      frv_model_advance_cycles (cpu, ps->vliw_cycles);
      PROFILE_MODEL_LOAD_STALL_CYCLES (p) += ps->vliw_load_stall;
      /* Check the interrupt timer.  cycles contains the total cycle count.  */
      if (timer->enabled)
	{
	  cycles = PROFILE_MODEL_TOTAL_CYCLES (p);
	  /* Queue the interrupt if the timer period elapsed within the
	     cycles just consumed.  */
	  if (timer->current % timer->value
	      + (cycles - timer->current) >= timer->value)
	    frv_queue_external_interrupt (cpu, timer->interrupt);
	  timer->current = cycles;
	}
      ps->past_first_p = 0; /* Next one will be the first in a new VLIW.  */
      ps->branch_address = -1;
    }
  else
    ps->past_first_p = 1;
  /* Dispatch to the machine-specific model hook.  */
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      fr400_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_after (cpu, last_p, cycles);
      break;
    default:
      break;
    }
}
  913. USI
  914. frvbf_model_branch (SIM_CPU *current_cpu, PCADDR target, int hint)
  915. {
  916. /* Record the hint and branch address for use in profiling. */
  917. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
  918. ps->branch_hint = hint;
  919. ps->branch_address = target;
  920. }
  921. /* Top up the latency of the given GR by the given number of cycles. */
  922. void
  923. update_GR_latency (SIM_CPU *cpu, INT out_GR, int cycles)
  924. {
  925. if (out_GR >= 0)
  926. {
  927. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  928. int *gr = ps->gr_latency;
  929. if (gr[out_GR] < cycles)
  930. gr[out_GR] = cycles;
  931. }
  932. }
  933. void
  934. decrease_GR_busy (SIM_CPU *cpu, INT in_GR, int cycles)
  935. {
  936. if (in_GR >= 0)
  937. {
  938. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  939. int *gr = ps->gr_busy;
  940. gr[in_GR] -= cycles;
  941. }
  942. }
  943. /* Top up the latency of the given double GR by the number of cycles. */
  944. void
  945. update_GRdouble_latency (SIM_CPU *cpu, INT out_GR, int cycles)
  946. {
  947. if (out_GR >= 0)
  948. {
  949. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  950. int *gr = ps->gr_latency;
  951. if (gr[out_GR] < cycles)
  952. gr[out_GR] = cycles;
  953. if (out_GR < 63 && gr[out_GR + 1] < cycles)
  954. gr[out_GR + 1] = cycles;
  955. }
  956. }
  957. void
  958. update_GR_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
  959. {
  960. if (out_GR >= 0)
  961. {
  962. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  963. int *gr = ps->gr_latency;
  964. /* The latency of the GR will be at least the number of cycles used
  965. by the insn. */
  966. if (gr[out_GR] < cycles)
  967. gr[out_GR] = cycles;
  968. /* The latency will also depend on how long it takes to retrieve the
  969. data from the cache or memory. Assume that the load is issued
  970. after the last cycle of the insn. */
  971. request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
  972. }
  973. }
  974. void
  975. update_GRdouble_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
  976. {
  977. if (out_GR >= 0)
  978. {
  979. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  980. int *gr = ps->gr_latency;
  981. /* The latency of the GR will be at least the number of cycles used
  982. by the insn. */
  983. if (gr[out_GR] < cycles)
  984. gr[out_GR] = cycles;
  985. if (out_GR < 63 && gr[out_GR + 1] < cycles)
  986. gr[out_GR + 1] = cycles;
  987. /* The latency will also depend on how long it takes to retrieve the
  988. data from the cache or memory. Assume that the load is issued
  989. after the last cycle of the insn. */
  990. request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
  991. }
  992. }
  993. void
  994. update_GR_latency_for_swap (SIM_CPU *cpu, INT out_GR, int cycles)
  995. {
  996. update_GR_latency_for_load (cpu, out_GR, cycles);
  997. }
  998. /* Top up the latency of the given FR by the given number of cycles. */
  999. void
  1000. update_FR_latency (SIM_CPU *cpu, INT out_FR, int cycles)
  1001. {
  1002. if (out_FR >= 0)
  1003. {
  1004. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1005. int *fr = ps->fr_latency;
  1006. if (fr[out_FR] < cycles)
  1007. fr[out_FR] = cycles;
  1008. }
  1009. }
  1010. /* Top up the latency of the given double FR by the number of cycles. */
  1011. void
  1012. update_FRdouble_latency (SIM_CPU *cpu, INT out_FR, int cycles)
  1013. {
  1014. if (out_FR >= 0)
  1015. {
  1016. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1017. int *fr = ps->fr_latency;
  1018. if (fr[out_FR] < cycles)
  1019. fr[out_FR] = cycles;
  1020. if (out_FR < 63 && fr[out_FR + 1] < cycles)
  1021. fr[out_FR + 1] = cycles;
  1022. }
  1023. }
  1024. void
  1025. update_FR_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
  1026. {
  1027. if (out_FR >= 0)
  1028. {
  1029. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1030. int *fr = ps->fr_latency;
  1031. /* The latency of the FR will be at least the number of cycles used
  1032. by the insn. */
  1033. if (fr[out_FR] < cycles)
  1034. fr[out_FR] = cycles;
  1035. /* The latency will also depend on how long it takes to retrieve the
  1036. data from the cache or memory. Assume that the load is issued
  1037. after the last cycle of the insn. */
  1038. request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
  1039. }
  1040. }
  1041. void
  1042. update_FRdouble_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
  1043. {
  1044. if (out_FR >= 0)
  1045. {
  1046. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1047. int *fr = ps->fr_latency;
  1048. /* The latency of the FR will be at least the number of cycles used
  1049. by the insn. */
  1050. if (fr[out_FR] < cycles)
  1051. fr[out_FR] = cycles;
  1052. if (out_FR < 63 && fr[out_FR + 1] < cycles)
  1053. fr[out_FR + 1] = cycles;
  1054. /* The latency will also depend on how long it takes to retrieve the
  1055. data from the cache or memory. Assume that the load is issued
  1056. after the last cycle of the insn. */
  1057. request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
  1058. }
  1059. }
  1060. /* Top up the post-processing time of the given FR by the given number of
  1061. cycles. */
  1062. void
  1063. update_FR_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
  1064. {
  1065. if (out_FR >= 0)
  1066. {
  1067. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1068. /* If a load is pending on this register, then add the cycles to
  1069. the post processing time for this register. Otherwise apply it
  1070. directly to the latency of the register. */
  1071. if (! load_pending_for_register (cpu, out_FR, 1, REGTYPE_FR))
  1072. {
  1073. int *fr = ps->fr_latency;
  1074. fr[out_FR] += cycles;
  1075. }
  1076. else
  1077. ps->fr_ptime[out_FR] += cycles;
  1078. }
  1079. }
  1080. void
  1081. update_FRdouble_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
  1082. {
  1083. if (out_FR >= 0)
  1084. {
  1085. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1086. /* If a load is pending on this register, then add the cycles to
  1087. the post processing time for this register. Otherwise apply it
  1088. directly to the latency of the register. */
  1089. if (! load_pending_for_register (cpu, out_FR, 2, REGTYPE_FR))
  1090. {
  1091. int *fr = ps->fr_latency;
  1092. fr[out_FR] += cycles;
  1093. if (out_FR < 63)
  1094. fr[out_FR + 1] += cycles;
  1095. }
  1096. else
  1097. {
  1098. ps->fr_ptime[out_FR] += cycles;
  1099. if (out_FR < 63)
  1100. ps->fr_ptime[out_FR + 1] += cycles;
  1101. }
  1102. }
  1103. }
  1104. /* Top up the post-processing time of the given ACC by the given number of
  1105. cycles. */
  1106. void
  1107. update_ACC_ptime (SIM_CPU *cpu, INT out_ACC, int cycles)
  1108. {
  1109. if (out_ACC >= 0)
  1110. {
  1111. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1112. /* No load can be pending on this register. Apply the cycles
  1113. directly to the latency of the register. */
  1114. int *acc = ps->acc_latency;
  1115. acc[out_ACC] += cycles;
  1116. }
  1117. }
  1118. /* Top up the post-processing time of the given SPR by the given number of
  1119. cycles. */
  1120. void
  1121. update_SPR_ptime (SIM_CPU *cpu, INT out_SPR, int cycles)
  1122. {
  1123. if (out_SPR >= 0)
  1124. {
  1125. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1126. /* No load can be pending on this register. Apply the cycles
  1127. directly to the latency of the register. */
  1128. int *spr = ps->spr_latency;
  1129. spr[out_SPR] += cycles;
  1130. }
  1131. }
  1132. void
  1133. decrease_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
  1134. {
  1135. if (out_ACC >= 0)
  1136. {
  1137. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1138. int *acc = ps->acc_busy;
  1139. acc[out_ACC] -= cycles;
  1140. if (ps->acc_busy_adjust[out_ACC] >= 0
  1141. && cycles > ps->acc_busy_adjust[out_ACC])
  1142. ps->acc_busy_adjust[out_ACC] = cycles;
  1143. }
  1144. }
  1145. void
  1146. increase_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
  1147. {
  1148. if (out_ACC >= 0)
  1149. {
  1150. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1151. int *acc = ps->acc_busy;
  1152. acc[out_ACC] += cycles;
  1153. }
  1154. }
  1155. void
  1156. enforce_full_acc_latency (SIM_CPU *cpu, INT in_ACC)
  1157. {
  1158. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1159. ps->acc_busy_adjust [in_ACC] = -1;
  1160. }
  1161. void
  1162. decrease_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
  1163. {
  1164. if (out_FR >= 0)
  1165. {
  1166. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1167. int *fr = ps->fr_busy;
  1168. fr[out_FR] -= cycles;
  1169. if (ps->fr_busy_adjust[out_FR] >= 0
  1170. && cycles > ps->fr_busy_adjust[out_FR])
  1171. ps->fr_busy_adjust[out_FR] = cycles;
  1172. }
  1173. }
  1174. void
  1175. increase_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
  1176. {
  1177. if (out_FR >= 0)
  1178. {
  1179. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1180. int *fr = ps->fr_busy;
  1181. fr[out_FR] += cycles;
  1182. }
  1183. }
  1184. /* Top up the latency of the given ACC by the given number of cycles. */
  1185. void
  1186. update_ACC_latency (SIM_CPU *cpu, INT out_ACC, int cycles)
  1187. {
  1188. if (out_ACC >= 0)
  1189. {
  1190. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1191. int *acc = ps->acc_latency;
  1192. if (acc[out_ACC] < cycles)
  1193. acc[out_ACC] = cycles;
  1194. }
  1195. }
  1196. /* Top up the latency of the given CCR by the given number of cycles. */
  1197. void
  1198. update_CCR_latency (SIM_CPU *cpu, INT out_CCR, int cycles)
  1199. {
  1200. if (out_CCR >= 0)
  1201. {
  1202. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1203. int *ccr = ps->ccr_latency;
  1204. if (ccr[out_CCR] < cycles)
  1205. ccr[out_CCR] = cycles;
  1206. }
  1207. }
  1208. /* Top up the latency of the given SPR by the given number of cycles. */
  1209. void
  1210. update_SPR_latency (SIM_CPU *cpu, INT out_SPR, int cycles)
  1211. {
  1212. if (out_SPR >= 0)
  1213. {
  1214. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1215. int *spr = ps->spr_latency;
  1216. if (spr[out_SPR] < cycles)
  1217. spr[out_SPR] = cycles;
  1218. }
  1219. }
  1220. /* Top up the latency of the given integer division resource by the given
  1221. number of cycles. */
  1222. void
  1223. update_idiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
  1224. {
  1225. /* operate directly on the busy cycles since each resource can only
  1226. be used once in a VLIW insn. */
  1227. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1228. int *r = ps->idiv_busy;
  1229. r[in_resource] = cycles;
  1230. }
  1231. /* Set the latency of the given resource to the given number of cycles. */
  1232. void
  1233. update_fdiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
  1234. {
  1235. /* operate directly on the busy cycles since each resource can only
  1236. be used once in a VLIW insn. */
  1237. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1238. int *r = ps->fdiv_busy;
  1239. r[in_resource] = cycles;
  1240. }
  1241. /* Set the latency of the given resource to the given number of cycles. */
  1242. void
  1243. update_fsqrt_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
  1244. {
  1245. /* operate directly on the busy cycles since each resource can only
  1246. be used once in a VLIW insn. */
  1247. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1248. int *r = ps->fsqrt_busy;
  1249. r[in_resource] = cycles;
  1250. }
  1251. /* Set the latency of the given resource to the given number of cycles. */
  1252. void
  1253. update_float_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
  1254. {
  1255. /* operate directly on the busy cycles since each resource can only
  1256. be used once in a VLIW insn. */
  1257. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1258. int *r = ps->float_busy;
  1259. r[in_resource] = cycles;
  1260. }
  1261. void
  1262. update_media_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
  1263. {
  1264. /* operate directly on the busy cycles since each resource can only
  1265. be used once in a VLIW insn. */
  1266. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1267. int *r = ps->media_busy;
  1268. r[in_resource] = cycles;
  1269. }
  1270. /* Set the branch penalty to the given number of cycles. */
  1271. void
  1272. update_branch_penalty (SIM_CPU *cpu, int cycles)
  1273. {
  1274. /* operate directly on the busy cycles since only one branch can occur
  1275. in a VLIW insn. */
  1276. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1277. ps->branch_penalty = cycles;
  1278. }
  1279. /* Check the availability of the given GR register and update the number
  1280. of cycles the current VLIW insn must wait until it is available. */
  1281. void
  1282. vliw_wait_for_GR (SIM_CPU *cpu, INT in_GR)
  1283. {
  1284. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1285. int *gr = ps->gr_busy;
  1286. /* If the latency of the register is greater than the current wait
  1287. then update the current wait. */
  1288. if (in_GR >= 0 && gr[in_GR] > ps->vliw_wait)
  1289. {
  1290. if (TRACE_INSN_P (cpu))
  1291. sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
  1292. ps->vliw_wait = gr[in_GR];
  1293. }
  1294. }
  1295. /* Check the availability of the given GR register and update the number
  1296. of cycles the current VLIW insn must wait until it is available. */
  1297. void
  1298. vliw_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
  1299. {
  1300. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1301. int *gr = ps->gr_busy;
  1302. /* If the latency of the register is greater than the current wait
  1303. then update the current wait. */
  1304. if (in_GR >= 0)
  1305. {
  1306. if (gr[in_GR] > ps->vliw_wait)
  1307. {
  1308. if (TRACE_INSN_P (cpu))
  1309. sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
  1310. ps->vliw_wait = gr[in_GR];
  1311. }
  1312. if (in_GR < 63 && gr[in_GR + 1] > ps->vliw_wait)
  1313. {
  1314. if (TRACE_INSN_P (cpu))
  1315. sprintf (hazard_name, "Data hazard for gr%d:", in_GR + 1);
  1316. ps->vliw_wait = gr[in_GR + 1];
  1317. }
  1318. }
  1319. }
  1320. /* Check the availability of the given FR register and update the number
  1321. of cycles the current VLIW insn must wait until it is available. */
  1322. void
  1323. vliw_wait_for_FR (SIM_CPU *cpu, INT in_FR)
  1324. {
  1325. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1326. int *fr = ps->fr_busy;
  1327. /* If the latency of the register is greater than the current wait
  1328. then update the current wait. */
  1329. if (in_FR >= 0 && fr[in_FR] > ps->vliw_wait)
  1330. {
  1331. if (TRACE_INSN_P (cpu))
  1332. sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
  1333. ps->vliw_wait = fr[in_FR];
  1334. }
  1335. }
  1336. /* Check the availability of the given GR register and update the number
  1337. of cycles the current VLIW insn must wait until it is available. */
  1338. void
  1339. vliw_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
  1340. {
  1341. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1342. int *fr = ps->fr_busy;
  1343. /* If the latency of the register is greater than the current wait
  1344. then update the current wait. */
  1345. if (in_FR >= 0)
  1346. {
  1347. if (fr[in_FR] > ps->vliw_wait)
  1348. {
  1349. if (TRACE_INSN_P (cpu))
  1350. sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
  1351. ps->vliw_wait = fr[in_FR];
  1352. }
  1353. if (in_FR < 63 && fr[in_FR + 1] > ps->vliw_wait)
  1354. {
  1355. if (TRACE_INSN_P (cpu))
  1356. sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
  1357. ps->vliw_wait = fr[in_FR + 1];
  1358. }
  1359. }
  1360. }
  1361. /* Check the availability of the given CCR register and update the number
  1362. of cycles the current VLIW insn must wait until it is available. */
  1363. void
  1364. vliw_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
  1365. {
  1366. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1367. int *ccr = ps->ccr_busy;
  1368. /* If the latency of the register is greater than the current wait
  1369. then update the current wait. */
  1370. if (in_CCR >= 0 && ccr[in_CCR] > ps->vliw_wait)
  1371. {
  1372. if (TRACE_INSN_P (cpu))
  1373. {
  1374. if (in_CCR > 3)
  1375. sprintf (hazard_name, "Data hazard for icc%d:", in_CCR-4);
  1376. else
  1377. sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
  1378. }
  1379. ps->vliw_wait = ccr[in_CCR];
  1380. }
  1381. }
  1382. /* Check the availability of the given ACC register and update the number
  1383. of cycles the current VLIW insn must wait until it is available. */
  1384. void
  1385. vliw_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
  1386. {
  1387. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1388. int *acc = ps->acc_busy;
  1389. /* If the latency of the register is greater than the current wait
  1390. then update the current wait. */
  1391. if (in_ACC >= 0 && acc[in_ACC] > ps->vliw_wait)
  1392. {
  1393. if (TRACE_INSN_P (cpu))
  1394. sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
  1395. ps->vliw_wait = acc[in_ACC];
  1396. }
  1397. }
  1398. /* Check the availability of the given SPR register and update the number
  1399. of cycles the current VLIW insn must wait until it is available. */
  1400. void
  1401. vliw_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
  1402. {
  1403. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1404. int *spr = ps->spr_busy;
  1405. /* If the latency of the register is greater than the current wait
  1406. then update the current wait. */
  1407. if (in_SPR >= 0 && spr[in_SPR] > ps->vliw_wait)
  1408. {
  1409. if (TRACE_INSN_P (cpu))
  1410. sprintf (hazard_name, "Data hazard for spr %d:", in_SPR);
  1411. ps->vliw_wait = spr[in_SPR];
  1412. }
  1413. }
  1414. /* Check the availability of the given integer division resource and update
  1415. the number of cycles the current VLIW insn must wait until it is available.
  1416. */
  1417. void
  1418. vliw_wait_for_idiv_resource (SIM_CPU *cpu, INT in_resource)
  1419. {
  1420. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1421. int *r = ps->idiv_busy;
  1422. /* If the latency of the resource is greater than the current wait
  1423. then update the current wait. */
  1424. if (r[in_resource] > ps->vliw_wait)
  1425. {
  1426. if (TRACE_INSN_P (cpu))
  1427. {
  1428. sprintf (hazard_name, "Resource hazard for integer division in slot I%d:", in_resource);
  1429. }
  1430. ps->vliw_wait = r[in_resource];
  1431. }
  1432. }
  1433. /* Check the availability of the given float division resource and update
  1434. the number of cycles the current VLIW insn must wait until it is available.
  1435. */
  1436. void
  1437. vliw_wait_for_fdiv_resource (SIM_CPU *cpu, INT in_resource)
  1438. {
  1439. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1440. int *r = ps->fdiv_busy;
  1441. /* If the latency of the resource is greater than the current wait
  1442. then update the current wait. */
  1443. if (r[in_resource] > ps->vliw_wait)
  1444. {
  1445. if (TRACE_INSN_P (cpu))
  1446. {
  1447. sprintf (hazard_name, "Resource hazard for floating point division in slot F%d:", in_resource);
  1448. }
  1449. ps->vliw_wait = r[in_resource];
  1450. }
  1451. }
  1452. /* Check the availability of the given float square root resource and update
  1453. the number of cycles the current VLIW insn must wait until it is available.
  1454. */
  1455. void
  1456. vliw_wait_for_fsqrt_resource (SIM_CPU *cpu, INT in_resource)
  1457. {
  1458. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1459. int *r = ps->fsqrt_busy;
  1460. /* If the latency of the resource is greater than the current wait
  1461. then update the current wait. */
  1462. if (r[in_resource] > ps->vliw_wait)
  1463. {
  1464. if (TRACE_INSN_P (cpu))
  1465. {
  1466. sprintf (hazard_name, "Resource hazard for square root in slot F%d:", in_resource);
  1467. }
  1468. ps->vliw_wait = r[in_resource];
  1469. }
  1470. }
  1471. /* Check the availability of the given float unit resource and update
  1472. the number of cycles the current VLIW insn must wait until it is available.
  1473. */
  1474. void
  1475. vliw_wait_for_float_resource (SIM_CPU *cpu, INT in_resource)
  1476. {
  1477. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1478. int *r = ps->float_busy;
  1479. /* If the latency of the resource is greater than the current wait
  1480. then update the current wait. */
  1481. if (r[in_resource] > ps->vliw_wait)
  1482. {
  1483. if (TRACE_INSN_P (cpu))
  1484. {
  1485. sprintf (hazard_name, "Resource hazard for floating point unit in slot F%d:", in_resource);
  1486. }
  1487. ps->vliw_wait = r[in_resource];
  1488. }
  1489. }
  1490. /* Check the availability of the given media unit resource and update
  1491. the number of cycles the current VLIW insn must wait until it is available.
  1492. */
  1493. void
  1494. vliw_wait_for_media_resource (SIM_CPU *cpu, INT in_resource)
  1495. {
  1496. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1497. int *r = ps->media_busy;
  1498. /* If the latency of the resource is greater than the current wait
  1499. then update the current wait. */
  1500. if (r[in_resource] > ps->vliw_wait)
  1501. {
  1502. if (TRACE_INSN_P (cpu))
  1503. {
  1504. sprintf (hazard_name, "Resource hazard for media unit in slot M%d:", in_resource);
  1505. }
  1506. ps->vliw_wait = r[in_resource];
  1507. }
  1508. }
  1509. /* Run the caches until all requests for the given register(s) are satisfied. */
  1510. void
  1511. load_wait_for_GR (SIM_CPU *cpu, INT in_GR)
  1512. {
  1513. if (in_GR >= 0)
  1514. {
  1515. int wait = 0;
  1516. while (load_pending_for_register (cpu, in_GR, 1/*words*/, REGTYPE_NONE))
  1517. {
  1518. frv_model_advance_cycles (cpu, 1);
  1519. ++wait;
  1520. }
  1521. if (wait)
  1522. {
  1523. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1524. ps->vliw_wait += wait;
  1525. ps->vliw_load_stall += wait;
  1526. if (TRACE_INSN_P (cpu))
  1527. sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
  1528. }
  1529. }
  1530. }
  1531. void
  1532. load_wait_for_FR (SIM_CPU *cpu, INT in_FR)
  1533. {
  1534. if (in_FR >= 0)
  1535. {
  1536. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1537. int *fr;
  1538. int wait = 0;
  1539. while (load_pending_for_register (cpu, in_FR, 1/*words*/, REGTYPE_FR))
  1540. {
  1541. frv_model_advance_cycles (cpu, 1);
  1542. ++wait;
  1543. }
  1544. /* Post processing time may have been added to the register's
  1545. latency after the loads were processed. Account for that too.
  1546. */
  1547. fr = ps->fr_busy;
  1548. if (fr[in_FR])
  1549. {
  1550. wait += fr[in_FR];
  1551. frv_model_advance_cycles (cpu, fr[in_FR]);
  1552. }
  1553. /* Update the vliw_wait with the number of cycles we waited for the
  1554. load and any post-processing. */
  1555. if (wait)
  1556. {
  1557. ps->vliw_wait += wait;
  1558. ps->vliw_load_stall += wait;
  1559. if (TRACE_INSN_P (cpu))
  1560. sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
  1561. }
  1562. }
  1563. }
  1564. void
  1565. load_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
  1566. {
  1567. if (in_GR >= 0)
  1568. {
  1569. int wait = 0;
  1570. while (load_pending_for_register (cpu, in_GR, 2/*words*/, REGTYPE_NONE))
  1571. {
  1572. frv_model_advance_cycles (cpu, 1);
  1573. ++wait;
  1574. }
  1575. if (wait)
  1576. {
  1577. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1578. ps->vliw_wait += wait;
  1579. ps->vliw_load_stall += wait;
  1580. if (TRACE_INSN_P (cpu))
  1581. sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
  1582. }
  1583. }
  1584. }
/* Run the caches until all load requests for the given FR pair are
   satisfied, then honor any remaining post-processing time on both
   halves of the pair.  */
void
load_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  if (in_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr;
      int wait = 0;
      /* Advance one cycle at a time until the pending load completes.  */
      while (load_pending_for_register (cpu, in_FR, 2/*words*/, REGTYPE_FR))
	{
	  frv_model_advance_cycles (cpu, 1);
	  ++wait;
	}
      /* Post processing time may have been added to the registers'
	 latencies after the loads were processed.  Account for that too.
	 */
      fr = ps->fr_busy;
      if (fr[in_FR])
	{
	  wait += fr[in_FR];
	  frv_model_advance_cycles (cpu, fr[in_FR]);
	}
      /* fr63 has no upper half; only check the partner when it exists.  */
      if (in_FR < 63)
	{
	  if (fr[in_FR + 1])
	    {
	      wait += fr[in_FR + 1];
	      frv_model_advance_cycles (cpu, fr[in_FR + 1]);
	    }
	}
      /* Update the vliw_wait with the number of cycles we waited for the
	 load and any post-processing.  */
      if (wait)
	{
	  ps->vliw_wait += wait;
	  ps->vliw_load_stall += wait;
	  if (TRACE_INSN_P (cpu))
	    sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
	}
    }
}
  1626. void
  1627. enforce_full_fr_latency (SIM_CPU *cpu, INT in_FR)
  1628. {
  1629. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1630. ps->fr_busy_adjust [in_FR] = -1;
  1631. }
  1632. /* Calculate how long the post processing for a floating point insn must
  1633. wait for resources to become available. */
  1634. int
  1635. post_wait_for_FR (SIM_CPU *cpu, INT in_FR)
  1636. {
  1637. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1638. int *fr = ps->fr_busy;
  1639. if (in_FR >= 0 && fr[in_FR] > ps->post_wait)
  1640. {
  1641. ps->post_wait = fr[in_FR];
  1642. if (TRACE_INSN_P (cpu))
  1643. sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
  1644. }
  1645. }
  1646. /* Calculate how long the post processing for a floating point insn must
  1647. wait for resources to become available. */
  1648. int
  1649. post_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
  1650. {
  1651. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1652. int *fr = ps->fr_busy;
  1653. if (in_FR >= 0)
  1654. {
  1655. if (fr[in_FR] > ps->post_wait)
  1656. {
  1657. ps->post_wait = fr[in_FR];
  1658. if (TRACE_INSN_P (cpu))
  1659. sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
  1660. }
  1661. if (in_FR < 63 && fr[in_FR + 1] > ps->post_wait)
  1662. {
  1663. ps->post_wait = fr[in_FR + 1];
  1664. if (TRACE_INSN_P (cpu))
  1665. sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
  1666. }
  1667. }
  1668. }
  1669. int
  1670. post_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
  1671. {
  1672. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1673. int *acc = ps->acc_busy;
  1674. if (in_ACC >= 0 && acc[in_ACC] > ps->post_wait)
  1675. {
  1676. ps->post_wait = acc[in_ACC];
  1677. if (TRACE_INSN_P (cpu))
  1678. sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
  1679. }
  1680. }
  1681. int
  1682. post_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
  1683. {
  1684. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1685. int *ccr = ps->ccr_busy;
  1686. if (in_CCR >= 0 && ccr[in_CCR] > ps->post_wait)
  1687. {
  1688. ps->post_wait = ccr[in_CCR];
  1689. if (TRACE_INSN_P (cpu))
  1690. {
  1691. if (in_CCR > 3)
  1692. sprintf (hazard_name, "Data hazard for icc%d:", in_CCR - 4);
  1693. else
  1694. sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
  1695. }
  1696. }
  1697. }
  1698. int
  1699. post_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
  1700. {
  1701. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1702. int *spr = ps->spr_busy;
  1703. if (in_SPR >= 0 && spr[in_SPR] > ps->post_wait)
  1704. {
  1705. ps->post_wait = spr[in_SPR];
  1706. if (TRACE_INSN_P (cpu))
  1707. sprintf (hazard_name, "Data hazard for spr[%d]:", in_SPR);
  1708. }
  1709. }
  1710. int
  1711. post_wait_for_fdiv (SIM_CPU *cpu, INT slot)
  1712. {
  1713. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1714. int *fdiv = ps->fdiv_busy;
  1715. /* Multiple floating point divisions in the same slot need only wait 1
  1716. extra cycle. */
  1717. if (fdiv[slot] > 0 && 1 > ps->post_wait)
  1718. {
  1719. ps->post_wait = 1;
  1720. if (TRACE_INSN_P (cpu))
  1721. {
  1722. sprintf (hazard_name, "Resource hazard for floating point division in slot F%d:", slot);
  1723. }
  1724. }
  1725. }
  1726. int
  1727. post_wait_for_fsqrt (SIM_CPU *cpu, INT slot)
  1728. {
  1729. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1730. int *fsqrt = ps->fsqrt_busy;
  1731. /* Multiple floating point square roots in the same slot need only wait 1
  1732. extra cycle. */
  1733. if (fsqrt[slot] > 0 && 1 > ps->post_wait)
  1734. {
  1735. ps->post_wait = 1;
  1736. if (TRACE_INSN_P (cpu))
  1737. {
  1738. sprintf (hazard_name, "Resource hazard for square root in slot F%d:", slot);
  1739. }
  1740. }
  1741. }
  1742. int
  1743. post_wait_for_float (SIM_CPU *cpu, INT slot)
  1744. {
  1745. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1746. int *flt = ps->float_busy;
  1747. /* Multiple floating point square roots in the same slot need only wait 1
  1748. extra cycle. */
  1749. if (flt[slot] > ps->post_wait)
  1750. {
  1751. ps->post_wait = flt[slot];
  1752. if (TRACE_INSN_P (cpu))
  1753. {
  1754. sprintf (hazard_name, "Resource hazard for floating point unit in slot F%d:", slot);
  1755. }
  1756. }
  1757. }
  1758. int
  1759. post_wait_for_media (SIM_CPU *cpu, INT slot)
  1760. {
  1761. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1762. int *media = ps->media_busy;
  1763. /* Multiple floating point square roots in the same slot need only wait 1
  1764. extra cycle. */
  1765. if (media[slot] > ps->post_wait)
  1766. {
  1767. ps->post_wait = media[slot];
  1768. if (TRACE_INSN_P (cpu))
  1769. {
  1770. sprintf (hazard_name, "Resource hazard for media unit in slot M%d:", slot);
  1771. }
  1772. }
  1773. }
  1774. /* Print cpu-specific profile information. */
  1775. #define COMMAS(n) sim_add_commas (comma_buf, sizeof (comma_buf), (n))
  1776. static void
  1777. print_cache (SIM_CPU *cpu, FRV_CACHE *cache, const char *cache_name)
  1778. {
  1779. SIM_DESC sd = CPU_STATE (cpu);
  1780. if (cache != NULL)
  1781. {
  1782. char comma_buf[20];
  1783. unsigned accesses;
  1784. sim_io_printf (sd, " %s Cache\n\n", cache_name);
  1785. accesses = cache->statistics.accesses;
  1786. sim_io_printf (sd, " Total accesses: %s\n", COMMAS (accesses));
  1787. if (accesses != 0)
  1788. {
  1789. float rate;
  1790. unsigned hits = cache->statistics.hits;
  1791. sim_io_printf (sd, " Hits: %s\n", COMMAS (hits));
  1792. rate = (float)hits / accesses;
  1793. sim_io_printf (sd, " Hit rate: %.2f%%\n", rate * 100);
  1794. }
  1795. }
  1796. else
  1797. sim_io_printf (sd, " Model %s has no %s cache\n",
  1798. MODEL_NAME (CPU_MODEL (cpu)), cache_name);
  1799. sim_io_printf (sd, "\n");
  1800. }
  1801. /* This table must correspond to the UNIT_ATTR table in
  1802. opcodes/frv-desc.h. Only the units up to UNIT_C need be
  1803. listed since the others cannot occur after mapping. */
  1804. static char *
  1805. slot_names[] =
  1806. {
  1807. "none",
  1808. "I0", "I1", "I01", "I2", "I3", "IALL",
  1809. "FM0", "FM1", "FM01", "FM2", "FM3", "FMALL", "FMLOW",
  1810. "B0", "B1", "B01",
  1811. "C"
  1812. };
  1813. static void
  1814. print_parallel (SIM_CPU *cpu, int verbose)
  1815. {
  1816. SIM_DESC sd = CPU_STATE (cpu);
  1817. PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  1818. FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  1819. unsigned total, vliw;
  1820. char comma_buf[20];
  1821. float average;
  1822. sim_io_printf (sd, "Model %s Parallelization\n\n",
  1823. MODEL_NAME (CPU_MODEL (cpu)));
  1824. total = PROFILE_TOTAL_INSN_COUNT (p);
  1825. sim_io_printf (sd, " Total instructions: %s\n", COMMAS (total));
  1826. vliw = ps->vliw_insns;
  1827. sim_io_printf (sd, " VLIW instructions: %s\n", COMMAS (vliw));
  1828. average = (float)total / vliw;
  1829. sim_io_printf (sd, " Average VLIW length: %.2f\n", average);
  1830. average = (float)PROFILE_MODEL_TOTAL_CYCLES (p) / vliw;
  1831. sim_io_printf (sd, " Cycles per VLIW instruction: %.2f\n", average);
  1832. average = (float)total / PROFILE_MODEL_TOTAL_CYCLES (p);
  1833. sim_io_printf (sd, " Instructions per cycle: %.2f\n", average);
  1834. if (verbose)
  1835. {
  1836. int i;
  1837. int max_val = 0;
  1838. int max_name_len = 0;
  1839. for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
  1840. {
  1841. if (INSNS_IN_SLOT (i))
  1842. {
  1843. int len;
  1844. if (INSNS_IN_SLOT (i) > max_val)
  1845. max_val = INSNS_IN_SLOT (i);
  1846. len = strlen (slot_names[i]);
  1847. if (len > max_name_len)
  1848. max_name_len = len;
  1849. }
  1850. }
  1851. if (max_val > 0)
  1852. {
  1853. sim_io_printf (sd, "\n");
  1854. sim_io_printf (sd, " Instructions per slot:\n");
  1855. sim_io_printf (sd, "\n");
  1856. for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
  1857. {
  1858. if (INSNS_IN_SLOT (i) != 0)
  1859. {
  1860. sim_io_printf (sd, " %*s: %*s: ",
  1861. max_name_len, slot_names[i],
  1862. max_val < 10000 ? 5 : 10,
  1863. COMMAS (INSNS_IN_SLOT (i)));
  1864. sim_profile_print_bar (sd, cpu, PROFILE_HISTOGRAM_WIDTH,
  1865. INSNS_IN_SLOT (i),
  1866. max_val);
  1867. sim_io_printf (sd, "\n");
  1868. }
  1869. }
  1870. } /* details to print */
  1871. } /* verbose */
  1872. sim_io_printf (sd, "\n");
  1873. }
/* Print cpu-specific profile information.  VERBOSE selects the more
   detailed per-slot report in the parallelization section.  Each section
   is compiled in only when its WITH_PROFILE_* option is enabled, and
   printed only when the corresponding run-time profile flag is set.  */
void
frv_profile_info (SIM_CPU *cpu, int verbose)
{
  /* FIXME: Need to add smp support. */
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);

#if WITH_PROFILE_PARALLEL_P
  /* VLIW packing statistics.  */
  if (PROFILE_FLAGS (p) [PROFILE_PARALLEL_IDX])
    print_parallel (cpu, verbose);
#endif

#if WITH_PROFILE_CACHE_P
  /* Instruction and data cache statistics.  */
  if (PROFILE_FLAGS (p) [PROFILE_CACHE_IDX])
    {
      SIM_DESC sd = CPU_STATE (cpu);
      sim_io_printf (sd, "Model %s Cache Statistics\n\n",
		     MODEL_NAME (CPU_MODEL (cpu)));
      print_cache (cpu, CPU_INSN_CACHE (cpu), "Instruction");
      print_cache (cpu, CPU_DATA_CACHE (cpu), "Data");
    }
#endif /* WITH_PROFILE_CACHE_P */
}
  1894. /* A hack to get registers referenced for profiling. */
  1895. SI frv_ref_SI (SI ref) {return ref;}
  1896. #endif /* WITH_PROFILE_MODEL_P */