generation_gc.c

  1. /*
  2. * Part of Scheme 48 1.9. See file COPYING for notices and license.
  3. *
  4. * Authors: David Frese, Marcus Crestani, Artem Mironov, Robert Ransom,
  5. * Mike Sperber, Martin Gasbichler
  6. */
  7. #include "generation_gc.h"
  8. #include <stdlib.h>
  9. #include <string.h> /* memcpy */
  10. #include "scheme48heap.h"
  11. #include "areas.h"
  12. #include "data.h"
  13. #include "memory.h"
  14. #include "page_alloc.h"
  15. #include "utils.h"
  16. #include "memory_map.h"
  17. #include "page_constants.h"
  18. #include "area_roots.h"
  19. #include <assert.h>
  20. #include "gc_config.h"
  21. #include "measure.h"
  22. #include "remset.h"
  23. #include <event.h> // s48_run_time
  24. #define FOR_ALL_AREAS(areas, command) \
  25. do { \
  26. Area* area = areas; \
  27. while (area != NULL) { \
  28. Area* next = area->next; \
  29. command; \
  30. area = next; \
  31. } \
  32. } while(0)
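/* Editor's note: an illustrative use of the FOR_ALL_AREAS macro above.
   It walks a NULL-terminated list of Area structs, binding `area` to
   each element; `next` is saved before `command` runs, so the command
   may unlink or even free the current area. The helper below is a
   hypothetical sketch, not part of the original file; it assumes
   AREA_REMAINING(area) yields the unused bytes of an area. */
#if 0
static unsigned long example_total_free_bytes(Area* areas) {
  unsigned long total = 0;
  FOR_ALL_AREAS(areas, total += AREA_REMAINING(area));
  return total;
}
#endif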
  33. typedef struct Generation {
  34. int index;
  35. Space* current_space;
  36. Space* other_space;
  37. /* size of the generation after its last collection */
  38. unsigned long last_size;
  39. /* Number of collections started from this generation */
  40. unsigned long self_count;
  41. /* Number of all collections this generation has been through (age_count) */
  42. unsigned long age_count;
  43. } Generation;
  44. static Space* make_space(int generation_index) {
  45. Space* space = (Space*)calloc(1, sizeof(Space));
  46. if (space == NULL) {
  47. s48_gc_error("make_space: out of memory");
  48. }
  49. space->generation_index = generation_index;
  50. return space;
  51. }
  52. typedef struct {
  53. /* a list of areas */
  54. Area* large;
  55. /* always points to one of the areas below, or in some cases
  56. to one of the small areas in the first generation */
  57. Area* small;
  58. Area* small_below;
  59. Area* small_above;
  60. /* only one area */
  61. Area* weaks;
  62. } CreationSpace;
  63. static CreationSpace creation_space;
  64. static unsigned long current_water_mark; /* pages */
  65. /* from young to old */
  66. static Generation generations[S48_GENERATIONS_COUNT];
  67. static char heap_is_initialized = 0;
  68. static char gc_forbid_count = 0;
  69. static unsigned long gc_count = 0;
  70. static long gc_seconds = 0;
  71. static long gc_mseconds = 0;
  72. static void recreate_creation_space() {
  73. unsigned long s_below;
  74. unsigned long s_above;
  75. /* free current areas */
  76. if (creation_space.small_below != NULL)
  77. s48_free_area(creation_space.small_below);
  78. if (creation_space.small_above != NULL)
  79. s48_free_area(creation_space.small_above);
  80. if (creation_space.large != NULL)
  81. s48_free_areas(creation_space.large);
  82. creation_space.large = NULL;
  83. if (creation_space.weaks != NULL)
  84. s48_free_area(creation_space.weaks);
  85. /* create some new ones */
  86. s_below = current_water_mark;
  87. if (s_below != 0)
  88. creation_space.small_below = s48_allocate_area(s_below, s_below, 0, AREA_TYPE_SIZE_SMALL);
  89. else
  90. creation_space.small_below = NULL;
  91. s_above = S48_CREATION_SPACE_SIZE - current_water_mark;
  92. if (s_above != 0)
  93. creation_space.small_above = s48_allocate_area(s_above, s_above, 0, AREA_TYPE_SIZE_SMALL);
  94. else
  95. creation_space.small_above = NULL;
  96. if (creation_space.small_below == NULL) {
  97. creation_space.small = creation_space.small_above;
  98. }
  99. else {
  100. creation_space.small = creation_space.small_below;
  101. }
  102. creation_space.weaks = s48_allocate_area(S48_MINIMUM_WEAK_AREA_SIZE,
  103. S48_MAXIMUM_WEAK_AREA_SIZE,
  104. 0,
  105. AREA_TYPE_SIZE_WEAKS);
  106. }
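/* Editor's note: a worked example of the split done by
   recreate_creation_space, with an assumed water mark (illustration
   only). With S48_CREATION_SPACE_SIZE = 128 pages and
   current_water_mark = 32 pages:
     s_below = 32            -> small_below is a 32-page area
     s_above = 128 - 32 = 96 -> small_above is a 96-page area
   With a water mark of 0, small_below stays NULL and allocation starts
   directly in small_above; the weaks area is recreated in either case. */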
  107. /* FPage 1 */
  108. void s48_initialize_bibop_heap() {
  109. int i;
  110. if (heap_is_initialized == 1) return;
  111. s48_initialize_page_allocation();
  112. current_water_mark = S48_DEFAULT_WATER_MARK;
  113. creation_space.small_below = NULL;
  114. creation_space.small_above = NULL;
  115. creation_space.large = NULL;
  116. creation_space.weaks = NULL;
  117. recreate_creation_space();
  118. for (i = 0; i < S48_GENERATIONS_COUNT; i++) {
  119. generations[i].index = i;
  120. generations[i].current_space = make_space(i);
  121. generations[i].other_space = make_space(i);
  122. generations[i].last_size = 0;
  123. generations[i].self_count = 0;
  124. generations[i].age_count = 0;
  125. }
  126. heap_is_initialized++;
  127. }
  128. /* this adds AREA to the "known" heap. Used by find_all for example. */
  129. void s48_integrate_area(Area* area) {
  130. Space* space = generations[0].current_space;
  131. Area** a;
  132. /* put it behind the first area of the large or small list of the
  133. first generation */
  134. if ((area->frontier - area->start) < S48_SMALL_OBJECT_LIMIT) {
  135. if (space->small_area == NULL)
  136. a = &space->small_area;
  137. else
  138. a = &space->small_area->next;
  139. } else {
  140. if (space->large_area == NULL)
  141. a = &space->large_area;
  142. else
  143. a = &space->large_area->next;
  144. }
  145. area->generation_index = 0;
  146. area->next = *a;
  147. *a = area;
  148. }
  149. #if (S48_ADJUST_WATER_MARK)
  150. static unsigned long aging_space_survival;
  151. static float last_aging_space_survival = 0; /* initial value does not
  152. matter */
  153. static void adjust_water_mark(float aging_space_survival) {
  154. /* the easiest solution would be to set the water-mark according to
  155. this survival percentage directly, but let's try to get some
  156. convergence and to dampen extreme values */
  157. last_aging_space_survival = ((aging_space_survival
  158. + (gc_count * last_aging_space_survival)) /
  159. (gc_count + 1));
  160. /* maybe take int_max(gc_count, 1000) or something... */
  161. current_water_mark =
  162. BYTES_TO_PAGES((unsigned long)
  163. (PAGES_TO_BYTES_I_KNOW_THIS_CAN_OVERFLOW(S48_CREATION_SPACE_SIZE)
  164. * last_aging_space_survival));
  165. /* if the water-mark were at the top, then nothing would be
  166. copied into the aging_space, and we could not adjust the
  167. water-mark in the future. */
  168. if (current_water_mark == S48_CREATION_SPACE_SIZE)
  169. current_water_mark--;
  170. }
  171. #endif
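/* Editor's note: a worked example of adjust_water_mark with made-up
   numbers. Suppose gc_count = 3, last_aging_space_survival = 0.25 and
   the current collection's survival ratio is 0.45. Then
     last_aging_space_survival = (0.45 + 3 * 0.25) / (3 + 1) = 0.30
   so the new water mark is set to roughly 30% of the creation space
   (in pages); the running average damps the jump from 0.25 to 0.45. */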
  172. /********************************************************************
  173. Starting a Collection
  174. ********************************************************************/
  175. #define BROKEN_HEART_P S48_STOB_P
  176. static unsigned long calc_generation_area_size(Generation* g);
  177. static unsigned long calc_creation_space_size(CreationSpace* c);
  178. static long heap_size_count;
  179. static void heap_size_step(s48_address start, s48_address end) {
  180. heap_size_count += end - start;
  181. }
  182. long s48_heap_size() {
  183. int i;
  184. unsigned long size = 0;
  185. for (i=0; i < S48_GENERATIONS_COUNT; i++){
  186. size += calc_generation_area_size(&generations[i]);
  187. }
  188. size += calc_creation_space_size(&creation_space);
  189. return size;
  190. }
  191. static unsigned long calc_creation_space_size(CreationSpace* c) {
  192. unsigned long size = 0;
  193. FOR_ALL_AREAS(c->large,
  194. size += (area->end - area->start));
  195. FOR_ALL_AREAS(c->small_below,
  196. size += (area->end - area->start));
  197. FOR_ALL_AREAS(c->small_above,
  198. size += (area->end - area->start));
  199. FOR_ALL_AREAS(c->weaks,
  200. size += (area->end - area->start));
  201. return size;
  202. }
  203. /* calc the WHOLE allocated heap (Semispace) */
  204. static unsigned long calc_generation_area_size(Generation* g) {
  205. unsigned long size = 0;
  206. FOR_ALL_AREAS(g->current_space->small_area,
  207. size += (area->end - area->start));
  208. FOR_ALL_AREAS(g->current_space->large_area,
  209. size += (area->end - area->start));
  210. FOR_ALL_AREAS(g->current_space->weaks_area,
  211. size += (area->end - area->start));
  212. return size;
  213. }
  214. long s48_heap_live_size() {
  215. heap_size_count = 0;
  216. s48_walk_heap(&heap_size_step);
  217. return heap_size_count;
  218. }
  219. /* A thing is extant if it is not a stored object, if it has a forwarding
  220. pointer, or if it is in an area that is not currently being collected.
  221. */
  222. char s48_extantP(s48_value thing) {
  223. if ((!S48_STOB_P(thing)) || BROKEN_HEART_P(S48_STOB_HEADER(thing))) {
  224. return (0 == 0);
  225. } else {
  226. Area* area = s48_memory_map_ref(S48_ADDRESS_AT_HEADER(thing));
  227. return ( (area == NULL) || (GC_ACTION_IGNORE == area->action) );
  228. }
  229. }
  230. static void clean_weak_pointers(Area* areas) {
  231. while (areas != NULL) {
  232. s48_address end = areas->frontier;
  233. s48_address addr = S48_ADDRESS_INC(areas->start);
  234. for (; addr < end; addr += S48_CELLS_TO_A_UNITS(2)) {
  235. s48_value stob = *((s48_value*)addr);
  236. if (!s48_extantP(stob))
  237. *((s48_value*)addr) = S48_FALSE;
  238. else {
  239. /* maybe the object location has changed */
  240. if (S48_STOB_P(stob) && BROKEN_HEART_P(S48_STOB_HEADER(stob)))
  241. *((s48_value*)addr) = S48_STOB_HEADER(stob);
  242. }
  243. }
  244. areas = areas->next;
  245. }
  246. }
  247. static void set_targets(Space* space, Space* target) {
  248. FOR_ALL_AREAS(space->small_area, area->target_space = target);
  249. FOR_ALL_AREAS(space->large_area, area->target_space = target);
  250. FOR_ALL_AREAS(space->weaks_area, area->target_space = target);
  251. }
  252. /* forward declaration */
  253. static unsigned long calc_areas_number(Area* next_area);
  254. #define FOR_YOUNG_AREAS(areas, command) \
  255. do { \
  256. Area* area = areas; \
  257. unsigned long count; \
  258. unsigned long areas_number; \
  259. unsigned long old_areas; \
  260. count = 0; \
  261. areas_number = calc_areas_number(areas); \
  262. old_areas = areas_number / S48_PART_OF_OLD_AREAS ; \
  263. while (area != NULL) { \
  264. Area* next = area->next; \
  265. if (count > old_areas) command; \
  266. count++; \
  267. area = next; \
  268. } \
  269. } while(0)
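/* Editor's note: a worked example of FOR_YOUNG_AREAS with made-up
   numbers. With areas_number = 12 and S48_PART_OF_OLD_AREAS = 4,
   old_areas = 12 / 4 = 3, so `command` runs only for list entries with
   count > 3, i.e. for 8 of the 12 areas; the first four areas in the
   list keep the promotion target previously set by set_targets. */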
  270. /* The youngest areas will be recycled in the same generation */
  271. static void reset_young_targets(Space* space, Space* target) {
  272. FOR_YOUNG_AREAS(space->small_area, area->target_space = target);
  273. /* Large objects should always be promoted as they are */
  274. /* FOR_YOUNG_AREAS(space->large_area, area->target_space = target); */
  275. FOR_YOUNG_AREAS(space->weaks_area, area->target_space = target);
  276. }
  277. static void set_gc_actions(Space* space, gc_action_t small,
  278. gc_action_t large, gc_action_t weaks) {
  279. FOR_ALL_AREAS(space->small_area, area->action = small);
  280. FOR_ALL_AREAS(space->large_area, area->action = large);
  281. FOR_ALL_AREAS(space->weaks_area, area->action = weaks);
  282. }
  284. /* How many Area(s) are in the linked list of areas */
  284. static unsigned long calc_areas_number(Area* next_area){
  285. unsigned long the_areas_number=0;
  286. while (next_area){
  287. the_areas_number++;
  288. next_area = next_area->next;
  289. }
  290. #if (BIBOP_LOG)
  291. s48_bibop_log("Areas Number: %i", the_areas_number);
  292. #endif
  293. return the_areas_number;
  294. }
  295. /* calc only the used heap (Semispace) */
  296. static unsigned long calc_generation_size(Generation* g) {
  297. unsigned long size = 0;
  298. FOR_ALL_AREAS(g->current_space->small_area,
  299. size += (area->frontier - area->start));
  300. FOR_ALL_AREAS(g->current_space->large_area,
  301. size += (area->frontier - area->start));
  302. FOR_ALL_AREAS(g->current_space->weaks_area,
  303. size += (area->frontier - area->start));
  304. return size;
  305. }
  306. static unsigned long calc_generation_other_space_size(Generation* g) {
  307. unsigned long size = 0;
  308. FOR_ALL_AREAS(g->other_space->small_area,
  309. size += (area->frontier - area->start));
  310. FOR_ALL_AREAS(g->other_space->large_area,
  311. size += (area->frontier - area->start));
  312. FOR_ALL_AREAS(g->other_space->weaks_area,
  313. size += (area->frontier - area->start));
  314. return size;
  315. }
  316. /* FPage 6 - 7 - 8 */
  317. inline static void init_areas(int count) {
  318. int i, current_target,
  319. creation_space_target_small_below_generation_index,
  320. creation_space_target_generation_index;
  321. unsigned long current_size;
  322. /* Generation indices for the creation_space */
  323. #if (S48_GENERATIONS_COUNT > 1)
  324. creation_space_target_small_below_generation_index = 1;
  325. #else
  326. creation_space_target_small_below_generation_index = 0;
  327. #endif
  328. creation_space_target_generation_index = 0;
  329. /* REMARK: At the very first collection the image has just been loaded;
  330. it contains compiled source code that rarely changes, and the
  331. main program has not executed yet. We can keep the
  332. surviving objects of the first collection (of the creation_space,
  333. of the first generation, or both) in a protected generation
  334. (preferably the oldest one) which is never collected again
  335. (or, alternatively, only after a large number of collections).
  336. For this purpose we need at least 3 generations! The option to
  337. activate this is S48_USE_STATIC_SPACE */
  338. #if (S48_USE_STATIC_SPACE)
  339. if (s48_gc_count() == 0) {
  340. creation_space_target_small_below_generation_index = S48_GENERATIONS_COUNT - 1;
  341. creation_space_target_generation_index = creation_space_target_small_below_generation_index;
  342. }
  343. #endif
  344. /* FPage 6 */
  345. /* initialize the creation_space */
  346. /* the objects of the small_below area that will survive the
  347. collection will be moved into an older generation */
  348. if (creation_space.small_below != NULL) {
  349. assert(creation_space.small_below->next == NULL);
  350. creation_space.small_below->target_space =
  351. generations[creation_space_target_small_below_generation_index].current_space;
  352. creation_space.small_below->action = GC_ACTION_COPY_SMALL;
  353. }
  354. /* the objects of the small_above area, large area and weaks area
  355. that survive the collection will be moved (or marked) into
  356. the youngest (first) generation, to be recollected soon */
  357. if (creation_space.small_above != NULL) {
  358. assert(creation_space.small_above->next == NULL);
  359. creation_space.small_above->target_space =
  360. generations[creation_space_target_generation_index].current_space;
  361. creation_space.small_above->action = GC_ACTION_COPY_SMALL;
  362. }
  363. assert(creation_space.weaks->next == NULL);
  364. creation_space.weaks->target_space = generations[creation_space_target_generation_index].current_space;
  365. creation_space.weaks->action = GC_ACTION_COPY_WEAK;
  366. FOR_ALL_AREAS(creation_space.large,
  367. area->target_space = generations[creation_space_target_generation_index].current_space;
  368. area->action = GC_ACTION_MARK_LARGE );
  369. /* FPage 7 */
  370. /* Promotion policy - Which generation should the live objects be
  371. copied to? */
  372. #if (BIBOP_LOG)
  373. /* all current sizes */
  374. #if (S48_PROMOTION_THRESHOLD)
  375. s48_bibop_log("S48_PROMOTION_THRESHOLD: %i", S48_PROMOTION_THRESHOLD);
  376. #endif
  377. #if (S48_PROMOTION_HEAP_LIMIT)
  378. s48_bibop_log("S48_PROMOTION_HEAP_LIMIT: %i", S48_PROMOTION_HEAP_LIMIT);
  379. #endif
  380. #if (S48_PROMOTION_AGE_LIMIT)
  381. s48_bibop_log("S48_PROMOTION_AGE_LIMIT: %i", S48_PROMOTION_AGE_LIMIT);
  382. #endif
  383. for (i = S48_GENERATIONS_COUNT - 1; i > -1; i--) {
  384. /* Look out! Spaces are already swapped! */
  385. current_size = calc_generation_other_space_size(&generations[i]);
  386. #if (S48_PROMOTION_THRESHOLD)
  387. s48_bibop_log("Generation %i : %i ", i, (current_size - generations[i].last_size));
  388. #endif
  389. #if (S48_PROMOTION_HEAP_LIMIT)
  390. s48_bibop_log("Generation %i : %i ", i, current_size);
  391. #endif
  392. #if (S48_PROMOTION_AGE_LIMIT)
  393. s48_bibop_log("Generation %i : Age Count %i ", i, generations[i].age_count);
  394. s48_bibop_log("Generation %i : Self Count %i ", i, generations[i].self_count);
  395. #endif
  396. }
  397. #endif /* #if (BIBOP_LOG) */
  398. /* initialize the areas that will be collected. */
  399. for (i = 0; i < count; i++) {
  400. /* trace everything */
  401. FOR_ALL_AREAS(generations[i].current_space->small_area,
  402. area->trace = area->start);
  403. FOR_ALL_AREAS(generations[i].current_space->large_area,
  404. area->trace = area->start);
  405. /* targets of the other_spaces are the current_space of the chosen
  406. generation according to the promotion option */
  407. /* Look out! Spaces are already swapped! */
  408. current_size = calc_generation_other_space_size(&generations[i]);
  409. #if (S48_PROMOTION_THRESHOLD)
  410. current_target = ( (current_size - generations[i].last_size)
  411. > S48_PROMOTION_THRESHOLD)
  412. ? i + 1
  413. : i;
  414. #elif (S48_PROMOTION_HEAP_LIMIT)
  415. /* Look out! Spaces are already swapped! */
  416. current_target = (current_size > S48_PROMOTION_HEAP_LIMIT)
  417. ? i + 1
  418. : i;
  419. #elif (S48_PROMOTION_AGE_LIMIT)
  420. current_target = (generations[i].self_count > 0 &&
  421. generations[i].self_count % S48_PROMOTION_AGE_LIMIT == 0)
  422. ? i + 1
  423. : i;
  424. #else
  425. #error "BIBOP GC configuration error: no promotion policy defined"
  426. #endif
  427. #if (S48_USE_STATIC_SPACE)
  428. current_target = (s48_gc_count() == 0) ? creation_space_target_small_below_generation_index : current_target ;
  429. #endif
  430. /* Clamp the target index for the last generation */
  431. #if (S48_USE_STATIC_SPACE)
  432. current_target = (current_target >= S48_GENERATIONS_COUNT - 1) ? S48_GENERATIONS_COUNT - 2 : current_target ;
  433. #else
  434. current_target = (current_target >= S48_GENERATIONS_COUNT) ? S48_GENERATIONS_COUNT - 1 : current_target ;
  435. #endif
  436. /* promotion targets */
  437. set_targets(generations[i].other_space, generations[current_target].current_space);
  438. /* Wilson's opportunistic object promotion targets */
  439. if ( i != current_target ) {
  440. reset_young_targets(generations[i].other_space,
  441. generations[i].current_space);
  442. }
  443. #if (BIBOP_LOG)
  444. s48_bibop_log("generations[%i].other_space -> generations[%i].current_space",
  445. i, current_target);
  446. #endif
  447. /* actions: the ones that will be evacuated now */
  448. set_gc_actions(generations[i].other_space, GC_ACTION_COPY_SMALL,
  449. GC_ACTION_MARK_LARGE, GC_ACTION_COPY_WEAK);
  450. /* ignore the ones that will be filled now */
  451. set_gc_actions(generations[i].current_space, GC_ACTION_IGNORE,
  452. GC_ACTION_IGNORE, GC_ACTION_IGNORE);
  453. }
  454. /* FPage 8 */
  455. /* initialize the areas that are not collected this time */
  456. for (i = count; i < S48_GENERATIONS_COUNT; i++) {
  457. /* trace only what will be added to these */
  458. /* maybe only the first "old" one will have to be traced ?? */
  459. FOR_ALL_AREAS(generations[i].current_space->small_area,
  460. area->trace = area->frontier);
  461. FOR_ALL_AREAS(generations[i].current_space->large_area,
  462. area->trace = area->frontier);
  463. /* the other spaces should be empty anyway */
  464. set_gc_actions(generations[i].other_space, GC_ACTION_ERROR,
  465. GC_ACTION_ERROR, GC_ACTION_ERROR);
  466. set_gc_actions(generations[i].current_space, GC_ACTION_IGNORE,
  467. GC_ACTION_IGNORE, GC_ACTION_IGNORE);
  468. }
  469. }
  470. inline static void clear_space(Space* space) {
  471. FOR_ALL_AREAS(space->small_area, s48_free_area(area)); space->small_area = NULL;
  472. FOR_ALL_AREAS(space->large_area, s48_free_area(area)); space->large_area = NULL;
  473. FOR_ALL_AREAS(space->weaks_area, s48_free_area(area)); space->weaks_area = NULL;
  474. }
  475. static char trace_areas(Area* areas) {
  476. char hit = 0;
  477. while (areas != NULL) {
  478. while (1) {
  479. s48_address start = areas->trace;
  480. s48_address end = areas->frontier;
  481. if (start != end) {
  482. s48_internal_trace_locationsB(areas, TRUE, start, end, "trace_areas");
  483. areas->trace = end;
  484. hit = 1;
  485. } else
  486. break;
  487. }
  488. areas = areas->next;
  489. }
  490. return hit;
  491. }
  492. static void do_gc() {
  493. char hit;
  494. do {
  495. int i;
  496. char hit0, hit1;
  497. hit = FALSE;
  498. /* maybe it's enough to trace up to max_gen+1 */
  499. for (i = 0; i < S48_GENERATIONS_COUNT; i++) {
  500. hit0 = trace_areas(generations[i].current_space->small_area);
  501. hit1 = trace_areas(generations[i].current_space->large_area);
  502. hit = hit0 || hit1 || hit;
  503. }
  504. } while ( hit );
  505. }
  506. inline static void swap(Generation* g) {
  507. Space* tmp = g->current_space;
  508. g->current_space = g->other_space;
  509. g->other_space = tmp;
  510. }
  511. #if (MEASURE_GC)
  512. /* We include this here, because the measurement code uses static
  513. variables from here a lot */
  514. #include "measure.ci"
  515. #endif
  516. long s48_gc_count() {
  517. return gc_count;
  518. }
  519. long s48_gc_run_time(long* mseconds) {
  520. *mseconds = gc_mseconds;
  521. return gc_seconds;
  522. }
  523. /* collect the first COUNT generations */
  524. /* FPage 5 ... */
  525. static void collect(int count, psbool emergency) {
  526. int i;
  527. #if (S48_USE_STATIC_SPACE)
  528. psbool major_p = (count == (S48_GENERATIONS_COUNT - 1));
  529. #else
  530. psbool major_p = (count == S48_GENERATIONS_COUNT);
  531. #endif
  532. /* this is for the water-mark adjustment at the end of the collection */
  533. #if (S48_ADJUST_WATER_MARK)
  534. aging_space_survival = 0;
  535. aging_space_before = 0;
  536. FOR_ALL_AREAS(generations[0].current_space->small_area,
  537. aging_space_before += (area->frontier - area->start));
  538. #endif
  539. /* swap spaces and update age_count first */
  540. /* FPage 5 */
  541. for (i = 0; i < count; i++) {
  542. swap(&generations[i]);
  543. generations[i].age_count++;
  544. #if (BIBOP_LOG)
  545. s48_bibop_log("swapped current <-> other generation %i", i);
  546. #endif
  547. }
  548. /* update self_count for the generation from which the collection
  549. starts */
  550. generations[count-1].self_count++;
  551. /* Initialize actions, targets and trace pointers */
  552. /* FPage 6 - 7 - 8 */
  553. init_areas(count);
  554. /* trace all roots to the younger generations */
  555. #if (S48_USE_REMEMBERED_SETS)
  556. for (i = 0; i < count; i++) {
  557. RemSet* rs; RemSet* nrs;
  558. FOR_ALL_AREAS(generations[i].other_space->small_area,
  559. s48_trace_remset(area->remset));
  560. FOR_ALL_AREAS(generations[i].other_space->weaks_area,
  561. s48_trace_remset(area->remset));
  562. /* because large areas are "reused", the remembered set has to be
  563. freed explicitly */
  564. FOR_ALL_AREAS(generations[i].other_space->large_area,
  565. nrs = s48_make_remset();
  566. rs = area->remset;
  567. area->remset = nrs;
  568. s48_trace_remset(rs);
  569. s48_free_remset(rs);
  570. );
  571. }
  572. #endif
  573. /* FPage 9 ... */
  574. for (i = count; i < S48_GENERATIONS_COUNT; i++) {
  575. #if (BIBOP_LOG)
  576. s48_bibop_log("Tracing roots from current-space of generation %i\n", i);
  577. #endif
  578. /* FPage 9 ... area_roots.c */
  579. s48_trace_areas_roots(generations[i].current_space->small_area);
  580. s48_trace_areas_roots(generations[i].current_space->large_area);
  581. }
  582. s48_gc_root();
  583. /* do the tracing until everything is done */
  584. do_gc();
  585. /* clean up */
  586. for (i = 0; i < S48_GENERATIONS_COUNT; i++) {
  587. clean_weak_pointers(generations[i].current_space->weaks_area);
  588. }
  589. s48_post_gc_cleanup(major_p, emergency);
  590. /* for objects resurrected in some post-gc-cleanup, trace again */
  591. do_gc();
  592. for (i = 0; i < count; i++) {
  593. clear_space(generations[i].other_space);
  594. generations[i].last_size = calc_generation_size(&generations[i]);
  595. }
  596. /* reset creation space */
  597. #if (S48_ADJUST_WATER_MARK)
  598. if (aging_space_before != 0)
  599. adjust_water_mark((float)aging_space_survival / aging_space_before);
  600. #endif
  601. recreate_creation_space();
  602. gc_count++;
  603. }
  604. static psbool do_collect(psbool force_major, psbool emergency);
  605. /* FPage 4 - 5 */
  606. void s48_collect(psbool force_major) {
  607. /*
  608. The BIBOP GC's heap gets an absolute maximal size with the -h flag
  609. of scheme48.
  610. -h <heap_size> : means <heap_size> cells (0 means no size limit).
  611. Without the -h flag, the heap size gets a default value
  612. (init.c). We have to calculate a minimal heap size, set by the
  613. special configuration of BIBOP (gc_config.h), to decide during the
  614. initialization (s48_initialize_bibop_heap()) if the given
  615. <heap_size> is reasonable or not. This is done after the
  616. allocation of the image_areas (s48_initialize_image_areas()). If
  617. the maximal heap size is too small we increase it to a reasonable
  618. value (the user is informed about that).
  619. The variable 'actual_heap_size' (in cells) is updated before each
  620. collection and represents the total size of all used areas (but
  621. without allocated free areas, and without the memory used for the
  622. various structures like Area, Metapage, Generation etc.). If this
  623. actual heap size rises above the user defined (or default) maximal
  624. heap size, we halt the program.
  625. */
  626. unsigned long user_defined_hsize, heap_live_size;
  627. psbool was_major;
  628. long start_seconds, start_mseconds, end_seconds, end_mseconds;
  629. start_seconds = s48_run_time(&start_mseconds);
  630. was_major = do_collect(force_major, FALSE);
  631. heap_live_size = S48_BYTES_TO_CELLS(s48_heap_live_size());
  632. user_defined_hsize = s48_max_heap_size();
  633. if ((user_defined_hsize > 0) &&
  634. (heap_live_size > (user_defined_hsize *
  635. ((100.0 - S48_EMERGENCY_PERCENTAGE)/100.0)))) {
  636. /* try again with a major collection. If it's still too much
  637. afterwards, quit. */
  638. if (! was_major)
  639. do_collect(TRUE, TRUE);
  640. heap_live_size = S48_BYTES_TO_CELLS(s48_heap_live_size());
  641. if (heap_live_size > user_defined_hsize)
  642. s48_gc_error("Scheme 48 heap overflow (max heap size %i cells)\n",
  643. user_defined_hsize);
  644. }
  645. end_seconds = s48_run_time(&end_mseconds);
  646. if (end_mseconds >= start_mseconds) {
  647. gc_seconds = gc_seconds + (end_seconds - start_seconds);
  648. gc_mseconds = gc_mseconds + (end_mseconds - start_mseconds);
  649. }
  650. else {
  651. gc_seconds = gc_seconds + ((end_seconds - start_seconds) - 1);
  652. gc_mseconds = gc_mseconds + ((1000 + end_mseconds) - start_mseconds);
  653. }
  654. }
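/* Editor's note: a worked example of the seconds/milliseconds carry in
   s48_collect above, with made-up times. If a collection starts at
   3 s 800 ms and ends at 5 s 200 ms, then end_mseconds < start_mseconds,
   so the else branch applies:
     gc_seconds  += (5 - 3) - 1        = 1
     gc_mseconds += (1000 + 200) - 800 = 400
   i.e. the expected 1.4 seconds. Note that the accumulated gc_mseconds
   is not folded back into gc_seconds here. */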
  655. static psbool do_collect(psbool force_major, psbool emergency) {
  656. /* Which generations should be collected? */
  657. /* collect up to the oldest generation that has grown enough since
  658. its last collection. */
  659. /* The youngest generation is always collected */
  660. /* FPage 5 */
  661. int c; /* generation number */
  662. #if (BIBOP_LOG)
  663. /* all current sizes */
  664. #if (S48_COLLECTION_THRESHOLD)
  665. s48_bibop_log("S48_COLLECTION_THRESHOLD: %i", S48_COLLECTION_THRESHOLD);
  666. #endif
  667. #if (S48_COLLECTION_HEAP_LIMIT)
  668. s48_bibop_log("S48_COLLECTION_HEAP_LIMIT: %i", S48_COLLECTION_HEAP_LIMIT);
  669. #endif
  670. #if (S48_COLLECTION_AGE_LIMIT)
  671. s48_bibop_log("S48_COLLECTION_AGE_LIMIT: %i", S48_COLLECTION_AGE_LIMIT);
  672. #endif
  673. { int i;
  674. for (i = S48_GENERATIONS_COUNT - 1; i > -1; i--) {
  675. unsigned long current_size;
  676. current_size = calc_generation_size(&generations[i]);
  677. #if (S48_COLLECTION_THRESHOLD)
  678. s48_bibop_log("Generation %i : %i ", i, (current_size - generations[i].last_size));
  679. #endif
  680. #if (S48_COLLECTION_HEAP_LIMIT)
  681. s48_bibop_log("Generation %i : %i ", i, current_size);
  682. #endif
  683. #if (S48_COLLECTION_AGE_LIMIT)
  684. s48_bibop_log("Generation %i : %i ", i, generations[i].age_count);
  685. #endif
  686. }}
  687. #endif /* #if (BIBOP_LOG) */
  688. /* Which generation should be collected? */
  689. #if (S48_USE_STATIC_SPACE)
  690. c = S48_GENERATIONS_COUNT - 1;
  691. #else
  692. c = S48_GENERATIONS_COUNT;
  693. #endif
  694. if (! force_major) {
  695. for (; c > 1; c--) {
  696. unsigned long current_size;
  697. current_size = calc_generation_size(&generations[c-1]);
  698. #if (S48_COLLECTION_THRESHOLD)
  699. if ((current_size - generations[c-1].last_size) > S48_COLLECTION_THRESHOLD)
  700. break;
  701. #endif
  702. #if (S48_COLLECTION_HEAP_LIMIT)
  703. if (current_size > S48_COLLECTION_HEAP_LIMIT)
  704. break;
  705. #endif
  706. #if (S48_COLLECTION_AGE_LIMIT)
  707. if (generations[c-1].self_count > 0 &&
  708. generations[c-1].self_count % S48_COLLECTION_AGE_LIMIT == 0)
  709. break;
  710. #endif
  711. }
  712. }
  713. #if (BIBOP_LOG)
  714. s48_bibop_log("Generation chosen: %i", c-1);
  715. s48_bibop_log("Starting Collection...");
  716. #endif
  717. #if (MEASURE_GC)
  718. measure_before_collection(c);
  719. #endif
  720. /*************************************/
  721. /* Now the collection really happens */
  722. /* FPage 5 ... */
  723. collect(c, emergency);
  724. /*************************************/
  725. #if (MEASURE_GC)
  726. measure_after_collection(c);
  727. #endif
  728. #if (BIBOP_LOG)
  729. s48_bibop_log("Collection done!");
  730. #endif
  731. #if (S48_USE_STATIC_SPACE)
  732. return (c == S48_GENERATIONS_COUNT-1);
  733. #else
  734. return (c == S48_GENERATIONS_COUNT);
  735. #endif
  736. }
  737. /*********************************************************************
  738. Tracing
  739. ********************************************************************/
  740. /* forward declarations */
  741. inline static void mark_large(Area* area, Space* to_space);
  742. inline static Area* allocate_small_area(Space* space,
  743. unsigned long size_in_bytes);
  744. inline static Area* allocate_weak_area(Space* space);
  745. inline static Area* allocate_large_area(Space* space,
  746. unsigned long size_in_bytes);
  747. /* the value STOB has been written to location ADDR */
  748. inline static void call_internal_write_barrier(Area* maybe_area, char area_looked_up, s48_address addr,
  749. s48_value stob, Area* to_area) {
  750. if (!area_looked_up) maybe_area = s48_memory_map_ref(addr);
  751. /* if maybe_area is still NULL, it must have been a write to a
  752. location outside of the heap, e.g. a temporary pointer or
  753. something in the root-set; we can ignore it. */
  754. if ((maybe_area != NULL) && (maybe_area->generation_index > 0))
  755. s48_internal_write_barrier(maybe_area, addr, stob, to_area);
  756. }
  757. inline static void call_internal_write_barrier2(Area* maybe_area, char area_looked_up, s48_address addr,
  758. s48_value stob) {
  759. call_internal_write_barrier(maybe_area, area_looked_up, addr, stob,
  760. s48_memory_map_ref(S48_ADDRESS_AT_HEADER(stob)));
  761. }
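/* Editor's note: s48_internal_write_barrier is defined elsewhere in
   this GC; the fragment below is NOT that implementation, only a
   generic sketch of what a per-area remembered-set barrier typically
   does, using a hypothetical example_remset_add helper. The idea: when
   a slot in an older area is made to point into a younger area,
   remember the slot so that a minor collection only has to trace the
   remembered slots and the roots (see the s48_trace_remset calls in
   collect()) instead of scanning the whole old generation. */
#if 0
static void example_write_barrier(Area* from_area, s48_address addr,
                                  s48_value stob, Area* to_area) {
  /* only old-to-young pointers need to be remembered */
  if (to_area != NULL &&
      from_area->generation_index > to_area->generation_index)
    example_remset_add(from_area->remset, addr, stob); /* hypothetical */
}
#endif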
  762. #if (S48_HAVE_TRANSPORT_LINK_CELLS)
  763. static Area* make_small_available_in_no_gc(Space* space,
  764. unsigned long size_in_bytes) {
  765. Area* area = space->small_area;
  766. if (size_in_bytes > AREA_REMAINING(area)) {
  767. area = allocate_small_area(space, size_in_bytes);
  768. }
  769. return area;
  770. }
  771. static s48_address allocate_small_in_no_gc(Space* space,
  772. unsigned long size_in_bytes) {
  773. Area* area = make_small_available_in_no_gc(space, size_in_bytes);
  774. s48_address addr = area->frontier;
  775. area->frontier += S48_BYTES_TO_A_UNITS(size_in_bytes);
  776. return addr;
  777. }
  778. static s48_value make_stob(long type, unsigned long size_in_cells) {
  779. /* Must work during a collection! */
  780. unsigned long size_in_bytes = S48_CELLS_TO_BYTES(size_in_cells);
  781. /* Allocate a place for it */
  782. s48_address addr = allocate_small_in_no_gc(
  783. generations[0].current_space,
  784. S48_STOB_OVERHEAD_IN_BYTES + size_in_bytes);
  785. /* Initialize */
  786. assert(S48_STOB_OVERHEAD_IN_BYTES == sizeof(s48_value));
  787. *((s48_value*)addr) = S48_MAKE_HEADER(type, size_in_bytes);
  788. memset(addr + S48_STOB_OVERHEAD_IN_A_UNITS, 0, size_in_bytes);
  789. return S48_ADDRESS_TO_STOB_DESCRIPTOR(addr + S48_STOB_OVERHEAD_IN_A_UNITS);
  790. }
  791. static s48_value make_pair(s48_value car, s48_value cdr) {
  792. s48_value result = make_stob(S48_STOBTYPE_PAIR, 2);
  793. S48_UNSAFE_SET_CAR(result, car);
  794. S48_UNSAFE_SET_CDR(result, cdr);
  795. return result;
  796. }
  797. static void append_tconcB(s48_value tconc, s48_value elem) {
  798. /* A tconc is a pair, whose car points to the first pair of a list
  799. and whose cdr points to the last pair of this list. */
  800. /* elem must already be in the "to space"! */
  801. s48_value tconc_tail = S48_UNSAFE_CDR(tconc);
  802. assert(S48_PAIR_P(tconc));
  803. /* Though the tconc must already be in the "to space", its cdr (and
  804. car) could still point to the "from space". But that does not
  805. matter here, because if it still has to be copied, its (already
  806. correct) contents will be ignored in the tracing. And because we
  807. only write pointers to objects in the "to space", nothing has to
  808. be traced additionally here. */
  809. if (S48_PAIR_P(tconc_tail)) {
  810. /* create a new pair */
  811. s48_value newpair = make_pair(S48_FALSE, S48_FALSE);
  812. /* enqueue the tlc (=elem) in the tconc queue */
  813. S48_UNSAFE_SET_CAR(tconc_tail, elem);
  814. S48_UNSAFE_SET_CDR(tconc_tail, newpair);
  815. S48_UNSAFE_SET_CDR(tconc, newpair); /* new tail */
  816. }
  817. /* else: silently ignoring malformed tconc */
  818. }
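/* Editor's note: an illustrative picture of append_tconcB, derived
   from the code above. A tconc is a pair (FIRST . LAST); with queue
   contents (a b) and the sentinel LAST = (#f . #f):
     FIRST --> (a . (b . LAST))
   Enqueuing elem overwrites LAST in place to (elem . NEWPAIR), where
   NEWPAIR is a freshly allocated (#f . #f), and then sets the cdr of
   the tconc to NEWPAIR, so the queue becomes (a b elem) with NEWPAIR
   as the new sentinel tail. */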
  819. static void trace_transport_link_cell(Area* maybe_area, char area_looked_up,
  820. s48_address contents_pointer,
  821. unsigned long size_in_a_units) {
  822. s48_value tlc = S48_ADDRESS_TO_STOB_DESCRIPTOR(contents_pointer);
  823. s48_value old_key;
  824. char key_moved_p;
  825. assert(S48_TRANSPORT_LINK_CELL_P(tlc));
  826. old_key = S48_UNSAFE_TRANSPORT_LINK_CELL_KEY(tlc);
  827. /* ...trace the current tlc to make sure that every pointer is up-to-date. */
  828. s48_internal_trace_locationsB(
  829. maybe_area, area_looked_up, contents_pointer,
  830. contents_pointer + size_in_a_units,
  831. "trace_transport_link_cell");
  832. /* Hint: We will not recognize large keys "moving" into an older
  833. generation; but the tlc-logic is only interested in keys changing
  834. their address anyway. So that does not matter */
  835. key_moved_p = (S48_UNSAFE_TRANSPORT_LINK_CELL_KEY(tlc) != old_key);
  836. if (key_moved_p) {
  837. s48_value tconc = S48_UNSAFE_TRANSPORT_LINK_CELL_TCONC(tlc);
  838. /* If the tconc field is a pair... */
  839. if (S48_FALSE_P(tconc))
  840. {} /* ignore */
  841. else if (S48_PAIR_P(tconc)
  842. && S48_PAIR_P(S48_UNSAFE_CAR(tconc))
  843. && S48_PAIR_P(S48_UNSAFE_CDR(tconc))) {
  844. /* ...then add the tlc to the end of the tconc queue. */
  845. append_tconcB(tconc, tlc);
  846. /* ...and set the tconc field to null (false). */
  847. S48_UNSAFE_SET_TRANSPORT_LINK_CELL_TCONC(tlc, S48_FALSE);
  848. }
  849. else
  850. {} /*printf("Warning: malformed tlc at %p.\n", S48_ADDRESS_AT_HEADER(tlc));*/
  851. }
  852. assert(S48_TRANSPORT_LINK_CELL_P(tlc));
  853. }
  854. #endif /* S48_HAVE_TRANSPORT_LINK_CELLS */
  855. /* EKG checks for broken hearts - only used internally in
  856. s48_trace_locationsB */
  857. #define EKG(label) {\
  858. long header = S48_STOB_HEADER(trace_stob_stob);\
  859. if (BROKEN_HEART_P((s48_value)header)) {\
  860. *((s48_value*)addr) = header;\
  861. call_internal_write_barrier2(maybe_area, area_looked_up, addr, \
  862. (s48_value)header);\
  863. addr = next;\
  864. goto loop;\
  865. } else {\
  866. copy_header = header;\
  867. copy_thing = trace_stob_stob;\
  868. goto label;\
  869. }\
  870. }
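/* Editor's note: how the broken-heart test in EKG works, derived from
   do_copy_object below. When an object is copied, its old header word
   is overwritten with the new stob descriptor ("break heart"). Since a
   header word is never a valid stob descriptor, finding one where a
   header should be (BROKEN_HEART_P, i.e. S48_STOB_P, is true) means
   the object has already been copied: EKG then just installs the
   forwarding pointer at addr and continues the loop; otherwise it
   jumps to the label that copies the object. */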
  871. void do_copy_object(s48_address addr, /* addr of pointer */
  872. Area * maybe_area, /* laying in area, if known */
  873. char area_looked_up, /* area known? */
  874. Area * from_area, /* pointing in area */
  875. s48_value copy_thing, /* stob descriptor */
  876. s48_value copy_header, /* stob header */
  877. Area * copy_area /* target area */
  878. ) {
  879. /* we start writing at the frontier location */
  880. s48_address frontier = copy_area->frontier;
  881. /* the data, i.e. everything after the header, will be written at this location */
  882. s48_address data_addr = frontier + S48_STOB_OVERHEAD_IN_A_UNITS;
  883. /* Since an s48_address is always 4-byte aligned, the lower 2 bits are always 00 */
  884. /* We use these 2 bits for the STOB-TAG: 11 to make a scheme-stob */
  885. s48_value new = S48_ADDRESS_TO_STOB_DESCRIPTOR(data_addr);
  886. assert(s48_memory_map_ref(S48_ADDRESS_AT_HEADER(new)) == copy_area);
  887. #if (S48_ADJUST_WATER_MARK)
  888. /* count the sizes of small objects that survive in the first generation */
  889. if ((from_area->generation_index == 0) &&
  890. (from_area != creation_space.small_below) &&
  891. (from_area != creation_space.small_above) &&
  892. (from_area->action == GC_ACTION_COPY_SMALL))
  893. aging_space_survival += S48_HEADER_LENGTH_IN_A_UNITS(copy_header) +
  894. S48_STOB_OVERHEAD_IN_BYTES;
  895. #endif
  896. /* count every surviving obj */
  897. #if (MEASURE_GC)
  898. all_surviving_obj += S48_HEADER_LENGTH_IN_A_UNITS(copy_header) +
  899. S48_STOB_OVERHEAD_IN_BYTES;
  900. #endif
  901. /* copy the object to the new location */
  902. /* first the header at the frontier location */
  903. *((s48_value*)frontier) = copy_header;
  904. /* and then the data (thing address after header) at the data_addr
  905. location */
  906. assert(AREA_REMAINING(copy_area) >= (S48_HEADER_LENGTH_IN_BYTES(copy_header)
  907. + S48_STOB_OVERHEAD_IN_BYTES));
  908. memcpy((void*)data_addr, S48_ADDRESS_AFTER_HEADER(copy_thing, void),
  909. S48_HEADER_LENGTH_IN_BYTES(copy_header));
  910. /* break heart */
  911. /* alternative: S48_STOB_HEADER(copy_thing) = new;*/
  912. *((s48_value*)S48_ADDRESS_AT_HEADER(copy_thing)) = new;
  913. /* advance the allocation pointer */
  914. copy_area->frontier = data_addr + S48_HEADER_LENGTH_IN_A_UNITS(copy_header);
  915. /* overwrite the old stob with the new one */
  916. *((s48_value*)addr) = new;
  917. /* if we are tracing an area of an older generation, call the write barrier */
  918. call_internal_write_barrier(maybe_area, area_looked_up, addr, new, copy_area);
  919. }
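/* Editor's note: a worked example of the copy layout in do_copy_object,
   assuming (for illustration only) S48_STOB_OVERHEAD_IN_A_UNITS = 4
   address units and an object whose header length is 12 bytes. If
   copy_area->frontier is at offset 0x100:
     header word written at      0x100
     data_addr                 = 0x104 (12 bytes of data copied there)
     new                        = stob descriptor for 0x104
     copy_area->frontier        = 0x104 + 12 = 0x110
   and the old header slot of copy_thing now holds `new`, the broken
   heart that later lookups will follow. */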
  920. /* Copy everything pointed to from somewhere between START (inclusive)
  921. and END (exclusive).
  922. */
  923. void s48_internal_trace_locationsB(Area* maybe_area, char area_looked_up, s48_address start,
  924. s48_address end, char* called_from) {
  925. s48_address addr = start;
  926. s48_address next;
  927. s48_value thing;
  928. s48_value trace_stob_stob;
  929. long copy_header;
  930. s48_value copy_thing;
  931. Area* copy_area;
  932. Area* from_area;
  933. Space* copy_to_space;
  934. loop: {
  935. if (addr < end) {
  936. thing = *((s48_value*) addr);
  937. next = S48_ADDRESS_INC(addr);
  938. if (S48_HEADER_P(thing)) {
  939. if (S48_B_VECTOR_HEADER_P(thing)) {
  940. addr = next + S48_HEADER_LENGTH_IN_A_UNITS(thing);
  941. }
  942. else if (S48_HEADER_TYPE(thing) == S48_STOBTYPE_CONTINUATION) {
  943. long size = S48_HEADER_LENGTH_IN_A_UNITS(thing);
  944. extern void s48_trace_continuation(char *, long); /* BIBOP-specific */
  945. s48_trace_continuation(next, size);
  946. addr = next + size;
  947. }
  948. #if (S48_HAVE_TRANSPORT_LINK_CELLS)
  949. else if (S48_HEADER_TYPE(thing) == S48_STOBTYPE_TRANSPORT_LINK_CELL) {
  950. long size = S48_HEADER_LENGTH_IN_A_UNITS(thing);
  951. trace_transport_link_cell(maybe_area, area_looked_up, next, size);
  952. addr = next + size;
  953. }
  954. #endif
  955. else {
  956. addr = next;
  957. }
  958. goto loop;
  959. } else if (! S48_STOB_P(thing)) {
  960. addr = next;
  961. goto loop;
  962. } else {
  963. /* it's a stob */
  964. trace_stob_stob = thing;
  965. goto trace_stob;
  966. }
  967. }
  968. return;
  969. }
  970. trace_stob: { /* parameter: trace_stob_stob */
  971. from_area = s48_memory_map_ref(S48_ADDRESS_AT_HEADER(trace_stob_stob));
  972. if (from_area != NULL) {
  973. switch (from_area->action) {
  974. case GC_ACTION_COPY_SMALL: {
  975. copy_to_space = from_area->target_space;
  976. EKG(copy_small);
  977. } break;
  978. case GC_ACTION_COPY_MIXED: {
  979. copy_to_space = from_area->target_space;
  980. EKG(copy_mixed);
  981. } break;
  982. case GC_ACTION_COPY_WEAK: {
  983. copy_to_space = from_area->target_space;
  984. EKG(copy_weak_pointer);
  985. } break;
  986. case GC_ACTION_IGNORE: {
  987. call_internal_write_barrier(maybe_area, area_looked_up, addr, trace_stob_stob, from_area);
  988. addr = next;
  989. goto loop;
  990. } break;
  991. case GC_ACTION_MARK_LARGE: {
  992. copy_to_space = from_area->target_space;
  993. mark_large(from_area, copy_to_space);
  994. /* a large object has been "copied" */
  995. call_internal_write_barrier(maybe_area, area_looked_up, addr, trace_stob_stob, from_area);
  996. addr = next;
  997. goto loop;
  998. } break;
  999. case GC_ACTION_ERROR: {
  1000. s48_gc_error("got error gc-action in the %i generation", from_area->generation_index + 1);
  1001. return; /* Never reached */
  1002. } break;
  1003. default: {
  1004. s48_gc_error("got unexpected gc-action %d in the %i generation", from_area->action, from_area->generation_index + 1);
  1005. return; /* Never reached */
  1006. }
  1007. }
  1008. }
  1009. else {
  1010. s48_gc_error("illegal stob descriptor found while tracing address %p called from %s",
  1011. addr, called_from);
  1012. return; /* Never reached */
  1013. }
  1014. }
  1015. assert(FALSE); /* we should never get here */
  1016. /* Find out which is the actual copy_area for small, large, etc. object */
  1017. copy_small: { /* parameter: copy_to_space, copy_header, copy_thing */
  1018. /* get the current Area of the copy_to_space (the target_space) */
  1019. Area* area = copy_to_space->small_area;
  1020. unsigned long size_in_bytes = (S48_HEADER_LENGTH_IN_A_UNITS(copy_header)
  1021. + S48_STOB_OVERHEAD_IN_A_UNITS);
  1022. if (size_in_bytes <= AREA_REMAINING(area))
  1023. /* If the object fits then this is the copy_area ...*/
  1024. copy_area = area;
  1025. else
  1026. /* otherwise, allocate a small area in this space */
  1027. copy_area = allocate_small_area(copy_to_space, size_in_bytes);
  1028. goto copy_object;
  1029. }
  1030. copy_large: { /* parameter: copy_to_space, copy_header, copy_thing */
  1031. copy_area = allocate_large_area( copy_to_space,
  1032. S48_HEADER_LENGTH_IN_BYTES(copy_header) +
  1033. S48_STOB_OVERHEAD_IN_BYTES );
  1034. goto copy_object;
  1035. }
  1036. copy_mixed: { /* parameter: copy_to_space, copy_header, copy_thing */
  1037. if (S48_STOBTYPE_WEAK_POINTER == S48_HEADER_TYPE(copy_header))
  1038. goto copy_weak_pointer; /* uses copy_to_space, copy_thing! */
  1039. else if (S48_HEADER_LENGTH_IN_BYTES(copy_header) < S48_SMALL_OBJECT_LIMIT)
  1040. goto copy_small; /* uses copy_to_space, copy_thing, copy_header! */
  1041. else
  1042. goto copy_large; /* ditto */
  1043. }
  1044. copy_weak_pointer: { /* parameter: copy_to_space, copy_thing */
  1045. Area* area = copy_to_space->weaks_area;
  1046. /*copy_header = WEAK_POINTER_HEADER;*/
  1047. if ((unsigned long) (S48_HEADER_LENGTH_IN_A_UNITS(copy_header)
  1048. + S48_STOB_OVERHEAD_IN_A_UNITS)
  1049. < AREA_REMAINING(area))
  1050. copy_area = area;
  1051. else
  1052. copy_area = allocate_weak_area(copy_to_space);
  1053. goto copy_object;
  1054. }
  1055. copy_object: { /* parameter: from_area, copy_thing, copy_header, copy_area */
  1056. do_copy_object(addr, maybe_area, area_looked_up, from_area, copy_thing, copy_header, copy_area);
  1057. /* continue behind that stob */
  1058. addr = next;
  1059. goto loop;
  1060. }
  1061. } /* end: trace_locationsB */
  1062. /* Traces between START (inclusive) and END (exclusive). */
  1063. void s48_trace_locationsB(s48_address start, s48_address end) {
  1064. s48_internal_trace_locationsB(NULL, FALSE, start, end, "s48_trace_locationsB");
  1065. }
  1066. /* s48_trace_value passes the location of STOB to
  1067. s48_trace_locationsB. */
  1068. s48_value s48_trace_value(s48_value stob) {
  1069. s48_address addr = (s48_address)&stob;
  1070. (void)s48_trace_locationsB(addr, S48_ADDRESS_INC(addr));
  1071. /* stob now holds the new location of the value... */
  1072. return stob;
  1073. }
  1074. /* s48_trace_stob_contentsB passes the contents of a d-vector stob to
  1075. s48_trace_locations. Never call this with b-vectors! */
  1076. void s48_trace_stob_contentsB(s48_value stob) {
  1077. s48_address start = (s48_address)S48_ADDRESS_AFTER_HEADER(stob, void);
  1078. unsigned long size = S48_BYTES_TO_A_UNITS(S48_STOB_BYTE_LENGTH(stob));
  1079. s48_trace_locationsB(start, (start + size));
  1080. }
  1081. /* creating new areas during gc */
  1082. inline static Area* allocate_small_area(Space* space,
  1083. unsigned long size_in_bytes) {
  1084. Area* area = s48_allocate_area(ulong_max(S48_MINIMUM_SMALL_AREA_SIZE,
  1085. BYTES_TO_PAGES(size_in_bytes)),
  1086. ulong_max(S48_MAXIMUM_SMALL_AREA_SIZE,
  1087. BYTES_TO_PAGES(size_in_bytes)),
  1088. (unsigned char)space->generation_index,
  1089. AREA_TYPE_SIZE_SMALL);
  1090. area->action = GC_ACTION_IGNORE;
  1091. area->next = space->small_area;
  1092. space->small_area = area;
  1093. return area;
  1094. }
  1095. inline static Area* allocate_large_area(Space* space,
  1096. unsigned long size_in_bytes) {
  1097. unsigned long pages = BYTES_TO_PAGES(size_in_bytes);
  1098. Area* area = s48_allocate_area(pages,
  1099. pages,
  1100. (unsigned char)space->generation_index,
  1101. AREA_TYPE_SIZE_LARGE);
  1102. area->action = GC_ACTION_IGNORE;
  1103. area->next = space->large_area;
  1104. space->large_area = area;
  1105. return area;
  1106. }
  1107. inline static Area* allocate_weak_area(Space* space) {
  1108. Area* area = s48_allocate_area(S48_MINIMUM_WEAK_AREA_SIZE,
  1109. S48_MAXIMUM_WEAK_AREA_SIZE,
  1110. (unsigned char)space->generation_index,
  1111. AREA_TYPE_SIZE_WEAKS);
  1112. area->action = GC_ACTION_IGNORE;
  1113. area->next = space->weaks_area;
  1114. space->weaks_area = area;
  1115. return area;
  1116. }
  1117. /* Remove AREA from from-space's list and put it on to-space's. Ignore
  1118. AREA from now on.
  1119. */
  1120. inline static void delete_large_area(Area* large_area) {
  1121. char hit = FALSE;
  1122. FOR_ALL_AREAS(creation_space.large,
  1123. if (area == large_area) hit = TRUE);
  1124. if (hit)
  1125. creation_space.large = s48_delete_area(creation_space.large, large_area);
  1126. else {
  1127. Space* from_space = generations[large_area->generation_index].other_space;
  1128. from_space->large_area = s48_delete_area(from_space->large_area, large_area);
  1129. }
  1130. }
  1131. inline static void mark_large(Area* area, Space* to_space) {
  1132. delete_large_area(area);
  1133. area->next = to_space->large_area;
  1134. to_space->large_area = area;
  1135. area->action = GC_ACTION_IGNORE;
  1136. area->generation_index = to_space->generation_index;
  1137. area->trace = area->start;
  1138. }
  1139. /*********************************************************************
  1140. Allocation
  1141. *********************************************************************/
  1142. long s48_available() {
  1143. /* it's not 100% certain that all these cells can be allocated, because
  1144. if an object does not fit into area_below, the remaining space is
  1145. discarded. Is this a bad thing? */
  1146. /* If the heap can grow, the remaining memory in the creation space
  1147. is available. If it can't, we have to consider that the actually
  1148. allocated heap (s48_heap_size) cannot grow above
  1149. s48_max_heap_size(), so less space is really available. */
  1150. long max_heap_size = s48_max_heap_size();
  1151. long available_creation_space =
  1152. S48_BYTES_TO_CELLS(AREA_REMAINING(creation_space.small_below)+
  1153. AREA_REMAINING(creation_space.small_above));
  1154. if (max_heap_size == 0)
  1155. return available_creation_space;
  1156. else {
  1157. long virtually_available =
  1158. max_heap_size - S48_BYTES_TO_CELLS(s48_heap_live_size());
  1159. if (virtually_available < available_creation_space)
  1160. return virtually_available;
  1161. else
  1162. return available_creation_space;
  1163. }
  1164. }
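/* Editor's note: a worked example of s48_available with made-up
   numbers. Suppose the creation space has 50,000 cells remaining,
   s48_max_heap_size() returns 10,000,000 cells and the live heap is
   9,990,000 cells. Then virtually_available = 10,000 cells, which is
   less than the creation-space remainder, so 10,000 is returned. With
   max_heap_size == 0 (no limit), the 50,000 cells would be returned
   directly. */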
  1165. void s48_forbid_gcB() {
  1166. gc_forbid_count++;
  1167. }
  1168. void s48_allow_gcB() {
  1169. /*assert(gc_forbid_count > 0);*/
  1170. gc_forbid_count--;
  1171. }
  1172. /* Small Objects */
  1173. /* FPage 2 - 3 - 4 */
  1174. void s48_make_availableAgc(long len_in_bytes) {
  1175. #if (BIBOP_LOG)
  1176. int i; /* for the generations-loop */
  1177. int before_size[S48_GENERATIONS_COUNT];
  1178. int after_size[S48_GENERATIONS_COUNT];
  1179. #endif
  1180. /* let's see if we run out of space in the current area... */
  1181. if (AREA_REMAINING(creation_space.small) < len_in_bytes) {
  1182. /* if we are under the water-mark, then continue above it... */
  1183. if ((creation_space.small == creation_space.small_below) &&
  1184. (len_in_bytes <= AREA_REMAINING(creation_space.small_above))) {
  1185. /* FPage 2 */
  1186. creation_space.small = creation_space.small_above;
  1187. }
  1188. /* While the image is still loading and the creation_space is
  1189. full, creation_space.small points to the small area created
  1190. in the current_space of the first generation. In this case
  1191. the allocation goes on in this small area without a memory
  1192. limit. Once the image has been read, a gc is allowed and the
  1193. very next allocation of an object triggers the
  1194. first collection.
  1195. Just for the record: with a
  1196. S48_CREATION_SPACE_SIZE of 512 KB (128 pages as default), after
  1197. the image has been loaded, there are 89,133 objects in the heap:
  1198. - 14,177 objects in the small_above area
  1199. - 16,579 objects in the small_below area
  1200. - 58,377 objects(!) in 19 areas in the current_space
  1201. of the first generation.
  1202. That is, only 1/3 goes into the creation_space and the remaining
  1203. 2/3 into the first generation, which causes a big delay at the
  1204. first collection. I'll change this by increasing the
  1205. creation_space_size threefold.
  1206. From now on, the creation_space alone is responsible for the
  1207. next collections: if we are above it already, and are allowed
  1208. to collect some garbage, then do it. */
  1209. else {
  1210. if (gc_forbid_count == 0) {
  1211. /* FPage 4 */
  1212. #if (BIBOP_LOG)
  1213. s48_bibop_log("CREATION SPACE WATER MARK");
  1214. s48_bibop_log("creation_space.small_above: %i pages",
  1215. S48_CREATION_SPACE_SIZE - current_water_mark);
  1216. s48_bibop_log("creation_space.small_below: %i pages\n",
  1217. current_water_mark);
  1218. /*s48_bibop_log("NEW OBJECTS");
  1219. s48_bibop_log("Bytes small_above = %i", get_creation_space_small_above());
  1220. s48_bibop_log("Bytes small_below = %i", get_creation_space_small_below());*/
  1221. if (s48_gc_count() == 0) {
  1222. s48_bibop_log("Bytes small_gen = %i",
  1223. calc_generation_size(&generations[0]));
  1224. }
  1225. s48_bibop_log("");
  1226. /* save the current size before the collection */
  1227. for (i = 0; i < S48_GENERATIONS_COUNT; i++) {
  1228. before_size[i] = calc_generation_size(&generations[i]);
  1229. }
  1230. #endif
  1231. s48_collect(FALSE);
  1232. #if (BIBOP_LOG)
  1233. /* save the current size after the collection */
  1234. for (i = 0; i < S48_GENERATIONS_COUNT; i++) {
  1235. after_size[i] = calc_generation_size(&generations[i]);
  1236. }
  1237. s48_bibop_log("AFTER COLLECTION");
  1238. for (i = S48_GENERATIONS_COUNT - 1; i > -1; i--) {
  1239. s48_bibop_log("gen: %i, last size %i, current size %i",
  1240. i,
  1241. before_size[i],
  1242. after_size[i]);
  1243. }
  1244. s48_bibop_log("");
  1245. #endif
  1246. }
  1247. /* if a gc is not allowed, or if after the collection, the
  1248. creation-space is still too small, just use the first
  1249. generation to allocate space, and allocate a new area if
  1250. needed. */
  1251. if ((gc_forbid_count != 0) ||
  1252. (AREA_REMAINING(creation_space.small) < len_in_bytes)) {
  1253. /* FPage 3 */
  1254. creation_space.small = generations[0].current_space->small_area;
  1255. if (AREA_REMAINING(creation_space.small) < len_in_bytes) {
  1256. Area* new_area =
  1257. s48_allocate_area(ulong_max(S48_MINIMUM_SMALL_AREA_SIZE,
  1258. BYTES_TO_PAGES(len_in_bytes)),
  1259. ulong_max(S48_MAXIMUM_SMALL_AREA_SIZE,
  1260. BYTES_TO_PAGES(len_in_bytes)),
  1261. 0,
  1262. AREA_TYPE_SIZE_SMALL);
  1263. new_area->next = generations[0].current_space->small_area;
  1264. generations[0].current_space->small_area = new_area;
  1265. creation_space.small = new_area;
  1266. }
  1267. }
  1268. }
  1269. }
  1270. if (AREA_REMAINING(creation_space.small) < len_in_bytes)
  1271. s48_gc_error("out of memory in s48_make_availableAgc(%d)", len_in_bytes);
  1272. }
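
/* Small-object allocation is a two-step protocol: the caller first
   ensures there is room with s48_make_availableAgc and then grabs the
   bytes with s48_allocate_small, which only bumps the frontier.
   Illustrative sketch only, not code from this file:

     s48_make_availableAgc(len_in_bytes);
     addr = s48_allocate_small(len_in_bytes);
*/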
s48_address s48_allocate_small(long len_in_bytes) {
  s48_address result;
  /* catch misuse of this function */
  /*assert(len_in_bytes <= S48_SMALL_OBJECT_LIMIT);*/
  result = creation_space.small->frontier;
  creation_space.small->frontier += S48_BYTES_TO_A_UNITS(len_in_bytes);
  assert(creation_space.small->frontier <= creation_space.small->end);
  return result;
}

/* Large Objects */
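
/* Large objects each get an area of their own and are never copied.
   Before allocating one, we check how much the large part of the
   creation space already holds; once it exceeds
   S48_MAXIMUM_LARGE_CREATION_SPACE_SIZE (and a gc is allowed) a
   collection is triggered instead of letting it grow further. */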
static void s48_make_large_availableAgc(long len_in_bytes) {
  unsigned long current_size = 0;
  /* maybe keep track of the size while allocating instead... */
  FOR_ALL_AREAS(creation_space.large,
                current_size += (area->frontier - area->start));
  if ((gc_forbid_count == 0) &&
      (current_size > S48_MAXIMUM_LARGE_CREATION_SPACE_SIZE)) {
    s48_collect(FALSE);
  }
}

static s48_address s48_allocate_large(long len_in_bytes) {
  unsigned long len_in_pages = BYTES_TO_PAGES(len_in_bytes);
  Area* area;
  if (PAGES_TO_BYTES_LOSES_P(len_in_pages)) {
    /* pretend we're just out of memory */
    return NULL;
  }
  area = s48_allocate_area_without_crashing(len_in_pages,
                                            len_in_pages,
                                            0,
                                            AREA_TYPE_SIZE_LARGE);
  if (area == NULL) {
    /* out of memory */
    return NULL;
  }
  area->frontier = area->start + len_in_bytes;
  area->next = creation_space.large;
  creation_space.large = area;
  return area->start;
}

/* "Mixed" Objects */
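
/* Allocation requests are dispatched on S48_SMALL_OBJECT_LIMIT: at or
   below the limit the object goes into the small creation space,
   above it a dedicated large area is used.  Illustrative sketch only,
   not code from this file:

     s48_address p = s48_allocate_tracedAgc(len_in_bytes);
     small requests end up in s48_allocate_small(...),
     large requests in s48_allocate_large(...).
*/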
inline static s48_address allocate_mixedAgc(long len_in_bytes) {
  if (len_in_bytes <= S48_SMALL_OBJECT_LIMIT) {
    s48_make_availableAgc(len_in_bytes);
    return s48_allocate_small(len_in_bytes);
  } else {
    s48_make_large_availableAgc(len_in_bytes);
    return s48_allocate_large(len_in_bytes);
  }
}

/*
  The Allocator (s48_allocate_tracedAgc)

  - If called from the portable dumper: len_in_bytes is the size of a
    single object.  All objects are allocated one after the other.
  - If called from the TSC dumper: len_in_bytes is the size of the
    whole image.  At this phase a gc is forbidden, so if the
    creation_space is not big enough we have a problem.  We could
    increase the size of the creation_space to hold the whole image,
    but we can't copy it blindly, because the static_space is divided
    into 3 areas: small, large and weak objects.  We have to find
    these 3 kinds of objects separately!!!
*/

s48_address s48_allocate_tracedAgc(long len_in_bytes) {
  return allocate_mixedAgc(len_in_bytes);
}

s48_address s48_allocate_untracedAgc(long len_in_bytes) {
  return allocate_mixedAgc(len_in_bytes);
}

/* Unmovable objects are allocated directly in new large areas, which
   are never moved during a collection. */
psbool s48_gc_can_allocate_unmovableP() { return PSTRUE; }

s48_address s48_allocate_traced_unmovableAgc(long len_in_bytes) {
  s48_make_large_availableAgc(len_in_bytes);
  return s48_allocate_large(len_in_bytes);
}

s48_address s48_allocate_untraced_unmovableAgc(long len_in_bytes) {
  s48_make_large_availableAgc(len_in_bytes);
  return s48_allocate_large(len_in_bytes);
}

psbool s48_unmovableP(s48_value stob) {
  Area* area = s48_memory_map_ref(S48_ADDRESS_AT_HEADER(stob));
  return ((area != NULL) &&
          (area->area_type_size == AREA_TYPE_SIZE_LARGE)) ? PSTRUE : PSFALSE;
}

/* Weak Pointers */
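
/* Weak pointers normally live in creation_space.weaks.  If that area
   is full we either collect (when a gc is allowed) or fall back to
   the weak area of the first generation, growing it with a fresh
   area if even that one is full. */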
s48_address s48_allocate_weakAgc(long len_in_bytes) {
  Area* area = creation_space.weaks;
  s48_address result;
  if (AREA_REMAINING(area) < len_in_bytes) {
    if (gc_forbid_count == 0) {
      s48_collect(FALSE);
      area = creation_space.weaks;
    }
    else {
      Area** areap = &generations[0].current_space->weaks_area;
      if (AREA_REMAINING(*areap) < len_in_bytes) {
        Area* new_area = s48_allocate_area(S48_MINIMUM_WEAK_AREA_SIZE,
                                           S48_MAXIMUM_WEAK_AREA_SIZE,
                                           0, AREA_TYPE_SIZE_WEAKS);
        new_area->next = *areap;
        *areap = new_area;
      }
      area = *areap;
    }
  }
  if (AREA_REMAINING(area) < len_in_bytes)
    /* this should be impossible */
    s48_gc_error("out of memory in s48_allocate_weakAgc(%ld).", len_in_bytes);
  result = area->frontier;
  area->frontier += S48_BYTES_TO_A_UNITS(len_in_bytes);
  return result;
}

/*********************************************************************
 Walking down the heap
*********************************************************************/
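
/* s48_walk_heap calls do_part once per area with the area's start and
   frontier, covering the creation space and every generation's
   current space.  Illustrative sketch of a callback (names made up
   for the example, not code from this file):

     static unsigned long bytes_seen = 0;
     static void count_part(s48_address start, s48_address frontier) {
       bytes_seen += (unsigned long)(frontier - start);
     }
     ...
     s48_walk_heap(count_part);
*/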
inline static void walk_areas(void (*do_part)(s48_address, s48_address),
                              Area* areas) {
  while (areas != NULL) {
    do_part(areas->start, areas->frontier);
    areas = areas->next;
  }
}

void s48_walk_heap(void (*do_part)(s48_address, s48_address)) {
  int i;
  walk_areas(do_part, creation_space.small_below);
  walk_areas(do_part, creation_space.small_above);
  walk_areas(do_part, creation_space.large);
  walk_areas(do_part, creation_space.weaks);
  for (i = 0; i < S48_GENERATIONS_COUNT; i++) {
    walk_areas(do_part, generations[i].current_space->small_area);
    walk_areas(do_part, generations[i].current_space->large_area);
    walk_areas(do_part, generations[i].current_space->weaks_area);
  }
}

/* Special area initialization for the BIBOP undumper in the last
   generation */
/*****************************************************************************/
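
/* The undumper hands us one contiguous block, which is split into
   three consecutive pieces: [small | large | weaks].  Each piece
   becomes an area of the last generation's current space, with its
   frontier set from the corresponding hp displacement. */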
void s48_initialize_image_areas(long small_bytes, long small_hp_d,
                                long large_bytes, long large_hp_d,
                                long weaks_bytes, long weaks_hp_d) {
  int image_generation = S48_GENERATIONS_COUNT - 1;
  s48_address start;
  s48_address small_end;
  s48_address large_end;
  s48_address end;
  long img_bytes;

  /* Wrong image format? */
  if ((small_bytes < 0) || (large_bytes < 0) || (weaks_bytes < 0)) return;

  /* Get one block for the whole image */
  img_bytes = small_bytes + large_bytes + weaks_bytes;
  s48_allocate_image_area(img_bytes, &start, &end);
  if (img_bytes != (end - start)) {
    s48_gc_error("Image block is not OK!");
  }
  small_end = start + small_bytes;
  large_end = small_end + large_bytes;

  /* Set the start addresses */
  s48_set_new_small_start_addrB(start);
  s48_set_new_large_start_addrB(small_end);
  s48_set_new_weaks_start_addrB(large_end);

  /* Split this block and assign it to the last generation's areas */
  if (small_bytes > 0) {
    Area* small_img;
    small_img = s48_make_area(start, small_end,
                              start + S48_BYTES_TO_A_UNITS(small_hp_d),
                              image_generation, AREA_TYPE_SIZE_SMALL);
    small_img->action = GC_ACTION_IGNORE;
    generations[image_generation].current_space->small_area = small_img;
  }
  if (large_bytes > 0) {
    Area* large_img;
    large_img = s48_make_area(small_end, large_end,
                              small_end + S48_BYTES_TO_A_UNITS(large_hp_d),
                              image_generation, AREA_TYPE_SIZE_LARGE);
    large_img->action = GC_ACTION_IGNORE;
    generations[image_generation].current_space->large_area = large_img;
  }
  if (weaks_bytes > 0) {
    Area* weaks_img;
    weaks_img = s48_make_area(large_end, end,
                              large_end + S48_BYTES_TO_A_UNITS(weaks_hp_d),
                              image_generation, AREA_TYPE_SIZE_WEAKS);
    weaks_img->action = GC_ACTION_IGNORE;
    generations[image_generation].current_space->weaks_area = weaks_img;
  }
  return;
}
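
/* The getters below report the extents of the image areas set up
   above, presumably for use by the image writer/undumper: the size
   and the end address of the last generation's small, large and weak
   areas. */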

/*********************************************************************/
long s48_get_new_small_size(void) {
/*********************************************************************/
  s48_address start;
  s48_address end;
  start = generations[S48_GENERATIONS_COUNT - 1].current_space->small_area->start;
  end = generations[S48_GENERATIONS_COUNT - 1].current_space->small_area->end;
  return end - start;
}

/*********************************************************************/
long s48_get_new_large_size(void) {
/*********************************************************************/
  s48_address start;
  s48_address end;
  start = generations[S48_GENERATIONS_COUNT - 1].current_space->large_area->start;
  end = generations[S48_GENERATIONS_COUNT - 1].current_space->large_area->end;
  return end - start;
}

/*********************************************************************/
long s48_get_new_weaks_size(void) {
/*********************************************************************/
  s48_address start;
  s48_address end;
  start = generations[S48_GENERATIONS_COUNT - 1].current_space->weaks_area->start;
  end = generations[S48_GENERATIONS_COUNT - 1].current_space->weaks_area->end;
  return end - start;
}

/*********************************************************************/
char * s48_get_new_small_end_addr(void) {
/*********************************************************************/
  return generations[S48_GENERATIONS_COUNT - 1].current_space->small_area->end;
}

/*********************************************************************/
char * s48_get_new_large_end_addr(void) {
/*********************************************************************/
  return generations[S48_GENERATIONS_COUNT - 1].current_space->large_area->end;
}

/*********************************************************************/
char * s48_get_new_weaks_end_addr(void) {
/*********************************************************************/
  return generations[S48_GENERATIONS_COUNT - 1].current_space->weaks_area->end;
}

/*********************************************************************/
void s48_check_heap_sizeB() {
/*********************************************************************/
  unsigned long max_size = s48_max_heap_size(); /* cells */
  extern long s48_min_heap_size(void);
  unsigned long min_size = s48_min_heap_size(); /* cells */
  /* Check the maximum heap size given on the command line (flag -h)
     against the required minimum */
  if ((max_size != 0) && (min_size > max_size)) {
    s48_set_max_heap_sizeB(min_size);
    fprintf(stderr,
            "Maximum heap size %ld is too small, using %ld cells instead.\n",
            max_size,
            s48_max_heap_size());
  }
}