mark_rts.c

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
# include <stdio.h>
# include "private/gc_priv.h"

/* Data structure for list of root sets. */
/* We keep a hash table, so that we can filter out duplicate additions. */
/* Under Win32, we need to do a better job of filtering overlaps, so */
/* we resort to sequential search, and pay the price. */
/* This is really declared in gc_priv.h:
struct roots {
    ptr_t r_start;
    ptr_t r_end;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      struct roots * r_next;
#   endif
    GC_bool r_tmp;
        -- Delete before registering new dynamic libraries
};

struct roots GC_static_roots[MAX_ROOT_SETS];
*/

int GC_no_dls = 0;      /* Register dynamic library data segments. */

static int n_root_sets = 0;
        /* GC_static_roots[0..n_root_sets) contains the valid root sets. */

# if !defined(NO_DEBUGGING)
/* For debugging: */
void GC_print_static_roots()
{
    register int i;
    size_t total = 0;

    for (i = 0; i < n_root_sets; i++) {
        GC_printf2("From 0x%lx to 0x%lx ",
                   (unsigned long) GC_static_roots[i].r_start,
                   (unsigned long) GC_static_roots[i].r_end);
        if (GC_static_roots[i].r_tmp) {
            GC_printf0(" (temporary)\n");
        } else {
            GC_printf0("\n");
        }
        total += GC_static_roots[i].r_end - GC_static_roots[i].r_start;
    }
    GC_printf1("Total size: %ld\n", (unsigned long) total);
    if (GC_root_size != total) {
        GC_printf1("GC_root_size incorrect: %ld!!\n",
                   (unsigned long) GC_root_size);
    }
}
# endif /* NO_DEBUGGING */

/* Primarily for debugging support: */
/* Is the address p in one of the registered static */
/* root sections? */
GC_bool GC_is_static_root(p)
ptr_t p;
{
    static int last_root_set = MAX_ROOT_SETS;
    register int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end) return(TRUE);
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return(TRUE);
        }
    }
    return(FALSE);
}

#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
/*
#   define LOG_RT_SIZE 6
#   define RT_SIZE (1 << LOG_RT_SIZE)  -- Power of 2, may be != MAX_ROOT_SETS

    struct roots * GC_root_index[RT_SIZE];
        -- Hash table header.  Used only to check whether a range is
        -- already present.
        -- really defined in gc_priv.h
*/

static int rt_hash(addr)
char * addr;
{
    word result = (word) addr;

#   if CPP_WORDSZ > 8*LOG_RT_SIZE
      result ^= result >> 8*LOG_RT_SIZE;
#   endif
#   if CPP_WORDSZ > 4*LOG_RT_SIZE
      result ^= result >> 4*LOG_RT_SIZE;
#   endif
    result ^= result >> 2*LOG_RT_SIZE;
    result ^= result >> LOG_RT_SIZE;
    result &= (RT_SIZE-1);
    return(result);
}
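
/*
 * Illustrative only: with LOG_RT_SIZE == 6 (so RT_SIZE == 64) and,
 * say, a 32-bit word, the folds above reduce to
 *     result ^= result >> 24;
 *     result ^= result >> 12;
 *     result ^= result >> 6;
 *     result &= 63;
 * (the 8*LOG_RT_SIZE fold is skipped since 48 > 32), so every bit of
 * the address ends up influencing the 6-bit bucket index.
 */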

/* Is a range starting at b already in the table? If so return a */
/* pointer to it, else NIL. */
struct roots * GC_roots_present(b)
char *b;
{
    register int h = rt_hash(b);
    register struct roots *p = GC_root_index[h];

    while (p != 0) {
        if (p -> r_start == (ptr_t)b) return(p);
        p = p -> r_next;
    }
    return(0);  /* NIL */
}

/* Add the given root structure to the index. */
static void add_roots_to_index(p)
struct roots *p;
{
    register int h = rt_hash(p -> r_start);

    p -> r_next = GC_root_index[h];
    GC_root_index[h] = p;
}

# else /* MSWIN32 || MSWINCE || CYGWIN32 */

#   define add_roots_to_index(p)

# endif

word GC_root_size = 0;

void GC_add_roots(b, e)
char * b; char * e;
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    GC_add_roots_inner(b, e, FALSE);
    UNLOCK();
    ENABLE_SIGNALS();
}

/* Add [b,e) to the root set.  Adding the same interval a second time */
/* is a moderately fast noop, and hence benign.  We do not handle */
/* different but overlapping intervals efficiently.  (We do handle */
/* them correctly.) */
/* Tmp specifies that the interval may be deleted before */
/* reregistering dynamic libraries. */
void GC_add_roots_inner(b, e, tmp)
char * b; char * e;
GC_bool tmp;
{
    struct roots * old;

#   if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
      /* Spend the time to ensure that there are no overlapping */
      /* or adjacent intervals. */
      /* This could be done faster with e.g. a */
      /* balanced tree.  But the execution time here is */
      /* virtually guaranteed to be dominated by the time it */
      /* takes to scan the roots. */
      {
        register int i;

        for (i = 0; i < n_root_sets; i++) {
            old = GC_static_roots + i;
            if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
                /* Grow the existing entry; account for the added size */
                /* before overwriting the old bounds. */
                if ((ptr_t)b < old -> r_start) {
                    GC_root_size += (old -> r_start - (ptr_t)b);
                    old -> r_start = (ptr_t)b;
                }
                if ((ptr_t)e > old -> r_end) {
                    GC_root_size += ((ptr_t)e - old -> r_end);
                    old -> r_end = (ptr_t)e;
                }
                old -> r_tmp &= tmp;
                break;
            }
        }
        if (i < n_root_sets) {
            /* merge other overlapping intervals */
            struct roots *other;

            for (i++; i < n_root_sets; ) {
                other = GC_static_roots + i;
                b = (char *)(other -> r_start);
                e = (char *)(other -> r_end);
                if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
                    if ((ptr_t)b < old -> r_start) {
                        GC_root_size += (old -> r_start - (ptr_t)b);
                        old -> r_start = (ptr_t)b;
                    }
                    if ((ptr_t)e > old -> r_end) {
                        GC_root_size += ((ptr_t)e - old -> r_end);
                        old -> r_end = (ptr_t)e;
                    }
                    old -> r_tmp &= other -> r_tmp;
                    /* Delete this entry by moving the last one into its */
                    /* slot.  Do not advance i, so that the moved entry */
                    /* is examined as well. */
                    GC_root_size -= (other -> r_end - other -> r_start);
                    other -> r_start = GC_static_roots[n_root_sets-1].r_start;
                    other -> r_end = GC_static_roots[n_root_sets-1].r_end;
                    other -> r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
                    n_root_sets--;
                } else {
                    i++;
                }
            }
            return;
        }
      }
#   else
      old = GC_roots_present(b);
      if (old != 0) {
          if ((ptr_t)e <= old -> r_end) /* already there */ return;
          /* else extend */
          GC_root_size += (ptr_t)e - old -> r_end;
          old -> r_end = (ptr_t)e;
          return;
      }
#   endif
    if (n_root_sets == MAX_ROOT_SETS) {
        ABORT("Too many root sets\n");
    }
    GC_static_roots[n_root_sets].r_start = (ptr_t)b;
    GC_static_roots[n_root_sets].r_end = (ptr_t)e;
    GC_static_roots[n_root_sets].r_tmp = tmp;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      GC_static_roots[n_root_sets].r_next = 0;
#   endif
    add_roots_to_index(GC_static_roots + n_root_sets);
    GC_root_size += (ptr_t)e - (ptr_t)b;
    n_root_sets++;
}
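
/*
 * Illustrative sketch only (my_root_buf is a hypothetical client
 * object, not part of the collector): a statically allocated array
 * that holds pointers into the GC heap would be registered roughly as
 *
 *     static GC_PTR my_root_buf[100];
 *     GC_add_roots((char *)my_root_buf, (char *)(my_root_buf + 100));
 *
 * The end address is exclusive, and registering the same [b,e)
 * interval again is a cheap noop, per the comment above.
 */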

static GC_bool roots_were_cleared = FALSE;

void GC_clear_roots GC_PROTO((void))
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    roots_were_cleared = TRUE;
    n_root_sets = 0;
    GC_root_size = 0;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
    {
        register int i;

        for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
    }
#   endif
    UNLOCK();
    ENABLE_SIGNALS();
}

/* Internal use only; lock held. */
static void GC_remove_root_at_pos(i)
int i;
{
    GC_root_size -= (GC_static_roots[i].r_end - GC_static_roots[i].r_start);
    GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start;
    GC_static_roots[i].r_end = GC_static_roots[n_root_sets-1].r_end;
    GC_static_roots[i].r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
    n_root_sets--;
}
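
/*
 * The deletion above fills the hole by moving the last root set into
 * slot i, so root set order is not preserved and, where the hash index
 * exists, GC_root_index may still reference stale slots.  Callers are
 * therefore expected to rebuild the index afterwards (see
 * GC_rebuild_root_index below).
 */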

#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
static void GC_rebuild_root_index()
{
    register int i;

    for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
    for (i = 0; i < n_root_sets; i++)
        add_roots_to_index(GC_static_roots + i);
}
#endif

/* Internal use only; lock held. */
void GC_remove_tmp_roots()
{
    register int i;

    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_tmp) {
            GC_remove_root_at_pos(i);
        } else {
            i++;
        }
    }
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      GC_rebuild_root_index();
#   endif
}

#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
void GC_remove_roots(b, e)
char * b; char * e;
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    GC_remove_roots_inner(b, e);
    UNLOCK();
    ENABLE_SIGNALS();
}

/* Should only be called when the lock is held */
void GC_remove_roots_inner(b,e)
char * b; char * e;
{
    int i;

    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_start >= (ptr_t)b
            && GC_static_roots[i].r_end <= (ptr_t)e) {
            GC_remove_root_at_pos(i);
        } else {
            i++;
        }
    }
    GC_rebuild_root_index();
}
#endif /* !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) */

#if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION) || defined(CYGWIN32)
/* Workaround for the OS mapping and unmapping behind our back: */
/* Is the address p in one of the temporary static root sections? */
GC_bool GC_is_tmp_root(p)
ptr_t p;
{
    static int last_root_set = MAX_ROOT_SETS;
    register int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end)
        return GC_static_roots[last_root_set].r_tmp;
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return GC_static_roots[i].r_tmp;
        }
    }
    return(FALSE);
}
#endif /* MSWIN32 || _WIN32_WCE_EMULATION || CYGWIN32 */

ptr_t GC_approx_sp()
{
    VOLATILE word dummy;

    dummy = 42;  /* Force stack to grow if necessary.  Otherwise the */
                 /* later accesses might cause the kernel to think we're */
                 /* doing something wrong. */
#   ifdef _MSC_VER
#     pragma warning(disable:4172)
#   endif
#   ifdef __GNUC__
      /* Eliminate a warning from GCC about taking the address of a */
      /* local variable. */
      return __builtin_frame_address (0);
#   else
      return ((ptr_t)(&dummy));
#   endif /* __GNUC__ */
#   ifdef _MSC_VER
#     pragma warning(default:4172)
#   endif
}

/*
 * Data structure for excluded static roots.
 * Real declaration is in gc_priv.h.

struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};

struct exclusion GC_excl_table[MAX_EXCLUSIONS];
        -- Array of exclusions, ascending
        -- address order.
*/

size_t GC_excl_table_entries = 0;  /* Number of entries in use. */

/* Return the first exclusion range that includes an address >= start_addr */
/* Assumes the exclusion table contains at least one entry (namely the */
/* GC data structures). */
struct exclusion * GC_next_exclusion(start_addr)
ptr_t start_addr;
{
    size_t low = 0;
    size_t high = GC_excl_table_entries - 1;
    size_t mid;

    while (high > low) {
        mid = (low + high) >> 1;
        /* low <= mid < high */
        if ((word) GC_excl_table[mid].e_end <= (word) start_addr) {
            low = mid + 1;
        } else {
            high = mid;
        }
    }
    if ((word) GC_excl_table[low].e_end <= (word) start_addr) return 0;
    return GC_excl_table + low;
}
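
/*
 * Illustrative only: if the table holds exclusions [0x1000,0x2000) and
 * [0x5000,0x6000), then GC_next_exclusion(0x1800) returns the first
 * entry (its e_end, 0x2000, lies above 0x1800), GC_next_exclusion(0x3000)
 * returns the second, and GC_next_exclusion(0x7000) returns 0.  The
 * binary search maintains the invariant that the first entry with
 * e_end > start_addr, if it exists, has an index in [low, high].
 */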

void GC_exclude_static_roots(start, finish)
GC_PTR start;
GC_PTR finish;
{
    struct exclusion * next;
    size_t next_index, i;

    if (0 == GC_excl_table_entries) {
        next = 0;
    } else {
        next = GC_next_exclusion(start);
    }
    if (0 != next) {
        if ((word)(next -> e_start) < (word) finish) {
            /* incomplete error check. */
            ABORT("exclusion ranges overlap");
        }
        if ((word)(next -> e_start) == (word) finish) {
            /* extend old range backwards */
            next -> e_start = (ptr_t)start;
            return;
        }
        /* Check for a full table before shifting, so that the shift */
        /* cannot write past the end of GC_excl_table. */
        if (GC_excl_table_entries == MAX_EXCLUSIONS)
            ABORT("Too many exclusions");
        next_index = next - GC_excl_table;
        for (i = GC_excl_table_entries; i > next_index; --i) {
            GC_excl_table[i] = GC_excl_table[i-1];
        }
    } else {
        next_index = GC_excl_table_entries;
    }
    if (GC_excl_table_entries == MAX_EXCLUSIONS) ABORT("Too many exclusions");
    GC_excl_table[next_index].e_start = (ptr_t)start;
    GC_excl_table[next_index].e_end = (ptr_t)finish;
    ++GC_excl_table_entries;
}
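
/*
 * Illustrative sketch only (big_io_buffer is a hypothetical client
 * object): a large pointer-free static buffer sitting inside an
 * otherwise-scanned data segment could be excluded with
 *
 *     static char big_io_buffer[1 << 20];
 *     GC_exclude_static_roots((GC_PTR)big_io_buffer,
 *                             (GC_PTR)(big_io_buffer + sizeof(big_io_buffer)));
 *
 * New ranges must not overlap existing exclusions; overlaps abort above.
 */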

/* Invoke push_conditional on ranges that are not excluded. */
void GC_push_conditional_with_exclusions(bottom, top, all)
ptr_t bottom;
ptr_t top;
int all;
{
    struct exclusion * next;
    ptr_t excl_start;

    while (bottom < top) {
        next = GC_next_exclusion(bottom);
        if (0 == next || (excl_start = next -> e_start) >= top) {
            GC_push_conditional(bottom, top, all);
            return;
        }
        if (excl_start > bottom) GC_push_conditional(bottom, excl_start, all);
        bottom = next -> e_end;
    }
}
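
/*
 * Illustrative only: with [bottom,top) == [0x1000,0x9000) and a single
 * exclusion [0x4000,0x5000), the loop above pushes [0x1000,0x4000),
 * advances bottom to 0x5000, finds no exclusion starting below top on
 * the next iteration, and pushes [0x5000,0x9000) before returning.
 */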

/*
 * In the absence of threads, push the stack contents.
 * In the presence of threads, push enough of the current stack
 * to ensure that callee-save registers saved in collector frames have been
 * seen.
 */
void GC_push_current_stack(cold_gc_frame)
ptr_t cold_gc_frame;
{
#   if defined(THREADS)
      if (0 == cold_gc_frame) return;
#     ifdef STACK_GROWS_DOWN
        GC_push_all_eager(GC_approx_sp(), cold_gc_frame);
        /* For IA64, the register stack backing store is handled */
        /* in the thread-specific code. */
#     else
        GC_push_all_eager( cold_gc_frame, GC_approx_sp() );
#     endif
#   else
#     ifdef STACK_GROWS_DOWN
        GC_push_all_stack_partially_eager( GC_approx_sp(), GC_stackbottom,
                                           cold_gc_frame );
#       ifdef IA64
          /* We also need to push the register stack backing store. */
          /* This should really be done in the same way as the */
          /* regular stack.  For now we fudge it a bit. */
          /* Note that the backing store grows up, so we can't use */
          /* GC_push_all_stack_partially_eager. */
          {
            extern word GC_save_regs_ret_val;
                        /* Previously set to backing store pointer. */
            ptr_t bsp = (ptr_t) GC_save_regs_ret_val;
            ptr_t cold_gc_bs_pointer;

            if (GC_all_interior_pointers) {
                cold_gc_bs_pointer = bsp - 2048;
                if (cold_gc_bs_pointer < BACKING_STORE_BASE) {
                    cold_gc_bs_pointer = BACKING_STORE_BASE;
                } else {
                    GC_push_all_stack(BACKING_STORE_BASE, cold_gc_bs_pointer);
                }
            } else {
                cold_gc_bs_pointer = BACKING_STORE_BASE;
            }
            GC_push_all_eager(cold_gc_bs_pointer, bsp);
            /* All values should be sufficiently aligned that we */
            /* don't have to worry about the boundary. */
          }
#       endif
#     else
        GC_push_all_stack_partially_eager( GC_stackbottom, GC_approx_sp(),
                                           cold_gc_frame );
#     endif
#   endif /* !THREADS */
}

/*
 * Push GC internal roots.  Only called if there is some reason to believe
 * these would not otherwise get registered.
 */
void GC_push_gc_structures GC_PROTO((void))
{
    GC_push_finalizer_structures();
    GC_push_stubborn_structures();
#   if defined(THREADS)
      GC_push_thread_structures();
#   endif
}

#ifdef THREAD_LOCAL_ALLOC
  void GC_mark_thread_local_free_lists();
#endif

void GC_cond_register_dynamic_libraries()
{
#   if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
        || defined(CYGWIN32) || defined(PCR)) && !defined(SRC_M3)
      GC_remove_tmp_roots();
      if (!GC_no_dls) GC_register_dynamic_libraries();
#   else
      GC_no_dls = TRUE;
#   endif
}

/*
 * Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
 * on groups of pointers) on every top level accessible pointer.
 * If all is FALSE, arrange to push only possibly altered values.
 * Cold_gc_frame is an address inside a GC frame that
 * remains valid until all marking is complete.
 * A zero value indicates that it's OK to miss some
 * register values.
 */
void GC_push_roots(all, cold_gc_frame)
GC_bool all;
ptr_t cold_gc_frame;
{
    int i;
    int kind;

    /*
     * Next push static data.  This must happen early on, since it's
     * not robust against mark stack overflow.
     */
    /* Reregister dynamic libraries, in case one got added. */
    /* There is some argument for doing this as late as possible, */
    /* especially on win32, where it can change asynchronously. */
    /* In those cases, we do it here.  But on other platforms, it's */
    /* not safe with the world stopped, so we do it earlier. */
#   if !defined(REGISTER_LIBRARIES_EARLY)
      GC_cond_register_dynamic_libraries();
#   endif

    /* Mark everything in static data areas */
    for (i = 0; i < n_root_sets; i++) {
        GC_push_conditional_with_exclusions(
            GC_static_roots[i].r_start,
            GC_static_roots[i].r_end, all);
    }

    /* Mark all free list header blocks, if those were allocated from */
    /* the garbage collected heap.  This makes sure they don't */
    /* disappear if we are not marking from static data.  It also */
    /* saves us the trouble of scanning them, and possibly that of */
    /* marking the freelists. */
    for (kind = 0; kind < GC_n_kinds; kind++) {
        GC_PTR base = GC_base(GC_obj_kinds[kind].ok_freelist);
        if (0 != base) {
            GC_set_mark_bit(base);
        }
    }

    /* Mark from GC internal roots if those might otherwise have */
    /* been excluded. */
    if (GC_no_dls || roots_were_cleared) {
        GC_push_gc_structures();
    }

    /* Mark thread local free lists, even if their mark */
    /* descriptor excludes the link field. */
    /* If the world is not stopped, this is unsafe.  It is */
    /* also unnecessary, since we will do this again with the */
    /* world stopped. */
#   ifdef THREAD_LOCAL_ALLOC
      if (GC_world_stopped) GC_mark_thread_local_free_lists();
#   endif

    /*
     * Now traverse stacks, and mark from register contents.
     * These must be done last, since they can legitimately overflow
     * the mark stack.
     */
#   ifdef USE_GENERIC_PUSH_REGS
      GC_generic_push_regs(cold_gc_frame);
      /* Also pushes stack, so that we catch callee-save registers */
      /* saved inside the GC_push_regs frame. */
#   else
      /*
       * push registers - i.e., call GC_push_one(r) for each
       * register contents r.
       */
      GC_push_regs(); /* usually defined in machine_dep.c */
      GC_push_current_stack(cold_gc_frame);
      /* In the threads case, this only pushes collector frames. */
      /* In the case of linux threads on IA64, the hot section of */
      /* the main stack is marked here, but the register stack */
      /* backing store is handled in the threads-specific code. */
#   endif
    if (GC_push_other_roots != 0) (*GC_push_other_roots)();
      /* In the threads case, this also pushes thread stacks. */
      /* Note that without interior pointer recognition lots */
      /* of stuff may have been pushed already, and this */
      /* should be careful about mark stack overflows. */
}