finalize.c

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (C) 2007 Free Software Foundation, Inc
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, February 1, 1996 1:19 pm PST */

# define I_HIDE_POINTERS
# include "private/gc_pmark.h"

# ifdef FINALIZE_ON_DEMAND
    int GC_finalize_on_demand = 1;
# else
    int GC_finalize_on_demand = 0;
# endif

# ifdef JAVA_FINALIZATION
    int GC_java_finalization = 1;
# else
    int GC_java_finalization = 0;
# endif

/* Type of mark procedure used for marking from finalizable object.    */
/* This procedure normally does not mark the object, only its          */
/* descendants.                                                         */
typedef void finalization_mark_proc(/* ptr_t finalizable_obj_ptr */);

# define HASH3(addr,size,log_size) \
    ((((word)(addr) >> 3) ^ ((word)(addr) >> (3+(log_size)))) \
     & ((size) - 1))
# define HASH2(addr,log_size) HASH3(addr, 1 << log_size, log_size)

struct hash_chain_entry {
    word hidden_key;
    struct hash_chain_entry * next;
};

unsigned GC_finalization_failures = 0;
        /* Number of finalization requests that failed for lack of memory. */

static struct disappearing_link {
    struct hash_chain_entry prolog;
#   define dl_hidden_link prolog.hidden_key
                                /* Field to be cleared.         */
#   define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
#   define dl_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)
    word dl_hidden_obj;         /* Pointer to object base       */
} **dl_head = 0;

static signed_word log_dl_table_size = -1;
                        /* Binary log of                                */
                        /* current size of array pointed to by dl_head. */
                        /* -1 ==> size is 0.                            */

word GC_dl_entries = 0; /* Number of entries currently in disappearing  */
                        /* link table.                                  */

static struct finalizable_object {
    struct hash_chain_entry prolog;
#   define fo_hidden_base prolog.hidden_key
                                /* Pointer to object base.      */
                                /* No longer hidden once object */
                                /* is on finalize_now queue.    */
#   define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
#   define fo_set_next(x,y) (x) -> prolog.next = (struct hash_chain_entry *)(y)
    GC_finalization_proc fo_fn; /* Finalizer.                   */
    ptr_t fo_client_data;
    word fo_object_size;        /* In bytes.                    */
    finalization_mark_proc * fo_mark_proc;      /* Mark-through procedure */
} **fo_head = 0;

struct finalizable_object * GC_finalize_now = 0;
        /* List of objects that should be finalized now.        */

static signed_word log_fo_table_size = -1;

word GC_fo_entries = 0;

void GC_push_finalizer_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(&dl_head), (ptr_t)(&dl_head) + sizeof(word));
    GC_push_all((ptr_t)(&fo_head), (ptr_t)(&fo_head) + sizeof(word));
    GC_push_all((ptr_t)(&GC_finalize_now),
                (ptr_t)(&GC_finalize_now) + sizeof(word));
}

/* Double the size of a hash table. *log_size_ptr is the log of its     */
/* current size.  May be a noop.                                        */
/* *table is a pointer to an array of hash headers.  If we succeed, we  */
/* update both *table and *log_size_ptr.                                */
/* Lock is held.  Signals are disabled.                                 */
void GC_grow_table(table, log_size_ptr)
struct hash_chain_entry ***table;
signed_word * log_size_ptr;
{
    register word i;
    register struct hash_chain_entry *p;
    int log_old_size = *log_size_ptr;
    register int log_new_size = log_old_size + 1;
    word old_size = ((log_old_size == -1)? 0: (1 << log_old_size));
    register word new_size = 1 << log_new_size;
    struct hash_chain_entry **new_table = (struct hash_chain_entry **)
        GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                (size_t)new_size * sizeof(struct hash_chain_entry *), NORMAL);

    if (new_table == 0) {
        if (table == 0) {
            ABORT("Insufficient space for initial table allocation");
        } else {
            return;
        }
    }
    for (i = 0; i < old_size; i++) {
      p = (*table)[i];
      while (p != 0) {
        register ptr_t real_key = (ptr_t)REVEAL_POINTER(p -> hidden_key);
        register struct hash_chain_entry *next = p -> next;
        register int new_hash = HASH3(real_key, new_size, log_new_size);

        p -> next = new_table[new_hash];
        new_table[new_hash] = p;
        p = next;
      }
    }
    *log_size_ptr = log_new_size;
    *table = new_table;
}

# if defined(__STDC__) || defined(__cplusplus)
    int GC_register_disappearing_link(GC_PTR * link)
# else
    int GC_register_disappearing_link(link)
    GC_PTR * link;
# endif
{
    ptr_t base;

    base = (ptr_t)GC_base((GC_PTR)link);
    if (base == 0)
        ABORT("Bad arg to GC_register_disappearing_link");
    return(GC_general_register_disappearing_link(link, base));
}

# if defined(__STDC__) || defined(__cplusplus)
    int GC_general_register_disappearing_link(GC_PTR * link, GC_PTR obj)
# else
    int GC_general_register_disappearing_link(link, obj)
    GC_PTR * link;
    GC_PTR obj;
# endif
{
    struct disappearing_link *curr_dl;
    int index;
    struct disappearing_link * new_dl;
    DCL_LOCK_STATE;

    if ((word)link & (ALIGNMENT-1))
        ABORT("Bad arg to GC_general_register_disappearing_link");
#   ifdef THREADS
        DISABLE_SIGNALS();
        LOCK();
#   endif
    if (log_dl_table_size == -1
        || GC_dl_entries > ((word)1 << log_dl_table_size)) {
#       ifndef THREADS
            DISABLE_SIGNALS();
#       endif
        GC_grow_table((struct hash_chain_entry ***)(&dl_head),
                      &log_dl_table_size);
#       ifdef CONDPRINT
          if (GC_print_stats) {
            GC_printf1("Grew dl table to %lu entries\n",
                       (unsigned long)(1 << log_dl_table_size));
          }
#       endif
#       ifndef THREADS
            ENABLE_SIGNALS();
#       endif
    }
    index = HASH2(link, log_dl_table_size);
    curr_dl = dl_head[index];
    for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
            curr_dl -> dl_hidden_obj = HIDE_POINTER(obj);
#           ifdef THREADS
                UNLOCK();
                ENABLE_SIGNALS();
#           endif
            return(1);
        }
    }
    new_dl = (struct disappearing_link *)
        GC_INTERNAL_MALLOC(sizeof(struct disappearing_link), NORMAL);
    if (0 == new_dl) {
#     ifdef THREADS
        UNLOCK();
        ENABLE_SIGNALS();
#     endif
      new_dl = (struct disappearing_link *)
          GC_oom_fn(sizeof(struct disappearing_link));
      if (0 == new_dl) {
          GC_finalization_failures++;
          return(0);
      }
      /* It's not likely we'll make it here, but ... */
#     ifdef THREADS
        DISABLE_SIGNALS();
        LOCK();
#     endif
    }
    new_dl -> dl_hidden_obj = HIDE_POINTER(obj);
    new_dl -> dl_hidden_link = HIDE_POINTER(link);
    dl_set_next(new_dl, dl_head[index]);
    dl_head[index] = new_dl;
    GC_dl_entries++;
#   ifdef THREADS
        UNLOCK();
        ENABLE_SIGNALS();
#   endif
    return(0);
}
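
/* Illustrative usage sketch (not part of this file, compiled out):      */
/* building a weak reference on top of the disappearing-link machinery   */
/* above.  The target pointer is stored in hidden form so the slot does  */
/* not keep the target alive; GC_finalize() clears the slot when the     */
/* target becomes unreachable.  Assumes the HIDE_POINTER/REVEAL_POINTER  */
/* macros that gc.h exports when I_HIDE_POINTERS is defined, plus the    */
/* public GC_MALLOC_ATOMIC and GC_call_with_alloc_lock entry points; the */
/* weak_ref type and helper names are hypothetical.                      */
#if 0
#define I_HIDE_POINTERS
#include "gc.h"

typedef struct weak_ref {
    GC_hidden_pointer hidden;   /* ~target while target is live, 0 after */
} weak_ref;

static weak_ref * weak_ref_new(void * target)
{
    /* Atomic allocation: the hidden word is not scanned as a pointer,  */
    /* and the link is auto-unregistered when the weak_ref itself dies. */
    weak_ref * wr = (weak_ref *)GC_MALLOC_ATOMIC(sizeof(weak_ref));

    wr -> hidden = HIDE_POINTER(target);
    (void) GC_general_register_disappearing_link((void **)&(wr -> hidden),
                                                 target);
    return wr;
}

/* Runs with the allocation lock held, so the collector cannot clear    */
/* the link between the test and the reveal.                            */
static void * weak_ref_get_locked(void * p)
{
    weak_ref * wr = (weak_ref *)p;

    return (0 == wr -> hidden) ? 0 : (void *)REVEAL_POINTER(wr -> hidden);
}

static void * weak_ref_get(weak_ref * wr)
{
    return GC_call_with_alloc_lock(weak_ref_get_locked, wr);
}
#endif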
# if defined(__STDC__) || defined(__cplusplus)
    int GC_unregister_disappearing_link(GC_PTR * link)
# else
    int GC_unregister_disappearing_link(link)
    GC_PTR * link;
# endif
{
    struct disappearing_link *curr_dl, *prev_dl;
    int index;
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    index = HASH2(link, log_dl_table_size);
    if (((unsigned long)link & (ALIGNMENT-1))) goto out;
    prev_dl = 0; curr_dl = dl_head[index];
    while (curr_dl != 0) {
        if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
            if (prev_dl == 0) {
                dl_head[index] = dl_next(curr_dl);
            } else {
                dl_set_next(prev_dl, dl_next(curr_dl));
            }
            GC_dl_entries--;
            UNLOCK();
            ENABLE_SIGNALS();
#           ifdef DBG_HDRS_ALL
                dl_set_next(curr_dl, 0);
#           else
                GC_free((GC_PTR)curr_dl);
#           endif
            return(1);
        }
        prev_dl = curr_dl;
        curr_dl = dl_next(curr_dl);
    }
out:
    UNLOCK();
    ENABLE_SIGNALS();
    return(0);
}

/* Possible finalization_marker procedures.  Note that mark stack       */
/* overflow is handled by the caller, and is not a disaster.            */
GC_API void GC_normal_finalize_mark_proc(p)
ptr_t p;
{
    hdr * hhdr = HDR(p);

    PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top,
             &(GC_mark_stack[GC_mark_stack_size]));
}

/* This only pays very partial attention to the mark descriptor.       */
/* It does the right thing for normal and atomic objects, and treats   */
/* most others as normal.                                               */
GC_API void GC_ignore_self_finalize_mark_proc(p)
ptr_t p;
{
    hdr * hhdr = HDR(p);
    word descr = hhdr -> hb_descr;
    ptr_t q, r;
    ptr_t scan_limit;
    ptr_t target_limit = p + WORDS_TO_BYTES(hhdr -> hb_sz) - 1;

    if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
       scan_limit = p + descr - sizeof(word);
    } else {
       scan_limit = target_limit + 1 - sizeof(word);
    }
    for (q = p; q <= scan_limit; q += ALIGNMENT) {
        r = *(ptr_t *)q;
        if (r < p || r > target_limit) {
            GC_PUSH_ONE_HEAP((word)r, q);
        }
    }
}

/*ARGSUSED*/
GC_API void GC_null_finalize_mark_proc(p)
ptr_t p;
{
}

/* Possible finalization_marker procedures.  Note that mark stack       */
/* overflow is handled by the caller, and is not a disaster.            */
GC_API void GC_unreachable_finalize_mark_proc(p)
ptr_t p;
{
    return GC_normal_finalize_mark_proc(p);
}

/* Register a finalization function.  See gc.h for details.             */
/* In the nonthreads case, we try to avoid disabling signals,           */
/* since it can be expensive.  Threads packages typically               */
/* make it cheaper.                                                     */
/* The last parameter is a procedure that determines                    */
/* marking for finalization ordering.  Any objects marked               */
/* by that procedure will be guaranteed to not have been                */
/* finalized when this finalizer is invoked.                            */
GC_API void GC_register_finalizer_inner(obj, fn, cd, ofn, ocd, mp)
GC_PTR obj;
GC_finalization_proc fn;
GC_PTR cd;
GC_finalization_proc * ofn;
GC_PTR * ocd;
finalization_mark_proc * mp;
{
    ptr_t base;
    struct finalizable_object * curr_fo, * prev_fo;
    int index;
    struct finalizable_object *new_fo;
    hdr *hhdr;
    DCL_LOCK_STATE;

#   ifdef THREADS
        DISABLE_SIGNALS();
        LOCK();
#   endif
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
#       ifndef THREADS
            DISABLE_SIGNALS();
#       endif
        GC_grow_table((struct hash_chain_entry ***)(&fo_head),
                      &log_fo_table_size);
#       ifdef CONDPRINT
          if (GC_print_stats) {
            GC_printf1("Grew fo table to %lu entries\n",
                       (unsigned long)(1 << log_fo_table_size));
          }
#       endif
#       ifndef THREADS
            ENABLE_SIGNALS();
#       endif
    }
    /* In the THREADS case signals are disabled and we hold allocation  */
    /* lock; otherwise neither is true.  Proceed carefully.             */
    base = (ptr_t)obj;
    index = HASH2(base, log_fo_table_size);
    prev_fo = 0; curr_fo = fo_head[index];
    while (curr_fo != 0) {
        if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
            /* Interruption by a signal in the middle of this           */
            /* should be safe.  The client may see only *ocd            */
            /* updated, but we'll declare that to be his                */
            /* problem.                                                 */
            if (ocd) *ocd = (GC_PTR) curr_fo -> fo_client_data;
            if (ofn) *ofn = curr_fo -> fo_fn;
            /* Delete the structure for base. */
            if (prev_fo == 0) {
                fo_head[index] = fo_next(curr_fo);
            } else {
                fo_set_next(prev_fo, fo_next(curr_fo));
            }
            if (fn == 0) {
                GC_fo_entries--;
                /* May not happen if we get a signal.  But a high       */
                /* estimate will only make the table larger than        */
                /* necessary.                                           */
#               if !defined(THREADS) && !defined(DBG_HDRS_ALL)
                    GC_free((GC_PTR)curr_fo);
#               endif
            } else {
                curr_fo -> fo_fn = fn;
                curr_fo -> fo_client_data = (ptr_t)cd;
                curr_fo -> fo_mark_proc = mp;
                /* Reinsert it.  We deleted it first to maintain        */
                /* consistency in the event of a signal.                */
                if (prev_fo == 0) {
                    fo_head[index] = curr_fo;
                } else {
                    fo_set_next(prev_fo, curr_fo);
                }
            }
#           ifdef THREADS
                UNLOCK();
                ENABLE_SIGNALS();
#           endif
            return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
    }
    if (ofn) *ofn = 0;
    if (ocd) *ocd = 0;
    if (fn == 0) {
#       ifdef THREADS
            UNLOCK();
            ENABLE_SIGNALS();
#       endif
        return;
    }
    GET_HDR(base, hhdr);
    if (0 == hhdr) {
        /* We won't collect it, hence finalizer wouldn't be run. */
#       ifdef THREADS
            UNLOCK();
            ENABLE_SIGNALS();
#       endif
        return;
    }
    new_fo = (struct finalizable_object *)
        GC_INTERNAL_MALLOC(sizeof(struct finalizable_object), NORMAL);
    if (0 == new_fo) {
#     ifdef THREADS
        UNLOCK();
        ENABLE_SIGNALS();
#     endif
      new_fo = (struct finalizable_object *)
          GC_oom_fn(sizeof(struct finalizable_object));
      if (0 == new_fo) {
          GC_finalization_failures++;
          return;
      }
      /* It's not likely we'll make it here, but ... */
#     ifdef THREADS
        DISABLE_SIGNALS();
        LOCK();
#     endif
    }
    new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, fo_head[index]);
    GC_fo_entries++;
    fo_head[index] = new_fo;
#   ifdef THREADS
        UNLOCK();
        ENABLE_SIGNALS();
#   endif
}

# if defined(__STDC__)
    void GC_register_finalizer(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_normal_finalize_mark_proc);
}
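
/* Illustrative usage sketch (not part of this file, compiled out):      */
/* registering a finalizer through the public GC_register_finalizer      */
/* entry point declared in gc.h, so a non-GC resource held by a heap     */
/* object is released when the object becomes unreachable.  The buffer   */
/* type and helper names are hypothetical.                               */
#if 0
#include <stdlib.h>
#include "gc.h"

struct buffer {
    void * external;    /* not GC-managed; released by the finalizer */
};

static void buffer_finalizer(GC_PTR obj, GC_PTR client_data)
{
    struct buffer * b = (struct buffer *)obj;

    free(b -> external);
    b -> external = 0;
    (void) client_data;   /* passed through unchanged from registration */
}

static struct buffer * buffer_new(size_t n)
{
    struct buffer * b = (struct buffer *)GC_MALLOC(sizeof(struct buffer));

    b -> external = malloc(n);
    /* Passing 0 for the last two arguments discards any previously     */
    /* registered finalizer and client data instead of returning them.  */
    GC_register_finalizer(b, buffer_finalizer, 0, 0, 0);
    return b;
}
#endif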
# if defined(__STDC__)
    void GC_register_finalizer_ignore_self(void * obj,
                                GC_finalization_proc fn, void * cd,
                                GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer_ignore_self(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_ignore_self_finalize_mark_proc);
}

# if defined(__STDC__)
    void GC_register_finalizer_no_order(void * obj,
                                GC_finalization_proc fn, void * cd,
                                GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer_no_order(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_null_finalize_mark_proc);
}

# if defined(__STDC__)
    void GC_register_finalizer_unreachable(void * obj,
                                GC_finalization_proc fn, void * cd,
                                GC_finalization_proc *ofn, void ** ocd)
# else
    void GC_register_finalizer_unreachable(obj, fn, cd, ofn, ocd)
    GC_PTR obj;
    GC_finalization_proc fn;
    GC_PTR cd;
    GC_finalization_proc * ofn;
    GC_PTR * ocd;
# endif
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_unreachable_finalize_mark_proc);
}
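
/* Illustrative note (not part of this file, compiled out): the wrappers */
/* above differ only in the mark procedure, i.e. in how much of the      */
/* object graph is traced to establish finalization order.  A hedged     */
/* sketch: for objects that may form cycles of finalizable objects, the  */
/* no_order variant avoids the "Finalization cycle" warning issued by    */
/* GC_finalize(), at the cost of losing ordering guarantees.  The node   */
/* type and helper names are hypothetical.                               */
#if 0
#include "gc.h"

struct cyc_node { struct cyc_node * peer; };

static void node_finalizer(GC_PTR obj, GC_PTR cd)
{
    (void)obj; (void)cd;   /* a real finalizer would release resources here */
}

static struct cyc_node * cyc_node_new(void)
{
    struct cyc_node * n = (struct cyc_node *)GC_MALLOC(sizeof(struct cyc_node));

    n -> peer = 0;
    /* Java-style: peers reachable from n are not excluded from this    */
    /* finalization round on n's account.                               */
    GC_register_finalizer_no_order(n, node_finalizer, 0, 0, 0);
    return n;
}
#endif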
#ifndef NO_DEBUGGING
void GC_dump_finalization()
{
    struct disappearing_link * curr_dl;
    struct finalizable_object * curr_fo;
    ptr_t real_ptr, real_link;
    int dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    int i;

    GC_printf0("Disappearing links:\n");
    for (i = 0; i < dl_size; i++) {
      for (curr_dl = dl_head[i]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        GC_printf2("Object: 0x%lx, Link:0x%lx\n", real_ptr, real_link);
      }
    }
    GC_printf0("Finalizers:\n");
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        GC_printf1("Finalizable object: 0x%lx\n", real_ptr);
      }
    }
}
#endif

/* Called with world stopped.  Cause disappearing links to disappear,   */
/* and invoke finalizers.                                               */
void GC_finalize()
{
    struct disappearing_link * curr_dl, * prev_dl, * next_dl;
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr, real_link;
    register int i;
    int dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);

    /* Make disappearing links disappear */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
    /* Mark all objects reachable via chains of 1 or more pointers      */
    /* from finalizable objects.                                        */
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %lx\n", real_ptr);
            }
        }
      }
    }
    /* Enqueue for finalization all objects that are still              */
    /* unreachable.                                                     */
    GC_words_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table */
            next_fo = fo_next(curr_fo);
            if (prev_fo == 0) {
              fo_head[i] = next_fo;
            } else {
              fo_set_next(prev_fo, next_fo);
            }
            GC_fo_entries--;
            /* Add to list of objects awaiting finalization. */
            fo_set_next(curr_fo, GC_finalize_now);
            GC_finalize_now = curr_fo;
            /* unhide object pointer so any future collections will     */
            /* see it.                                                   */
            curr_fo -> fo_hidden_base =
                        (word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
            GC_words_finalized +=
                        ALIGNED_WORDS(curr_fo -> fo_object_size)
                        + ALIGNED_WORDS(sizeof(struct finalizable_object));
            GC_ASSERT(GC_is_marked(GC_base((ptr_t)curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }
    if (GC_java_finalization) {
      /* make sure we mark everything reachable from objects finalized
         using the no_order mark_proc */
      for (curr_fo = GC_finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            }
            if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
                GC_set_mark_bit(real_ptr);
            }
        }
      }
      /* now revive finalize-when-unreachable objects reachable from
         other finalizable objects */
      curr_fo = GC_finalize_now;
      prev_fo = 0;
      while (curr_fo != 0) {
        next_fo = fo_next(curr_fo);
        if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
            real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
            if (!GC_is_marked(real_ptr)) {
                GC_set_mark_bit(real_ptr);
            } else {
                if (prev_fo == 0)
                    GC_finalize_now = next_fo;
                else
                    fo_set_next(prev_fo, next_fo);
                curr_fo -> fo_hidden_base =
                            (word) HIDE_POINTER(curr_fo -> fo_hidden_base);
                GC_words_finalized -=
                            ALIGNED_WORDS(curr_fo -> fo_object_size)
                            + ALIGNED_WORDS(sizeof(struct finalizable_object));
                i = HASH2(real_ptr, log_fo_table_size);
                fo_set_next (curr_fo, fo_head[i]);
                GC_fo_entries++;
                fo_head[i] = curr_fo;
                curr_fo = prev_fo;
            }
        }
        prev_fo = curr_fo;
        curr_fo = next_fo;
      }
    }
    /* Remove dangling disappearing links. */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_link = GC_base((ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link));
        if (real_link != 0 && !GC_is_marked(real_link)) {
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
}

#ifndef JAVA_FINALIZATION_NOT_NEEDED

/* Enqueue all remaining finalizers to be run - Assumes lock is
 * held, and signals are disabled */
void GC_enqueue_all_finalizers()
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    register int i;
    int fo_size;

    fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    GC_words_finalized = 0;
    for (i = 0; i < fo_size; i++) {
        curr_fo = fo_head[i];
        prev_fo = 0;
        while (curr_fo != 0) {
            real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
            GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            GC_set_mark_bit(real_ptr);
            /* Delete from hash table */
            next_fo = fo_next(curr_fo);
            if (prev_fo == 0) {
                fo_head[i] = next_fo;
            } else {
                fo_set_next(prev_fo, next_fo);
            }
            GC_fo_entries--;
            /* Add to list of objects awaiting finalization. */
            fo_set_next(curr_fo, GC_finalize_now);
            GC_finalize_now = curr_fo;
            /* unhide object pointer so any future collections will     */
            /* see it.                                                   */
            curr_fo -> fo_hidden_base =
                        (word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
            GC_words_finalized +=
                        ALIGNED_WORDS(curr_fo -> fo_object_size)
                        + ALIGNED_WORDS(sizeof(struct finalizable_object));
            curr_fo = next_fo;
        }
    }
    return;
}

/* Invoke all remaining finalizers that haven't yet been run.
 * This is needed for strict compliance with the Java standard,
 * which can make the runtime guarantee that all finalizers are run.
 * Unfortunately, the Java standard implies we have to keep running
 * finalizers until there are no more left, a potential infinite loop.
 * YUCK.
 * Note that this is even more dangerous than the usual Java
 * finalizers, in that objects reachable from static variables
 * may have been finalized when these finalizers are run.
 * Finalizers run at this point must be prepared to deal with a
 * mostly broken world.
 * This routine is externally callable, so is called without
 * the allocation lock.
 */
GC_API void GC_finalize_all()
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    while (GC_fo_entries > 0) {
      GC_enqueue_all_finalizers();
      UNLOCK();
      ENABLE_SIGNALS();
      GC_INVOKE_FINALIZERS();
      DISABLE_SIGNALS();
      LOCK();
    }
    UNLOCK();
    ENABLE_SIGNALS();
}
#endif

/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
/* finalizers can only be called from some kind of `safe state' and     */
/* getting into that safe state is expensive.)                          */
int GC_should_invoke_finalizers GC_PROTO((void))
{
    return GC_finalize_now != 0;
}
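
/* Illustrative usage sketch (not part of this file, compiled out): with */
/* GC_finalize_on_demand set, an application loop can poll               */
/* GC_should_invoke_finalizers() and run pending finalizers only at a    */
/* point it considers safe.  Assumes GC_should_invoke_finalizers,        */
/* GC_invoke_finalizers, GC_finalize_on_demand and GC_INIT are exposed   */
/* through this version's gc.h; the loop structure is hypothetical.      */
#if 0
#include "gc.h"

static void init_gc_for_polling(void)
{
    GC_INIT();
    GC_finalize_on_demand = 1;  /* keep finalizers out of allocation paths */
}

static void event_loop_iteration(void)
{
    /* ... application work ... */
    if (GC_should_invoke_finalizers()) {
        /* Runs the queued finalizers; returns how many were invoked. */
        (void) GC_invoke_finalizers();
    }
}
#endif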
/* Invoke finalizers for all objects that are ready to be finalized.    */
/* Should be called without allocation lock.                            */
int GC_invoke_finalizers()
{
    struct finalizable_object * curr_fo;
    int count = 0;
    word mem_freed_before;
    DCL_LOCK_STATE;

    while (GC_finalize_now != 0) {
#       ifdef THREADS
            DISABLE_SIGNALS();
            LOCK();
#       endif
        if (count == 0) {
            mem_freed_before = GC_mem_freed;
        }
        curr_fo = GC_finalize_now;
#       ifdef THREADS
            if (curr_fo != 0) GC_finalize_now = fo_next(curr_fo);
            UNLOCK();
            ENABLE_SIGNALS();
            if (curr_fo == 0) break;
#       else
            GC_finalize_now = fo_next(curr_fo);
#       endif
        fo_set_next(curr_fo, 0);
        (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
                              curr_fo -> fo_client_data);
        curr_fo -> fo_client_data = 0;
        ++count;
#       ifdef UNDEFINED
            /* This is probably a bad idea.  It throws off accounting if */
            /* nearly all objects are finalizable.  O.w. it shouldn't    */
            /* matter.                                                   */
            GC_free((GC_PTR)curr_fo);
#       endif
    }
    if (count != 0 && mem_freed_before != GC_mem_freed) {
        LOCK();
        GC_finalizer_mem_freed += (GC_mem_freed - mem_freed_before);
        UNLOCK();
    }
    return count;
}

void (* GC_finalizer_notifier)() = (void (*) GC_PROTO((void)))0;

static GC_word last_finalizer_notification = 0;

void GC_notify_or_invoke_finalizers GC_PROTO((void))
{
    /* This is a convenient place to generate backtraces if appropriate, */
    /* since that code is not callable with the allocation lock.         */
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      static word last_back_trace_gc_no = 1;    /* Skip first one. */

      if (GC_gc_no > last_back_trace_gc_no) {
        word i;

#       ifdef KEEP_BACK_PTRS
          LOCK();
          /* Stops when GC_gc_no wraps; that's OK. */
          last_back_trace_gc_no = (word)(-1);   /* disable others. */
          for (i = 0; i < GC_backtraces; ++i) {
              /* FIXME: This tolerates concurrent heap mutation,        */
              /* which may cause occasional mysterious results.         */
              /* We need to release the GC lock, since GC_print_callers */
              /* acquires it.  It probably shouldn't.                   */
              UNLOCK();
              GC_generate_random_backtrace_no_gc();
              LOCK();
          }
          last_back_trace_gc_no = GC_gc_no;
          UNLOCK();
#       endif
#       ifdef MAKE_BACK_GRAPH
          if (GC_print_back_height)
            GC_print_back_graph_stats();
#       endif
      }
#   endif
    if (GC_finalize_now == 0) return;
    if (!GC_finalize_on_demand) {
        (void) GC_invoke_finalizers();
#       ifndef THREADS
          GC_ASSERT(GC_finalize_now == 0);
#       endif   /* Otherwise GC can run concurrently and add more */
        return;
    }
    if (GC_finalizer_notifier != (void (*) GC_PROTO((void)))0
        && last_finalizer_notification != GC_gc_no) {
        last_finalizer_notification = GC_gc_no;
        GC_finalizer_notifier();
    }
}
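
/* Illustrative usage sketch (not part of this file, compiled out):      */
/* pairing the notifier above with on-demand finalization, so that       */
/* finalizers run in a phase chosen by the client rather than inside     */
/* allocation calls.  Assumes the GC_finalizer_notifier and              */
/* GC_finalize_on_demand globals are exported by this version's gc.h;    */
/* the notifier and loop functions are hypothetical, and the notifier    */
/* only schedules work instead of running finalizers itself.             */
#if 0
#include "gc.h"

static volatile int finalizers_pending = 0;

static void note_finalizers_ready(void)
{
    finalizers_pending = 1;     /* picked up later by the main loop */
}

static void setup_notifier(void)
{
    GC_finalize_on_demand = 1;
    GC_finalizer_notifier = note_finalizers_ready;
}

static void main_loop_tick(void)
{
    if (finalizers_pending) {
        finalizers_pending = 0;
        (void) GC_invoke_finalizers();
    }
}
#endif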
# ifdef __STDC__
    GC_PTR GC_call_with_alloc_lock(GC_fn_type fn,
                                   GC_PTR client_data)
# else
    GC_PTR GC_call_with_alloc_lock(fn, client_data)
    GC_fn_type fn;
    GC_PTR client_data;
# endif
{
    GC_PTR result;
    DCL_LOCK_STATE;

#   ifdef THREADS
      DISABLE_SIGNALS();
      LOCK();
      SET_LOCK_HOLDER();
#   endif
    result = (*fn)(client_data);
#   ifdef THREADS
#     ifndef GC_ASSERTIONS
        UNSET_LOCK_HOLDER();
#     endif /* o.w. UNLOCK() does it implicitly */
      UNLOCK();
      ENABLE_SIGNALS();
#   endif
    return(result);
}

#if !defined(NO_DEBUGGING)

void GC_print_finalization_stats()
{
    struct finalizable_object *fo = GC_finalize_now;
    size_t ready = 0;

    GC_printf2("%lu finalization table entries; %lu disappearing links\n",
               GC_fo_entries, GC_dl_entries);
    for (; 0 != fo; fo = fo_next(fo)) ++ready;
    GC_printf1("%lu objects are eligible for immediate finalization\n", ready);
}

#endif /* NO_DEBUGGING */