/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, July 31, 1995 5:02 pm PDT */
#include "private/gc_priv.h"

# ifdef STUBBORN_ALLOC
/* Stubborn object (hard to change, nearly immutable) allocation. */

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */

/* Allocate lb bytes of kind k, passing the result through           */
/* GC_clear_stack (which behaves like the identity on its argument). */
#define GENERAL_MALLOC(lb,k) \
    (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))

/* Data structure representing immutable objects that   */
/* are still being initialized.                         */
/* This is a bit baroque in order to avoid acquiring    */
/* the lock twice for a typical allocation.             */

/* First element of the buffer of objects currently being changed. */
GC_PTR * GC_changing_list_start;
  26. void GC_push_stubborn_structures GC_PROTO((void))
  27. {
  28. GC_push_all((ptr_t)(&GC_changing_list_start),
  29. (ptr_t)(&GC_changing_list_start) + sizeof(GC_PTR *));
  30. }
# ifdef THREADS
  /* VOLATILE both on the pointer and the pointed-to slots: other    */
  /* threads read/write this without holding the allocation lock.    */
  VOLATILE GC_PTR * VOLATILE GC_changing_list_current;
# else
  GC_PTR * GC_changing_list_current;
# endif
        /* Points at last added element.  Also (ab)used for           */
        /* synchronization.  Updates and reads are assumed atomic.    */

GC_PTR * GC_changing_list_limit;
        /* Points at the last word of the buffer, which is always 0   */
        /* All entries in (GC_changing_list_current,                  */
        /*                 GC_changing_list_limit] are 0              */
  42. void GC_stubborn_init()
  43. {
  44. # define INIT_SIZE 10
  45. GC_changing_list_start = (GC_PTR *)
  46. GC_INTERNAL_MALLOC(
  47. (word)(INIT_SIZE * sizeof(GC_PTR)),
  48. PTRFREE);
  49. BZERO(GC_changing_list_start,
  50. INIT_SIZE * sizeof(GC_PTR));
  51. if (GC_changing_list_start == 0) {
  52. GC_err_printf0("Insufficient space to start up\n");
  53. ABORT("GC_stubborn_init: put of space");
  54. }
  55. GC_changing_list_current = GC_changing_list_start;
  56. GC_changing_list_limit = GC_changing_list_start + INIT_SIZE - 1;
  57. * GC_changing_list_limit = 0;
  58. }
/* Compact and possibly grow GC_uninit_list.  The old copy is          */
/* left alone.  Lock must be held.                                     */
/* When called GC_changing_list_current == GC_changing_list_limit      */
/* which is one past the current element.                              */
/* When we finish GC_changing_list_current again points one past last  */
/* element.                                                            */
/* Invariant while this is running: GC_changing_list_current           */
/* points at a word containing 0.                                      */
/* Returns FALSE on failure.                                           */
GC_bool GC_compact_changing_list()
{
    register GC_PTR *p, *q;
    register word count = 0;
    /* Number of slots in the old buffer, including the 0 sentinel. */
    word old_size = (char **)GC_changing_list_limit
                    - (char **)GC_changing_list_start+1;
                    /* The casts are needed as a workaround for an Amiga bug */
    register word new_size = old_size;
    GC_PTR * new_list;

    /* Count live (non-zero) entries; the sentinel at limit is excluded. */
    for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
        if (*p != 0) count++;
    }
    /* Grow only when more than half the old buffer is occupied. */
    if (2 * count > old_size) new_size = 2 * count;
    new_list = (GC_PTR *)
                GC_INTERNAL_MALLOC(
                        new_size * sizeof(GC_PTR), PTRFREE);
                /* PTRFREE is a lie.  But we don't want the collector to */
                /* consider these.  We do want the list itself to be     */
                /* collectable.                                          */
    if (new_list == 0) return(FALSE);
    BZERO(new_list, new_size * sizeof(GC_PTR));
    q = new_list;
    /* Copy the live entries, densely packed, into the new buffer. */
    for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
        if (*p != 0) *q++ = *p;
    }
    /* Publish the new buffer.  NOTE(review): the order of these three  */
    /* stores appears deliberate (current is updated last, and points   */
    /* at a slot that is 0, preserving the stated invariant) — do not   */
    /* reorder.                                                         */
    GC_changing_list_start = new_list;
    GC_changing_list_limit = new_list + new_size - 1;
    GC_changing_list_current = q;
    return(TRUE);
}
  98. /* Add p to changing list. Clear p on failure. */
  99. # define ADD_CHANGING(p) \
  100. { \
  101. register struct hblk * h = HBLKPTR(p); \
  102. register word index = PHT_HASH(h); \
  103. \
  104. set_pht_entry_from_index(GC_changed_pages, index); \
  105. } \
  106. if (*GC_changing_list_current != 0 \
  107. && ++GC_changing_list_current == GC_changing_list_limit) { \
  108. if (!GC_compact_changing_list()) (p) = 0; \
  109. } \
  110. *GC_changing_list_current = p;
/* Note that the stubborn object p is about to be changed:        */
/* record it on the changing list and mark its page as changed.   */
/* Acquires the allocation lock around the update.                */
void GC_change_stubborn(p)
GC_PTR p;
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    ADD_CHANGING(p);
    UNLOCK();
    ENABLE_SIGNALS();
}
/* Note the end of initialization/modification of stubborn object p.  */
/* Fast path: if p is still the most recently added list entry, clear */
/* it without taking the lock.  Otherwise (or if compaction moved the */
/* list under us) search the whole list under the lock.               */
void GC_end_stubborn_change(p)
GC_PTR p;
{
#   ifdef THREADS
      register VOLATILE GC_PTR * my_current = GC_changing_list_current;
#   else
      register GC_PTR * my_current = GC_changing_list_current;
#   endif
    register GC_bool tried_quick;
    DCL_LOCK_STATE;

    if (*my_current == p) {
        /* Hopefully the normal case.                                 */
        /* Compaction could not have been running when we started.    */
        *my_current = 0;
#       ifdef THREADS
          if (my_current == GC_changing_list_current) {
              /* Compaction can't have run in the interim.            */
              /* We got away with the quick and dirty approach.       */
              return;
          }
          /* The list moved under us; our store may have hit a stale  */
          /* buffer.  Retry below under the lock.                     */
          tried_quick = TRUE;
#       else
          return;
#       endif
    } else {
        tried_quick = FALSE;
    }
    DISABLE_SIGNALS();
    LOCK();
    /* Scan backwards from the newest entry looking for p. */
    my_current = GC_changing_list_current;
    for (; my_current >= GC_changing_list_start; my_current--) {
        if (*my_current == p) {
            *my_current = 0;
            UNLOCK();
            ENABLE_SIGNALS();
            return;
        }
    }
    /* p was not found.  That is a caller error unless the quick path */
    /* already cleared the entry before compaction dropped it.        */
    if (!tried_quick) {
        GC_err_printf1("Bad arg to GC_end_stubborn_change: 0x%lx\n",
                       (unsigned long)p);
        ABORT("Bad arg to GC_end_stubborn_change");
    }
    UNLOCK();
    ENABLE_SIGNALS();
}
/* Allocate lb bytes of composite (pointerful) data       */
/* No pointer fields may be changed after a call to       */
/* GC_end_stubborn_change(p) where p is the value         */
/* returned by GC_malloc_stubborn.                        */
# ifdef __STDC__
    GC_PTR GC_malloc_stubborn(size_t lb)
# else
    GC_PTR GC_malloc_stubborn(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
ptr_t result;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
        /* Fast path: grab an object off the size-class free list     */
        /* under FASTLOCK, registering it as changing before release. */
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_sobjfreelist[lw]);
        FASTLOCK();
        if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
            /* Contended lock or empty free list: fall back to the    */
            /* general allocator and register under the full lock.    */
            FASTUNLOCK();
            result = GC_generic_malloc((word)lb, STUBBORN);
            goto record;
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_words_allocd += lw;
        result = (GC_PTR) op;
        ADD_CHANGING(result);
        FASTUNLOCK();
        return((GC_PTR)result);
    } else {
        result = (GC_PTR)
                GC_generic_malloc((word)lb, STUBBORN);
    }
record:
    /* NOTE(review): result may be 0 here if GC_generic_malloc failed; */
    /* ADD_CHANGING is still invoked on it — confirm this is intended. */
    DISABLE_SIGNALS();
    LOCK();
    ADD_CHANGING(result);
    UNLOCK();
    ENABLE_SIGNALS();
    return((GC_PTR)GC_clear_stack(result));
}
  215. /* Functions analogous to GC_read_dirty and GC_page_was_dirty. */
  216. /* Report pages on which stubborn objects were changed. */
  217. void GC_read_changed()
  218. {
  219. register GC_PTR * p = GC_changing_list_start;
  220. register GC_PTR q;
  221. register struct hblk * h;
  222. register word index;
  223. if (p == 0) /* initializing */ return;
  224. BCOPY(GC_changed_pages, GC_prev_changed_pages,
  225. (sizeof GC_changed_pages));
  226. BZERO(GC_changed_pages, (sizeof GC_changed_pages));
  227. for (; p <= GC_changing_list_current; p++) {
  228. if ((q = *p) != 0) {
  229. h = HBLKPTR(q);
  230. index = PHT_HASH(h);
  231. set_pht_entry_from_index(GC_changed_pages, index);
  232. }
  233. }
  234. }
  235. GC_bool GC_page_was_changed(h)
  236. struct hblk * h;
  237. {
  238. register word index = PHT_HASH(h);
  239. return(get_pht_entry_from_index(GC_prev_changed_pages, index));
  240. }
  241. /* Remove unreachable entries from changed list. Should only be */
  242. /* called with mark bits consistent and lock held. */
  243. void GC_clean_changing_list()
  244. {
  245. register GC_PTR * p = GC_changing_list_start;
  246. register GC_PTR q;
  247. register ptr_t r;
  248. register unsigned long count = 0;
  249. register unsigned long dropped_count = 0;
  250. if (p == 0) /* initializing */ return;
  251. for (; p <= GC_changing_list_current; p++) {
  252. if ((q = *p) != 0) {
  253. count++;
  254. r = (ptr_t)GC_base(q);
  255. if (r == 0 || !GC_is_marked(r)) {
  256. *p = 0;
  257. dropped_count++;
  258. }
  259. }
  260. }
  261. # ifdef PRINTSTATS
  262. if (count > 0) {
  263. GC_printf2("%lu entries in changing list: reclaimed %lu\n",
  264. (unsigned long)count, (unsigned long)dropped_count);
  265. }
  266. # endif
  267. }
#else /* !STUBBORN_ALLOC */

/* Stubborn allocation is compiled out: fall back to ordinary  */
/* GC_malloc.  Correct, but provides no change tracking.       */
# ifdef __STDC__
    GC_PTR GC_malloc_stubborn(size_t lb)
# else
    GC_PTR GC_malloc_stubborn(lb)
    size_t lb;
# endif
{
    return(GC_malloc(lb));
}
/*ARGSUSED*/
/* No-op: nothing to record when stubborn allocation is disabled. */
void GC_end_stubborn_change(p)
GC_PTR p;
{
}
/*ARGSUSED*/
/* No-op: nothing to record when stubborn allocation is disabled. */
void GC_change_stubborn(p)
GC_PTR p;
{
}
/* No-op: there are no stubborn-allocation roots to push in this */
/* configuration.                                                */
void GC_push_stubborn_structures GC_PROTO((void))
{
}
#endif